kernel_vmem/page_table/pdpt.rs
//! # x86-64 Page Directory Pointer Table (PDPT / L3)
//!
//! This module wraps the third paging level (a.k.a. L3, PDPT):
//!
//! - [`L3Index`]: index type for VA bits `[38:30]`.
//! - [`PdptEntry`]: a single PDPT entry; may be a next-level pointer or a 1 GiB leaf.
//! - [`PdptEntryKind`]: decoded view of an entry (next PD or 1 GiB leaf).
//! - [`PageDirectoryPointerTable`]: a 4 KiB-aligned array of 512 entries.
//!
//! ## Semantics
//!
//! At the PDPT level, the `PS` bit controls whether the entry is a 1 GiB leaf
//! (`PS=1`) or points to a Page Directory (`PS=0`). Leaf entries map 1 GiB and
//! require 1 GiB physical alignment of the base address. Non-leaf entries hold
//! the physical base of the next-level Page Directory (4 KiB aligned).
//!
//! ## Invariants & Safety Notes
//!
//! - [`PageDirectoryPointerTable`] is 4 KiB-aligned and contains exactly 512 entries.
//! - [`PdptEntry::present_next_with`] enforces `PS=0`; [`PdptEntry::present_leaf_with`] enforces `PS=1`.
//! - Callers must handle TLB maintenance after changing active mappings.
//! - Raw constructors perform no validation; use with care.
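//!
//! ## Example
//!
//! A minimal sketch of installing both entry kinds (the `.into()` flag
//! conversions are the ones exercised by this module's tests):
//!
//! ```ignore
//! let mut pdpt = PageDirectoryPointerTable::zeroed();
//!
//! // Slot 0: point at a next-level Page Directory (PS=0).
//! let pd = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x2000_0000));
//! pdpt.set(
//!     L3Index::new(0),
//!     PdptEntry::present_next_with(Pdpte::new_common_rw().into(), pd),
//! );
//!
//! // Slot 2: map a 1 GiB leaf (PS=1); the base must be 1 GiB-aligned.
//! let big = PhysicalPage::<Size1G>::from_addr(PhysicalAddress::new(0x8000_0000));
//! pdpt.set(
//!     L3Index::new(2),
//!     PdptEntry::present_leaf_with(Pdpte1G::new_common_rw().into(), big),
//! );
//! ```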

use crate::VirtualMemoryPageBits;
use crate::addresses::{PhysicalAddress, PhysicalPage, Size1G, Size4K, VirtualAddress};
use crate::page_table::{PRESENT_BIT, PS_BIT};
use bitfield_struct::bitfield;

/// **Decoded view** of an L3 PDPTE, chosen by the `PS` bit.
///
/// Returned by [`PdptEntry::view`].
pub enum L3View {
    /// Non-leaf PDPTE view (PS=0).
    Entry(Pdpte),
    /// 1 GiB leaf PDPTE view (PS=1).
    Leaf1G(Pdpte1G),
}

/// **L3 PDPTE union** — overlays non-leaf [`Pdpte`] and leaf [`Pdpte1G`]
/// on the same 64-bit storage.
///
/// Use [`PdptEntry::view`] to obtain a **typed** value: it inspects the
/// **PS** bit to decide which variant is active and returns a safe decoded
/// view.
///
/// Storing/retrieving raw bits is possible via `from_bits`/`into_bits`.
#[derive(Copy, Clone)]
#[repr(C)]
pub union PdptEntry {
    /// Raw 64-bit storage of the entry.
    bits: u64,
    /// Non-leaf form: next-level Page Directory (PS=0).
    entry: Pdpte,
    /// Leaf form: 1 GiB mapping (PS=1).
    leaf_1g: Pdpte1G,
}

/// L3 **PDPTE** — pointer to a **Page Directory** (non-leaf; PS **= 0**).
///
/// - Physical address (bits **51:12**) is a 4 KiB-aligned PD.
/// - Leaf-only fields (Dirty/Global) are ignored.
/// - Setting PS here would mean a 1 GiB leaf; use [`Pdpte1G`] for that.
#[bitfield(u64)]
pub struct Pdpte {
    /// Present (bit 0): valid entry if set.
    pub present: bool,
    /// Writable (bit 1): write permission.
    pub writable: bool,
    /// User (bit 2): user-mode access if set.
    pub user: bool,
    /// Write-Through (bit 3).
    pub write_through: bool,
    /// Cache Disable (bit 4).
    pub cache_disable: bool,
    /// Accessed (bit 5).
    pub accessed: bool,
    /// Dirty (bit 6): **ignored** in non-leaf form.
    #[bits(1)]
    __d_ignored: u8,
    /// PS (bit 7): **must be 0** in non-leaf.
    #[bits(1)]
    __ps_must_be_0: u8,
    /// Global (bit 8): **ignored** in non-leaf.
    #[bits(1)]
    __g_ignored: u8,
    /// OS-available low (bits 9..=11).
    #[bits(3)]
    pub os_available_low: u8,
    /// Next-level table physical address (bits 12..=51, 4 KiB-aligned).
    #[bits(40)]
    phys_addr_51_12: u64,
    /// OS-available high (bits 52..=58).
    #[bits(7)]
    pub os_available_high: u8,
    /// Protection Key / OS use (bits 59..=62).
    #[bits(4)]
    pub protection_key: u8,
    /// No-Execute (bit 63).
    pub no_execute: bool,
}

/// L3 **PDPTE (1 GiB leaf)** — maps a single 1 GiB page (`PS = 1`).
///
/// - **PAT** (Page Attribute Table) selector lives at bit **12** in this form.
/// - Physical address uses bits **51:30** and must be **1 GiB aligned**.
/// - `Dirty` is set by the CPU on first write; `Global` keeps TLB entries across
///   CR3 reloads unless explicitly invalidated.
///
/// This is a terminal mapping (leaf).
#[bitfield(u64)]
pub struct Pdpte1G {
    /// Present (bit 0).
    pub present: bool,
    /// Writable (bit 1).
    pub writable: bool,
    /// User (bit 2).
    pub user: bool,
    /// Write-Through (bit 3).
    pub write_through: bool,
    /// Cache Disable (bit 4).
    pub cache_disable: bool,
    /// Accessed (bit 5).
    pub accessed: bool,
    /// **Dirty** (bit 6): set by the CPU on first write to this 1 GiB page.
    pub dirty: bool,
    /// **Page Size** (bit 7): **must be 1** for a 1 GiB leaf.
    #[bits(default = true)]
    page_size: bool,
    /// **Global** (bit 8): TLB entry not flushed on CR3 reload.
    pub global: bool,
    /// OS-available low (bits 9..=11).
    #[bits(3)]
    pub os_available_low: u8,
    /// **PAT** (Page Attribute Table) selector for 1 GiB mappings (bit 12).
    pub pat_large: bool,
    /// Reserved (bits 13..=29): must be 0.
    #[bits(17)]
    __res_13_29: u32,
    /// Physical address bits **51:30** (1 GiB-aligned base).
    #[bits(22)]
    phys_addr_51_30: u32,
    /// OS-available high (bits 52..=58).
    #[bits(7)]
    pub os_available_high: u8,
    /// Protection Key / OS use (bits 59..=62).
    #[bits(4)]
    pub protection_key: u8,
    /// No-Execute (bit 63).
    pub no_execute: bool,
}

impl Pdpte {
    /// Set the Page Directory base (4 KiB-aligned) and return the updated entry.
    #[inline]
    #[must_use]
    pub const fn with_physical_page(mut self, phys: PhysicalPage<Size4K>) -> Self {
        self.set_physical_page(phys);
        self
    }

    /// Set the Page Directory base (4 KiB-aligned).
    #[inline]
    pub const fn set_physical_page(&mut self, phys: PhysicalPage<Size4K>) {
        self.set_phys_addr_51_12(phys.base().as_u64() >> 12);
    }

    /// Get the Page Directory base (4 KiB-aligned).
    #[inline]
    #[must_use]
    pub const fn physical_page(self) -> PhysicalPage<Size4K> {
        PhysicalPage::from_addr(PhysicalAddress::new(self.phys_addr_51_12() << 12))
    }

    /// Non-leaf PDPTE with common kernel RW flags.
    #[inline]
    #[must_use]
    pub const fn new_common_rw() -> Self {
        Self::new()
            .with_present(true)
            .with_writable(true)
            .with_user(false)
            .with_write_through(false)
            .with_cache_disable(false)
            .with_no_execute(false)
    }
}

impl Pdpte1G {
    /// Set the 1 GiB page base (must be 1 GiB-aligned) and return the updated entry.
    #[inline]
    #[must_use]
    pub const fn with_physical_page(mut self, phys: PhysicalPage<Size1G>) -> Self {
        self.set_physical_page(phys);
        self
    }

    /// Set the 1 GiB page base (must be 1 GiB-aligned); also forces `PS=1`.
    #[inline]
    #[allow(clippy::cast_possible_truncation)]
    pub const fn set_physical_page(&mut self, phys: PhysicalPage<Size1G>) {
        self.set_phys_addr_51_30((phys.base().as_u64() >> 30) as u32);
        self.set_page_size(true);
    }

    /// Get the 1 GiB page base.
    #[inline]
    #[must_use]
    pub const fn physical_page(self) -> PhysicalPage<Size1G> {
        PhysicalPage::from_addr(PhysicalAddress::new((self.phys_addr_51_30() as u64) << 30))
    }

    /// Leaf PDPTE with common kernel RW flags.
    #[inline]
    #[must_use]
    pub const fn new_common_rw() -> Self {
        Self::new()
            .with_present(true)
            .with_writable(true)
            .with_user(false)
            .with_write_through(false)
            .with_cache_disable(false)
            .with_no_execute(false)
            .with_page_size(true)
    }
}

/// Index into the PDPT (derived from virtual-address bits `[38:30]`).
///
/// This strongly-typed index avoids mixing levels and constrains the range
/// to `0..512` (checked in debug builds).
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct L3Index(u16);

/// Decoded PDPT entry kind.
///
/// - [`NextPageDirectory`](PdptEntryKind::NextPageDirectory): non-leaf; `PS=0`; holds the 4 KiB-aligned PD base.
/// - [`Leaf1GiB`](PdptEntryKind::Leaf1GiB): leaf; `PS=1`; holds the 1 GiB-aligned large-page base.
pub enum PdptEntryKind {
    /// Non-leaf: 4 KiB-aligned base of the next Page Directory, plus raw flags.
    NextPageDirectory(PhysicalPage<Size4K>, Pdpte),
    /// Leaf: 1 GiB-aligned large-page base, plus raw flags.
    Leaf1GiB(PhysicalPage<Size1G>, Pdpte1G),
}

/// The PDPT (L3) table: 512 entries, 4 KiB aligned.
#[doc(alias = "PDPT")]
#[repr(C, align(4096))]
pub struct PageDirectoryPointerTable {
    entries: [PdptEntry; 512],
}

impl L3Index {
    /// Build an index from a canonical virtual address (extracts bits `[38:30]`).
    ///
    /// Returns a value in `0..512`.
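    ///
    /// Sketch (assuming a `VirtualAddress::new` constructor analogous to the
    /// `PhysicalAddress::new` used in this module's tests):
    ///
    /// ```ignore
    /// // 0x4000_0000 has only bit 30 set, so it selects PDPT slot 1.
    /// assert_eq!(L3Index::from(VirtualAddress::new(0x4000_0000)).as_usize(), 1);
    /// ```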
    #[inline]
    #[must_use]
    pub const fn from(va: VirtualAddress) -> Self {
        Self::new(((va.as_u64() >> 30) & 0x1FF) as u16)
    }

    /// Construct from a raw `u16`.
    ///
    /// ### Debug assertions
    /// - Asserts `v < 512` in debug builds.
    #[inline]
    #[must_use]
    pub const fn new(v: u16) -> Self {
        debug_assert!(v < 512);
        Self(v)
    }

    /// Return the index as `usize` for table access.
    #[inline]
    #[must_use]
    pub const fn as_usize(self) -> usize {
        self.0 as usize
    }
}

impl Default for PdptEntry {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl PdptEntry {
    /// Create a zeroed (non-present) entry.
    #[inline]
    #[must_use]
    pub const fn new() -> Self {
        Self { bits: 0 }
    }

    /// Wrap a non-leaf [`Pdpte`] (PS=0) in the union.
    #[inline]
    #[must_use]
    pub const fn new_entry(entry: Pdpte) -> Self {
        Self { entry }
    }

    /// Wrap a 1 GiB leaf [`Pdpte1G`] (PS=1) in the union.
    #[inline]
    #[must_use]
    pub const fn new_leaf(leaf: Pdpte1G) -> Self {
        Self { leaf_1g: leaf }
    }

    /// Whether the Present bit (bit 0) is set.
    #[inline]
    #[must_use]
    pub const fn present(self) -> bool {
        unsafe { self.bits & PRESENT_BIT != 0 }
    }

    /// Construct union from raw `bits` (no validation).
    #[inline]
    #[must_use]
    pub const fn from_bits(bits: u64) -> Self {
        Self { bits }
    }

    /// Extract raw `bits` back from the union.
    #[inline]
    #[must_use]
    pub const fn into_bits(self) -> u64 {
        unsafe { self.bits }
    }

    /// **Typed read-only view** chosen by the **PS** bit.
    ///
    /// - If PS=1 → [`L3View::Leaf1G`]
    /// - If PS=0 → [`L3View::Entry`]
    ///
    /// This function is safe: it returns a view consistent with the PS bit.
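    ///
    /// Sketch (constructors as used in this module's tests):
    ///
    /// ```ignore
    /// let page = PhysicalPage::<Size1G>::from_addr(PhysicalAddress::new(0x8000_0000));
    /// let e = PdptEntry::present_leaf_with(Pdpte1G::new_common_rw().into(), page);
    /// match e.view() {
    ///     L3View::Leaf1G(leaf) => assert!(leaf.present()),
    ///     L3View::Entry(_) => unreachable!("PS=1 was forced by present_leaf_with"),
    /// }
    /// ```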
    #[inline]
    #[must_use]
    pub const fn view(self) -> L3View {
        unsafe {
            if (self.bits & PS_BIT) != 0 {
                L3View::Leaf1G(self.leaf_1g)
            } else {
                L3View::Entry(self.entry)
            }
        }
    }

    /// Create a zero (non-present) entry.
    #[inline]
    #[must_use]
    pub const fn zero() -> Self {
        Self::new()
    }

    /// Decode the entry into its semantic kind, or `None` if not present.
    ///
    /// - When `PS=1`, returns [`PdptEntryKind::Leaf1GiB`] with a 1 GiB page base.
    /// - When `PS=0`, returns [`PdptEntryKind::NextPageDirectory`] with a PD base.
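    ///
    /// Sketch of dispatching on the decoded kind (`walk_pd` and `map_found`
    /// are hypothetical caller-side handlers):
    ///
    /// ```ignore
    /// match entry.kind() {
    ///     Some(PdptEntryKind::NextPageDirectory(pd_base, _flags)) => walk_pd(pd_base),
    ///     Some(PdptEntryKind::Leaf1GiB(page, _flags)) => map_found(page),
    ///     None => { /* entry not present */ }
    /// }
    /// ```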
    #[inline]
    #[must_use]
    pub const fn kind(self) -> Option<PdptEntryKind> {
        if !self.present() {
            return None;
        }

        Some(match self.view() {
            L3View::Entry(entry) => {
                let base = entry.physical_page();
                PdptEntryKind::NextPageDirectory(base, entry)
            }
            L3View::Leaf1G(entry) => {
                let page = entry.physical_page();
                PdptEntryKind::Leaf1GiB(page, entry)
            }
        })
    }

    /// Create a non-leaf PDPTE that points to a Page Directory (`PS=0`).
    ///
    /// Sets `present=1`, forces `PS=0`, and writes the PD base address.
    /// The PD base must be 4 KiB-aligned.
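    ///
    /// Sketch (flag conversion via `.into()` as in this module's tests):
    ///
    /// ```ignore
    /// let pd = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x2000_0000));
    /// let e = PdptEntry::present_next_with(Pdpte::new_common_rw().into(), pd);
    /// assert!(e.present());
    /// ```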
    #[inline]
    #[must_use]
    pub const fn present_next_with(
        flags: VirtualMemoryPageBits,
        page: PhysicalPage<Size4K>,
    ) -> Self {
        let flags = flags.to_pdpte(page).with_present(true);
        Self::new_entry(flags)
    }

    /// Create a 1 GiB leaf PDPTE (`PS=1`).
    ///
    /// Sets `present=1`, forces `PS=1`, and writes the large-page base address.
    /// The page base must be 1 GiB-aligned.
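    ///
    /// Sketch (flag conversion via `.into()` as in this module's tests):
    ///
    /// ```ignore
    /// let page = PhysicalPage::<Size1G>::from_addr(PhysicalAddress::new(0x4000_0000));
    /// let e = PdptEntry::present_leaf_with(Pdpte1G::new_common_rw().into(), page);
    /// assert!(matches!(e.kind(), Some(PdptEntryKind::Leaf1GiB(..))));
    /// ```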
    #[inline]
    #[must_use]
    pub const fn present_leaf_with(
        flags: VirtualMemoryPageBits,
        page: PhysicalPage<Size1G>,
    ) -> Self {
        let flags = flags
            .to_pdpte_1g(page)
            .with_present(true)
            .with_page_size(true);
        Self::new_leaf(flags)
    }
}

impl From<Pdpte> for PdptEntry {
    #[inline]
    fn from(e: Pdpte) -> Self {
        Self::new_entry(e)
    }
}

impl From<Pdpte1G> for PdptEntry {
    #[inline]
    fn from(e: Pdpte1G) -> Self {
        Self::new_leaf(e)
    }
}

impl PageDirectoryPointerTable {
    /// Create a fully zeroed PDPT (all entries non-present).
    #[inline]
    #[must_use]
    pub const fn zeroed() -> Self {
        Self {
            entries: [PdptEntry::zero(); 512],
        }
    }

    /// Read an entry at `i`.
    ///
    /// Plain load; does not imply any TLB maintenance.
    #[inline]
    #[must_use]
    pub const fn get(&self, i: L3Index) -> PdptEntry {
        self.entries[i.as_usize()]
    }

    /// Write an entry at `i`.
    ///
    /// Caller is responsible for necessary TLB invalidations if this affects an
    /// active address space.
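    ///
    /// Sketch of replacing a live mapping (`pdpt`, `va`, and `new_entry` stand
    /// in for caller state; TLB maintenance is elided):
    ///
    /// ```ignore
    /// let i = PageDirectoryPointerTable::index_of(va);
    /// pdpt.set(i, new_entry);
    /// // If this table is reachable from the active CR3, invalidate the
    /// // affected translations here (e.g. `invlpg` or a CR3 reload).
    /// ```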
    #[inline]
    pub const fn set(&mut self, i: L3Index, e: PdptEntry) {
        self.entries[i.as_usize()] = e;
    }

    /// Set the entry at `i` to [`PdptEntry::zero`].
    ///
    /// Caller is responsible for necessary TLB invalidations if this affects an
    /// active address space.
    #[inline]
    pub const fn set_zero(&mut self, i: L3Index) {
        self.set(i, PdptEntry::zero());
    }

    /// Derive the PDPT index from a virtual address.
    #[inline]
    #[must_use]
    pub const fn index_of(va: VirtualAddress) -> L3Index {
        L3Index::from(va)
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::addresses::PhysicalAddress;

    #[test]
    fn pdpt_table_vs_1g() {
        // next-level PD
        let pd = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x2000_0000));
        let e_tbl = PdptEntry::present_next_with(Pdpte::new_common_rw().into(), pd);
        match e_tbl.kind().unwrap() {
            PdptEntryKind::NextPageDirectory(p, f) => {
                assert_eq!(p.base().as_u64(), 0x2000_0000);
                assert_eq!(f.into_bits() & (1 << 7), 0, "must be PS=0");
            }
            _ => panic!("expected next PD"),
        }

        // 1 GiB leaf
        let g1 = PhysicalPage::<Size1G>::from_addr(PhysicalAddress::new(0x8000_0000));
        let e_1g = PdptEntry::present_leaf_with(Pdpte1G::new_common_rw().into(), g1);
        match e_1g.kind().unwrap() {
            PdptEntryKind::Leaf1GiB(p, f) => {
                assert_eq!(p.base().as_u64(), 0x8000_0000);
                assert_ne!(f.into_bits() & (1 << 7), 0, "must be PS=1");
            }
            _ => panic!("expected 1GiB leaf"),
        }
    }
}