kernel_vmem/page_table/
pt.rs

//! # x86-64 Page Table (PT / L1)
//!
//! This module models the lowest paging level (L1, Page Table).
//!
//! - [`L1Index`]: index type for VA bits `[20:12]`.
//! - [`PtEntry4k`]: a PT entry (PTE). At this level there is no `PS` bit (bit 7
//!   is **PAT**); every entry represents a 4 KiB leaf mapping.
//! - [`PageTable`]: a 4 KiB-aligned array of 512 PTEs.
//!
//! ## Semantics
//!
//! - L1 does **not** point to another table. Every present entry maps a 4 KiB page.
//! - The base address stored in a PTE must be 4 KiB-aligned (hardware requirement).
//!
//! ## Invariants & Notes
//!
//! - [`PageTable`] is 4 KiB-aligned and contains exactly 512 entries.
//! - [`PtEntry4k::present_with`] always yields a present (`present=1`) 4 KiB leaf.
//! - Raw constructors do not validate consistency; prefer typed helpers.
//! - After modifying active mappings, the caller must perform any required TLB maintenance.
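//!
//! ## Example
//!
//! A minimal usage sketch (marked `ignore`: item paths and the exact
//! `addresses` re-exports depend on the surrounding crate, and no TLB
//! maintenance is shown):
//!
//! ```ignore
//! // Build an empty table, then map index 5 to a 4 KiB user read-only page.
//! let mut pt = PageTable::zeroed();
//! let idx = L1Index::new(5);
//! let page = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x5555_0000));
//! pt.set(idx, PtEntry4k::new_user_ro_nx().with_physical_page(page));
//!
//! // Read it back: present, mapping the expected frame.
//! let (mapped, flags) = pt.get(idx).page_4k().unwrap();
//! assert_eq!(mapped.base().as_u64(), 0x5555_0000);
//! assert!(!flags.writable());
//! ```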

use crate::VirtualMemoryPageBits;
use crate::addresses::{PhysicalAddress, PhysicalPage, Size4K, VirtualAddress};
use bitfield_struct::bitfield;

/// L1 **PTE (4 KiB leaf)** — maps a single 4 KiB page (bit 7 is **PAT**).
///
/// - Physical address uses bits **51:12** and must be **4 KiB aligned**.
/// - The three PAT selector bits are **PWT (bit 3)**, **PCD (bit 4)**,
///   and **PAT (bit 7)**.
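///
/// The selected PAT index is `PAT << 2 | PCD << 1 | PWT`. A sketch (marked
/// `ignore`; the resulting memory type depends on how the IA32_PAT MSR is
/// programmed, which is outside this module):
///
/// ```ignore
/// let e = PtEntry4k::new()
///     .with_write_through(true)  // selector bit 0
///     .with_cache_disable(false) // selector bit 1
///     .with_pat_small(true);     // selector bit 2
/// // Selects PAT entry 0b101 = 5.
/// ```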
#[bitfield(u64)]
pub struct PtEntry4k {
    /// Present (bit 0).
    pub present: bool,
    /// Writable (bit 1).
    pub writable: bool,
    /// User (bit 2).
    pub user: bool,
    /// Write-Through (bit 3) — **PAT selector bit 0**.
    pub write_through: bool,
    /// Cache Disable (bit 4) — **PAT selector bit 1**.
    pub cache_disable: bool,
    /// Accessed (bit 5).
    pub accessed: bool,
    /// Dirty (bit 6): set by CPU on first write.
    pub dirty: bool,
    /// **PAT** (bit 7) — **PAT selector bit 2** for 4 KiB mappings.
    pub pat_small: bool,
    /// Global (bit 8): TLB entry not flushed on CR3 reload.
    pub global: bool,
    /// OS-available low (bits 9..11).
    #[bits(3)]
    pub os_available_low: u8,
    /// Physical address bits **51:12** (4 KiB-aligned base).
    #[bits(40)]
    phys_addr_51_12: u64,
    /// OS-available high (bits 52..58).
    #[bits(7)]
    pub os_available_high: u8,
    /// Protection Key / OS use (bits 59..62).
    #[bits(4)]
    pub protection_key: u8,
    /// No-Execute (bit 63).
    pub no_execute: bool,
}

/// Index into the Page Table (derived from VA bits `[20:12]`).
///
/// Strongly typed to avoid mixing with other levels. Range is `0..512`
/// (checked in debug builds).
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct L1Index(u16);

/// The Page Table (L1): 512 entries, 4 KiB-aligned.
#[doc(alias = "PT")]
#[repr(C, align(4096))]
pub struct PageTable {
    entries: [PtEntry4k; 512],
}

impl L1Index {
    /// Build an index from a canonical virtual address (extracts bits `[20:12]`).
    ///
    /// Returns a value in `0..512`.
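    ///
    /// A worked sketch (marked `ignore`; `VirtualAddress::new` is assumed here
    /// and may not match the real constructor in `addresses`):
    ///
    /// ```ignore
    /// // For VA 0x1234_5678, bits [20:12] are 0x145 (decimal 325).
    /// let idx = L1Index::from(VirtualAddress::new(0x1234_5678));
    /// assert_eq!(idx.as_usize(), 0x145);
    /// ```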
    #[inline]
    #[must_use]
    pub const fn from(va: VirtualAddress) -> Self {
        Self::new(((va.as_u64() >> 12) & 0x1FF) as u16)
    }

    /// Construct from a raw `u16`.
    ///
    /// ### Debug assertions
    /// - Asserts `v < 512` in debug builds.
    #[inline]
    #[must_use]
    pub const fn new(v: u16) -> Self {
        debug_assert!(v < 512);
        Self(v)
    }

    /// Return the index as `usize` for table access.
    #[inline]
    #[must_use]
    pub const fn as_usize(self) -> usize {
        self.0 as usize
    }
}

impl PtEntry4k {
    /// Set the 4 KiB page base (4 KiB-aligned).
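    ///
    /// A round-trip sketch (marked `ignore`; assumes a 4 KiB-aligned base):
    ///
    /// ```ignore
    /// let page = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0xDEAD_B000));
    /// let e = PtEntry4k::new().with_physical_page(page);
    /// assert_eq!(e.physical_page().base().as_u64(), 0xDEAD_B000);
    /// ```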
    #[inline]
    #[must_use]
    pub const fn with_physical_page(mut self, phys: PhysicalPage<Size4K>) -> Self {
        self.set_physical_page(phys);
        self
    }

    /// Set the 4 KiB page base (4 KiB-aligned).
    #[inline]
    pub const fn set_physical_page(&mut self, phys: PhysicalPage<Size4K>) {
        self.set_phys_addr_51_12(phys.base().as_u64() >> 12);
    }

    /// Get the 4 KiB page base.
    #[inline]
    #[must_use]
    pub const fn physical_page(self) -> PhysicalPage<Size4K> {
        PhysicalPage::from_addr(PhysicalAddress::new(self.phys_addr_51_12() << 12))
    }

    /// Create a new, present [`PtEntry4k`] with the specified flags, mapping the specified page.
    #[must_use]
    pub const fn present_with(
        leaf_flags: VirtualMemoryPageBits,
        page: PhysicalPage<Size4K>,
    ) -> Self {
        leaf_flags.to_pte_4k(page).with_present(true)
    }

    /// 4 KiB **user RO+NX** mapping (read-only, no execute).
    #[inline]
    #[must_use]
    pub const fn new_user_ro_nx() -> Self {
        Self::new()
            .with_present(true)
            .with_writable(false)
            .with_user(true)
            .with_no_execute(true)
    }

    /// Create a zero (non-present) entry.
    #[inline]
    #[must_use]
    pub const fn zero() -> Self {
        Self::new()
    }

    /// If present, return the mapped 4 KiB physical page together with the entry's flags.
    ///
    /// Every present L1 entry is a 4 KiB leaf (bit 7 is **PAT** at this level, not `PS`).
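    ///
    /// A sketch (marked `ignore`; item paths depend on the surrounding crate):
    ///
    /// ```ignore
    /// assert!(PtEntry4k::zero().page_4k().is_none());
    /// let page = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x1000));
    /// let e = PtEntry4k::new().with_present(true).with_physical_page(page);
    /// assert!(e.page_4k().is_some());
    /// ```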
    #[inline]
    #[must_use]
    pub const fn page_4k(self) -> Option<(PhysicalPage<Size4K>, Self)> {
        if !self.present() {
            return None;
        }
        Some((self.physical_page(), self))
    }
}

impl PageTable {
    /// Create a fully zeroed Page Table (all entries non-present).
    #[inline]
    #[must_use]
    pub const fn zeroed() -> Self {
        Self {
            entries: [PtEntry4k::zero(); 512],
        }
    }

    /// Read the entry at `i`.
    ///
    /// Plain load; does not imply any TLB synchronization.
    #[inline]
    #[must_use]
    pub const fn get(&self, i: L1Index) -> PtEntry4k {
        self.entries[i.as_usize()]
    }

    /// Write the entry at `i`.
    ///
    /// Caller must handle any required TLB invalidation when changing active mappings.
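    ///
    /// A sketch of the call sequence (marked `ignore`; the invalidation step is
    /// only needed when the table backs a live address space and is not part of
    /// this module):
    ///
    /// ```ignore
    /// let mut pt = PageTable::zeroed();
    /// let page = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x2000));
    /// pt.set(L1Index::new(7), PtEntry4k::new_user_ro_nx().with_physical_page(page));
    /// // If `pt` is currently reachable from CR3, invalidate the stale TLB
    /// // entry for the affected virtual address here (e.g. `invlpg`).
    /// ```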
    #[inline]
    pub const fn set(&mut self, i: L1Index, e: PtEntry4k) {
        self.entries[i.as_usize()] = e;
    }

    /// Set the entry at `i` to [`PtEntry4k::zero`].
    ///
    /// Caller is responsible for necessary TLB invalidations if this affects an
    /// active address space.
    #[inline]
    pub const fn set_zero(&mut self, i: L1Index) {
        self.set(i, PtEntry4k::zero());
    }

    /// Derive the PT index from a virtual address.
    #[inline]
    #[must_use]
    pub const fn index_of(va: VirtualAddress) -> L1Index {
        L1Index::from(va)
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::addresses::PhysicalAddress;

    #[test]
    fn pte_4k_leaf() {
        let k4 = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x5555_0000));
        let e = PtEntry4k::new_user_ro_nx()
            .with_present(true)
            .with_physical_page(k4);

        let (p, fl) = e.page_4k().unwrap();
        assert_eq!(p.base().as_u64(), 0x5555_0000);
        assert!(fl.no_execute());
        assert!(fl.user());
        assert!(!fl.writable());
    }
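
    // A set/get/set_zero round-trip sketch using only helpers defined in this
    // file (plus the `addresses` constructors already used above); 0x1000 is
    // an arbitrary 4 KiB-aligned frame chosen for the test.
    #[test]
    fn table_set_get_round_trip() {
        let mut pt = PageTable::zeroed();
        let i = L1Index::new(0);
        assert!(!pt.get(i).present());

        let page = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x1000));
        pt.set(i, PtEntry4k::new_user_ro_nx().with_physical_page(page));
        assert!(pt.get(i).present());
        assert_eq!(pt.get(i).physical_page().base().as_u64(), 0x1000);

        pt.set_zero(i);
        assert!(!pt.get(i).present());
    }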
}