kernel_vmem/page_table/pml4.rs
//! # x86-64 Page Map Level 4 (PML4)
//!
//! This module defines strongly-typed wrappers around the top-level x86-64
//! page-table layer (PML4):
//!
//! - [`L4Index`]: index type for bits `[47:39]` of a canonical virtual address.
//! - [`Pml4Entry`]: a single PML4 entry (must not be a large page).
//! - [`PageMapLevel4`]: a 4 KiB-aligned array of 512 PML4 entries.
//!
//! ## Background
//!
//! In 4-level paging, the PML4 selects a Page-Directory-Pointer Table
//! ([PDPT](super::pdpt::PageDirectoryPointerTable)). A PML4E must have `PS=0`
//! (there are no large pages at this level). Each entry holds flags and the
//! physical base address of the next-level table. The index is derived from
//! VA bits `[47:39]`.
//!
//! ## Guarantees & Invariants
//!
//! - [`PageMapLevel4`] is 4 KiB-aligned and has exactly 512 entries.
//! - [`Pml4Entry::present_with`] enforces `PS=0` for PML4Es.
//! - Accessors avoid unsafe operations and prefer explicit types such as
//!   [`PhysicalPage<Size4K>`] for next-level tables.
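//!
//! ## Example
//!
//! A minimal usage sketch (marked `ignore`, so it is not compiled as a
//! doctest; it assumes a `VirtualAddress::new(u64)` constructor, which this
//! module does not define):
//!
//! ```rust,ignore
//! // Point PML4 slot 256 (the canonical higher half) at a PDPT located at 0x1000.
//! let mut pml4 = PageMapLevel4::zeroed();
//! let pdpt = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x1000));
//! let flags = Pml4Entry::new().with_writable(true).into(); // flags, built as in the unit test below
//! let entry = Pml4Entry::present_with(flags, pdpt);
//!
//! let va = VirtualAddress::new(0xFFFF_8000_0000_0000); // assumed constructor
//! pml4.set(L4Index::from(va), entry);
//! assert!(pml4.get(L4Index::from(va)).present());
//! ```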

use crate::VirtualMemoryPageBits;
use crate::addresses::{PhysicalAddress, PhysicalPage, Size4K, VirtualAddress};
use bitfield_struct::bitfield;

/// L4 **PML4E** — pointer to a **PDPT** (non-leaf; PS **must be 0**).
///
/// This entry never maps memory directly. Bits that are meaningful only on
/// leaf entries (e.g., `dirty`, `global`) are ignored here.
///
/// - The physical address (bits **51:12**) holds the 4 KiB-aligned base of the PDPT.
/// - `NX` participates in the permission intersection across the walk.
/// - The protection-key bits (**62:59**) are not interpreted at this level and
///   are available to the OS.
///
/// Reference: AMD APM / Intel SDM paging structures (x86-64).
#[doc(alias = "PML4E")]
#[bitfield(u64)]
pub struct Pml4Entry {
    /// **Present** (bit 0): valid entry if set.
    ///
    /// When clear, the entry is not present and most other fields are ignored.
    pub present: bool,

    /// **Writable** (bit 1): write permission.
    ///
    /// Intersects with lower-level permissions; CR0.WP, SMAP, and U/S checks
    /// further restrict writes.
    pub writable: bool,

    /// **User/Supervisor** (bit 2): allow user-mode access if set.
    ///
    /// If clear, access is restricted to supervisor (ring 0).
    pub user: bool,

    /// **Page Write-Through** (PWT, bit 3): write-through caching policy.
    ///
    /// On a non-leaf entry, PWT affects the memory type used to access the
    /// next-level table (the PDPT), not the final page.
    pub write_through: bool,

    /// **Page Cache Disable** (PCD, bit 4): disable caching if set.
    ///
    /// On a non-leaf entry, PCD affects the memory type used to access the
    /// next-level table. Disabling caching strongly impacts performance; it is
    /// mainly useful for MMIO or to satisfy device requirements.
    pub cache_disable: bool,

    /// **Accessed** (A, bit 5): set by the CPU on first access via this entry.
    ///
    /// Software may clear it to track usage; not a permission bit.
    pub accessed: bool,

    /// (bit 6): **ignored** for non-leaf entries at L4.
    #[bits(1)]
    __d_ignored: u8,

    /// **Page Size** (bit 7): **must be 0** for a PML4E (non-leaf).
    #[bits(1)]
    __ps_must_be_0: u8,

    /// **Global** (bit 8): **ignored** for non-leaf entries.
    #[bits(1)]
    __g_ignored: u8,

    /// **OS-available low** (bits 11:9): not interpreted by hardware.
    #[bits(3)]
    pub os_available_low: u8,

    /// **Next-level table physical address** (bits 51:12).
    ///
    /// Stores the PDPT base (4 KiB-aligned). The low 12 bits are omitted.
    #[bits(40)]
    phys_addr_51_12: u64,

    /// **OS-available high** (bits 58:52): not interpreted by hardware.
    #[bits(7)]
    pub os_available_high: u8,

    /// **Protection Key / OS use** (bits 62:59).
    ///
    /// Protection keys are taken from the leaf entry that maps a page; at the
    /// PML4 level these bits are not used for translation and may be used by
    /// the OS.
    #[bits(4)]
    pub protection_key: u8,

    /// **No-Execute** (NX, bit 63; XD on Intel).
    ///
    /// When set and EFER.NXE is enabled, instruction fetch is disallowed
    /// through this entry (permission intersection applies).
    pub no_execute: bool,
}

/// Index into the PML4 table (derived from virtual-address bits `[47:39]`).
///
/// This newtype prevents accidental mixing with other indices; the valid
/// range (`0..512`) is documented and checked with a debug assertion in
/// [`L4Index::new`].
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct L4Index(u16);

/// The top-level page map (PML4).
///
/// Layout:
/// - 512 entries, 8 bytes each (4096 bytes total).
/// - 4 KiB aligned, as required by the hardware.
///
/// Use [`PageMapLevel4::get`] and [`PageMapLevel4::set`] to read/write entries.
#[doc(alias = "PML4")]
#[repr(C, align(4096))]
pub struct PageMapLevel4 {
    entries: [Pml4Entry; 512],
}

impl L4Index {
    /// Construct an index from a canonical virtual address by extracting bits `[47:39]`.
    ///
    /// Returns a value in `0..512`.
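    ///
    /// A sketch (marked `ignore`, so it is not compiled as a doctest; it
    /// assumes a `VirtualAddress::new(u64)` constructor, which this module
    /// does not define):
    ///
    /// ```rust,ignore
    /// // Bits [47:39] of the canonical higher-half base select slot 256.
    /// let va = VirtualAddress::new(0xFFFF_8000_0000_0000);
    /// assert_eq!(L4Index::from(va).as_usize(), 256);
    /// ```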
    #[inline]
    #[must_use]
    pub const fn from(va: VirtualAddress) -> Self {
        Self::new(((va.as_u64() >> 39) & 0x1FF) as u16)
    }

    /// Construct an index from a raw `u16`.
    ///
    /// ### Panics / Debug assertions
    /// - Debug builds assert `v < 512`.
    #[inline]
    #[must_use]
    pub const fn new(v: u16) -> Self {
        debug_assert!(v < 512);
        Self(v)
    }

    /// Return the index as `usize` for array indexing.
    #[inline]
    #[must_use]
    pub const fn as_usize(self) -> usize {
        self.0 as usize
    }
}

impl Pml4Entry {
    /// Create a zero (non-present) entry with all bits cleared.
    #[inline]
    #[must_use]
    pub const fn zero() -> Self {
        Self::new()
    }

    /// If present, return the physical page of the next-level PDPT.
    ///
    /// Returns `None` if the entry is not present. The returned page is always
    /// 4 KiB-aligned, as required for page-table bases.
    #[inline]
    #[must_use]
    pub const fn next_table(self) -> Option<PhysicalPage<Size4K>> {
        if !self.present() {
            return None;
        }
        Some(self.physical_address())
    }

    /// Build a PML4 entry that points to the given PDPT page and applies the provided flags.
    ///
    /// ### Requirements
    /// - `flags.large_page()` **must be false** (`PS=0`). Enforced via `debug_assert!`.
    /// - This function sets `present=1` and the physical base to `next_pdpt_page.base()`.
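    ///
    /// A sketch mirroring the unit test at the bottom of this file (marked
    /// `ignore`, so it is not compiled as a doctest):
    ///
    /// ```rust,ignore
    /// let pdpt = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x1234_5000));
    /// let flags = Pml4Entry::new().with_writable(true).with_user(false);
    /// let e = Pml4Entry::present_with(flags.into(), pdpt);
    /// assert!(e.present());
    /// assert_eq!(e.next_table().unwrap().base().as_u64(), 0x1234_5000);
    /// ```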
    #[inline]
    #[must_use]
    pub const fn present_with(
        flags: VirtualMemoryPageBits,
        next_pdpt_page: PhysicalPage<Size4K>,
    ) -> Self {
        flags.to_pml4e(next_pdpt_page).with_present(true)
    }

    /// Set the PDPT base address (must be 4 KiB-aligned).
    #[inline]
    #[must_use]
    pub const fn with_physical_address(mut self, phys: PhysicalPage<Size4K>) -> Self {
        self.set_physical_address(phys);
        self
    }

    /// Set the PDPT base address (must be 4 KiB-aligned).
    #[inline]
    pub const fn set_physical_address(&mut self, phys: PhysicalPage<Size4K>) {
        self.set_phys_addr_51_12(phys.base().as_u64() >> 12);
    }

    /// Get the PDPT base address (4 KiB-aligned).
    #[inline]
    #[must_use]
    pub const fn physical_address(self) -> PhysicalPage<Size4K> {
        PhysicalPage::from_addr(PhysicalAddress::new(self.phys_addr_51_12() << 12))
    }
}

impl PageMapLevel4 {
    /// Create a fully zeroed (all entries non-present) PML4 table.
    #[inline]
    #[must_use]
    pub const fn zeroed() -> Self {
        Self {
            entries: [Pml4Entry::zero(); 512],
        }
    }

    /// Read the entry at the given index.
    ///
    /// This is a plain fetch; it does not perform TLB synchronization.
    #[inline]
    #[must_use]
    pub const fn get(&self, i: L4Index) -> Pml4Entry {
        self.entries[i.as_usize()]
    }

    /// Write the entry at the given index.
    ///
    /// Caller is responsible for any required TLB invalidation after modifying
    /// mappings that affect active address spaces.
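    ///
    /// A sketch of a typical update (marked `ignore`; the bindings are assumed
    /// to exist, and `flush_tlb` is a hypothetical stand-in for whatever
    /// invalidation routine the kernel provides):
    ///
    /// ```rust,ignore
    /// let idx = PageMapLevel4::index_of(va);
    /// pml4.set(idx, Pml4Entry::present_with(flags, pdpt_page));
    /// flush_tlb(va); // hypothetical helper; needed if `va` may be cached in the TLB
    /// ```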
    #[inline]
    pub const fn set(&mut self, i: L4Index, e: Pml4Entry) {
        self.entries[i.as_usize()] = e;
    }

    /// Derive the [`L4Index`] from a virtual address.
    #[inline]
    #[must_use]
    pub const fn index_of(va: VirtualAddress) -> L4Index {
        L4Index::from(va)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::addresses::PhysicalAddress;

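    // A small additional check (a sketch using only APIs defined in this
    // module): the PDPT base survives a round trip through the address
    // accessors, independently of the present bit.
    #[test]
    fn physical_address_round_trip() {
        let page = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0xDEAD_B000));
        let e = Pml4Entry::zero().with_physical_address(page);
        assert_eq!(e.physical_address().base().as_u64(), 0xDEAD_B000);
        // The entry was never marked present, so `next_table` reports nothing.
        assert!(e.next_table().is_none());
    }
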
    #[test]
    fn pml4_points_to_pdpt() {
        let pdpt_page = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x1234_5000));
        let f = Pml4Entry::new().with_writable(true).with_user(false);
        let e = Pml4Entry::present_with(f.into(), pdpt_page);
        assert!(e.present());
        assert_eq!(e.next_table().unwrap().base().as_u64(), 0x1234_5000);
    }
}
269}