kernel_vmem/page_table/pd.rs
//! # x86-64 Page Directory (PD / L2)
//!
//! This module models the second paging level (L2, Page Directory):
//!
//! - [`L2Index`]: index type for virtual-address bits `[29:21]`.
//! - [`PdEntry`]: a PD entry that is either a pointer to a PT (`PS=0`) or a 2 MiB leaf (`PS=1`).
//! - [`PdEntryKind`]: decoded view of an entry (next PT vs 2 MiB leaf).
//! - [`PageDirectory`]: a 4 KiB-aligned array of 512 PD entries.
//!
//! ## Semantics
//!
//! At L2, the `PS` bit selects the role of an entry:
//! - `PS=0`: entry points to a next-level Page Table (PT), whose base is 4 KiB-aligned.
//! - `PS=1`: entry is a 2 MiB leaf mapping; base must be 2 MiB-aligned.
//!
//! ## Invariants & Notes
//!
//! - [`PageDirectory`] is 4 KiB-aligned and contains exactly 512 entries.
//! - [`PdEntry::present_next_with`] forces `PS=0`; [`PdEntry::present_leaf_with`] forces `PS=1`.
//! - Raw constructors don’t validate consistency; callers must ensure correctness.
//! - TLB maintenance is the caller’s responsibility after mutating active mappings.
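//!
//! ## Example
//!
//! A minimal sketch of building and decoding an entry (fenced as `ignore`
//! since it assumes this module's items and the crate's address types are in
//! scope):
//!
//! ```ignore
//! // Non-leaf: point at a 4 KiB-aligned Page Table.
//! let pt = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x3000_0000));
//! let e = PdEntry::present_next_with(Pde::new_common_rw().into(), pt);
//!
//! // Decoding follows the PS bit: a non-leaf entry yields the PT base.
//! match e.kind() {
//!     Some(PdEntryKind::NextPageTable(pt_base, _flags)) => {
//!         assert_eq!(pt_base.base().as_u64(), 0x3000_0000);
//!     }
//!     _ => unreachable!(),
//! }
//! ```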

use crate::VirtualMemoryPageBits;
use crate::addresses::{PhysicalAddress, PhysicalPage, Size2M, Size4K, VirtualAddress};
use crate::page_table::{PRESENT_BIT, PS_BIT};
use bitfield_struct::bitfield;

/// **Borrowed view** into an L2 PDE.
///
/// Returned by [`PdEntry::view`].
pub enum L2View {
    /// Non-leaf PDE view (PS=0).
    Entry(Pde),
    /// 2 MiB leaf PDE view (PS=1).
    Leaf2M(Pde2M),
}

/// **L2 PDE union** — overlays the non-leaf [`Pde`] and leaf [`Pde2M`]
/// forms on the same 64-bit storage.
///
/// Prefer [`PdEntry::view`] for safe typed access: it checks the **PS**
/// bit and hands you the correct variant.
#[derive(Copy, Clone)]
#[repr(C)]
pub union PdEntry {
    /// Raw 64-bit storage of the entry.
    bits: u64,
    /// Non-leaf form: next-level Page Table (PS=0).
    entry: Pde,
    /// Leaf form: 2 MiB mapping (PS=1).
    leaf_2m: Pde2M,
}

/// L2 **PDE** — pointer to a **Page Table** (non-leaf; PS **= 0**).
///
/// - Physical address (bits **51:12**) is the 4 KiB-aligned PT base.
/// - **PAT lives at bit 12 only in the leaf form**; in this non-leaf form,
///   all bits 12..51 belong to the next-level table address.
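///
/// A self-contained sketch of the base packing (plain shift arithmetic,
/// independent of this crate's types):
///
/// ```
/// let pt_base: u64 = 0x3000_0000;             // 4 KiB-aligned PT base
/// assert_eq!(pt_base & 0xFFF, 0);             // low 12 bits must be zero
/// assert_eq!((pt_base >> 12) << 12, pt_base); // what `phys_addr_51_12` stores
/// ```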
#[bitfield(u64)]
pub struct Pde {
    /// Present (bit 0).
    pub present: bool,
    /// Writable (bit 1).
    pub writable: bool,
    /// User (bit 2).
    pub user: bool,
    /// Write-Through (bit 3).
    pub write_through: bool,
    /// Cache Disable (bit 4).
    pub cache_disable: bool,
    /// Accessed (bit 5).
    pub accessed: bool,
    /// Dirty (bit 6): **ignored** in non-leaf.
    #[bits(1)]
    __d_ignored: u8,
    /// PS (bit 7): **must be 0** in non-leaf.
    #[bits(1)]
    __ps_must_be_0: u8,
    /// Global (bit 8): **ignored** in non-leaf.
    #[bits(1)]
    __g_ignored: u8,

    /// OS-available low (bits 9..11).
    #[bits(3)]
    pub os_available_low: u8,

    /// **Next-level table physical address** (bits 12..51, 4 KiB-aligned).
    ///
    /// Note: Do **not** insert reserved placeholders here; in non-leaf form
    /// these bits are entirely the PT base address.
    #[bits(40)]
    phys_addr_51_12: u64,

    /// OS-available high (bits 52..58).
    #[bits(7)]
    pub os_available_high: u8,
    /// Protection Key / OS use (bits 59..62).
    #[bits(4)]
    pub protection_key: u8,
    /// No-Execute (bit 63).
    pub no_execute: bool,
}

impl Pde {
    /// Set the Page Table base (4 KiB-aligned).
    #[inline]
    #[must_use]
    pub const fn with_physical_page(mut self, phys: PhysicalPage<Size4K>) -> Self {
        self.set_physical_page(phys);
        self
    }

    /// Set the Page Table base (4 KiB-aligned).
    #[inline]
    pub const fn set_physical_page(&mut self, phys: PhysicalPage<Size4K>) {
        self.set_phys_addr_51_12(phys.base().as_u64() >> 12);
    }

    /// Get the Page Table base.
    #[inline]
    #[must_use]
    pub const fn physical_address(self) -> PhysicalPage<Size4K> {
        PhysicalPage::from_addr(PhysicalAddress::new(self.phys_addr_51_12() << 12))
    }

    /// Non-leaf PDE with common kernel RW flags.
    #[inline]
    #[must_use]
    pub const fn new_common_rw() -> Self {
        Self::new()
            .with_present(true)
            .with_writable(true)
            .with_user(false)
            .with_write_through(false)
            .with_cache_disable(false)
            .with_no_execute(false)
    }
}

/// L2 **PDE (2 MiB leaf)** — maps a single 2 MiB page (`PS = 1`).
///
/// - **PAT** (Page Attribute Table) selector lives at bit **12** in this form.
/// - Physical address uses bits **51:21** and must be **2 MiB-aligned**.
/// - `Dirty` is set by the CPU on first write; `Global` keeps TLB entries
///   across CR3 reloads unless explicitly invalidated.
///
/// This is a terminal mapping (leaf).
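///
/// A self-contained sketch of the 2 MiB alignment rule (plain shift
/// arithmetic, independent of this crate's types):
///
/// ```
/// let base: u64 = 0x4000_0000;           // 2 MiB-aligned physical base
/// assert_eq!(base & ((1 << 21) - 1), 0); // low 21 bits must be zero
/// assert_eq!((base >> 21) << 21, base);  // what `phys_addr_51_21` stores
/// ```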
#[bitfield(u64)]
pub struct Pde2M {
    /// Present (bit 0).
    pub present: bool,
    /// Writable (bit 1).
    pub writable: bool,
    /// User (bit 2).
    pub user: bool,
    /// Write-Through (bit 3).
    pub write_through: bool,
    /// Cache Disable (bit 4).
    pub cache_disable: bool,
    /// Accessed (bit 5).
    pub accessed: bool,
    /// **Dirty** (bit 6): set by CPU on first write to this 2 MiB page.
    pub dirty: bool,
    /// **Page Size** (bit 7): **must be 1** for 2 MiB leaf.
    #[bits(default = true)]
    pub(crate) page_size: bool,
    /// **Global** (bit 8): TLB entry not flushed on CR3 reload.
    pub global: bool,
    /// OS-available low (bits 9..11).
    #[bits(3)]
    pub os_available_low: u8,
    /// **PAT** (Page Attribute Table) selector for 2 MiB mappings (bit 12).
    pub pat_large: bool,
    /// Reserved (bits 13..20): must be 0.
    #[bits(8)]
    __res13_20: u8,
    /// Physical address bits **51:21** (2 MiB-aligned base).
    #[bits(31)]
    phys_addr_51_21: u32,
    /// OS-available high (bits 52..58).
    #[bits(7)]
    pub os_available_high: u8,
    /// Protection Key / OS use (bits 59..62).
    #[bits(4)]
    pub protection_key: u8,
    /// No-Execute (bit 63).
    pub no_execute: bool,
}

impl Pde2M {
    /// Set the 2 MiB page base (must be 2 MiB-aligned).
    #[inline]
    #[must_use]
    pub const fn with_physical_page(mut self, phys: PhysicalPage<Size2M>) -> Self {
        self.set_physical_page(phys);
        self
    }

    /// Set the 2 MiB page base (must be 2 MiB-aligned).
    #[inline]
    #[allow(clippy::cast_possible_truncation)]
    pub const fn set_physical_page(&mut self, phys: PhysicalPage<Size2M>) {
        self.set_phys_addr_51_21((phys.base().as_u64() >> 21) as u32);
        self.set_page_size(true);
    }

    /// Get the 2 MiB page base.
    #[inline]
    #[must_use]
    pub const fn physical_page(self) -> PhysicalPage<Size2M> {
        PhysicalPage::from_addr(PhysicalAddress::new((self.phys_addr_51_21() as u64) << 21))
    }

    /// Leaf PDE with common kernel RW flags.
    #[inline]
    #[must_use]
    pub const fn new_common_rw() -> Self {
        Self::new()
            .with_present(true)
            .with_writable(true)
            .with_user(false)
            .with_write_through(false)
            .with_cache_disable(false)
            .with_no_execute(false)
            .with_page_size(true)
    }
}

/// Index into the Page Directory (derived from VA bits `[29:21]`).
///
/// Strongly-typed to avoid mixing with other levels. Range is `0..512`
/// (checked in debug builds).
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct L2Index(u16);

/// Decoded PDE kind.
///
/// - [`NextPageTable`](PdEntryKind::NextPageTable): non-leaf (`PS=0`), contains the 4 KiB-aligned PT base.
/// - [`Leaf2MiB`](PdEntryKind::Leaf2MiB): leaf (`PS=1`), contains the 2 MiB-aligned large-page base.
pub enum PdEntryKind {
    /// Non-leaf: 4 KiB-aligned base of the next-level Page Table, plus flags.
    NextPageTable(PhysicalPage<Size4K>, Pde),
    /// Leaf: 2 MiB-aligned base of the large page, plus flags.
    Leaf2MiB(PhysicalPage<Size2M>, Pde2M),
}

/// The Page Directory (L2): 512 entries, 4 KiB-aligned.
#[doc(alias = "PD")]
#[repr(C, align(4096))]
pub struct PageDirectory {
    entries: [PdEntry; 512],
}

impl L2Index {
    /// Build an index from a canonical virtual address (extracts bits `[29:21]`).
    ///
    /// Returns a value in `0..512`.
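    ///
    /// A self-contained sketch of the bit arithmetic:
    ///
    /// ```
    /// let va: u64 = 5 * (1 << 21);       // 5th 2 MiB slot
    /// assert_eq!((va >> 21) & 0x1FF, 5); // bits [29:21] select the PD slot
    /// ```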
    #[inline]
    #[must_use]
    pub const fn from(va: VirtualAddress) -> Self {
        Self::new(((va.as_u64() >> 21) & 0x1FF) as u16)
    }

    /// Construct from a raw `u16`.
    ///
    /// ### Debug assertions
    /// - Asserts `v < 512` in debug builds.
    #[inline]
    #[must_use]
    pub const fn new(v: u16) -> Self {
        debug_assert!(v < 512);
        Self(v)
    }

    /// Return the index as `usize` for table access.
    #[inline]
    #[must_use]
    pub const fn as_usize(self) -> usize {
        self.0 as usize
    }
}

impl Default for PdEntry {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl PdEntry {
    /// Create a zeroed (non-present) entry.
    #[inline]
    #[must_use]
    pub const fn new() -> Self {
        Self { bits: 0 }
    }

    /// Wrap a non-leaf [`Pde`] (PS=0).
    #[inline]
    #[must_use]
    pub const fn new_entry(entry: Pde) -> Self {
        Self { entry }
    }

    /// Wrap a leaf [`Pde2M`] (PS=1).
    #[inline]
    #[must_use]
    pub const fn new_leaf(leaf: Pde2M) -> Self {
        Self { leaf_2m: leaf }
    }

    /// Check the Present bit (bit 0) on the raw storage.
    #[inline]
    #[must_use]
    pub const fn present(self) -> bool {
        unsafe { (self.bits & PRESENT_BIT) != 0 }
    }

    /// Construct union from raw `bits` (no validation).
    #[inline]
    #[must_use]
    pub const fn from_bits(bits: u64) -> Self {
        Self { bits }
    }

    /// Extract raw `bits` back from the union.
    #[inline]
    #[must_use]
    pub const fn into_bits(self) -> u64 {
        unsafe { self.bits }
    }

    /// **Typed read-only view** chosen by the **PS** bit.
    ///
    /// - If PS=1 → [`L2View::Leaf2M`]
    /// - If PS=0 → [`L2View::Entry`]
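    ///
    /// Illustrative sketch (fenced as `ignore`; assumes this module's items
    /// are in scope):
    ///
    /// ```ignore
    /// match PdEntry::new_leaf(Pde2M::new_common_rw()).view() {
    ///     L2View::Leaf2M(leaf) => assert!(leaf.present()),
    ///     L2View::Entry(_) => unreachable!("PS=1 selects the leaf view"),
    /// }
    /// ```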
    #[inline]
    #[must_use]
    pub const fn view(self) -> L2View {
        unsafe {
            if (self.bits & PS_BIT) != 0 {
                L2View::Leaf2M(self.leaf_2m)
            } else {
                L2View::Entry(self.entry)
            }
        }
    }

    /// Create a zero (non-present) entry.
    #[inline]
    #[must_use]
    pub const fn zero() -> Self {
        Self::new()
    }

    /// Decode the entry into its semantic kind, or `None` if not present.
    ///
    /// - When `PS=1`, returns [`PdEntryKind::Leaf2MiB`] with a 2 MiB page base.
    /// - When `PS=0`, returns [`PdEntryKind::NextPageTable`] with a PT base.
    #[inline]
    #[must_use]
    pub const fn kind(self) -> Option<PdEntryKind> {
        if !self.present() {
            return None;
        }

        Some(match self.view() {
            L2View::Entry(entry) => {
                let base = entry.physical_address();
                PdEntryKind::NextPageTable(base, entry)
            }
            L2View::Leaf2M(entry) => {
                let base = entry.physical_page();
                PdEntryKind::Leaf2MiB(base, entry)
            }
        })
    }

    /// Create a non-leaf PDE that points to a Page Table (`PS=0`).
    ///
    /// Sets `present=1`, forces `PS=0`, and writes the PT base address.
    /// The PT base must be 4 KiB-aligned.
    #[must_use]
    pub const fn present_next_with(
        flags: VirtualMemoryPageBits,
        page: PhysicalPage<Size4K>,
    ) -> Self {
        Self::new_entry(flags.to_pde(page).with_present(true))
    }

    /// Create a 2 MiB leaf PDE (`PS=1`) with the specified flags, mapping the specified page.
    ///
    /// Sets `present=1`, forces `PS=1`, and writes the 2 MiB-aligned page base.
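    ///
    /// Illustrative sketch (fenced as `ignore`; assumes this module's items
    /// are in scope):
    ///
    /// ```ignore
    /// let page = PhysicalPage::<Size2M>::from_addr(PhysicalAddress::new(0x4000_0000));
    /// let e = PdEntry::present_leaf_with(Pde2M::new_common_rw().into(), page);
    /// assert!(matches!(e.kind(), Some(PdEntryKind::Leaf2MiB(..))));
    /// ```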
    #[must_use]
    pub const fn present_leaf_with(
        leaf_flags: VirtualMemoryPageBits,
        page: PhysicalPage<Size2M>,
    ) -> Self {
        Self::new_leaf(
            leaf_flags
                .to_pde_2m(page)
                .with_present(true)
                .with_page_size(true),
        )
    }
}

impl From<Pde> for PdEntry {
    #[inline]
    fn from(e: Pde) -> Self {
        Self::new_entry(e)
    }
}

impl From<Pde2M> for PdEntry {
    #[inline]
    fn from(e: Pde2M) -> Self {
        Self::new_leaf(e)
    }
}

impl PageDirectory {
    /// Create a fully zeroed Page Directory (all entries non-present).
    #[inline]
    #[must_use]
    pub const fn zeroed() -> Self {
        Self {
            entries: [PdEntry::zero(); 512],
        }
    }

    /// Read the entry at `i`.
    ///
    /// Plain load; does not imply any TLB synchronization.
    #[inline]
    #[must_use]
    pub const fn get(&self, i: L2Index) -> PdEntry {
        self.entries[i.as_usize()]
    }

    /// Write the entry at `i`.
    ///
    /// Caller must handle any required TLB invalidation when changing active mappings.
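    ///
    /// Hedged sketch of updating an active address space; the `arch::invlpg`
    /// helper named here is hypothetical, not part of this crate:
    ///
    /// ```ignore
    /// pd.set(i, entry);
    /// arch::invlpg(va); // caller-supplied TLB invalidation
    /// ```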
    #[inline]
    pub const fn set(&mut self, i: L2Index, e: PdEntry) {
        self.entries[i.as_usize()] = e;
    }

    /// Set the entry at `i` to [`PdEntry::zero`].
    ///
    /// Caller is responsible for necessary TLB invalidations if this affects an
    /// active address space.
    #[inline]
    pub const fn set_zero(&mut self, i: L2Index) {
        self.set(i, PdEntry::zero());
    }

    /// Derive the PD index from a virtual address.
    #[inline]
    #[must_use]
    pub const fn index_of(va: VirtualAddress) -> L2Index {
        L2Index::from(va)
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::addresses::PhysicalAddress;

    #[test]
    fn pd_table_vs_2m() {
        let pt = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x3000_0000));
        let e_tbl = PdEntry::present_next_with(Pde::new_common_rw().into(), pt);
        match e_tbl.kind().unwrap() {
            PdEntryKind::NextPageTable(p, f) => {
                assert_eq!(p.base().as_u64(), 0x3000_0000);
                assert_eq!(f.into_bits() & (1 << 7), 0, "must be PS=0");
            }
            _ => panic!("expected next PT"),
        }

        let m2 = PhysicalPage::<Size2M>::from_addr(PhysicalAddress::new(0x4000_0000));
        let e_2m = PdEntry::present_leaf_with(Pde2M::new_common_rw().into(), m2);
        match e_2m.kind().unwrap() {
            PdEntryKind::Leaf2MiB(p, f) => {
                assert_eq!(p.base().as_u64(), 0x4000_0000);
                assert_ne!(f.into_bits() & (1 << 7), 0, "must be PS=1");
            }
            _ => panic!("expected 2MiB leaf"),
        }
    }
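
    // Additional sketch-level coverage: PageDirectory entry round-trips,
    // using only items already exercised in this module.
    #[test]
    fn pd_set_get_zero_roundtrip() {
        let i = L2Index::new(5); // slot selected by VA bits [29:21] == 5
        let mut pd = PageDirectory::zeroed();
        assert!(!pd.get(i).present(), "zeroed directory is non-present");

        // Store a non-leaf entry and read it back.
        let pt = PhysicalPage::<Size4K>::from_addr(PhysicalAddress::new(0x3000_0000));
        pd.set(i, PdEntry::present_next_with(Pde::new_common_rw().into(), pt));
        assert!(matches!(
            pd.get(i).kind(),
            Some(PdEntryKind::NextPageTable(..))
        ));

        // Clearing restores the non-present state.
        pd.set_zero(i);
        assert!(pd.get(i).kind().is_none());
    }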
}