kernel_vmem/address_space.rs

//! # Address Space (x86-64, PML4-rooted)
//!
//! Strongly-typed helpers to build and manipulate a **single** virtual address
//! space (tree rooted at a PML4). This complements the typed paging layers
//! (`PageMapLevel4`, `PageDirectoryPointerTable`, `PageDirectory`, `PageTable`).
//!
//! ## Highlights
//!
//! - [`AddressSpace::map_one`] to install one mapping (4 KiB / 2 MiB / 1 GiB).
//! - [`AddressSpace::unmap_one`] to clear a single 4 KiB PTE.
//! - [`AddressSpace::query`] to translate a VA to PA (handles huge pages).
//! - [`AddressSpace::activate`] to load CR3 with this space’s root.
//!
//! ## Design
//!
//! - Non-leaf entries are created with caller-provided **non-leaf flags**
//!   (typically: present + writable, US as needed). Leaf flags come from the
//!   mapping call. We never silently set US/GLOBAL/NX; the caller decides.
//! - Uses [`PhysicalPage<Size4K>`] for page-table frames, and [`VirtualAddress`] /
//!   [`PhysicalAddress`] for endpoints. Alignment is asserted via typed helpers.
//! - Keeps `unsafe` confined to mapping a physical frame to a typed table
//!   through the [`PhysMapper`].
//!
//! ## Safety
//!
//! - Mutating active mappings requires appropriate **TLB maintenance** (e.g.,
//!   `invlpg` per page or CR3 reload).
//! - The provided `PhysMapper` must yield **writable** references to table frames.
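//!
//! ## Example
//!
//! A minimal usage sketch (not a doctest). It assumes a [`PhysMapper`]
//! implementation `mapper`, a [`FrameAlloc`] implementation `alloc`, and two
//! [`VirtualMemoryPageBits`] values `table_flags` / `leaf_flags` appropriate
//! for your kernel (e.g. present + writable):
//!
//! ```ignore
//! // Adopt the live address space from CR3 (requires CPL0 and paging enabled).
//! let space = unsafe { AddressSpace::from_current(&mapper) };
//!
//! // Install a single 4 KiB mapping: VA 0xFFFF_9000_0000_0000 -> PA 0x10_0000.
//! let va = VirtualAddress::new(0xFFFF_9000_0000_0000);
//! let pa = PhysicalAddress::new(0x0000_0000_0010_0000);
//! space
//!     .map_one::<_, Size4K>(&mut alloc, va, pa, table_flags, leaf_flags)
//!     .expect("intermediate table allocation failed");
//!
//! // The walker now reports the translation; the live TLB still needs
//! // `invlpg` or a CR3 reload before the CPU observes the new mapping.
//! assert_eq!(space.query(va), Some(pa));
//! ```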

mod map_size;

pub use crate::address_space::map_size::MapSize;
use crate::address_space::map_size::MapSizeEnsureChainError;
use crate::addresses::{
    PageSize, PhysicalAddress, PhysicalPage, Size1G, Size2M, Size4K, VirtualAddress,
};
use crate::bits::VirtualMemoryPageBits;
use crate::page_table::pd::{L2Index, PageDirectory, PdEntry, PdEntryKind};
use crate::page_table::pdpt::{L3Index, PageDirectoryPointerTable, PdptEntry, PdptEntryKind};
use crate::page_table::pml4::{L4Index, PageMapLevel4, Pml4Entry};
use crate::page_table::pt::{L1Index, PageTable, PtEntry4k};
use crate::{FrameAlloc, PhysMapper, PhysMapperExt, read_cr3_phys};

/// Handle to a single, concrete address space.
pub struct AddressSpace<'m, M: PhysMapper> {
    root: PhysicalPage<Size4K>, // PML4 frame
    mapper: &'m M,
}

/// The PML4 root page for an [`AddressSpace`].
pub type RootPage = PhysicalPage<Size4K>;

impl<'m, M: PhysMapper> AddressSpace<'m, M> {
    /// View the **currently active** address space by reading CR3.
    ///
    /// # Safety
    /// - Must run at CPL0 with paging enabled.
    /// - Assumes CR3 points at a valid PML4 frame.
    #[inline]
    pub unsafe fn from_current(mapper: &'m M) -> Self {
        let root_pa = unsafe { read_cr3_phys() };
        let root = PhysicalPage::<Size4K>::from_addr(root_pa);
        Self { root, mapper }
    }

    /// Construct an address space from an explicitly known root frame
    /// (e.g., a PML4 obtained from your own frame allocator).
    #[inline]
    pub const fn from_root(mapper: &'m M, root: PhysicalPage<Size4K>) -> Self {
        Self { root, mapper }
    }

    /// Load CR3 with this address space’s root.
    ///
    /// # Safety
    /// You must ensure the CPU paging state (CR0/CR4/EFER) and code/data mappings
    /// are consistent with the target space. Consider reloading CR3 or issuing
    /// `invlpg` after changes to active mappings.
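    ///
    /// # Example
    ///
    /// A minimal sketch (not a doctest); `space` is assumed to be an
    /// [`AddressSpace`] whose kernel code/data mappings match the currently
    /// executing code:
    ///
    /// ```ignore
    /// unsafe {
    ///     // Loading CR3 also flushes all non-global TLB entries.
    ///     space.activate();
    /// }
    ///
    /// // After editing mappings in the *already active* space, a per-page flush
    /// // is cheaper than a full CR3 reload (illustrative helper, not part of this module):
    /// unsafe fn invlpg(va: VirtualAddress) {
    ///     core::arch::asm!("invlpg [{}]", in(reg) va.as_u64(), options(nostack, preserves_flags));
    /// }
    /// ```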
    #[inline]
    pub unsafe fn activate(&self) {
        let cr3 = self.root.base().as_u64();
        unsafe {
            core::arch::asm!("mov cr3, {}", in(reg) cr3, options(nostack, preserves_flags));
        }
    }

    /// Physical page of the PML4.
    #[inline]
    #[must_use]
    pub const fn root_page(&self) -> RootPage {
        self.root
    }

    /// Borrow a [`PageTable`] (PT) in this frame.
    ///
    /// Convenience wrapper for [`PhysMapper::pt_mut`].
    #[inline]
    pub(crate) fn pt_mut(&self, page: PhysicalPage<Size4K>) -> &mut PageTable {
        self.mapper.pt_mut(page)
    }

    /// Translate a `VirtualAddress` to `PhysicalAddress` if mapped.
    ///
    /// Handles 1 GiB and 2 MiB leaves by adding the appropriate **in-page offset**.
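    ///
    /// # Example
    ///
    /// A minimal sketch (not a doctest):
    ///
    /// ```ignore
    /// match space.query(va) {
    ///     Some(pa) => { /* `pa` is the leaf base plus the in-page offset of `va` */ }
    ///     None => { /* no translation installed for `va` */ }
    /// }
    /// ```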
    #[must_use]
    pub fn query(&self, va: VirtualAddress) -> Option<PhysicalAddress> {
        match self.walk(va) {
            WalkResult::Leaf1G { base, .. } => {
                let off = va.offset::<Size1G>();
                Some(base.join(off))
            }
            WalkResult::Leaf2M { base, .. } => {
                let off = va.offset::<Size2M>();
                Some(base.join(off))
            }
            WalkResult::L1 { pte, .. } => {
                let (base4k, _fl) = pte.page_4k()?;
                let off = va.offset::<Size4K>();
                Some(base4k.join(off))
            }
            WalkResult::Missing => None,
        }
    }

    /// Map **one** page at `va → pa` with size `S` and `leaf_flags`.
    ///
    /// - Non-leaf links are created with `nonleaf_flags` (e.g., present+writable).
    /// - Alignment is asserted (debug) via typed wrappers.
    ///
    /// # Errors
    /// - Out of memory while allocating an intermediate page table.
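    ///
    /// # Example
    ///
    /// A minimal sketch (not a doctest); `space`, `alloc`, and the flag values
    /// are placeholders supplied by the caller:
    ///
    /// ```ignore
    /// // Map one 2 MiB page; both addresses must be 2 MiB-aligned.
    /// space.map_one::<_, Size2M>(
    ///     &mut alloc,
    ///     VirtualAddress::new(0xFFFF_8000_4000_0000),
    ///     PhysicalAddress::new(0x4000_0000),
    ///     nonleaf_flags, // e.g. present | writable (links PML4 -> PDPT -> PD)
    ///     leaf_flags,    // e.g. present | writable | global (the PDE leaf itself)
    /// )?;
    /// ```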
    pub fn map_one<A: FrameAlloc, S: MapSize>(
        &self,
        alloc: &mut A,
        va: VirtualAddress,
        pa: PhysicalAddress,
        nonleaf_flags: VirtualMemoryPageBits,
        leaf_flags: VirtualMemoryPageBits,
    ) -> Result<(), AddressSpaceMapOneError> {
        debug_assert_eq!(pa.offset::<S>().as_u64(), 0, "physical address not aligned");

        let leaf_tbl = S::ensure_chain_for(self, alloc, va, nonleaf_flags)?;
        S::set_leaf(self, leaf_tbl, va, pa, leaf_flags);
        Ok(())
    }

    /// Unmap a single **4 KiB** page at `va`.
    ///
    /// # Errors
    /// - The mapping is missing, or `va` is covered by a 2 MiB / 1 GiB leaf.
    // TODO: Refactor to error type
    pub fn unmap_one(&self, va: VirtualAddress) -> Result<(), &'static str> {
        match self.walk(va) {
            WalkResult::L1 { pt, i1, pte } => {
                if !pte.present() {
                    return Err("missing: pte");
                }
                pt.set_zero(i1);
                Ok(())
            }
            WalkResult::Leaf2M { .. } => Err("found 2MiB leaf (not a 4KiB mapping)"),
            WalkResult::Leaf1G { .. } => Err("found 1GiB leaf (not a 4KiB mapping)"),
            WalkResult::Missing => Err("missing: chain"),
        }
    }

    /// Greedy region mapping: tiles `[virt_start .. virt_start+len)` onto
    /// `[phys_start .. phys_start+len)` using 1G / 2M / 4K pages as alignment permits.
    ///
    /// - Non-leaf links use `nonleaf_flags` (e.g. present|writable).
    /// - Leaves use `leaf_flags` (e.g. perms, NX, GLOBAL).
    ///
    /// # Errors
    /// - Propagates OOMs from intermediate table allocation.
    /// - Returns [`AddressSpaceMapRegionError::Unaligned`] when a remaining chunk
    ///   cannot be tiled because `va`/`pa` are not even 4 KiB aligned.
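    ///
    /// # Example
    ///
    /// A minimal sketch (not a doctest); with both endpoints 2 MiB-aligned, a
    /// 6 MiB window tiles as three 2 MiB leaves:
    ///
    /// ```ignore
    /// space.map_region(
    ///     &mut alloc,
    ///     VirtualAddress::new(0xFFFF_8000_0000_0000),
    ///     PhysicalAddress::new(0x0000_0000_0020_0000),
    ///     6 * 1024 * 1024, // 6 MiB
    ///     nonleaf_flags,
    ///     leaf_flags,
    /// )?;
    /// ```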
    pub fn map_region<A: FrameAlloc>(
        &self,
        alloc: &mut A,
        virt_start: VirtualAddress,
        phys_start: PhysicalAddress,
        len: u64,
        nonleaf_flags: VirtualMemoryPageBits,
        leaf_flags: VirtualMemoryPageBits,
    ) -> Result<(), AddressSpaceMapRegionError> {
        let mut off = 0u64;
        while off < len {
            let va = VirtualAddress::new(virt_start.as_u64() + off);
            let pa = PhysicalAddress::new(phys_start.as_u64() + off);
            let remain = len - off;

            // Try 1 GiB
            if (va.as_u64() & (Size1G::SIZE - 1) == 0)
                && (pa.as_u64() & (Size1G::SIZE - 1) == 0)
                && remain >= Size1G::SIZE
            {
                self.map_one::<A, Size1G>(alloc, va, pa, nonleaf_flags, leaf_flags)?;
                off += Size1G::SIZE;
                continue;
            }

            // Try 2 MiB
            if (va.as_u64() & (Size2M::SIZE - 1) == 0)
                && (pa.as_u64() & (Size2M::SIZE - 1) == 0)
                && remain >= Size2M::SIZE
            {
                self.map_one::<A, Size2M>(alloc, va, pa, nonleaf_flags, leaf_flags)?;
                off += Size2M::SIZE;
                continue;
            }

            // Fall back to 4 KiB
            if (va.as_u64() & (Size4K::SIZE - 1) == 0) && (pa.as_u64() & (Size4K::SIZE - 1) == 0) {
                self.map_one::<A, Size4K>(alloc, va, pa, nonleaf_flags, leaf_flags)?;
                off += Size4K::SIZE;
                continue;
            }

            return Err(AddressSpaceMapRegionError::Unaligned(va, pa));
        }
        Ok(())
    }

    /// Greedy unmap of a region: clears whole 1G/2M leaves when aligned, otherwise 4K PTEs.
    /// (Does not collapse tables; that's a separate optimization pass.)
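    ///
    /// # Example
    ///
    /// A minimal sketch (not a doctest); if this space is active, flush the TLB
    /// for the affected range afterwards:
    ///
    /// ```ignore
    /// // Tear down the 6 MiB window from the `map_region` example.
    /// space.unmap_region(VirtualAddress::new(0xFFFF_8000_0000_0000), 6 * 1024 * 1024);
    /// ```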
    pub fn unmap_region(&self, virt_start: VirtualAddress, len: u64) {
        let mut off = 0u64;
        while off < len {
            let va = VirtualAddress::new(virt_start.as_u64() + off);
            match self.walk(va) {
                WalkResult::Leaf1G { pdpt, i3, .. }
                    if (va.as_u64() & (Size1G::SIZE - 1) == 0) && (len - off) >= Size1G::SIZE =>
                {
                    // Clear the PDPTE
                    pdpt.set_zero(i3);
                    off += Size1G::SIZE;
                }
                WalkResult::Leaf2M { pd, i2, .. }
                    if (va.as_u64() & (Size2M::SIZE - 1) == 0) && (len - off) >= Size2M::SIZE =>
                {
                    // Clear the PDE
                    pd.set_zero(i2);
                    off += Size2M::SIZE;
                }
                WalkResult::L1 { pt, i1, pte } => {
                    if pte.present() {
                        pt.set_zero(i1);
                    }
                    off += Size4K::SIZE;
                }
                // Missing entry, or a 1 GiB / 2 MiB leaf that is unaligned or larger than
                // the remaining length: step by 4 KiB so the loop always makes progress.
                _ => off += Size4K::SIZE,
            }
        }
    }

    /// Walks the whole tree and frees empty tables (PT/PD/PDPT). Does not merge leaves.
    #[allow(clippy::similar_names)]
    pub fn collapse_empty_tables<F: FrameAlloc>(&self, free: &mut F) {
        let pml4 = self.pml4_mut();

        // For every L4 entry:
        for i4 in 0..512 {
            let e4 = pml4.get(L4Index::new(i4));
            let Some(pdpt_page) = e4.next_table() else {
                continue;
            };
            let pdpt = self.pdpt_mut(pdpt_page);

            // For every L3 entry:
            let mut used_l3 = false;
            for i3 in 0..512 {
                match pdpt.get(L3Index::new(i3)).kind() {
                    Some(PdptEntryKind::Leaf1GiB(_, _)) => {
                        used_l3 = true;
                    }
                    Some(PdptEntryKind::NextPageDirectory(pd_page, _)) => {
                        let pd = self.pd_mut(pd_page);
                        // For every L2 entry:
                        let mut used_l2 = false;
                        for i2 in 0..512 {
                            match pd.get(L2Index::new(i2)).kind() {
                                Some(PdEntryKind::Leaf2MiB(_, _)) => {
                                    used_l2 = true;
                                }
                                Some(PdEntryKind::NextPageTable(pt_page, _)) => {
                                    let pt = self.pt_mut(pt_page);
                                    let mut any_present = false;
                                    for i1 in 0..512 {
                                        if pt.get(L1Index::new(i1)).present() {
                                            any_present = true;
                                            break;
                                        }
                                    }
                                    if any_present {
                                        used_l2 = true;
                                    } else {
                                        // free PT
                                        pd.set(L2Index::new(i2), PdEntry::zero());
                                        free.free_4k(pt_page);
                                    }
                                }
                                None => {}
                            }
                        }
                        // If PD ended up empty (no leaves / no child PTs left), free it.
                        if used_l2 {
                            used_l3 = true;
                        } else {
                            pdpt.set(L3Index::new(i3), PdptEntry::zero());
                            free.free_4k(pd_page);
                        }
                    }
                    None => {}
                }
            }

            // If PDPT is now empty, free it.
            if !used_l3 {
                pml4.set(L4Index::new(i4), Pml4Entry::zero());
                free.free_4k(pdpt_page);
            }
        }
    }

    /// Internal walker: resolves VA to the point it terminates.
    #[allow(clippy::similar_names)]
    fn walk(&self, va: VirtualAddress) -> WalkResult<'_> {
        let (i4, i3, i2, i1) = crate::page_table::split_indices(va);

        // PML4
        let pml4 = self.pml4_mut();
        let Some(pdpt_page) = pml4.get(i4).next_table() else {
            return WalkResult::Missing;
        };

        // PDPT
        let pdpt = self.pdpt_mut(pdpt_page);
        match pdpt.get(i3).kind() {
            Some(PdptEntryKind::Leaf1GiB(base, _fl)) => WalkResult::Leaf1G { base, pdpt, i3 },
            Some(PdptEntryKind::NextPageDirectory(pd_page, _fl)) => {
                // PD
                let pd = self.pd_mut(pd_page);
                match pd.get(i2).kind() {
                    Some(PdEntryKind::Leaf2MiB(base, _fl)) => WalkResult::Leaf2M { base, pd, i2 },
                    Some(PdEntryKind::NextPageTable(pt_page, _fl)) => {
                        // PT
                        let pt = self.pt_mut(pt_page);
                        let pte = pt.get(i1);
                        WalkResult::L1 { pt, i1, pte }
                    }
                    None => WalkResult::Missing,
                }
            }
            None => WalkResult::Missing,
        }
    }

    /// Borrow the [`PageMapLevel4`] (PML4) as a typed table.
    ///
    /// Convenience wrapper for [`PhysMapper::pml4_mut`] at the [`root_page`](Self::root_page).
    #[inline]
    pub(crate) fn pml4_mut(&self) -> &mut PageMapLevel4 {
        self.mapper.pml4_mut(self.root)
    }

    /// Borrow a [`PageDirectoryPointerTable`] (PDPT) in this frame.
    ///
    /// Convenience wrapper for [`PhysMapper::pdpt_mut`].
    #[inline]
    pub(crate) fn pdpt_mut(&self, page: PhysicalPage<Size4K>) -> &mut PageDirectoryPointerTable {
        self.mapper.pdpt_mut(page)
    }

    /// Borrow a [`PageDirectory`] (PD) in this frame.
    ///
    /// Convenience wrapper for [`PhysMapper::pd_mut`].
    #[inline]
    pub(crate) fn pd_mut(&self, page: PhysicalPage<Size4K>) -> &mut PageDirectory {
        self.mapper.pd_mut(page)
    }

    /// Zeroes the [`PageDirectoryPointerTable`] (PDPT) in this frame.
    ///
    /// Convenience wrapper for [`PhysMapper::zero_pdpt`].
    #[inline]
    pub(crate) fn zero_pdpt(&self, page: PhysicalPage<Size4K>) {
        self.mapper.zero_pdpt(page);
    }

    /// Zeroes the [`PageDirectory`] (PD) in this frame.
    ///
    /// Convenience wrapper for [`PhysMapper::zero_pd`].
    #[inline]
    pub(crate) fn zero_pd(&self, page: PhysicalPage<Size4K>) {
        self.mapper.zero_pd(page);
    }

    /// Zeroes the [`PageTable`] (PT) in this frame.
    ///
    /// Convenience wrapper for [`PhysMapper::zero_pt`].
    #[inline]
    pub(crate) fn zero_pt(&self, page: PhysicalPage<Size4K>) {
        self.mapper.zero_pt(page);
    }
}

/// An error that can occur while mapping a single page ([`AddressSpace::map_one`]).
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
pub enum AddressSpaceMapOneError {
    #[error(transparent)]
    OutOfMemory(#[from] MapSizeEnsureChainError),
}

/// An error that can occur while mapping a region ([`AddressSpace::map_region`]).
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
pub enum AddressSpaceMapRegionError {
    #[error(transparent)]
    OutOfMemory(#[from] MapSizeEnsureChainError),
    #[error("unaligned va/pa for remaining size: {0:?} -> {1:?}")]
    Unaligned(VirtualAddress, PhysicalAddress),
}

impl From<AddressSpaceMapOneError> for AddressSpaceMapRegionError {
    fn from(e: AddressSpaceMapOneError) -> Self {
        match e {
            AddressSpaceMapOneError::OutOfMemory(e) => Self::OutOfMemory(e),
        }
    }
}

/// Target table/level produced by `ensure_chain`.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum EnsureTarget {
    /// You will write a **PDPTE** (1 GiB leaf).
    L3For1G,
    /// You will write a **PDE** (2 MiB leaf).
    L2For2M,
    /// You will write a **PTE** (4 KiB leaf).
    L1For4K,
}

/// The result of a table walk.
#[allow(dead_code)]
enum WalkResult<'a> {
    /// Hit a 1 GiB leaf at PDPT.
    Leaf1G {
        base: PhysicalPage<Size1G>,
        pdpt: &'a mut PageDirectoryPointerTable,
        i3: L3Index,
    },
    /// Hit a 2 MiB leaf at PD.
    Leaf2M {
        base: PhysicalPage<Size2M>,
        pd: &'a mut PageDirectory,
        i2: L2Index,
    },
    /// Reached PT (L1) with its index and current entry.
    L1 {
        pt: &'a mut PageTable,
        i1: L1Index,
        pte: PtEntry4k,
    },
    /// Missing somewhere in the chain.
    Missing,
}