kernel_vmem/address_space/map_size.rs

//! # Memory Page Table Mapping Size
//!
//! This module defines the behavior of [`AddressSpace::map_one`](AddressSpace::map_one) for
//! different page sizes.
//!
//! The `MapSize` trait is implemented for each page size, and provides the
//! following methods:
//!
//! - `ensure_chain_for`: Given a virtual address, ensure that the non-leaf
//!   chain for that address down to the table that holds the leaf for the
//!   requested page size exists, allocating intermediate tables as needed.
//! - `set_leaf`: Install the leaf entry for that page size into the table
//!   returned by `ensure_chain_for`.
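//!
//! ## Example
//!
//! A minimal sketch of the generic flow (mirroring the trait's "Typical flow"
//! example below; `aspace`, `alloc`, `va`, `pa`, and the flag values are
//! illustrative):
//!
//! ```ignore
//! // S: MapSize selects the page size at compile time.
//! let leaf_table = S::ensure_chain_for(aspace, alloc, va, nonleaf_flags)?;
//! S::set_leaf(aspace, leaf_table, va, pa, leaf_flags);
//! ```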

use crate::addresses::{
    PageSize, PhysicalAddress, PhysicalPage, Size1G, Size2M, Size4K, VirtualAddress,
};
use crate::bits::VirtualMemoryPageBits;
use crate::page_table::pd::{L2Index, PdEntry, PdEntryKind};
use crate::page_table::pdpt::{L3Index, PdptEntry, PdptEntryKind};
use crate::page_table::pml4::{L4Index, Pml4Entry};
use crate::page_table::pt::{L1Index, PtEntry4k};
use crate::{AddressSpace, FrameAlloc, PhysMapper};

/// # Page-size–directed mapping behavior
///
/// `MapSize` encodes, at the type level, how to:
/// 1) **ensure** the non-leaf page-table chain exists for a given virtual
///    address, and
/// 2) **install** the correct **leaf** entry for that page size.
///
/// Implementations for [`Size1G`], [`Size2M`], and [`Size4K`] decide **where to
/// stop the walk** and **which entry to write**, so callers don’t branch at
/// runtime. This keeps the mapping code zero-cost and compile-time checked.
///
/// ## What `ensure_chain_for` returns
///
/// It returns the **target table frame (4 KiB page)** into which you will write
/// the leaf entry for `Self`:
///
/// - For **1 GiB** pages (`Self = Size1G`): returns the **PDPT** frame
///   *(you will write a PDPTE with `PS=1`)*.
/// - For **2 MiB** pages (`Self = Size2M`): returns the **PD** frame
///   *(you will write a PDE with `PS=1`)*.
/// - For **4 KiB** pages (`Self = Size4K`): returns the **PT** frame
///   *(you will write a PTE with `PS=0`)*.
///
/// Newly created non-leaf entries are initialized with `nonleaf_flags`
/// (e.g., `present | writable`). A **conflicting huge leaf** at a higher level
/// is **replaced** on demand by allocating, zeroing, and linking the
/// next-level table; the previous huge mapping is not carried over.
///
/// ## Typical flow
///
/// ```ignore
/// // Decide size with the type parameter S, no runtime branching:
/// let leaf_table = S::ensure_chain_for(aspace, alloc, va, nonleaf_flags)?;
/// S::set_leaf(aspace, leaf_table, va, pa, leaf_flags);
/// ```
///
/// ## Safety & alignment
///
/// - Physical alignment to `S` is checked with a **debug assertion**
///   (`pa.offset::<S>() == 0`) when the leaf is installed.
/// - The mapper (`PhysMapper`) must yield **writable** views of table frames.
/// - If you mutate the **active** address space, perform the required **TLB
///   maintenance** (`invlpg` per page or a CR3 reload); see the sketch below.
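///
/// A caller-side sketch of that TLB step. This is only illustrative:
/// `flush_tlb_page` and `address_space_is_active` are hypothetical names, not
/// part of this module.
///
/// ```ignore
/// Size4K::set_leaf(aspace, pt_frame, va, pa, leaf_flags);
/// if address_space_is_active {
///     flush_tlb_page(va); // e.g. `invlpg`; a full CR3 reload also works
/// }
/// ```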
pub trait MapSize: PageSize {
    /// Ensure that the non-leaf chain for `va` exists down to the table that
    /// holds the **leaf** for `Self`, allocating and linking intermediate
    /// tables as needed.
    ///
    /// ### Returns
    /// The 4 KiB **frame** (as `PhysicalPage<Size4K>`) of the table where the
    /// leaf for `Self` must be written:
    /// - `Size1G` → PDPT frame
    /// - `Size2M` → PD frame
    /// - `Size4K` → PT frame
    ///
    /// ### Behavior
    /// - Initializes newly allocated non-leaf tables to zeroed state and links
    ///   them with `nonleaf_flags`.
    /// - If a conflicting huge leaf is encountered at a higher level, it is
    ///   **replaced** by allocating a zeroed next-level table and relinking;
    ///   the old huge mapping is not preserved.
    ///
    /// ### Errors
    /// - [`MapSizeEnsureChainError::OomPdpt`] / `OomPd` / `OomPt` if allocating the
    ///   corresponding intermediate table frame fails.
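    ///
    /// ### Example
    ///
    /// A sketch of ensuring the chain for a 1 GiB mapping; `aspace`, `alloc`,
    /// `va`, and `nonleaf_flags` are illustrative values supplied by the caller:
    ///
    /// ```ignore
    /// // For Size1G this returns the PDPT frame that will hold the PS=1 leaf.
    /// let pdpt_frame = Size1G::ensure_chain_for(aspace, alloc, va, nonleaf_flags)?;
    /// ```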
    fn ensure_chain_for<A: FrameAlloc, M: PhysMapper>(
        aspace: &AddressSpace<M>,
        alloc: &mut A,
        va: VirtualAddress,
        nonleaf_flags: VirtualMemoryPageBits,
    ) -> Result<PhysicalPage<Size4K>, MapSizeEnsureChainError>;

    /// Install the **leaf** entry for `va → pa` in the `leaf_tbl_page`
    /// returned by [`ensure_chain_for`](Self::ensure_chain_for), with the given `leaf_flags`.
    ///
    /// - `Size1G`: writes a **PDPTE (PS=1)** into the PDPT at `va`.
    /// - `Size2M`: writes a **PDE   (PS=1)** into the PD   at `va`.
    /// - `Size4K`: writes a **PTE   (PS=0)** into the PT   at `va`.
    ///
    /// Alignment of `pa` to `Self` is checked with a debug assertion:
    /// `debug_assert_eq!(pa.offset::<Self>().as_u64(), 0)`.
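    ///
    /// ### Example
    ///
    /// A minimal sketch of mapping one 2 MiB page, pairing this method with
    /// [`ensure_chain_for`](Self::ensure_chain_for) (names are illustrative):
    ///
    /// ```ignore
    /// let pd_frame = Size2M::ensure_chain_for(aspace, alloc, va, nonleaf_flags)?;
    /// Size2M::set_leaf(aspace, pd_frame, va, pa, leaf_flags);
    /// ```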
    fn set_leaf<M: PhysMapper>(
        aspace: &AddressSpace<M>,
        leaf_tbl_page: PhysicalPage<Size4K>,
        va: VirtualAddress,
        pa: PhysicalAddress,
        leaf_flags: VirtualMemoryPageBits,
    );
}

/// Error returned by [`MapSize::ensure_chain_for`] when allocating a new
/// intermediate table frame fails.
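///
/// A hypothetical way a caller might surface the failure; `MapError` is an
/// illustrative caller-side error type, not defined in this module:
///
/// ```ignore
/// let pt_frame = Size4K::ensure_chain_for(aspace, alloc, va, nonleaf_flags)
///     .map_err(MapError::TableAllocation)?;
/// ```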
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[allow(clippy::enum_variant_names)]
pub enum MapSizeEnsureChainError {
    #[error("out of memory (PDPT)")]
    OomPdpt,
    #[error("out of memory (PD)")]
    OomPd,
    #[error("out of memory (PT)")]
    OomPt,
}

impl MapSize for Size1G {
    fn ensure_chain_for<A: FrameAlloc, M: PhysMapper>(
        aspace: &AddressSpace<M>,
        alloc: &mut A,
        va: VirtualAddress,
        nonleaf_flags: VirtualMemoryPageBits,
    ) -> Result<PhysicalPage<Size4K>, MapSizeEnsureChainError> {
        let i4 = L4Index::from(va);

        // L4 → L3
        let pml4 = aspace.pml4_mut();
        let e4 = pml4.get(i4);
        if let Some(pdpt_page) = e4.next_table() {
            return Ok(pdpt_page);
        }
        let f = alloc.alloc_4k().ok_or(MapSizeEnsureChainError::OomPdpt)?;
        aspace.zero_pdpt(f);
        pml4.set(i4, Pml4Entry::present_with(nonleaf_flags, f));
        Ok(f)
    }

    fn set_leaf<M: PhysMapper>(
        aspace: &AddressSpace<M>,
        leaf_tbl_page: PhysicalPage<Size4K>,
        va: VirtualAddress,
        pa: PhysicalAddress,
        leaf_flags: VirtualMemoryPageBits,
    ) {
        // require 1 GiB alignment in debug
        debug_assert_eq!(pa.offset::<Self>().as_u64(), 0);
        let pdpt = aspace.pdpt_mut(leaf_tbl_page);
        let idx = L3Index::from(va);
        let g1 = PhysicalPage::<Self>::from_addr(pa);
        pdpt.set(idx, PdptEntry::present_leaf_with(leaf_flags, g1));
    }
}

impl MapSize for Size2M {
    fn ensure_chain_for<A: FrameAlloc, M: PhysMapper>(
        aspace: &AddressSpace<M>,
        alloc: &mut A,
        va: VirtualAddress,
        nonleaf_flags: VirtualMemoryPageBits,
    ) -> Result<PhysicalPage<Size4K>, MapSizeEnsureChainError> {
        let i4 = L4Index::from(va);
        let i3 = L3Index::from(va);

        // L4 → L3
        let pml4 = aspace.pml4_mut();
        let e4 = pml4.get(i4);
        let pdpt_page = if let Some(p) = e4.next_table() {
            p
        } else {
            let f = alloc.alloc_4k().ok_or(MapSizeEnsureChainError::OomPdpt)?;
            aspace.zero_pdpt(f);
            pml4.set(i4, Pml4Entry::present_with(nonleaf_flags, f));
            f
        };

        // L3 → L2 (and replace a conflicting 1GiB leaf if necessary)
        let pdpt = aspace.pdpt_mut(pdpt_page);
        let e3 = pdpt.get(i3);
        Ok(match e3.kind() {
            Some(PdptEntryKind::NextPageDirectory(pd, _)) => pd,
            Some(PdptEntryKind::Leaf1GiB(_, _)) | None => {
                let f = alloc.alloc_4k().ok_or(MapSizeEnsureChainError::OomPd)?;
                aspace.zero_pd(f);
                pdpt.set(i3, PdptEntry::present_next_with(nonleaf_flags, f));
                f
            }
        })
    }

    fn set_leaf<M: PhysMapper>(
        aspace: &AddressSpace<M>,
        leaf_tbl_page: PhysicalPage<Size4K>,
        va: VirtualAddress,
        pa: PhysicalAddress,
        leaf_flags: VirtualMemoryPageBits,
    ) {
        // require 2 MiB alignment in debug
        debug_assert_eq!(pa.offset::<Self>().as_u64(), 0);
        let pd = aspace.pd_mut(leaf_tbl_page);
        let idx = L2Index::from(va);
        let m2 = PhysicalPage::<Self>::from_addr(pa);
        pd.set(idx, PdEntry::present_leaf_with(leaf_flags, m2));
    }
}

impl MapSize for Size4K {
    fn ensure_chain_for<A: FrameAlloc, M: PhysMapper>(
        aspace: &AddressSpace<M>,
        alloc: &mut A,
        va: VirtualAddress,
        nonleaf_flags: VirtualMemoryPageBits,
    ) -> Result<PhysicalPage<Size4K>, MapSizeEnsureChainError> {
        let i4 = L4Index::from(va);
        let i3 = L3Index::from(va);
        let i2 = L2Index::from(va);

        // L4 → L3
        let pml4 = aspace.pml4_mut();
        let e4 = pml4.get(i4);
        let pdpt_page = if let Some(p) = e4.next_table() {
            p
        } else {
            let f = alloc.alloc_4k().ok_or(MapSizeEnsureChainError::OomPdpt)?;
            aspace.zero_pdpt(f);
            pml4.set(i4, Pml4Entry::present_with(nonleaf_flags, f));
            f
        };

        // L3 → L2 (and replace a conflicting 1GiB leaf if necessary)
        let pdpt = aspace.pdpt_mut(pdpt_page);
        let e3 = pdpt.get(i3);
        let pd_page = match e3.kind() {
            Some(PdptEntryKind::NextPageDirectory(pd, _)) => pd,
            Some(PdptEntryKind::Leaf1GiB(_, _)) | None => {
                let f = alloc.alloc_4k().ok_or(MapSizeEnsureChainError::OomPd)?;
                aspace.zero_pd(f);
                pdpt.set(i3, PdptEntry::present_next_with(nonleaf_flags, f));
                f
            }
        };

        // L2 → L1 (and replace a conflicting 2MiB leaf if necessary)
        let pd = aspace.pd_mut(pd_page);
        let e2 = pd.get(i2);
        Ok(match e2.kind() {
            Some(PdEntryKind::NextPageTable(pt, _)) => pt,
            Some(PdEntryKind::Leaf2MiB(_, _)) | None => {
                let f = alloc.alloc_4k().ok_or(MapSizeEnsureChainError::OomPt)?;
                aspace.zero_pt(f);
                pd.set(i2, PdEntry::present_next_with(nonleaf_flags, f));
                f
            }
        })
    }

    fn set_leaf<M: PhysMapper>(
        aspace: &AddressSpace<M>,
        leaf_tbl_page: PhysicalPage<Size4K>,
        va: VirtualAddress,
        pa: PhysicalAddress,
        leaf_flags: VirtualMemoryPageBits,
    ) {
        // require 4 KiB alignment in debug
        debug_assert_eq!(pa.offset::<Self>().as_u64(), 0);
        let pt = aspace.pt_mut(leaf_tbl_page);
        let idx = L1Index::from(va);
        let k4 = PhysicalPage::<Self>::from_addr(pa);

        let entry = PtEntry4k::present_with(leaf_flags, k4);
        pt.set(idx, entry);
    }
}