uefi_loader/vmem.rs

//! # Virtual Memory Setup for Kernel Loading (new typed API)
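//!
//! Builds the kernel's initial page tables inside the UEFI loader: page-table
//! frames come from UEFI boot services, kernel PT_LOAD segments are mapped at
//! their ELF virtual addresses, a 1 GiB HHDM window is installed, and the low
//! 2 MiB, trampoline code, trampoline stack, and BootInfo pointer page are
//! identity-mapped so execution can survive the switch to the new CR3.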

use crate::elf::loader::LoadedSegMap;
use kernel_info::memory::{HHDM_BASE /*KERNEL_BASE,*/ /*PHYS_LOAD*/};

use kernel_vmem::{
    AddressSpace, FrameAlloc, PhysMapper, PhysMapperExt,
    addresses::{PhysicalAddress, PhysicalPage, Size1G, Size2M, Size4K, VirtualAddress},
};

use kernel_vmem::VirtualMemoryPageBits;
use kernel_vmem::address_space::AddressSpaceMapOneError;
use kernel_vmem::addresses::PageSize;
use uefi::boot;
use uefi::boot::{AllocateType, MemoryType};

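/// Round `x` up to the next multiple of `a` (`a` must be a power of two).
///
/// For example, `align_up_u64(0x1001, 0x1000)` yields `0x2000`. The sum
/// `x + (a - 1)` is assumed not to overflow `u64`.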
#[inline]
const fn align_up_u64(x: u64, a: u64) -> u64 {
    (x + (a - 1)) & !(a - 1)
}

/// UEFI-backed frame allocator: hands out zeroed 4 KiB frames.
struct BsFrameAlloc;

impl FrameAlloc for BsFrameAlloc {
    fn alloc_4k(&mut self) -> Option<PhysicalPage<Size4K>> {
        let pages = 1usize;
        let mem_type = MemoryType::LOADER_DATA;
        let ptr = boot::allocate_pages(AllocateType::AnyPages, mem_type, pages).ok()?;
        // Zero the frame (UEFI identity-maps physical RAM while the loader runs).
        unsafe {
            core::ptr::write_bytes(ptr.as_ptr(), 0, 4096);
        }
        let pa = PhysicalAddress::from_nonnull(ptr);
        Some(PhysicalPage::<Size4K>::from_addr(pa))
    }

    fn free_4k(&mut self, pa: PhysicalPage<Size4K>) {
        // `NonNull::new` filters out frame 0, which would be a null pointer.
        if let Some(nn) = core::ptr::NonNull::new(pa.base().as_u64() as *mut u8) {
            let _ = unsafe { boot::free_pages(nn, 1) };
        }
    }
}

/// Loader mapper: treat low physical memory frames as directly accessible.
///
/// # Safety
/// Valid only in the UEFI loader context where those frames are mapped.
struct LoaderPhysMapper;

impl PhysMapper for LoaderPhysMapper {
    unsafe fn phys_to_mut<T>(&self, at: PhysicalAddress) -> &mut T {
        unsafe { &mut *(at.as_u64() as *mut T) }
    }
}

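/// Build the kernel's page tables and return the physical address of the PML4
/// root, ready to be loaded into CR3 by the trampoline.
///
/// Maps every `LoadedSegMap` in `kernel_maps` with flags derived from its ELF
/// `PF_*` bits, installs a 1 GiB HHDM at `HHDM_BASE`, and identity-maps the
/// low 2 MiB, the trampoline code and stack ranges, and the page holding the
/// BootInfo pointer.
///
/// # Example
///
/// A minimal sketch of the intended call site (the `segs`, `tramp_*`, and
/// `boot_info_va` values are placeholders the caller is assumed to have
/// prepared while loading the kernel ELF and placing the trampoline):
///
/// ```ignore
/// let pml4_phys = create_kernel_pagetables(
///     &segs,
///     tramp_code_va,
///     tramp_code_len,
///     tramp_stack_phys,
///     tramp_stack_len,
///     boot_info_va,
/// )?;
/// // The trampoline later writes `pml4_phys` into CR3 before jumping to the kernel.
/// ```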
#[allow(
    clippy::too_many_arguments,
    clippy::too_many_lines,
    clippy::similar_names
)]
pub fn create_kernel_pagetables(
    kernel_maps: &[LoadedSegMap],
    tramp_code_va: VirtualAddress,
    tramp_code_len: usize,
    tramp_stack_base_phys: PhysicalAddress,
    tramp_stack_size_bytes: usize,
    boot_info_ptr_va: VirtualAddress,
) -> Result<PhysicalAddress, KernelPageTableError> {
    let mapper = LoaderPhysMapper;
    let mut alloc = BsFrameAlloc;

    // Root PML4
    let pml4_phys = alloc
        .alloc_4k()
        .ok_or(KernelPageTableError::OutOfMemoryPml4)?;
    mapper.zero_pml4(pml4_phys);

    let aspace = AddressSpace::from_root(&mapper, pml4_phys);
    let pml4_phys = aspace.root_page().base();

    // Common flags
    // Non-leaf: present + writable (no NX on non-leaves)
    let nonleaf_flags = VirtualMemoryPageBits::default()
        .with_present(true)
        .with_writable(true);

    // Map each PT_LOAD segment
    for m in kernel_maps {
        let mut cur_va = m.vaddr_page.base(); // VirtualAddress (page-aligned)
        let end_u64 = m
            .vaddr_page
            .base()
            .as_u64()
            .checked_add(m.map_len)
            .ok_or(KernelPageTableError::SegmentLengthOverflow)?;

        while cur_va.as_u64() < end_u64 {
            // Compute PA = phys_page.base + (cur_va - vaddr_page.base)
            let off = cur_va.as_u64() - m.vaddr_page.base().as_u64();
            let cur_pa = PhysicalAddress::new(m.phys_page.base().as_u64() + off);

            // Leaf flags from ELF PF_*:
            // start with present + global; add writable if PF_W; add NX if !PF_X
            let leaf_flags = VirtualMemoryPageBits::default()
                .with_present(true)
                .with_global(true)
                .with_writable(m.flags.write())
                .with_no_execute(!m.flags.execute());

            // Try 2 MiB leaf where legal
            let remaining = end_u64 - cur_va.as_u64();
            let can_2m = (cur_va.as_u64() & (Size2M::SIZE - 1) == 0)
                && (cur_pa.as_u64() & (Size2M::SIZE - 1) == 0)
                && remaining >= Size2M::SIZE;

            if can_2m {
                aspace.map_one::<_, Size2M>(
                    &mut alloc,
                    cur_va,
                    cur_pa,
                    nonleaf_flags,
                    leaf_flags,
                )?;
                cur_va = VirtualAddress::new(cur_va.as_u64() + Size2M::SIZE);
            } else {
                aspace.map_one::<_, Size4K>(
                    &mut alloc,
                    cur_va,
                    cur_pa,
                    nonleaf_flags,
                    leaf_flags,
                )?;
                cur_va = VirtualAddress::new(cur_va.as_u64() + Size4K::SIZE);
            }
        }
    }

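    // The higher-half direct map lets the kernel reach any physical address `p`
    // in the first GiB at virtual address `HHDM_BASE + p`.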
    // HHDM: map first 1 GiB VA = HHDM_BASE → PA = 0, NX + writable + global
    {
        let hhdm_va = VirtualAddress::new(HHDM_BASE);
        let zero_pa = PhysicalAddress::new(0);
        let leaf = VirtualMemoryPageBits::default()
            .with_present(true)
            .with_writable(true)
            .with_global(true)
            .with_no_execute(true);
        aspace.map_one::<_, Size1G>(&mut alloc, hhdm_va, zero_pa, nonleaf_flags, leaf)?;
    }

    // Identity map first 2 MiB of low VA so the trampoline keeps executing after mov cr3.
    // Executable (i.e., NX not set), global, writable.
    {
        let va0 = VirtualAddress::new(0);
        let pa0 = PhysicalAddress::new(0);
        let leaf = VirtualMemoryPageBits::default()
            .with_present(true)
            .with_writable(true)
            .with_global(true);
        aspace.map_one::<_, Size2M>(&mut alloc, va0, pa0, nonleaf_flags, leaf)?;
    }

    // Identity map the trampoline stack (4 KiB pages, NX)
    {
        let start = tramp_stack_base_phys.as_u64() & !(Size4K::SIZE - 1);
        let end = align_up_u64(
            tramp_stack_base_phys
                .as_u64()
                .checked_add(tramp_stack_size_bytes as u64)
                .ok_or(KernelPageTableError::TrampolineStackRangeOverflow)?,
            Size4K::SIZE,
        );
        let leaf = VirtualMemoryPageBits::default()
            .with_present(true)
            .with_writable(true)
            .with_global(true)
            .with_no_execute(true);

        let mut pa = start;
        while pa < end {
            let va = VirtualAddress::new(pa); // identity
            let phys = PhysicalAddress::new(pa);
            aspace.map_one::<_, Size4K>(&mut alloc, va, phys, nonleaf_flags, leaf)?;
            pa += Size4K::SIZE;
        }
    }

    // Identity map the trampoline code (4 KiB pages, executable, read-only)
    {
        let start = tramp_code_va.page::<Size4K>().base().as_u64();
        let end = align_up_u64(
            tramp_code_va
                .as_u64()
                .checked_add(tramp_code_len as u64)
                .ok_or(KernelPageTableError::TrampolineCodeRangeOverflow)?,
            Size4K::SIZE,
        );
        let leaf = VirtualMemoryPageBits::default()
            .with_present(true)
            .with_global(true)
            .with_no_execute(false) // executable (no NX)
            .with_writable(false);

        let mut addr = start;
        while addr < end {
            let va = VirtualAddress::new(addr);
            let pa = PhysicalAddress::new(addr); // identity
            aspace.map_one::<_, Size4K>(&mut alloc, va, pa, nonleaf_flags, leaf)?;
            addr += Size4K::SIZE;
        }
    }

    // Identity map just the BootInfo pointer page (4 KiB, NX)
    {
        let bi_page = boot_info_ptr_va.page::<Size4K>().base();
        let leaf = VirtualMemoryPageBits::default()
            .with_present(true)
            .with_writable(true)
            .with_global(true)
            .with_no_execute(true);
        aspace.map_one::<_, Size4K>(
            &mut alloc,
            bi_page,
            PhysicalAddress::new(bi_page.as_u64()),
            nonleaf_flags,
            leaf,
        )?;
    }

    Ok(pml4_phys)
}

/// Errors that can occur while building the kernel page tables.
#[derive(Debug, thiserror::Error)]
pub enum KernelPageTableError {
    /// Failed to allocate the frame backing the PML4 root.
    #[error("out of memory allocating the PML4 root")]
    OutOfMemoryPml4,
    /// Address arithmetic overflow while computing a PT_LOAD segment's end address.
    #[error("PT_LOAD segment length overflow")]
    SegmentLengthOverflow,
    /// Address arithmetic overflow while mapping the trampoline stack memory range.
    #[error("trampoline stack range overflow")]
    TrampolineStackRangeOverflow,
    /// Address arithmetic overflow while mapping the trampoline code memory range.
    #[error("trampoline code range overflow")]
    TrampolineCodeRangeOverflow,
    /// Mapping a page failed, typically because a page-table frame could not be allocated.
    #[error(transparent)]
    OutOfMemoryPageTable(#[from] AddressSpaceMapOneError),
}
247}