uefi_loader/memory.rs

#![allow(unsafe_code)]

use crate::elf::PAGE_SIZE;
use core::alloc::{GlobalAlloc, Layout};
use core::ptr;
use core::ptr::NonNull;
use core::ptr::null_mut;
use kernel_vmem::addresses::{PhysicalAddress, VirtualAddress};
use uefi::boot;
use uefi::boot::{AllocateType, MemoryType};

/// A UEFI Boot Services pool allocator backing Rust's global allocator.
///
/// # Notes
/// - Valid only while Boot Services are active (before `ExitBootServices`).
/// - We always over-allocate to satisfy alignment and store the original pointer
///   just before the returned aligned block for correct deallocation.
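///
/// Layout of a single allocation (addresses increase to the right):
///
/// ```text
/// raw -> [ padding (0..align) | header: usize | aligned block (layout.size()) ]
///                               ^ stores `raw`  ^ pointer returned to the caller
/// ```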
pub struct UefiBootAllocator;

#[global_allocator]
static GLOBAL_ALLOC: UefiBootAllocator = UefiBootAllocator;

unsafe impl GlobalAlloc for UefiBootAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // Ensure a minimum size of 1 and include a header for the original
        // pointer plus padding for alignment.
        let align = layout.align().max(size_of::<usize>());
        let size = layout.size().max(1);
        let Some(total) = size
            .checked_add(align)
            .and_then(|v| v.checked_add(size_of::<usize>()))
        else {
            return null_mut();
        };

        // Boot Services must be active; if not, return null to signal OOM.
        // Allocate from the LOADER_DATA pool; alignment is handled manually.
        let Ok(raw) = boot::allocate_pool(MemoryType::LOADER_DATA, total) else {
            return null_mut();
        };

        let raw_ptr = raw.as_ptr();
        let addr = raw_ptr as usize + size_of::<usize>();
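        // Round `addr` up to the next multiple of `align` (a power of two):
        // add `align - 1`, then mask off the low bits.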
        let aligned = (addr + (align - 1)) & !(align - 1);
        let header_ptr = (aligned - size_of::<usize>()) as *mut usize;
        // Store the original allocation pointer just before the aligned region
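        // (`aligned` is a multiple of `align`, which is a power of two at least
        // `size_of::<usize>()`, so `header_ptr` is suitably aligned for this write.)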
        unsafe {
            ptr::write(header_ptr, raw_ptr as usize);
        }
        aligned as *mut u8
    }

    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
        if ptr.is_null() {
            return;
        }
        // Recover the original pool pointer from the header we stored in alloc()
        let header_ptr = (ptr as usize - size_of::<usize>()) as *mut usize;
        let orig_ptr = unsafe { ptr::read(header_ptr) as *mut u8 };
        // SAFETY: `orig_ptr` was returned by `allocate_pool` and stored by us.
        let _ = unsafe { boot::free_pool(NonNull::new_unchecked(orig_ptr)) };
    }

    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        let p = unsafe { self.alloc(layout) };
        if !p.is_null() {
            unsafe { ptr::write_bytes(p, 0, layout.size()) };
        }
        p
    }
}
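
// With `GLOBAL_ALLOC` registered above, heap types from the `alloc` crate can be
// used while Boot Services are still active. A minimal sketch (assumes the crate
// root declares `#![no_std]` and `extern crate alloc`):
//
//     let mut buf: alloc::vec::Vec<u8> = alloc::vec::Vec::with_capacity(64); // backed by allocate_pool
//     buf.extend_from_slice(b"hello");
//
// Allocation and deallocation must both happen before `ExitBootServices`;
// afterwards `allocate_pool`/`free_pool` are no longer available.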

/// Allocate a trampoline stack (optionally with a guard page) and return:
/// - `base_phys`: physical base address of the usable stack (past the guard page,
///   if any; also used as the VA, since we'll identity-map it)
/// - `top_va`: virtual top-of-stack address you'll load into RSP
pub fn alloc_trampoline_stack(
    stack_size_bytes: usize, // e.g. 64 * 1024
    with_guard: bool,
) -> (PhysicalAddress, VirtualAddress) {
    let page_size = usize::try_from(PAGE_SIZE).expect("PAGE_SIZE is too large");
    let pages_for_stack = stack_size_bytes.div_ceil(page_size);
    let guard_pages = usize::from(with_guard);
    let total_pages = pages_for_stack + guard_pages;

    // AllocateAnyPages returns a physical base in `base_phys`
    let base_phys =
        boot::allocate_pages(AllocateType::AnyPages, MemoryType::LOADER_DATA, total_pages)
            .expect("failed to allocate trampoline stack pages");

    // If a guard page was requested, make the **first** page the guard
    // and use the rest as the actual stack.
    let base_phys = base_phys.as_ptr() as u64;
    let stack_base_phys = if with_guard {
        base_phys + page_size as u64 // TODO: Convert to actual pointer arithmetic ops.
    } else {
        base_phys
    };
    let stack_size = pages_for_stack * page_size;
    let mut top = stack_base_phys + stack_size as u64;

    // ABI alignment:
    // Both SysV and Win64 expect RSP % 16 == 8 at function entry (because of a pushed return address).
    // Since we *jmp* (no return address), we emulate that by subtracting 8.
    top -= 8;

    // VA == PA because we'll identity-map this span
    (
        PhysicalAddress::new(stack_base_phys),
        VirtualAddress::new(top),
    )
}
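
// Illustrative call site (a sketch; the mapping and register hand-off live
// elsewhere in the loader and are only hinted at here):
//
//     let (stack_base, stack_top) = alloc_trampoline_stack(64 * 1024, true);
//     // Identity-map the usable stack span so VA == PA holds for it, then load
//     // `stack_top` into RSP and `jmp` to the kernel entry (no `call`, hence the
//     // extra 8-byte adjustment above).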