uefi_loader/elf/parser.rs

extern crate alloc;

use alloc::vec::Vec;
use core::mem::size_of;
use core::ptr::read_unaligned;
use kernel_vmem::addresses::{PhysicalAddress, VirtualAddress};
use uefi::Status;

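/// The ELF64 file header (`Elf64_Ehdr`), matching the System V ABI layout.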
#[repr(C)]
#[derive(Clone, Copy)]
#[allow(clippy::struct_field_names)]
struct Elf64Ehdr {
    e_ident: [u8; 16],
    e_type: u16,
    e_machine: u16,
    e_version: u32,
    e_entry: VirtualAddress,
    e_phoff: u64,
    e_shoff: u64,
    e_flags: u32,
    e_ehsize: u16,
    e_phentsize: u16,
    e_phnum: u16,
    e_shentsize: u16,
    e_shnum: u16,
    e_shstrndx: u16,
}

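/// An ELF64 program header (`Elf64_Phdr`), matching the System V ABI layout.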
#[repr(C)]
#[derive(Clone, Copy)]
#[allow(clippy::struct_field_names)]
struct Elf64Phdr {
    p_type: u32,
    p_flags: PFlags,
    p_offset: u64,
    p_vaddr: VirtualAddress,
    p_paddr: PhysicalAddress,
    p_filesz: u64,
    p_memsz: u64,
    p_align: u64,
}

/// Program header type for a loadable segment (`PT_LOAD`).
const PT_LOAD: u32 = 1;
/// `e_machine` value for AMD x86-64 (`EM_X86_64`).
const EM_X86_64: u16 = 62;

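/// A loadable (`PT_LOAD`) segment, reduced to the fields the loader needs
/// to map it into memory.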
#[derive(Debug, Clone, Copy)]
pub struct LoadSegment {
    pub vaddr: VirtualAddress,
    pub offset: u64,
    pub filesz: u64,
    pub memsz: u64,
    pub flags: PFlags,
    pub align: u64,
}

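/// The result of parsing an ELF64 image: its entry point plus every
/// loadable segment.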
#[derive(Debug)]
pub struct ElfHeader {
    pub entry: VirtualAddress,
    pub segments: Vec<LoadSegment>,
}

impl ElfHeader {
    /// The `0x7F "ELF"` magic that must open every ELF file.
    const EI_MAGIC_BYTES: [u8; 4] = [0x7F, b'E', b'L', b'F'];

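    /// Parses an in-memory ELF64 image and collects its `PT_LOAD` segments.
    ///
    /// Returns `Status::UNSUPPORTED` for anything that is not a little-endian,
    /// current-version ELF64 image for x86-64, or whose program header table
    /// does not fit inside `bytes`.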
    pub fn parse_elf64(bytes: &[u8]) -> Result<Self, Status> {
        // The image must at least contain a complete ELF header.
        if bytes.len() < size_of::<Elf64Ehdr>() {
            return Err(Status::UNSUPPORTED);
        }

        // `bytes` carries no alignment guarantee, so read the header unaligned.
        let ehdr = unsafe { read_unaligned(bytes.as_ptr().cast::<Elf64Ehdr>()) };

        if ehdr.e_ident[0..4] != Self::EI_MAGIC_BYTES {
            return Err(Status::UNSUPPORTED);
        }
        // Accept only ELFCLASS64, little-endian (ELFDATA2LSB), version EV_CURRENT.
        if ehdr.e_ident[4] != 2 || ehdr.e_ident[5] != 1 || ehdr.e_ident[6] != 1 {
            return Err(Status::UNSUPPORTED);
        }

        if ehdr.e_machine != EM_X86_64 {
            return Err(Status::UNSUPPORTED);
        }

        if ehdr.e_phentsize as usize != size_of::<Elf64Phdr>() {
            return Err(Status::UNSUPPORTED);
        }

        let phoff = usize::try_from(ehdr.e_phoff).map_err(|_| Status::UNSUPPORTED)?;
        let phentsize = ehdr.e_phentsize as usize;
        let phnum = ehdr.e_phnum as usize;

        // Reject program header tables that overflow or run past the image.
        let table_size = phentsize.checked_mul(phnum).ok_or(Status::UNSUPPORTED)?;
        let end = phoff.checked_add(table_size).ok_or(Status::UNSUPPORTED)?;
        if end > bytes.len() {
            return Err(Status::UNSUPPORTED);
        }

        let mut segments = Vec::new();

        for i in 0..phnum {
            let off = phoff + i * phentsize;
            let ph = unsafe { read_unaligned(bytes.as_ptr().add(off).cast::<Elf64Phdr>()) };
            // Only PT_LOAD entries describe memory the loader must map.
            if ph.p_type == PT_LOAD {
                segments.push(LoadSegment {
                    vaddr: ph.p_vaddr,
                    offset: ph.p_offset,
                    filesz: ph.p_filesz,
                    memsz: ph.p_memsz,
                    flags: ph.p_flags,
                    align: ph.p_align,
                });
            }
        }

        Ok(Self {
            entry: ehdr.e_entry,
            segments,
        })
    }
}

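/// Segment permission flags (`p_flags`): bit 0 = execute (`PF_X`),
/// bit 1 = write (`PF_W`), bit 2 = read (`PF_R`).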
#[bitfield_struct::bitfield(u32)]
pub struct PFlags {
    #[bits(1)]
    pub execute: bool,
    #[bits(1)]
    pub write: bool,
    #[bits(1)]
    pub read: bool,
    #[bits(29)]
    __: u32,
}