//! `kernel_registers/efer.rs` — definition of the x86 `EFER` MSR bitfield and
//! its inline-assembly load/store implementations.
1use crate::{LoadRegisterUnsafe, StoreRegisterUnsafe};
2use bitfield_struct::bitfield;
3
/// `IA32_EFER` / EFER (MSR `0xC000_0080`).
///
/// Extended Feature Enable Register used for `SYSCALL`/`SYSRET`, long mode,
/// `NX`, and various AMD extensions.
///
/// Layout is LSB-first (`order = Lsb`); the declared field widths sum to
/// exactly 64 bits, so each field sits at the architectural bit position
/// named in its doc comment.
#[bitfield(u64, order = Lsb)]
#[derive(Eq, PartialEq)]
pub struct Efer {
    /// Bit 0 — SCE: System Call Extensions.
    ///
    /// Enables SYSCALL/SYSRET when set.
    pub sce: bool,

    /// Bit 1 — DPE (AMD K6 only): Data Prefetch Enable.
    pub dpe: bool,

    /// Bit 2 — SEWBED (AMD K6 only): Speculative EWBE# Disable.
    pub sewbed: bool,

    /// Bit 3 — GEWBED (AMD K6 only): Global EWBE# Disable.
    pub gewbed: bool,

    /// Bit 4 — L2D (AMD K6 only): L2 Cache Disable.
    pub l2d: bool,

    /// Bits 5–7 — Reserved (read as zero, must be written as zero).
    #[bits(3)]
    pub reserved0: u8,

    /// Bit 8 — LME: Long Mode Enable.
    ///
    /// Enables IA-32e (long) mode when paging is enabled.
    pub lme: bool,

    /// Bit 9 — Reserved.
    ///
    /// Declared `access = RO` so no setter is generated for this bit.
    #[bits(access = RO)]
    pub reserved1: bool,

    /// Bit 10 — LMA: Long Mode Active (read-only).
    ///
    /// Indicates that the processor is currently in long mode.
    ///
    /// NOTE(review): this field is architecturally read-only but is writable
    /// here, while the reserved bit 9 above carries `access = RO` — confirm
    /// whether the `RO` marker was intended for `lma` instead.
    pub lma: bool,

    /// Bit 11 — NXE: No-Execute Enable.
    ///
    /// Enables the NX bit in page tables.
    pub nxe: bool,

    /// Bit 12 — SVME: Secure Virtual Machine Enable (AMD SVM).
    pub svme: bool,

    /// Bit 13 — LMSLE: Long Mode Segment Limit Enable.
    pub lmsle: bool,

    /// Bit 14 — FFXSR: Fast FXSAVE/FXRSTOR.
    pub ffxsr: bool,

    /// Bit 15 — TCE: Translation Cache Extension.
    pub tce: bool,

    /// Bit 16 — Reserved.
    pub reserved2: bool,

    /// Bit 17 — MCOMMIT: MCOMMIT instruction enable (AMD).
    pub mcommit: bool,

    /// Bit 18 — INTWB: Interruptible WBINVD/WBNOINVD enable (AMD).
    pub intwb: bool,

    /// Bit 19 — Reserved.
    pub reserved3: bool,

    /// Bit 20 — UAIE: Upper Address Ignore Enable.
    pub uaie: bool,

    /// Bit 21 — AIBRSE: Automatic IBRS Enable.
    pub aibrse: bool,

    /// Bits 22–63 — Reserved.
    #[bits(42, access = RO)]
    pub reserved4: u64,
}
85
86impl Efer {
87    /// MSR index for `IA32_EFER` / `EFER`.
88    pub const MSR_EFER: u32 = 0xC000_0080;
89}
90
#[cfg(feature = "asm")]
impl LoadRegisterUnsafe for Efer {
    /// Reads EFER with `rdmsr` and returns it as a typed bitfield.
    ///
    /// # Safety
    ///
    /// The caller must ensure `rdmsr` is executable in the current context
    /// (CPL 0 on an x86 CPU that implements MSR `0xC000_0080`); otherwise
    /// the instruction raises #GP.
    unsafe fn load_unsafe() -> Self {
        // `rdmsr` returns the 64-bit MSR value split across EDX:EAX.
        // The asm `out` operands initialize these locals, so no `mut` is
        // needed (avoids an `unused_mut` warning).
        let (lo, hi): (u32, u32);
        // SAFETY: the caller guarantees `rdmsr` may execute here. The
        // instruction only writes EAX/EDX — it touches no program memory
        // and preserves RFLAGS, matching the declared options.
        unsafe {
            core::arch::asm!(
                "rdmsr",
                in("ecx") Self::MSR_EFER,
                out("eax") lo,
                out("edx") hi,
                options(nomem, preserves_flags)
            );
        }
        // Recombine the two halves; parentheses make the precedence explicit.
        let efer = (u64::from(hi) << 32) | u64::from(lo);
        Self::from_bits(efer)
    }
}
108
#[cfg(feature = "asm")]
impl StoreRegisterUnsafe for Efer {
    /// Writes this value back to EFER with `wrmsr`.
    // The casts deliberately truncate: `wrmsr` takes the value split into
    // 32-bit halves in EDX:EAX.
    #[allow(clippy::cast_possible_truncation)]
    unsafe fn store_unsafe(self) {
        let raw = self.into_bits();
        let (low_half, high_half) = (raw as u32, (raw >> 32) as u32);
        // SAFETY: the caller guarantees `wrmsr` may execute here (CPL 0,
        // MSR implemented). The instruction reads only ECX/EAX/EDX,
        // touches no program memory, and preserves RFLAGS, matching the
        // declared options.
        unsafe {
            core::arch::asm!(
                "wrmsr",
                in("ecx") Self::MSR_EFER,
                in("eax") low_half,
                in("edx") high_half,
                options(nomem, preserves_flags)
            );
        }
    }
}