kernel_vmem/
addresses.rs

//! # Virtual and Physical Memory Address Types
//!
//! Strongly typed wrappers for raw memory addresses and page bases used in
//! paging and memory management code.
//!
//! ## Overview
//!
//! This module defines a minimal set of types that prevent mixing virtual and
//! physical addresses at compile time while remaining zero-cost wrappers around
//! `u64` values.
//!
//! The core idea is to build all higher-level memory abstractions from a few
//! principal types:
//!
//! | Concept | Generic | Description |
//! |----------|----------|-------------|
//! | [`MemoryAddress`] | – | A raw 64-bit address, either physical or virtual. |
//! | [`MemoryPage<S>`] | [`S: PageSize`](PageSize) | A page-aligned base address of a page of size `S`. |
//! | [`MemoryAddressOffset<S>`] | [`S: PageSize`](PageSize) | An offset within a page of size `S`. |
//!
//! These are then wrapped to distinguish between virtual and physical spaces:
//!
//! | Wrapper | Meaning |
//! |----------|----------|
//! | [`VirtualAddress`] / [`VirtualPage<S>`] | Refer to virtual (page-table translated) memory. |
//! | [`PhysicalAddress`] / [`PhysicalPage<S>`] | Refer to physical memory or MMIO regions. |
//!
//! ## Page Sizes
//!
//! Three standard x86-64 page sizes are supported out of the box via marker
//! types that implement [`PageSize`]:
//!
//! - [`Size4K`] — 4 KiB pages (base granularity)
//! - [`Size2M`] — 2 MiB huge pages
//! - [`Size1G`] — 1 GiB giant pages
//!
//! The [`PageSize`] trait defines constants [`SIZE`](PageSize::SIZE) and
//! [`SHIFT`](PageSize::SHIFT) used throughout the helpers.
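//!
//! For every marker, `SIZE == 1 << SHIFT` holds:
//!
//! ```rust
//! # use kernel_vmem::addresses::*;
//! assert_eq!(Size4K::SIZE, 1 << Size4K::SHIFT);
//! assert_eq!(Size2M::SIZE, 1 << Size2M::SHIFT);
//! assert_eq!(Size1G::SIZE, 1 << Size1G::SHIFT);
//! ```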
//!
//! ## Typical Usage
//!
//! ```rust
//! # use kernel_vmem::addresses::*;
//! // Create a virtual address
//! let va = VirtualAddress::new(0xFFFF_FFFF_8000_1234);
//!
//! // Split it into a page base and an in-page offset
//! let (page, off) = va.split::<Size4K>();
//! assert_eq!(page.base().as_u64() & (Size4K::SIZE - 1), 0);
//!
//! // Join them back to the same address
//! assert_eq!(page.join(off).as_u64(), va.as_u64());
//!
//! // Do the same for physical addresses
//! let pa = PhysicalAddress::new(0x0000_0010_2000_0042);
//! let (pp, po) = pa.split::<Size4K>();
//! assert_eq!(pp.join(po).as_u64(), pa.as_u64());
//! ```
//!
//! ## Design Notes
//!
//! - The types are `#[repr(transparent)]` and implement `Copy`, `Eq`, `Ord`, and
//!   `Hash`, making them suitable as map keys or for FFI use.
//! - The alignment and offset helpers are `const fn` and zero-cost in release
//!   builds.
//! - The phantom marker `S` selects the page size at the type level rather than
//!   passing sizes as runtime values, so every conversion between page
//!   granularities is explicit.
//!
//! This forms the foundation for paging, virtual memory mapping, and kernel
//! address-space management code.

use core::fmt;
use core::hash::Hash;
use core::marker::PhantomData;
use core::ops::{Add, AddAssign};
use core::ptr::NonNull;

/// Sealed trait pattern to restrict `PageSize` impls to our markers.
mod sealed {
    pub trait Sealed {}
}

/// Marker trait for supported page sizes.
pub trait PageSize:
    sealed::Sealed + Clone + Copy + Eq + PartialEq + Ord + PartialOrd + Hash
{
    /// Page size in bytes (power of two).
    const SIZE: u64;
    /// log2(SIZE), i.e., number of low bits used for the offset.
    const SHIFT: u32;
}

/// 4 KiB page (4096 bytes).
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Size4K;
impl sealed::Sealed for Size4K {}
impl PageSize for Size4K {
    const SIZE: u64 = 4096;
    const SHIFT: u32 = 12;
}

/// 2 MiB page (`2_097_152` bytes).
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Size2M;
impl sealed::Sealed for Size2M {}
impl PageSize for Size2M {
    const SIZE: u64 = 2 * 1024 * 1024;
    const SHIFT: u32 = 21;
}

/// 1 GiB page (`1_073_741_824` bytes).
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Size1G;
impl sealed::Sealed for Size1G {}
impl PageSize for Size1G {
    const SIZE: u64 = 1024 * 1024 * 1024;
    const SHIFT: u32 = 30;
}

/// Principal raw memory address ([virtual](VirtualAddress) or [physical](PhysicalAddress)).
#[repr(transparent)]
#[derive(Copy, Clone, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct MemoryAddress(u64);

impl MemoryAddress {
    #[inline]
    #[must_use]
    pub fn from_nonnull<T>(ptr: NonNull<T>) -> Self {
        Self::from_ptr(ptr.as_ptr())
    }

    #[inline]
    #[must_use]
    pub fn from_ptr<T>(ptr: *const T) -> Self {
        Self::new(ptr as u64)
    }

    #[inline]
    #[must_use]
    pub const fn new(value: u64) -> Self {
        Self(value)
    }

    #[inline]
    #[must_use]
    pub const fn as_u64(self) -> u64 {
        self.0
    }

    /// The page for size `S` that contains this address (lower bits zeroed).
    #[inline]
    #[must_use]
    pub const fn page<S: PageSize>(self) -> MemoryPage<S> {
        let value = self.align_down::<S>().0;
        MemoryPage {
            value,
            _phantom: PhantomData,
        }
    }

    /// The offset within the page of size `S` that contains this address.
    #[inline]
    #[must_use]
    pub const fn offset<S: PageSize>(self) -> MemoryAddressOffset<S> {
        let value = self.0 & (S::SIZE - 1);
        MemoryAddressOffset {
            value,
            _phantom: PhantomData,
        }
    }

    /// Split into (`MemoryPage<S>`, `MemoryAddressOffset<S>`).
    #[inline]
    #[must_use]
    pub const fn split<S: PageSize>(self) -> (MemoryPage<S>, MemoryAddressOffset<S>) {
        (self.page::<S>(), self.offset::<S>())
    }

    /// Align down to page boundary `S`.
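    ///
    /// For example, clearing the low `S::SHIFT` bits of an arbitrary address:
    ///
    /// ```rust
    /// # use kernel_vmem::addresses::*;
    /// let a = MemoryAddress::new(0x12345);
    /// assert_eq!(a.align_down::<Size4K>().as_u64(), 0x12000);
    /// ```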
    #[inline]
    #[must_use]
    pub const fn align_down<S: PageSize>(self) -> Self {
        Self(self.0 & !(S::SIZE - 1))
    }
}

impl fmt::Debug for MemoryAddress {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // 0x-prefixed, zero-padded 16 hex digits
        write!(f, "MemoryAddress(0x{:016X})", self.0)
    }
}

impl fmt::Display for MemoryAddress {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "0x{:016X}", self.as_u64())
    }
}

impl Add<u64> for MemoryAddress {
    type Output = Self;
    #[inline]
    fn add(self, rhs: u64) -> Self::Output {
        Self(self.0 + rhs)
    }
}

impl AddAssign<u64> for MemoryAddress {
    #[inline]
    fn add_assign(&mut self, rhs: u64) {
        self.0 += rhs;
    }
}

/// A page base address (lower `S::SHIFT` bits are zero).
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct MemoryPage<S: PageSize> {
    value: u64,
    _phantom: PhantomData<S>,
}

impl<S: PageSize> MemoryPage<S> {
    /// Create from a raw value, aligning down to the page boundary.
    #[inline]
    #[must_use]
    pub const fn from_addr(addr: MemoryAddress) -> Self {
        let value = addr.as_u64() & !(S::SIZE - 1);
        Self {
            value,
            _phantom: PhantomData,
        }
    }

    /// Create from a raw value that must already be aligned.
    /// Panics in debug if unaligned (no runtime cost in release).
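    ///
    /// A sketch of intended use with an already-aligned value:
    ///
    /// ```rust
    /// # use kernel_vmem::addresses::*;
    /// let p = MemoryPage::<Size4K>::new_aligned(MemoryAddress::new(0x2000));
    /// assert_eq!(p.base().as_u64(), 0x2000);
    /// ```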
    #[inline]
    #[must_use]
    pub fn new_aligned(addr: MemoryAddress) -> Self {
        debug_assert_eq!(addr.as_u64() & (S::SIZE - 1), 0, "unaligned page address");
        let value = addr.as_u64();
        Self {
            value,
            _phantom: PhantomData,
        }
    }

    /// Return the base as `MemoryAddress`.
    #[inline]
    #[must_use]
    pub const fn base(self) -> MemoryAddress {
        MemoryAddress::new(self.value)
    }

    /// Combine with an offset to form a full address.
    #[inline]
    #[must_use]
    pub const fn join(self, off: MemoryAddressOffset<S>) -> MemoryAddress {
        MemoryAddress::new(self.value + off.as_u64())
    }

    /// Checked add of an offset, returning `None` on overflow.
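    ///
    /// With an aligned base and an in-page offset the sum cannot exceed
    /// `u64::MAX`, so `None` only arises for values constructed outside those
    /// invariants (e.g. an unaligned base slipping through `new_aligned` in a
    /// release build):
    ///
    /// ```rust
    /// # use kernel_vmem::addresses::*;
    /// let p = MemoryPage::<Size4K>::from_addr(MemoryAddress::new(u64::MAX));
    /// let o = MemoryAddressOffset::<Size4K>::new(0xFFF);
    /// assert_eq!(p.checked_join(o).map(|a| a.as_u64()), Some(u64::MAX));
    /// ```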
    #[inline]
    #[must_use]
    pub const fn checked_join(self, off: MemoryAddressOffset<S>) -> Option<MemoryAddress> {
        match self.value.checked_add(off.as_u64()) {
            Some(v) => Some(MemoryAddress::new(v)),
            None => None,
        }
    }
}

impl<S: PageSize> fmt::Debug for MemoryPage<S> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "MemoryPage<{}>(0x{:016X})",
            core::any::type_name::<S>(),
            self.value
        )
    }
}

/// The offset within a page of size `S` (`0..S::SIZE`).
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct MemoryAddressOffset<S: PageSize> {
    value: u64,
    _phantom: PhantomData<S>,
}

impl<S: PageSize> MemoryAddressOffset<S> {
    /// Create from a raw value, asserting it is < `S::SIZE` in debug.
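    ///
    /// In release builds the value is masked into range rather than checked:
    ///
    /// ```rust
    /// # use kernel_vmem::addresses::*;
    /// let off = MemoryAddressOffset::<Size4K>::new(0x345);
    /// assert_eq!(off.as_u64(), 0x345);
    /// ```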
    #[inline]
    #[must_use]
    pub fn new(value: u64) -> Self {
        debug_assert!(value < S::SIZE, "offset must be < page size");
        let value = value & (S::SIZE - 1);
        Self {
            value,
            _phantom: PhantomData,
        }
    }

    /// Construct from a full address's offset bits.
    #[inline]
    #[must_use]
    pub const fn from_addr(addr: MemoryAddress) -> Self {
        let value = addr.as_u64() & (S::SIZE - 1);
        Self {
            value,
            _phantom: PhantomData,
        }
    }

    #[inline]
    #[must_use]
    pub const fn as_u64(self) -> u64 {
        self.value
    }
}

impl<S: PageSize> fmt::Debug for MemoryAddressOffset<S> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "Offset<{}>({:#X})",
            core::any::type_name::<S>(),
            self.value
        )
    }
}

impl<S: PageSize> Add<MemoryAddressOffset<S>> for MemoryPage<S> {
    type Output = MemoryAddress;
    #[inline]
    fn add(self, rhs: MemoryAddressOffset<S>) -> Self::Output {
        self.join(rhs)
    }
}

impl<S: PageSize> From<MemoryAddress> for MemoryPage<S> {
    #[inline]
    fn from(addr: MemoryAddress) -> Self {
        Self::from_addr(addr)
    }
}

impl<S: PageSize> From<MemoryAddress> for MemoryAddressOffset<S> {
    #[inline]
    fn from(addr: MemoryAddress) -> Self {
        Self::from_addr(addr)
    }
}

/// Virtual memory address.
///
/// A thin wrapper around [`MemoryAddress`] that denotes **virtual** addresses.
/// It does not validate canonicality at runtime; it only carries the *kind* of
/// address at the type level so you don't accidentally mix virtual and physical
/// values.
///
/// ### Semantics
/// - Use [`VirtualAddress::page`] / [`VirtualAddress::offset`] / [`VirtualAddress::split`]
///   to derive the page base and the in-page offset for a concrete [`PageSize`].
/// - Combine a [`VirtualPage<S>`] and a [`MemoryAddressOffset<S>`] with
///   [`VirtualPage::join`] to reconstruct a `VirtualAddress`.
///
/// ### Invariants
/// - No invariant beyond “this is intended to be a virtual address”.
/// - Alignment is only guaranteed for values returned from `page::<S>()`.
///
/// ### Examples
/// ```rust
/// # use kernel_vmem::addresses::*;
/// let va = VirtualAddress::new(0xFFFF_FFFF_8000_1234);
/// let (vp, off) = va.split::<Size4K>();
/// assert_eq!(vp.base().as_u64() & (Size4K::SIZE - 1), 0);
/// assert_eq!(vp.join(off).as_u64(), va.as_u64());
/// ```
#[repr(transparent)]
#[derive(Copy, Clone, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct VirtualAddress(MemoryAddress);

impl VirtualAddress {
    #[inline]
    #[must_use]
    pub fn from_nonnull<T>(ptr: NonNull<T>) -> Self {
        Self::from_ptr(ptr.as_ptr())
    }

    #[inline]
    #[must_use]
    pub fn from_ptr<T>(ptr: *const T) -> Self {
        Self(MemoryAddress::from_ptr(ptr))
    }

    #[inline]
    #[must_use]
    pub const fn new(v: u64) -> Self {
        Self(MemoryAddress::new(v))
    }

    #[inline]
    #[must_use]
    pub const fn as_u64(self) -> u64 {
        self.0.as_u64()
    }

    #[inline]
    #[must_use]
    pub const fn page<S: PageSize>(self) -> VirtualPage<S> {
        VirtualPage::<S>(self.0.page::<S>())
    }

    #[inline]
    #[must_use]
    pub const fn offset<S: PageSize>(self) -> MemoryAddressOffset<S> {
        self.0.offset::<S>()
    }

    #[inline]
    #[must_use]
    pub const fn split<S: PageSize>(self) -> (VirtualPage<S>, MemoryAddressOffset<S>) {
        (self.page::<S>(), self.offset::<S>())
    }
}

impl fmt::Debug for VirtualAddress {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "VirtualAddress(0x{:016X})", self.as_u64())
    }
}

impl fmt::Display for VirtualAddress {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "0x{:016X}", self.as_u64())
    }
}

/// Physical memory address.
///
/// A thin wrapper around [`MemoryAddress`] that denotes **physical** addresses
/// (host RAM / MMIO). Like [`VirtualAddress`], this type carries intent and
/// prevents accidental VA↔PA mix-ups.
///
/// ### Semantics
/// - Use [`PhysicalAddress::page`] / [`PhysicalAddress::offset`] / [`PhysicalAddress::split`]
///   to derive the page base and in-page offset for a concrete [`PageSize`].
/// - Combine a [`PhysicalPage<S>`] with a [`MemoryAddressOffset<S>`] using
///   [`PhysicalPage::join`] to reconstruct the original `PhysicalAddress`.
///
/// ### Notes
/// - Page-table entries often store a **page-aligned** physical base (low
///   `S::SHIFT` bits cleared) plus per-entry flag bits; use `split::<S>()` to
///   reason about base vs. offset explicitly.
///
/// ### Examples
/// ```rust
/// # use kernel_vmem::addresses::*;
/// let pa = PhysicalAddress::new(0x0000_0010_2000_0042);
/// let (pp, off) = pa.split::<Size4K>();
/// assert_eq!(pp.base().as_u64() & (Size4K::SIZE - 1), 0);
/// assert_eq!(pp.join(off).as_u64(), pa.as_u64());
/// ```
#[repr(transparent)]
#[derive(Copy, Clone, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct PhysicalAddress(MemoryAddress);

impl PhysicalAddress {
    #[inline]
    #[must_use]
    pub fn from_nonnull<T>(ptr: NonNull<T>) -> Self {
        Self::from_ptr(ptr.as_ptr())
    }

    #[inline]
    #[must_use]
    pub fn from_ptr<T>(ptr: *const T) -> Self {
        Self(MemoryAddress::from_ptr(ptr))
    }

    #[inline]
    #[must_use]
    pub const fn new(v: u64) -> Self {
        Self(MemoryAddress::new(v))
    }

    #[inline]
    #[must_use]
    pub const fn as_u64(self) -> u64 {
        self.0.as_u64()
    }

    #[inline]
    #[must_use]
    pub const fn page<S: PageSize>(self) -> PhysicalPage<S> {
        PhysicalPage::<S>(self.0.page::<S>())
    }

    #[inline]
    #[must_use]
    pub const fn offset<S: PageSize>(self) -> MemoryAddressOffset<S> {
        self.0.offset::<S>()
    }

    #[inline]
    #[must_use]
    pub const fn split<S: PageSize>(self) -> (PhysicalPage<S>, MemoryAddressOffset<S>) {
        (self.page::<S>(), self.offset::<S>())
    }
}

impl fmt::Debug for PhysicalAddress {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "PhysicalAddress(0x{:016X})", self.as_u64())
    }
}

impl fmt::Display for PhysicalAddress {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "0x{:016X}", self.as_u64())
    }
}

/// Virtual memory page base for size `S`.
///
/// A `VirtualPage<S>` represents the **page-aligned base** of a virtual page of
/// size `S` (`S::SIZE` bytes). It is a thin wrapper over [`MemoryPage<S>`] with
/// virtual-address intent.
///
/// ### Semantics
/// - `base()` returns the page base as a [`VirtualAddress`].
/// - `join(off)` combines this base with a [`MemoryAddressOffset<S>`] to form a
///   full [`VirtualAddress`].
///
/// ### Invariants
/// - The low `S::SHIFT` bits of the base are always zero (page aligned).
///
/// ### Examples
/// ```rust
/// # use kernel_vmem::addresses::*;
/// let va = VirtualAddress::new(0xFFFF_FFFF_8000_1234);
/// let vp = va.page::<Size4K>();
/// assert_eq!(vp.base().as_u64() & (Size4K::SIZE - 1), 0);
/// let va2 = vp.join(va.offset::<Size4K>());
/// assert_eq!(va2.as_u64(), va.as_u64());
/// ```
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct VirtualPage<S: PageSize>(MemoryPage<S>);

impl<S: PageSize> VirtualPage<S> {
    #[inline]
    #[must_use]
    pub const fn from_page(p: MemoryPage<S>) -> Self {
        Self(p)
    }

    #[inline]
    #[must_use]
    pub const fn base(self) -> VirtualAddress {
        VirtualAddress(self.0.base())
    }

    #[inline]
    #[must_use]
    pub const fn join(self, off: MemoryAddressOffset<S>) -> VirtualAddress {
        VirtualAddress(self.0.join(off))
    }
}

impl<S: PageSize> fmt::Debug for VirtualPage<S> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "VirtualPage<{}>({:#018X})",
            core::any::type_name::<S>(),
            self.0.base().as_u64()
        )
    }
}

/// Physical memory page base for size `S`.
///
/// A `PhysicalPage<S>` represents the **page-aligned base** of a physical page
/// of size `S` (`S::SIZE` bytes). It is a thin wrapper over [`MemoryPage<S>`]
/// with physical-address intent.
///
/// ### Semantics
/// - `base()` returns the page base as a [`PhysicalAddress`].
/// - `join(off)` combines this base with a [`MemoryAddressOffset<S>`] to form a
///   full [`PhysicalAddress`].
///
/// ### Invariants
/// - The low `S::SHIFT` bits of the base are always zero (page aligned).
///
/// ### Examples
/// ```rust
/// # use kernel_vmem::addresses::*;
/// let pa = PhysicalAddress::new(0x0000_0008_1234_5678);
/// let pp = pa.page::<Size2M>();
/// assert_eq!(pp.base().as_u64() & (Size2M::SIZE - 1), 0);
/// let pa2 = pp.join(pa.offset::<Size2M>());
/// assert_eq!(pa2.as_u64(), pa.as_u64());
/// ```
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct PhysicalPage<S: PageSize>(MemoryPage<S>);

impl<S: PageSize> PhysicalPage<S> {
    #[inline]
    #[must_use]
    pub const fn from_addr(p: PhysicalAddress) -> Self {
        Self::from_page(MemoryPage::from_addr(p.0))
    }

    #[inline]
    #[must_use]
    pub const fn from_page(p: MemoryPage<S>) -> Self {
        Self(p)
    }

    #[inline]
    #[must_use]
    pub const fn base(self) -> PhysicalAddress {
        PhysicalAddress(self.0.base())
    }

    #[inline]
    #[must_use]
    pub const fn join(self, off: MemoryAddressOffset<S>) -> PhysicalAddress {
        PhysicalAddress(self.0.join(off))
    }
}

impl<S: PageSize> fmt::Debug for PhysicalPage<S> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "PhysicalPage<{}>({:#018X})",
            core::any::type_name::<S>(),
            self.0.base().as_u64()
        )
    }
}

impl From<u64> for MemoryAddress {
    #[inline]
    fn from(v: u64) -> Self {
        Self::new(v)
    }
}

impl From<MemoryAddress> for u64 {
    #[inline]
    fn from(a: MemoryAddress) -> Self {
        a.as_u64()
    }
}

impl<S> From<MemoryPage<S>> for MemoryAddress
where
    S: PageSize,
{
    fn from(value: MemoryPage<S>) -> Self {
        Self(value.value)
    }
}

impl From<u64> for VirtualAddress {
    #[inline]
    fn from(v: u64) -> Self {
        Self::new(v)
    }
}

impl<S> From<VirtualPage<S>> for VirtualAddress
where
    S: PageSize,
{
    fn from(value: VirtualPage<S>) -> Self {
        value.base()
    }
}

impl From<u64> for PhysicalAddress {
    #[inline]
    fn from(v: u64) -> Self {
        Self::new(v)
    }
}

impl<S> From<PhysicalPage<S>> for PhysicalAddress
where
    S: PageSize,
{
    fn from(value: PhysicalPage<S>) -> Self {
        value.base()
    }
}

impl<S> From<MemoryPage<S>> for VirtualPage<S>
where
    S: PageSize,
{
    #[inline]
    fn from(p: MemoryPage<S>) -> Self {
        Self(p)
    }
}

impl<S> From<MemoryPage<S>> for PhysicalPage<S>
where
    S: PageSize,
{
    #[inline]
    fn from(p: MemoryPage<S>) -> Self {
        Self(p)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn split_and_join_4k() {
        let a = MemoryAddress::new(0x1234_5678_9ABC_DEF0);
        let (p, o) = a.split::<Size4K>();
        assert_eq!(p.base().as_u64() & 0xFFF, 0);
        assert_eq!(o.as_u64(), a.as_u64() & 0xFFF);
        assert_eq!(p.join(o).as_u64(), a.as_u64());
    }

    #[test]
    fn split_and_join_2m() {
        let a = MemoryAddress::new(0x0000_0008_1234_5678);
        let (p, o) = a.split::<Size2M>();
        assert_eq!(p.base().as_u64() & (Size2M::SIZE - 1), 0);
        assert_eq!(o.as_u64(), a.as_u64() & (Size2M::SIZE - 1));
        assert_eq!(p.join(o).as_u64(), a.as_u64());
    }

    #[test]
    fn split_and_join_1g() {
        let a = MemoryAddress::new(0x0000_0004_1234_5678);
        let (p, o) = a.split::<Size1G>();
        assert_eq!(p.base().as_u64() & (Size1G::SIZE - 1), 0);
        assert_eq!(o.as_u64(), a.as_u64() & (Size1G::SIZE - 1));
        assert_eq!(p.join(o).as_u64(), a.as_u64());
    }

    #[test]
    fn virtual_vs_physical_wrappers() {
        let va = VirtualAddress::new(0xFFFF_FFFF_8000_1234);
        let (vp, vo) = va.split::<Size4K>();
        assert_eq!(vp.base().as_u64() & 0xFFF, 0);
        assert_eq!(vo.as_u64(), 0x1234 & 0xFFF);
        assert_eq!(vp.join(vo).as_u64(), va.as_u64());

        let pa = PhysicalAddress::new(0x0000_0010_2000_0042);
        let (pp, po) = pa.split::<Size4K>();
        assert_eq!(pp.base().as_u64() & 0xFFF, 0);
        assert_eq!(po.as_u64(), 0x42);
        assert_eq!(pp.join(po).as_u64(), pa.as_u64());
    }

    #[test]
    fn alignment_helpers() {
        let a = MemoryAddress::new(0x12345);
        assert_eq!(a.align_down::<Size4K>().as_u64(), 0x12000);
        assert_eq!(a.page::<Size4K>().base().as_u64(), 0x12000);
        assert_eq!(a.offset::<Size4K>().as_u64(), 0x345);
    }
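
    // With an aligned base and an in-page offset the sum cannot overflow,
    // even at the very top of the 64-bit address space, so `checked_join`
    // succeeds here.
    #[test]
    fn checked_join_at_top_of_address_space() {
        let p = MemoryPage::<Size4K>::from_addr(MemoryAddress::new(u64::MAX));
        let o = MemoryAddressOffset::<Size4K>::new(Size4K::SIZE - 1);
        assert_eq!(p.checked_join(o).map(|a| a.as_u64()), Some(u64::MAX));
    }

    // Round-trip the `From` conversions between raw values, pages, and
    // addresses defined at the bottom of this module.
    #[test]
    fn from_conversions_round_trip() {
        let a: MemoryAddress = 0x3000u64.into();
        let p: MemoryPage<Size4K> = a.into();
        assert_eq!(u64::from(MemoryAddress::from(p)), 0x3000);

        let vp: VirtualPage<Size4K> = p.into();
        assert_eq!(VirtualAddress::from(vp).as_u64(), 0x3000);

        let pp: PhysicalPage<Size4K> = p.into();
        assert_eq!(PhysicalAddress::from(pp).as_u64(), 0x3000);
    }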
775    }
776}