kernel_sync/spin_lock.rs
//! # Spin Lock

use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicBool, Ordering};

/// A tiny spinlock for short critical sections.
///
/// This lock is suitable for **uniprocessor** systems or early boot stages where:
/// - Preemption is either disabled or non-existent.
/// - Critical sections are very short (no I/O, no blocking).
///
/// # Guarantees
/// - Provides mutual exclusion for access to the protected value.
/// - `Sync` is implemented when `T: Send`, allowing shared references across
///   threads (the lock makes its interior mutability safe to share).
///
/// # Caveats
/// - Does **not** disable interrupts; acquiring it from an interrupt handler
///   that preempted the current holder will deadlock.
/// - Busy-waits with `spin_loop`, so keep critical sections small.
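///
/// # Example
///
/// A minimal usage sketch (illustrative only; the `kernel_sync::spin_lock`
/// module path is assumed from this file's location):
///
/// ```ignore
/// use kernel_sync::spin_lock::SpinLock;
///
/// // A shared counter protected by the lock.
/// static COUNTER: SpinLock<u32> = SpinLock::new(0);
///
/// // Increment under the lock and read the new value back.
/// let value = COUNTER.with_lock(|c| {
///     *c += 1;
///     *c
/// });
/// assert_eq!(value, 1);
/// ```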
pub struct SpinLock<T> {
    /// Lock state (`false` = unlocked, `true` = locked).
    locked: AtomicBool,
    /// The protected value.
    inner: UnsafeCell<T>,
}

// SAFETY: `SpinLock` provides mutual exclusion, so it can be shared across
// threads as long as `T` is `Send`.
unsafe impl<T: Send> Sync for SpinLock<T> {}

impl<T> SpinLock<T> {
    /// Create a new spinlock wrapping `inner`.
    pub const fn new(inner: T) -> Self {
        Self {
            locked: AtomicBool::new(false),
            inner: UnsafeCell::new(inner),
        }
    }

    /// Execute `f` with exclusive access to the inner value.
    ///
    /// Spins until the lock is acquired, then releases it after `f` returns.
    ///
    /// # Panics
    /// Never panics by itself. If `f` panics and unwinds, the lock is **not**
    /// released, so any subsequent acquisition attempt will spin forever.
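    ///
    /// # Example
    ///
    /// A small illustrative sketch (hypothetical values):
    ///
    /// ```ignore
    /// let lock = SpinLock::new(0u64);
    /// let doubled = lock.with_lock(|n| {
    ///     *n += 21; // mutate under the lock
    ///     *n * 2    // return a value computed in the critical section
    /// });
    /// assert_eq!(doubled, 42);
    /// ```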
    pub fn with_lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        // Spin until we acquire the lock.
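        // `Acquire` on success pairs with the `Release` store in the unlock
        // path below, so writes by the previous holder are visible here.
        // `Relaxed` is enough on failure: a failed CAS publishes nothing.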
        while self
            .locked
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }

        // SAFETY: We have exclusive access while the lock is held.
        let res = {
            let inner = unsafe { &mut *self.inner.get() };
            f(inner)
        };
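        // `Release` pairs with the acquiring CAS above: the next holder is
        // guaranteed to observe all writes performed inside `f`.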
        self.locked.store(false, Ordering::Release);
        res
    }
}
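
// A minimal smoke-test sketch, assuming this crate can also be built with
// `std` for host-side tests (e.g. via `#![cfg_attr(not(test), no_std)]`,
// which is not guaranteed here). Thread and iteration counts are arbitrary
// illustrative values.
#[cfg(test)]
mod tests {
    use super::SpinLock;

    #[test]
    fn serializes_concurrent_increments() {
        static LOCK: SpinLock<u32> = SpinLock::new(0);

        // Four threads hammer the same counter; with correct mutual
        // exclusion no increment is lost.
        let handles: Vec<_> = (0..4)
            .map(|_| {
                std::thread::spawn(|| {
                    for _ in 0..1_000 {
                        LOCK.with_lock(|n| *n += 1);
                    }
                })
            })
            .collect();
        for h in handles {
            h.join().unwrap();
        }
        assert_eq!(LOCK.with_lock(|n| *n), 4_000);
    }
}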