//! Synchronization primitives that are modeled in loom
//!
//! This module provides some very low-level primitives, such as atomics and
//! futexes. When testing under loom, these model the corresponding operations
//! in loom instead of executing them natively.

// Use std sync primitives, or loom equivalents
#[cfg(not(loom))]
pub use core::{
    sync::atomic,
    sync::atomic::{AtomicBool, AtomicI8, AtomicI32, AtomicU32, AtomicUsize, Ordering},
};
#[cfg(loom)]
use std::collections::HashMap;

#[cfg(not(loom))]
pub use core::cell::Cell;
#[cfg(loom)]
pub use loom::cell::Cell;

// Map a *virtual* address to a Condvar. This doesn't support mapping into multiple
// processes, or into different virtual addresses in the same process, etc.
#[cfg(loom)]
use loom::sync::{Condvar, Mutex};
#[cfg(loom)]
pub use loom::{
    sync::Arc,
    sync::atomic,
    sync::atomic::{AtomicBool, AtomicI8, AtomicI32, AtomicU32, AtomicUsize, Ordering},
};
#[cfg(not(loom))]
use vasi::VirtualAddressSpaceIndependent;
#[cfg(loom)]
loom::lazy_static! {
    pub static ref FUTEXES: Mutex<HashMap<usize, Arc<Condvar>>> = Mutex::new(HashMap::new());
}

#[cfg(not(loom))]
pub fn sched_yield() {
    rustix::process::sched_yield();
}
#[cfg(loom)]
pub fn sched_yield() {
    loom::thread::yield_now();
}

// Rustix doesn't define its `FutexOperation` type under miri, so we can't use it in
// our interfaces. Use our own type and translate in our futex "backends".
enum FutexOperation {
    Wait,
    Wake,
}

#[cfg(not(loom))]
unsafe fn futex(
    futex_word: &AtomicU32,
    futex_operation: FutexOperation,
    val: u32,
) -> rustix::io::Result<usize> {
    #[cfg(not(miri))]
    {
        let futex_operation = match futex_operation {
            FutexOperation::Wait => rustix::thread::FutexOperation::Wait,
            FutexOperation::Wake => rustix::thread::FutexOperation::Wake,
        };

        unsafe {
            rustix::thread::futex(
                futex_word.as_ptr(),
                futex_operation,
                rustix::thread::FutexFlags::empty(),
                val,
                core::ptr::null(),
                core::ptr::null_mut(),
                0u32,
            )
        }
    }
    // Rustix doesn't include `futex` at all under miri. miri understands
    // futex syscalls made through libc.
    #[cfg(miri)]
    {
        let futex_operation = match futex_operation {
            FutexOperation::Wait => libc::FUTEX_WAIT,
            FutexOperation::Wake => libc::FUTEX_WAKE,
        };
        let rv = unsafe {
            libc::syscall(
                libc::SYS_futex,
                futex_word.as_ptr(),
                futex_operation,
                val,
                core::ptr::null() as *const libc::timespec,
                core::ptr::null_mut() as *mut u32,
                0u32,
            )
        };
        if rv >= 0 {
            Ok(rv.try_into().unwrap())
        } else {
            Err(rustix::io::Errno::from_raw_os_error(unsafe {
                *libc::__errno_location()
            }))
        }
    }
}

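/// Waits on `futex_word` if it still contains `val`, analogous to a
/// `FUTEX_WAIT` operation (or the loom emulation below when testing under loom).
///
/// Callers typically re-check the futex word in a loop, since the wait can
/// fail with `EAGAIN` or wake spuriously. A rough sketch of that pattern
/// (`word` and the wake-side thread are illustrative, not part of this crate):
///
/// ```ignore
/// // Wait until another thread sets `word` to nonzero and calls
/// // `futex_wake_one(&word)`.
/// while word.load(Ordering::Acquire) == 0 {
///     // Err(EAGAIN) just means the word already changed; loop and re-check.
///     let _ = futex_wait(&word, 0);
/// }
/// ```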
#[inline]
pub fn futex_wait(futex_word: &AtomicU32, val: u32) -> rustix::io::Result<usize> {
    // In "production" we use rustix to avoid going through libc, and to
    // avoid touching libc's `errno` in particular.
    #[cfg(not(loom))]
    {
        unsafe { futex(futex_word, FutexOperation::Wait, val) }
    }
    #[cfg(loom)]
    {
        // From futex(2):
        //   This load, the comparison with the expected value, and starting to
        //   sleep are performed atomically and totally ordered with
        //   respect to other futex operations on the same futex word.
        //
        // We hold a lock on our FUTEXES to represent this.
        // TODO: If we want to run loom tests with multiple interacting locks,
        // we should have per-futex mutexes here, and not hold a lock over the
        // whole list the whole time.
        let mut hashmap = FUTEXES.lock().unwrap();
        let futex_word_val = futex_word.load(Ordering::Relaxed);
        if futex_word_val != val {
            return Err(rustix::io::Errno::AGAIN);
        }
        let condvar = hashmap
            .entry(std::ptr::from_ref(futex_word) as usize)
            .or_insert(Arc::new(Condvar::new()))
            .clone();
        // We could get a spurious wakeup here, but that's ok.
        // Futexes are subject to spurious wakeups too.
        condvar.wait(hashmap).unwrap();
        Ok(0)
    }
}

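/// Wakes at most one thread blocked in [`futex_wait`] on `futex_word`,
/// analogous to a `FUTEX_WAKE` operation with a wake count of 1.
///
/// A sketch of the wake-side counterpart to the [`futex_wait`] example
/// (`word` is illustrative):
///
/// ```ignore
/// word.store(1, Ordering::Release);
/// let _ = futex_wake_one(&word);
/// ```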
#[inline]
pub fn futex_wake_one(futex_word: &AtomicU32) -> rustix::io::Result<()> {
    #[cfg(not(loom))]
    {
        unsafe { futex(futex_word, FutexOperation::Wake, 1) }.map(|_| ())
    }
    // loom doesn't understand syscalls; emulate via loom primitives.
    #[cfg(loom)]
    {
        let hashmap = FUTEXES.lock().unwrap();
        let Some(condvar) = hashmap.get(&(std::ptr::from_ref(futex_word) as usize)) else {
            return Ok(());
        };
        condvar.notify_one();
        Ok(())
    }
}

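/// Wakes all threads blocked in [`futex_wait`] on `futex_word`, analogous to a
/// `FUTEX_WAKE` operation with the maximum wake count (`INT_MAX`).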
#[inline]
pub fn futex_wake_all(futex_word: &AtomicU32) -> rustix::io::Result<()> {
    #[cfg(not(loom))]
    {
        // u32::MAX seems like it'd make sense here, but the man page says to use INT_MAX.
        // Better to go with that than risk some unexpected behavior...
        unsafe {
            futex(
                futex_word,
                FutexOperation::Wake,
                u32::try_from(i32::MAX).unwrap(),
            )
        }
        .map(|_| ())
    }
    // loom doesn't understand syscalls; emulate via loom primitives.
    #[cfg(loom)]
    {
        let hashmap = FUTEXES.lock().unwrap();
        let Some(condvar) = hashmap.get(&(std::ptr::from_ref(futex_word) as usize)) else {
            return Ok(());
        };
        condvar.notify_all();
        Ok(())
    }
}

#[cfg(not(loom))]
pub struct MutPtr<T: ?Sized>(*mut T);
#[cfg(not(loom))]
impl<T: ?Sized> MutPtr<T> {
    /// # Safety
    ///
    /// See `loom::cell::MutPtr::deref`.
    #[inline]
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn deref(&self) -> &mut T {
        unsafe { &mut *self.0 }
    }

    #[inline]
    pub fn with<F, R>(&self, f: F) -> R
    where
        F: FnOnce(*mut T) -> R,
    {
        f(self.0)
    }
}
// We have to wrap loom's MutPtr as well, since it's otherwise !Send.
// https://github.com/tokio-rs/loom/issues/294
#[cfg(loom)]
pub struct MutPtr<T: ?Sized>(loom::cell::MutPtr<T>);
#[cfg(loom)]
impl<T: ?Sized> MutPtr<T> {
    #[inline]
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn deref(&self) -> &mut T {
        unsafe { self.0.deref() }
    }

    #[inline]
    pub fn with<F, R>(&self, f: F) -> R
    where
        F: FnOnce(*mut T) -> R,
    {
        self.0.with(f)
    }
}

unsafe impl<T: ?Sized> Send for MutPtr<T> where T: Send {}

#[cfg(not(loom))]
pub struct ConstPtr<T: ?Sized>(*const T);
#[cfg(not(loom))]
impl<T: ?Sized> ConstPtr<T> {
    /// # Safety
    ///
    /// See `loom::cell::ConstPtr::deref`.
    pub unsafe fn deref(&self) -> &T {
        unsafe { &*self.0 }
    }

    pub fn with<F, R>(&self, f: F) -> R
    where
        F: FnOnce(*const T) -> R,
    {
        f(self.0)
    }
}

#[cfg(loom)]
pub use loom::cell::ConstPtr;

/// From <https://docs.rs/loom/latest/loom/#handling-loom-api-differences>
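///
/// Access goes through [`ConstPtr`]/[`MutPtr`] rather than bare references, so
/// that loom can track the accesses when testing. A rough usage sketch (the
/// `counter` cell and the increment are illustrative):
///
/// ```ignore
/// let counter = UnsafeCell::new(0u32);
/// counter.get_mut().with(|p| {
///     // SAFETY: illustrative only; the caller must ensure there is no
///     // concurrent conflicting access to the cell.
///     unsafe { *p += 1 };
/// });
/// assert_eq!(counter.get().with(|p| unsafe { *p }), 1);
/// ```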
#[cfg(not(loom))]
#[derive(Debug, VirtualAddressSpaceIndependent)]
#[repr(transparent)]
pub struct UnsafeCell<T>(core::cell::UnsafeCell<T>);
#[cfg(not(loom))]
impl<T> UnsafeCell<T> {
    #[inline]
    pub const fn new(data: T) -> UnsafeCell<T> {
        UnsafeCell(core::cell::UnsafeCell::new(data))
    }

    /// Note that this has a different signature from the method
    /// of the same name in `core::cell::UnsafeCell`.
    /// See <https://docs.rs/loom/latest/loom/#handling-loom-api-differences>
    #[inline]
    pub fn get_mut(&self) -> MutPtr<T> {
        MutPtr(self.0.get())
    }

    #[inline]
    pub fn get(&self) -> ConstPtr<T> {
        ConstPtr(self.0.get())
    }

    /// This is analogous to `core::cell::UnsafeCell::get` in that it returns
    /// a raw pointer instead of an object.
    ///
    /// We can't provide this method under loom without giving up some of loom's
    /// analysis.
    pub fn untracked_get(&self) -> *mut T {
        self.0.get()
    }
}
#[cfg(loom)]
#[derive(Debug)]
pub struct UnsafeCell<T>(loom::cell::UnsafeCell<T>);
#[cfg(loom)]
impl<T> UnsafeCell<T> {
    // TODO: make this `const` if and when loom's UnsafeCell supports a const new.
    pub fn new(data: T) -> UnsafeCell<T> {
        UnsafeCell(loom::cell::UnsafeCell::new(data))
    }

    pub fn get_mut(&self) -> MutPtr<T> {
        MutPtr(self.0.get_mut())
    }

    pub fn get(&self) -> ConstPtr<T> {
        self.0.get()
    }
}

/// Lets us clear global state in between loom iterations, in loom tests.
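///
/// A sketch of how a loom test might use it (the test body is illustrative):
///
/// ```ignore
/// loom::model(|| {
///     loom_reset();
///     // ... exercise futex_wait / futex_wake_one across loom threads ...
/// });
/// ```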
#[cfg(loom)]
pub fn loom_reset() {
    FUTEXES.lock().unwrap().clear();
}