shadow_shim/
mmap_box.rs

1use core::ptr::NonNull;
2
3use rustix::mm::{MapFlags, ProtFlags};
4
5/// Analogous to `alloc::boxed::Box`, but directly uses `mmap` instead of a
6/// global allocator.
7///
8/// Useful since we don't currently have a global allocator in the shim, and
9/// probably don't want to install one that makes direct `mmap` calls for every
10/// allocation, since that would be a performance footgun.
11///
12/// We should be able to replace this with `alloc::boxed::Box<T>` if and when we
13/// implement a global allocator suitable for the shim.  (Or with
14/// `alloc::boxed::Box<T, MmapAllocator>` when non-global allocators are
15/// stabilized)
pub struct MmapBox<T> {
    // Invariant: `Some` and pointing at a live, initialized `T` for the whole
    // life of the box, except after `leak` has taken the allocation (then
    // `None`, so `Drop` is a no-op). `NonNull`'s niche means the `Option`
    // costs no extra space.
    ptr: Option<NonNull<T>>,
}
// SAFETY: the box uniquely owns its `T` (the backing anonymous mapping is
// private to this box), so sending the box to another thread is sound
// whenever `T` itself is `Send`.
unsafe impl<T> Send for MmapBox<T> where T: Send {}
// SAFETY: a shared `&MmapBox<T>` only exposes `&T` (via `Deref`), so sharing
// it across threads is sound whenever `T` is `Sync`.
unsafe impl<T> Sync for MmapBox<T> where T: Sync {}
21
22impl<T> MmapBox<T> {
23    pub fn new(x: T) -> Self {
24        #[cfg(not(miri))]
25        {
26            let ptr: *mut core::ffi::c_void = unsafe {
27                rustix::mm::mmap_anonymous(
28                    core::ptr::null_mut(),
29                    core::mem::size_of::<T>(),
30                    ProtFlags::READ | ProtFlags::WRITE,
31                    MapFlags::PRIVATE,
32                )
33            }
34            .unwrap();
35            assert!(!ptr.is_null());
36
37            // Memory returned by mmap is page-aligned, which is generally at least
38            // 4096.  This should be enough for most types.
39            assert_eq!(ptr.align_offset(core::mem::align_of::<T>()), 0);
40
41            let ptr: *mut T = ptr.cast();
42            unsafe { ptr.write(x) };
43            Self {
44                ptr: Some(NonNull::new(ptr).unwrap()),
45            }
46        }
47        #[cfg(miri)]
48        {
49            Self {
50                ptr: Some(NonNull::new(Box::into_raw(Box::new(x))).unwrap()),
51            }
52        }
53    }
54
55    #[allow(unused)]
56    pub fn leak(mut this: MmapBox<T>) -> *mut T {
57        this.ptr.take().unwrap().as_ptr()
58    }
59}
60
61impl<T> core::ops::Deref for MmapBox<T> {
62    type Target = T;
63
64    fn deref(&self) -> &Self::Target {
65        unsafe { self.ptr.as_ref().unwrap().as_ref() }
66    }
67}
68
69impl<T> core::ops::DerefMut for MmapBox<T> {
70    fn deref_mut(&mut self) -> &mut Self::Target {
71        unsafe { self.ptr.as_mut().unwrap().as_mut() }
72    }
73}
74
impl<T> Drop for MmapBox<T> {
    fn drop(&mut self) {
        // `ptr` is `None` exactly when `leak` already took ownership of the
        // allocation; in that case there is nothing to drop or unmap.
        let Some(ptr) = self.ptr else {
            return;
        };
        let ptr = ptr.as_ptr();

        #[cfg(not(miri))]
        {
            // SAFETY: the pointer refers to a live, initialized `T` uniquely
            // owned by this box. The value must be dropped *before* the
            // mapping is torn down; after this call only the raw address is
            // used (passed to munmap), never the value.
            unsafe { ptr.drop_in_place() }
            // SAFETY: this mapping was created by `new` with exactly
            // `size_of::<T>()` bytes and nothing else references it, so
            // unmapping the full range here is sound.
            unsafe {
                rustix::mm::munmap(ptr.cast::<core::ffi::c_void>(), core::mem::size_of::<T>())
                    .unwrap()
            }
        }
        #[cfg(miri)]
        {
            // Mirrors the `Box`-based allocation path `new` uses under miri.
            drop(unsafe { Box::from_raw(ptr) })
        }
    }
}
96
#[cfg(test)]
mod test {
    use std::sync::Arc;

    use vasi_sync::lazy_lock::LazyLock;

    use super::*;

    /// A small value round-trips through the box.
    #[test]
    fn test_basic() {
        let boxed = MmapBox::new(42);
        assert_eq!(*boxed, 42);
    }

    /// Values larger than a single page are stored correctly.
    #[test]
    fn test_large_alloc() {
        // Large enough to span multiple pages.
        let expected = [0; 100_000];

        let boxed = MmapBox::new(expected);
        assert_eq!(*boxed, expected);
    }

    /// `DerefMut` allows in-place mutation of the boxed value.
    #[test]
    fn test_mutate() {
        let mut boxed = MmapBox::new(42);
        assert_eq!(*boxed, 42);
        *boxed += 1;
        assert_eq!(*boxed, 43);
    }

    /// Dropping the box runs the contained value's destructor exactly once.
    #[test]
    fn test_drop() {
        let tracker = Arc::new(());
        assert_eq!(Arc::strong_count(&tracker), 1);
        {
            let _held = MmapBox::new(tracker.clone());
            assert_eq!(Arc::strong_count(&tracker), 2);
        }
        assert_eq!(Arc::strong_count(&tracker), 1);
    }

    /// `leak` yields a pointer that remains valid for `'static`.
    #[test]
    fn test_leak() {
        static MY_LEAKED: LazyLock<&'static u32> =
            LazyLock::const_new(|| unsafe { &*MmapBox::leak(MmapBox::new(42)) });
        assert_eq!(**MY_LEAKED.force(), 42);
    }
}