use crate::raw_syscall::*;
use core::fmt::Write;

use formatting_nostd::FormatBuffer;
use linux_api::errno::Errno;

use vasi::VirtualAddressSpaceIndependent;

use crate::util::PathBuf;

#[cfg(debug_assertions)]
type CanaryBuf = [u8; 4];

#[cfg(debug_assertions)]
const CANARY: CanaryBuf = [0xDE, 0xAD, 0xBE, 0xEF];

#[cfg(not(debug_assertions))]
type CanaryBuf = [u8; 0];

#[cfg(not(debug_assertions))]
const CANARY: CanaryBuf = [];

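/// Canary bytes written around allocator metadata. In debug builds `canary_init`
/// stores the pattern and `canary_assert` panics if it has been overwritten, which
/// catches memory corruption early; in release builds the buffers are zero-sized
/// and `canary_assert` is a no-op.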
trait Canary {
    fn canary_init(&mut self);

    #[cfg_attr(not(debug_assertions), allow(dead_code))]
    fn canary_check(&self) -> bool;

    #[cfg(debug_assertions)]
    fn canary_assert(&self) {
        assert!(self.canary_check());
    }

    #[cfg(not(debug_assertions))]
    fn canary_assert(&self) {}
}

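/// Failure modes of the shared-memory allocator; most variants correspond to a
/// system call that failed while creating, mapping, or removing a chunk.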
#[derive(Copy, Clone)]
pub(crate) enum AllocError {
    Clock,
    Open,
    FTruncate,
    MMap,
    MUnmap,
    Unlink,
    WrongAllocator,
    GetPID,
}

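/// Maps an `AllocError` to a static, human-readable description shared by the
/// `Debug` and `Display` implementations below.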
const fn alloc_error_to_str(e: AllocError) -> Option<&'static str> {
    match e {
        AllocError::Clock => Some("Error calling clock_gettime()"),
        AllocError::Open => Some("Error calling open()"),
        AllocError::FTruncate => Some("Error calling ftruncate()"),
        AllocError::MMap => Some("Error calling mmap()"),
        AllocError::MUnmap => Some("Error calling munmap()"),
        AllocError::Unlink => Some("Error calling unlink()"),
        AllocError::WrongAllocator => Some("Block was passed to incorrect allocator"),
        AllocError::GetPID => Some("Error calling getpid()"),
    }
}

impl core::fmt::Debug for AllocError {
    fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
        match alloc_error_to_str(*self) {
            Some(s) => formatter.write_str(s),
            None => write!(formatter, "unknown allocator error"),
        }
    }
}

impl core::fmt::Display for AllocError {
    fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
        match alloc_error_to_str(*self) {
            Some(s) => formatter.write_str(s),
            None => write!(formatter, "unknown allocator error"),
        }
    }
}

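/// Formats the error, together with the errno when one is available, into a
/// fixed-size buffer and emits it through the `log` crate.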
pub(crate) fn log_err(error: AllocError, errno: Option<Errno>) {
    let mut buf = FormatBuffer::<1024>::new();

    if let Some(e) = errno {
        write!(&mut buf, "{error} ({e})").unwrap();
    } else {
        write!(&mut buf, "{error}").unwrap();
    }

    log::error!("{}", buf.as_str());
}

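/// Logs the error and then aborts by sending `SIGABRT` to the calling thread via
/// `tgkill`; never returns.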
pub(crate) fn log_err_and_exit(error: AllocError, errno: Option<Errno>) -> ! {
    log_err(error, errno);
    let _ = tgkill(
        getpid().unwrap(),
        gettid().unwrap(),
        linux_api::signal::Signal::SIGABRT.into(),
    );
    unreachable!()
}

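/// Writes a shared-memory file path of the form
/// `/dev/shm/shadow_shmemfile_<secs>.<nanos>-<pid>` into `buf`, using the monotonic
/// clock and the caller's PID to make the name unique.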
fn format_shmem_name(buf: &mut PathBuf) {
    let pid = match getpid() {
        Ok(pid) => pid,
        Err(err) => log_err_and_exit(AllocError::GetPID, Some(err)),
    };

    let ts = match clock_monotonic_gettime() {
        Ok(ts) => ts,
        Err(errno) => log_err_and_exit(AllocError::Clock, Some(errno)),
    };

    let mut fb = FormatBuffer::<{ crate::util::PATH_MAX_NBYTES }>::new();
    write!(
        &mut fb,
        "/dev/shm/shadow_shmemfile_{}.{}-{}",
        ts.tv_sec, ts.tv_nsec, pid
    )
    .unwrap();

    *buf = crate::util::buf_from_utf8_str(fb.as_str()).unwrap();
}

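/// Default size of each shared-memory chunk: 8 MiB.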
const CHUNK_NBYTES_DEFAULT: usize = 8 * 1024 * 1024;

fn create_map_shared_memory<'a>(path_buf: &PathBuf, nbytes: usize) -> (&'a mut [u8], i32) {
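    // Creates the shared-memory file at `path_buf` (O_CREAT | O_EXCL), grows it to
    // `nbytes` with ftruncate(), and maps it shared and read-write. Any syscall
    // failure aborts the process via `log_err_and_exit`.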
    use linux_api::fcntl::OFlag;
    use linux_api::mman::{MapFlags, ProtFlags};

    const MODE: u32 = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
    let open_flags = OFlag::O_RDWR | OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_CLOEXEC;
    let prot = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE;
    let map_flags = MapFlags::MAP_SHARED;

    let fd = match unsafe { open(path_buf, open_flags, MODE) } {
        Ok(fd) => fd,
        Err(err) => log_err_and_exit(AllocError::Open, Some(err)),
    };

    if let Err(errno) = ftruncate(fd, nbytes.try_into().unwrap()) {
        log_err_and_exit(AllocError::FTruncate, Some(errno))
    };

    let retval = match unsafe {
        mmap(
            core::ptr::null_mut(),
            nbytes.try_into().unwrap(),
            prot,
            map_flags,
            fd,
            0,
        )
    } {
        Ok(retval) => retval,
        Err(errno) => log_err_and_exit(AllocError::MMap, Some(errno)),
    };

    (retval, fd)
}

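/// Opens and maps an existing shared-memory file read-write; unlike
/// `create_map_shared_memory` it neither creates nor resizes the file.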
fn view_shared_memory<'a>(path_buf: &PathBuf, nbytes: usize) -> (&'a mut [u8], i32) {
    use linux_api::fcntl::OFlag;
    use linux_api::mman::{MapFlags, ProtFlags};

    const MODE: u32 = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
    let open_flags = OFlag::O_RDWR | OFlag::O_CLOEXEC;
    let prot = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE;
    let map_flags = MapFlags::MAP_SHARED;

    let fd = match unsafe { open(path_buf, open_flags, MODE) } {
        Ok(fd) => fd,
        Err(errno) => log_err_and_exit(AllocError::Open, Some(errno)),
    };

    let retval = match unsafe {
        mmap(
            core::ptr::null_mut(),
            nbytes.try_into().unwrap(),
            prot,
            map_flags,
            fd,
            0,
        )
    } {
        Ok(retval) => retval,
        Err(errno) => log_err_and_exit(AllocError::MMap, Some(errno)),
    };

    (retval, fd)
}

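/// Header stored at the beginning of every mapped chunk. Blocks are carved out of
/// the space that follows it: `data_cur` is the bump-allocation cursor for that
/// space, and chunks form a singly linked list through `next_chunk`.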
#[repr(C)]
struct Chunk {
    canary_front: CanaryBuf,
    chunk_name: PathBuf,
    chunk_fd: i32,
    chunk_nbytes: usize,
    data_cur: u32,
    next_chunk: *mut Chunk,
    canary_back: CanaryBuf,
}

impl Chunk {
    fn get_mut_data_start(&mut self) -> *mut u8 {
        self.get_data_start().cast_mut()
    }

    fn get_data_start(&self) -> *const u8 {
        let p = core::ptr::from_ref(self) as *const u8;
        unsafe { p.add(core::mem::size_of::<Self>()) }
    }
}

impl Canary for Chunk {
    fn canary_init(&mut self) {
        self.canary_front = CANARY;
        self.canary_back = CANARY;
    }

    fn canary_check(&self) -> bool {
        self.canary_front == CANARY && self.canary_back == CANARY
    }
}

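/// Creates and maps a new chunk backed by the shared-memory file at `path_buf`,
/// then initializes the `Chunk` header at the start of the mapping.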
fn allocate_shared_chunk(path_buf: &PathBuf, nbytes: usize) -> *mut Chunk {
    let (p, fd) = create_map_shared_memory(path_buf, nbytes);

    let chunk_meta: *mut Chunk = p.as_mut_ptr() as *mut Chunk;

    unsafe {
        (*chunk_meta).chunk_name = *path_buf;
        (*chunk_meta).chunk_fd = fd;
        (*chunk_meta).chunk_nbytes = nbytes;
        (*chunk_meta).data_cur = 0;
        (*chunk_meta).next_chunk = core::ptr::null_mut();
        (*chunk_meta).canary_init();
    }

    chunk_meta
}

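/// Maps an existing chunk file into this process; the header is assumed to have
/// been initialized by the process that created the chunk.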
fn view_shared_chunk(path_buf: &PathBuf, nbytes: usize) -> *mut Chunk {
    let (p, _) = view_shared_memory(path_buf, nbytes);
    let chunk_meta: *mut Chunk = p.as_mut_ptr() as *mut Chunk;
    chunk_meta
}

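/// Unmaps a chunk and unlinks its backing file; failures are logged but not fatal.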
fn deallocate_shared_chunk(chunk_meta: *const Chunk) {
    unsafe {
        (*chunk_meta).canary_assert();
    }

    let path_buf = unsafe { (*chunk_meta).chunk_name };
    let chunk_nbytes = unsafe { (*chunk_meta).chunk_nbytes };

    if let Err(errno) =
        munmap(unsafe { core::slice::from_raw_parts_mut(chunk_meta as *mut u8, chunk_nbytes) })
    {
        log_err(AllocError::MUnmap, Some(errno));
    }

    if let Err(errno) = unsafe { unlink(&path_buf) } {
        log_err(AllocError::Unlink, Some(errno));
    }
}

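/// Header placed immediately before each allocation's payload. The payload begins
/// `data_offset` bytes after the start of the header and spans `alloc_nbytes`
/// bytes; free blocks are linked through `next_free_block`.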
#[repr(C)]
#[derive(Debug)]
pub(crate) struct Block {
    canary_front: CanaryBuf,
    next_free_block: *mut Block,
    alloc_nbytes: u32,
    data_offset: u32,
    canary_back: CanaryBuf,
}

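/// Position-independent handle to a `Block`: the name of the chunk's backing file
/// plus the block's byte offset from the start of that chunk. Safe to pass to
/// another process, which resolves it with `FreelistDeserializer::deserialize`.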
#[repr(C)]
#[derive(Copy, Clone, Debug, VirtualAddressSpaceIndependent)]
pub(crate) struct BlockSerialized {
    pub(crate) chunk_name: crate::util::PathBuf,
    pub(crate) offset: isize,
}

const BLOCK_STRUCT_NBYTES: usize = core::mem::size_of::<Block>();
const BLOCK_STRUCT_ALIGNMENT: usize = core::mem::align_of::<Block>();

impl Canary for Block {
    fn canary_init(&mut self) {
        self.canary_front = CANARY;
        self.canary_back = CANARY;
    }

    fn canary_check(&self) -> bool {
        self.canary_front == CANARY && self.canary_back == CANARY
    }
}

impl Block {
    pub(self) fn get_block_data_range(&self) -> (*const u8, *const u8) {
        self.canary_assert();

        let data_offset = self.data_offset;
        let alloc_nbytes = self.alloc_nbytes;
        let block = core::ptr::from_ref(self) as *const u8;
        assert!(!block.is_null());

        let data_begin = unsafe { block.add(data_offset as usize) };
        let data_end = unsafe { data_begin.add(alloc_nbytes as usize) };

        (data_begin, data_end)
    }

    pub(crate) fn get_ref<T>(&self) -> &[T] {
        let (begin_p, end_p) = self.get_block_data_range();
        let block_len = unsafe { end_p.offset_from(begin_p) } as usize;
        assert!(block_len % core::mem::size_of::<T>() == 0);
        let nelems = block_len / core::mem::size_of::<T>();
        unsafe { core::slice::from_raw_parts(begin_p as *const T, nelems) }
    }

    pub(crate) fn get_mut_ref<T>(&mut self) -> &mut [T] {
        let x = self.get_ref();
        let nelems = x.len();
        let x_ptr: *const T = x.as_ptr();
        unsafe { core::slice::from_raw_parts_mut(x_ptr.cast_mut(), nelems) }
    }
}

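/// Allocator over a linked list of shared-memory chunks: new blocks are
/// bump-allocated from the most recently added chunk, and freed blocks are kept on
/// a free list for reuse by allocations of the same size and alignment.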
#[derive(Debug)]
pub(crate) struct FreelistAllocator {
    first_chunk: *mut Chunk,
    next_free_block: *mut Block,
    chunk_nbytes: usize,
}

impl FreelistAllocator {
    pub const fn new() -> Self {
        FreelistAllocator {
            first_chunk: core::ptr::null_mut(),
            next_free_block: core::ptr::null_mut(),
            chunk_nbytes: CHUNK_NBYTES_DEFAULT,
        }
    }

    pub fn init(&mut self) -> Result<(), i32> {
        self.add_chunk()
    }

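    /// Maps a fresh shared-memory chunk and pushes it onto the front of the chunk list.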
    fn add_chunk(&mut self) -> Result<(), i32> {
        let mut path_buf = crate::util::NULL_PATH_BUF;
        format_shmem_name(&mut path_buf);

        let new_chunk = allocate_shared_chunk(&path_buf, self.chunk_nbytes);

        unsafe {
            (*new_chunk).next_chunk = self.first_chunk;
        }

        self.first_chunk = new_chunk;

        Ok(())
    }

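    /// Scans the free list for a block whose size matches `alloc_nbytes` exactly and
    /// whose payload satisfies `alloc_alignment`. Returns `(predecessor, block)`;
    /// `block` is null when nothing on the free list fits.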
    fn check_free_list_for_acceptable_block(
        &mut self,
        alloc_nbytes: usize,
        alloc_alignment: usize,
    ) -> (*mut Block, *mut Block) {
        let mut block = self.next_free_block;
        let mut pred: *mut Block = core::ptr::null_mut();

        while !block.is_null() {
            let (start_p, _) = unsafe { (*block).get_block_data_range() };

            if unsafe { (*block).alloc_nbytes as usize == alloc_nbytes }
                && start_p.align_offset(alloc_alignment) == 0
            {
                return (pred, block);
            }

            pred = block;
            unsafe {
                block = (*block).next_free_block;
            }
        }

        (pred, core::ptr::null_mut())
    }

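    /// Given a candidate position `p`, returns the first suitably aligned start
    /// address at or after `p` and the end address of an `alloc_nbytes`-byte
    /// allocation placed there.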
    fn find_next_suitable_positions(
        p: *mut u8,
        alloc_nbytes: usize,
        alloc_alignment: usize,
    ) -> (*mut u8, *mut u8) {
        let off = p.align_offset(alloc_alignment);
        let start = unsafe { p.add(off) };
        let end = unsafe { start.add(alloc_nbytes) };
        (start, end)
    }

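    /// Tries to bump-allocate a `Block` header plus `alloc_nbytes` of suitably
    /// aligned payload in the unused tail of `chunk`. Returns the new block, or null
    /// if the chunk has no room. The chunk's `data_cur` is not advanced here; the
    /// caller does that.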
    fn try_creating_block_in_chunk(
        chunk: &mut Chunk,
        alloc_nbytes: usize,
        alloc_alignment: usize,
    ) -> *mut Block {
        let chunk_start = core::ptr::from_mut(chunk) as *mut u8;
        let chunk_end = unsafe { chunk_start.add(chunk.chunk_nbytes) };

        let data_start = unsafe { chunk.get_mut_data_start().add(chunk.data_cur as usize) };

        let (block_struct_start, block_struct_end) = Self::find_next_suitable_positions(
            data_start,
            BLOCK_STRUCT_NBYTES,
            BLOCK_STRUCT_ALIGNMENT,
        );
        let (block_data_start, block_data_end) =
            Self::find_next_suitable_positions(block_struct_end, alloc_nbytes, alloc_alignment);

        let data_offset = unsafe { block_data_start.offset_from(block_struct_start) };

        assert!(data_offset > 0);

        if block_data_end <= chunk_end {
            let block = block_struct_start as *mut Block;

            unsafe {
                (*block).canary_init();
                (*block).next_free_block = core::ptr::null_mut();
                (*block).alloc_nbytes = alloc_nbytes as u32;
                (*block).data_offset = data_offset as u32;
            }

            return block;
        }

        core::ptr::null_mut()
    }

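    /// Returns a block with `alloc_nbytes` bytes of payload aligned to
    /// `alloc_alignment`. The free list is checked first; otherwise the block is
    /// bump-allocated from the newest chunk, adding a new chunk if the current one
    /// is full.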
    pub fn alloc(&mut self, alloc_nbytes: usize, alloc_alignment: usize) -> *mut Block {
        let (pred, mut block) =
            self.check_free_list_for_acceptable_block(alloc_nbytes, alloc_alignment);

        if !block.is_null() {
            if pred.is_null() {
                self.next_free_block = unsafe { (*block).next_free_block };
            } else {
                unsafe {
                    (*pred).next_free_block = (*block).next_free_block;
                }
            }

            let (p, _) = unsafe { (*block).get_block_data_range() };
            assert!(p.align_offset(alloc_alignment) == 0);

            return block;
        }

        block = Self::try_creating_block_in_chunk(
            unsafe { &mut (*self.first_chunk) },
            alloc_nbytes,
            alloc_alignment,
        );

        if block.is_null() {
            self.add_chunk().unwrap();
        }

        block = Self::try_creating_block_in_chunk(
            unsafe { &mut (*self.first_chunk) },
            alloc_nbytes,
            alloc_alignment,
        );

        let block_p = block as *mut u8;

        let block_end = unsafe {
            let data_offset = (*block).data_offset;
            assert!(data_offset > 0);
            block_p.add(data_offset as usize).add(alloc_nbytes)
        };

        let chunk_p = self.first_chunk as *mut u8;
        unsafe {
            let data_cur = block_end.offset_from(chunk_p);
            (*self.first_chunk).data_cur = data_cur as u32;
        }

        assert!(!block.is_null());
        let (p, _) = unsafe { (*block).get_block_data_range() };
        assert!(p.align_offset(alloc_alignment) == 0);

        block
    }

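    /// Returns a block to the allocator by pushing it onto the head of the free list.
    /// The backing memory is not unmapped until `destruct` is called.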
    pub fn dealloc(&mut self, block: *mut Block) {
        if block.is_null() {
            return;
        }

        unsafe {
            (*block).canary_assert();
        }
        let old_block = self.next_free_block;
        unsafe {
            (*block).next_free_block = old_block;
        }
        self.next_free_block = block;
    }

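    /// Finds the chunk whose data region contains `block`, or `None` if the block
    /// does not belong to this allocator.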
    fn find_chunk(&self, block: *const Block) -> Option<*const Chunk> {
        unsafe {
            (*block).canary_assert();
        }

        if !self.first_chunk.is_null() {
            let mut chunk_to_check = self.first_chunk;

            while !chunk_to_check.is_null() {
                unsafe {
                    (*chunk_to_check).canary_assert();
                }
                let data_start = unsafe { (*chunk_to_check).get_data_start() };
                let data_end = unsafe { (chunk_to_check as *const u8).add(self.chunk_nbytes) };

                let block_p = block as *const u8;

                if block_p >= data_start && block_p < data_end {
                    return Some(chunk_to_check);
                }

                chunk_to_check = unsafe { (*chunk_to_check).next_chunk };
            }
        }

        None
    }

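    /// Converts a block pointer into a `BlockSerialized` (chunk file name plus byte
    /// offset within that chunk) that another process can resolve. Aborts with
    /// `WrongAllocator` if the block is not owned by this allocator.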
    pub fn serialize(&self, block: *const Block) -> BlockSerialized {
        unsafe {
            (*block).canary_assert();
        }

        if let Some(chunk) = self.find_chunk(block) {
            let chunk_p = chunk as *const u8;
            let block_p = block as *const u8;
            let offset = unsafe { block_p.offset_from(chunk_p) };
            assert!(offset > 0);

            BlockSerialized {
                chunk_name: unsafe { (*chunk).chunk_name },
                offset,
            }
        } else {
            log_err_and_exit(AllocError::WrongAllocator, None);
        }
    }

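    /// Tears the allocator down: unmaps every chunk and unlinks its backing file.
    /// Any outstanding blocks become invalid.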
    pub fn destruct(&mut self) {
        if !self.first_chunk.is_null() {
            let mut chunk_to_dealloc = self.first_chunk;

            while !chunk_to_dealloc.is_null() {
                let tmp = unsafe { (*chunk_to_dealloc).next_chunk };

                deallocate_shared_chunk(chunk_to_dealloc);

                chunk_to_dealloc = tmp;
            }

            self.first_chunk = core::ptr::null_mut();
        }
    }
}

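/// Maximum number of mapped chunks a `FreelistDeserializer` keeps track of.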
const CHUNK_CAPACITY: usize = 64;

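/// Read-side companion to `FreelistAllocator`: maps chunks created by another
/// process on demand and resolves `BlockSerialized` handles into block pointers.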
#[repr(C)]
#[derive(Debug)]
pub(crate) struct FreelistDeserializer {
    chunks: [*mut Chunk; CHUNK_CAPACITY],
    nmapped_chunks: usize,
    chunk_nbytes: usize,
}

impl FreelistDeserializer {
    pub fn new() -> FreelistDeserializer {
        FreelistDeserializer {
            chunks: [core::ptr::null_mut(); CHUNK_CAPACITY],
            nmapped_chunks: 0,
            chunk_nbytes: CHUNK_NBYTES_DEFAULT,
        }
    }

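    /// Looks up an already-mapped chunk by its backing file name; returns null if
    /// this deserializer has not mapped that chunk yet.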
    fn find_chunk(&self, chunk_name: &PathBuf) -> *mut Chunk {
        for idx in 0..self.nmapped_chunks {
            let chunk = self.chunks[idx];

            if unsafe { (*chunk).chunk_name == (*chunk_name) } {
                return chunk;
            }
        }

        core::ptr::null_mut()
    }

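    /// Maps the chunk backing `chunk_name` into this process and records it in the
    /// chunk table (if there is room) so later lookups can reuse the mapping.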
    fn map_chunk(&mut self, chunk_name: &PathBuf) -> *mut Chunk {
        let chunk = view_shared_chunk(chunk_name, self.chunk_nbytes);

        if self.nmapped_chunks == CHUNK_CAPACITY {
            // The chunk table is full; the mapping is still returned but is not
            // remembered, so a later lookup will map the same file again.
        } else {
            self.chunks[self.nmapped_chunks] = chunk;
            self.nmapped_chunks += 1;
        }

        chunk
    }

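    /// Resolves a serialized block handle: maps the named chunk if necessary and
    /// returns a pointer to the block at the recorded offset.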
    pub fn deserialize(&mut self, block_ser: &BlockSerialized) -> *mut Block {
        let mut block_chunk = self.find_chunk(&block_ser.chunk_name);

        if block_chunk.is_null() {
            block_chunk = self.map_chunk(&block_ser.chunk_name);
        }

        let chunk_p = block_chunk as *mut u8;

        assert!(block_ser.offset > 0);
        let block_p = unsafe { chunk_p.add(block_ser.offset as usize) };

        assert!(!block_p.is_null());
        unsafe {
            (*(block_p as *mut Block)).canary_assert();
        };

        block_p as *mut Block
    }
}
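
// A minimal allocate/serialize/deserialize round-trip sketch (not part of the
// original module). It only uses the APIs defined above and assumes the test is
// run on a Linux host where /dev/shm is writable; names such as `sketch_tests`
// and `round_trip` are illustrative, not from the upstream code.
#[cfg(test)]
mod sketch_tests {
    use super::*;

    #[test]
    fn round_trip() {
        let mut alloc = FreelistAllocator::new();
        alloc.init().unwrap();

        // Allocate room for four u32 values and write through the block.
        let block = alloc.alloc(4 * core::mem::size_of::<u32>(), core::mem::align_of::<u32>());
        let vals: &mut [u32] = unsafe { (*block).get_mut_ref() };
        vals.copy_from_slice(&[1, 2, 3, 4]);

        // Serialize to a position-independent handle, then resolve it the way a
        // second process would.
        let ser = alloc.serialize(block);
        let mut deser = FreelistDeserializer::new();
        let view = deser.deserialize(&ser);
        let view_vals: &[u32] = unsafe { (*view).get_ref() };
        assert_eq!(view_vals, &[1u32, 2, 3, 4][..]);

        alloc.dealloc(block);
        alloc.destruct();
    }
}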