shadow_shmem/
shmalloc_impl.rs

//! This module implements a low-level, unsafe shared memory allocator that uses mmap'ed shared
//! memory files as the backing store. The module is intended to be no-std so it can be used in
//! Shadow's shim library, which must be async-signal-safe.
//!
//! The allocator chains together chunks of shared memory and divvies out portions of each chunk
//! using a first-fit strategy. The allocator also implements a freelist so that allocated blocks
//! can be reused efficiently after free. The allocator design isn't good for general-purpose
//! allocation, but should be OK when used for just a few types.
//!
//! This code is intended to be private; the `allocator` module is the public, safer-to-use
//! front end.
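//!
//! As a rough sketch of the intended flow (not a doctest; the safe front end in the
//! `allocator` module is the supported interface):
//!
//! ```ignore
//! let mut alloc = FreelistAllocator::new();
//! alloc.init().unwrap();
//!
//! // Allocate room for four u32 values and write through the block.
//! let block = alloc.alloc(4 * core::mem::size_of::<u32>(), core::mem::align_of::<u32>());
//! unsafe { (*block).get_mut_ref::<u32>() }.copy_from_slice(&[1, 2, 3, 4]);
//!
//! // Serializing records the chunk's file name plus the block's offset; another process
//! // can deserialize that to re-map the same shared bytes.
//! let serialized = alloc.serialize(block);
//! let mut deserializer = FreelistDeserializer::new();
//! let _view = deserializer.deserialize(&serialized);
//!
//! alloc.dealloc(block);
//! alloc.destruct();
//! ```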

use crate::raw_syscall::*;
use core::fmt::Write;

use formatting_nostd::FormatBuffer;
use linux_api::errno::Errno;

use vasi::VirtualAddressSpaceIndependent;

use crate::util::PathBuf;

// TODO(rwails): This may be unified with `shadow_rs::utility::Magic`.
#[cfg(debug_assertions)]
type CanaryBuf = [u8; 4];

#[cfg(debug_assertions)]
const CANARY: CanaryBuf = [0xDE, 0xAD, 0xBE, 0xEF];

#[cfg(not(debug_assertions))]
type CanaryBuf = [u8; 0];

#[cfg(not(debug_assertions))]
const CANARY: CanaryBuf = [];

trait Canary {
    fn canary_init(&mut self);

    #[cfg_attr(not(debug_assertions), allow(dead_code))]
    fn canary_check(&self) -> bool;

    #[cfg(debug_assertions)]
    fn canary_assert(&self) {
        assert!(self.canary_check());
    }

    #[cfg(not(debug_assertions))]
    fn canary_assert(&self) {}
}

#[derive(Copy, Clone)]
pub(crate) enum AllocError {
    Clock,
    Open,
    FTruncate,
    MMap,
    MUnmap,
    Unlink,
    WrongAllocator,
    // Leak,
    GetPID,
}

const fn alloc_error_to_str(e: AllocError) -> Option<&'static str> {
    match e {
        AllocError::Clock => Some("Error calling clock_gettime()"),
        AllocError::Open => Some("Error calling open()"),
        AllocError::FTruncate => Some("Error calling ftruncate()"),
        AllocError::MMap => Some("Error calling mmap()"),
        AllocError::MUnmap => Some("Error calling munmap()"),
        AllocError::Unlink => Some("Error calling unlink()"),
        AllocError::WrongAllocator => Some("Block was passed to incorrect allocator"),
        // AllocError::Leak => Some("Allocator destroyed but not all blocks are deallocated first"),
        AllocError::GetPID => Some("Error calling getpid()"),
    }
}

impl core::fmt::Debug for AllocError {
    fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
        match alloc_error_to_str(*self) {
            Some(s) => formatter.write_str(s),
            None => write!(formatter, "unknown allocator error"),
        }
    }
}

impl core::fmt::Display for AllocError {
    fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
        match alloc_error_to_str(*self) {
            Some(s) => formatter.write_str(s),
            None => write!(formatter, "unknown allocator error"),
        }
    }
}

pub(crate) fn log_err(error: AllocError, errno: Option<Errno>) {
    let mut buf = FormatBuffer::<1024>::new();

    if let Some(e) = errno {
        write!(&mut buf, "{error} ({e})").unwrap();
    } else {
        write!(&mut buf, "{error}").unwrap();
    }

    log::error!("{}", buf.as_str());
}

pub(crate) fn log_err_and_exit(error: AllocError, errno: Option<Errno>) -> ! {
    log_err(error, errno);
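    // Abort by delivering SIGABRT to the current thread through a raw tgkill() syscall.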
    let _ = tgkill(
        getpid().unwrap(),
        gettid().unwrap(),
        linux_api::signal::Signal::SIGABRT.into(),
    );
    unreachable!()
}

fn format_shmem_name(buf: &mut PathBuf) {
    let pid = match getpid() {
        Ok(pid) => pid,
        Err(err) => log_err_and_exit(AllocError::GetPID, Some(err)),
    };

    let ts = match clock_monotonic_gettime() {
        Ok(ts) => ts,
        Err(errno) => log_err_and_exit(AllocError::Clock, Some(errno)),
    };

    let mut fb = FormatBuffer::<{ crate::util::PATH_MAX_NBYTES }>::new();
    write!(
        &mut fb,
        // Ensure consistent *size* of the formatted name, so that the size
        // of /proc/self/maps of managed programs doesn't change depending
        // on the number of digits in the nanoseconds or pid here.
        "/dev/shm/shadow_shmemfile_{}.{:09}-{:010}",
        ts.tv_sec, ts.tv_nsec, pid
    )
    .unwrap();
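    // With hypothetical values (tv_sec = 1234567, tv_nsec = 42, pid = 1000) this produces
    // "/dev/shm/shadow_shmemfile_1234567.000000042-0000001000"; the nanosecond and pid
    // fields are zero-padded to a fixed width.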

    *buf = crate::util::buf_from_utf8_str(fb.as_str()).unwrap();
}

const CHUNK_NBYTES_DEFAULT: usize = 8 * 1024 * 1024; // 8 MiB

fn create_map_shared_memory<'a>(path_buf: &PathBuf, nbytes: usize) -> (&'a mut [u8], i32) {
    use linux_api::fcntl::OFlag;
    use linux_api::mman::{MapFlags, ProtFlags};

    const MODE: u32 = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
    let open_flags = OFlag::O_RDWR | OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_CLOEXEC;
    let prot = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE;
    let map_flags = MapFlags::MAP_SHARED;

    let fd = match unsafe { open(path_buf, open_flags, MODE) } {
        Ok(fd) => fd,
        Err(err) => log_err_and_exit(AllocError::Open, Some(err)),
    };

    // Converting the usize byte count into ftruncate's length type should be safe to unwrap.
    if let Err(errno) = ftruncate(fd, nbytes.try_into().unwrap()) {
        log_err_and_exit(AllocError::FTruncate, Some(errno))
    };

    let retval = match unsafe {
        mmap(
            core::ptr::null_mut(),
            nbytes.try_into().unwrap(),
            prot,
            map_flags,
            fd,
            0,
        )
    } {
        Ok(retval) => retval,
        Err(errno) => log_err_and_exit(AllocError::MMap, Some(errno)),
    };

    (retval, fd)
}

// Similar to `create_map_shared_memory` but no O_CREAT or O_EXCL and no ftruncate calls.
fn view_shared_memory<'a>(path_buf: &PathBuf, nbytes: usize) -> (&'a mut [u8], i32) {
    use linux_api::fcntl::OFlag;
    use linux_api::mman::{MapFlags, ProtFlags};

    const MODE: u32 = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
    let open_flags = OFlag::O_RDWR | OFlag::O_CLOEXEC;
    let prot = ProtFlags::PROT_READ | ProtFlags::PROT_WRITE;
    let map_flags = MapFlags::MAP_SHARED;

    let fd = match unsafe { open(path_buf, open_flags, MODE) } {
        Ok(fd) => fd,
        Err(errno) => log_err_and_exit(AllocError::Open, Some(errno)),
    };

    let retval = match unsafe {
        mmap(
            core::ptr::null_mut(),
            nbytes.try_into().unwrap(),
            prot,
            map_flags,
            fd,
            0,
        )
    } {
        Ok(retval) => retval,
        Err(errno) => log_err_and_exit(AllocError::MMap, Some(errno)),
    };

    (retval, fd)
}

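// Rough layout of a chunk within its shared-memory mapping:
//
//   [ Chunk header | data segment: block, block, ..., unused space ]
//   |<----------------------- chunk_nbytes ------------------------>|
//
// Blocks are carved out of the data segment front to back, and `next_chunk` links an
// allocator's chunks into a singly-linked list.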
#[repr(C)]
struct Chunk {
    canary_front: CanaryBuf,
    chunk_name: PathBuf,
    chunk_fd: i32,
    chunk_nbytes: usize,
    data_cur: u32, // Offset into the data segment at which the next block search begins.
    next_chunk: *mut Chunk,
    canary_back: CanaryBuf,
}

impl Chunk {
    fn get_mut_data_start(&mut self) -> *mut u8 {
        self.get_data_start().cast_mut()
    }

    fn get_data_start(&self) -> *const u8 {
        let p = core::ptr::from_ref(self) as *const u8;
        unsafe { p.add(core::mem::size_of::<Self>()) }
    }
}

impl Canary for Chunk {
    fn canary_init(&mut self) {
        self.canary_front = CANARY;
        self.canary_back = CANARY;
    }

    fn canary_check(&self) -> bool {
        self.canary_front == CANARY && self.canary_back == CANARY
    }
}

fn allocate_shared_chunk(path_buf: &PathBuf, nbytes: usize) -> *mut Chunk {
    let (p, fd) = create_map_shared_memory(path_buf, nbytes);

    let chunk_meta: *mut Chunk = p.as_mut_ptr() as *mut Chunk;

    unsafe {
        (*chunk_meta).chunk_name = *path_buf;
        (*chunk_meta).chunk_fd = fd;
        (*chunk_meta).chunk_nbytes = nbytes;
        (*chunk_meta).data_cur = 0;
        (*chunk_meta).next_chunk = core::ptr::null_mut();
        (*chunk_meta).canary_init();
    }

    chunk_meta
}

fn view_shared_chunk(path_buf: &PathBuf, nbytes: usize) -> *mut Chunk {
    let (p, _) = view_shared_memory(path_buf, nbytes);
    let chunk_meta: *mut Chunk = p.as_mut_ptr() as *mut Chunk;
    chunk_meta
}

fn deallocate_shared_chunk(chunk_meta: *const Chunk) {
    unsafe {
        (*chunk_meta).canary_assert();
    }

    let path_buf = unsafe { (*chunk_meta).chunk_name };
    let chunk_nbytes = unsafe { (*chunk_meta).chunk_nbytes };

    if let Err(errno) =
        munmap(unsafe { core::slice::from_raw_parts_mut(chunk_meta as *mut u8, chunk_nbytes) })
    {
        log_err(AllocError::MUnmap, Some(errno));
    }

    if let Err(errno) = unsafe { unlink(&path_buf) } {
        log_err(AllocError::Unlink, Some(errno));
    }
}

#[repr(C)]
#[derive(Debug)]
pub(crate) struct Block {
    canary_front: CanaryBuf,
    next_free_block: *mut Block, // This can't be a short pointer, because it may point across chunks.
    alloc_nbytes: u32,           // Size of the block's data, in bytes.
    data_offset: u32,            // Offset of the data from the start of the block header.
    canary_back: CanaryBuf,
}

#[repr(C)]
#[derive(Copy, Clone, Debug, VirtualAddressSpaceIndependent)]
pub(crate) struct BlockSerialized {
    pub(crate) chunk_name: crate::util::PathBuf,
    pub(crate) offset: isize,
}
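
// A serialized block names the chunk's backing file and stores the block's byte offset from
// the start of that chunk. Since each process may map the file at a different base address,
// only the (name, offset) pair is meaningful across processes; `FreelistDeserializer` re-maps
// the file and adds the offset back to recover a usable pointer.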

const BLOCK_STRUCT_NBYTES: usize = core::mem::size_of::<Block>();
const BLOCK_STRUCT_ALIGNMENT: usize = core::mem::align_of::<Block>();

impl Canary for Block {
    fn canary_init(&mut self) {
        self.canary_front = CANARY;
        self.canary_back = CANARY;
    }

    fn canary_check(&self) -> bool {
        self.canary_front == CANARY && self.canary_back == CANARY
    }
}

impl Block {
    pub(self) fn get_block_data_range(&self) -> (*const u8, *const u8) {
        self.canary_assert();

        let data_offset = self.data_offset;
        let alloc_nbytes = self.alloc_nbytes;
        let block = core::ptr::from_ref(self) as *const u8;
        assert!(!block.is_null());

        let data_begin = unsafe { block.add(data_offset as usize) };
        let data_end = unsafe { data_begin.add(alloc_nbytes as usize) };

        (data_begin, data_end)
    }

    pub(crate) fn get_ref<T>(&self) -> &[T] {
        let (begin_p, end_p) = self.get_block_data_range();
        let block_len = unsafe { end_p.offset_from(begin_p) } as usize;
        assert!(block_len % core::mem::size_of::<T>() == 0);
        let nelems = block_len / core::mem::size_of::<T>();
        unsafe { core::slice::from_raw_parts(begin_p as *const T, nelems) }
    }

    pub(crate) fn get_mut_ref<T>(&mut self) -> &mut [T] {
        let x = self.get_ref();
        let nelems = x.len();
        let x_ptr: *const T = x.as_ptr();
        unsafe { core::slice::from_raw_parts_mut(x_ptr.cast_mut(), nelems) }
    }
}

/*

----> These functions are needed if implementing the global allocator API

fn seek_prv_aligned_ptr(p: *mut u8, alignment: usize) -> *mut u8 {
    if p.align_offset(alignment) == 0 {
        unsafe { p.offset(-(alignment as isize)) }
    } else {
        let offset = (p as usize) % alignment;
        let p = unsafe { p.offset(-(offset as isize)) };
        assert!(p.align_offset(alignment) == 0);
        p
    }
}

pub(crate) fn rewind(p: *mut u8) -> *mut Block {
    // This logic could be simplified if we use the layout information that the allocator gets on
    // free. We could stick the block header *behind* the block data in the first space it can fit.
    // That would allow us to find the header deterministically versus using this scan.

    // First, go to the first pointer offset that could possibly correspond to this block.

    let mut block_p = unsafe { p.offset(-(BLOCK_STRUCT_NBYTES as isize)) };

    if block_p.align_offset(BLOCK_STRUCT_ALIGNMENT) != 0 {
        block_p = seek_prv_aligned_ptr(block_p, BLOCK_STRUCT_ALIGNMENT);
    }

    loop {
        // Interpret block_p as a block. If the offset matches up, we are good to go.
        let block_offset = unsafe { (*(block_p as *mut Block)).data_offset };
        let real_offset = unsafe { p.offset_from(block_p) } as u32;

        if real_offset == block_offset {
            // `block_p` now points to a valid block.
            break;
        } else {
            block_p = seek_prv_aligned_ptr(block_p, BLOCK_STRUCT_ALIGNMENT);
        }
    }

    block_p as *mut Block
}

*/

#[derive(Debug)]
pub(crate) struct FreelistAllocator {
    first_chunk: *mut Chunk,
    next_free_block: *mut Block,
    chunk_nbytes: usize,
}

impl FreelistAllocator {
    pub const fn new() -> Self {
        FreelistAllocator {
            first_chunk: core::ptr::null_mut(),
            next_free_block: core::ptr::null_mut(),
            chunk_nbytes: CHUNK_NBYTES_DEFAULT,
        }
    }

    pub fn init(&mut self) -> Result<(), i32> {
        self.add_chunk()
    }

    fn add_chunk(&mut self) -> Result<(), i32> {
        let mut path_buf = crate::util::NULL_PATH_BUF;
        format_shmem_name(&mut path_buf);

        let new_chunk = allocate_shared_chunk(&path_buf, self.chunk_nbytes);

        unsafe {
            (*new_chunk).next_chunk = self.first_chunk;
        }

        self.first_chunk = new_chunk;

        Ok(())
    }

    /// Returns the block and its predecessor (if it exists)
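    ///
    /// Only blocks whose recorded size exactly equals `alloc_nbytes` and whose data pointer
    /// already satisfies `alloc_alignment` are reused; everything else stays on the list.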
    fn check_free_list_for_acceptable_block(
        &mut self,
        alloc_nbytes: usize,
        alloc_alignment: usize,
    ) -> (*mut Block, *mut Block) // (pred, block)
    {
        let mut block = self.next_free_block;
        let mut pred: *mut Block = core::ptr::null_mut();

        while !block.is_null() {
            let (start_p, _) = unsafe { (*block).get_block_data_range() };

            if unsafe { (*block).alloc_nbytes as usize == alloc_nbytes }
                && start_p.align_offset(alloc_alignment) == 0
            {
                return (pred, block);
            }

            pred = block;
            unsafe {
                block = (*block).next_free_block;
            }
        }

        (pred, core::ptr::null_mut())
    }

    fn find_next_suitable_positions(
        p: *mut u8,
        alloc_nbytes: usize,
        alloc_alignment: usize,
    ) -> (*mut u8, *mut u8) {
        let off = p.align_offset(alloc_alignment);
        let start = unsafe { p.add(off) };
        let end = unsafe { start.add(alloc_nbytes) };
        (start, end)
    }

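    // Rough picture of how a block is placed at the chunk's current bump position:
    //
    //   [ Block header | padding | data (alloc_nbytes bytes, aligned to alloc_alignment) ]
    //   |<----- data_offset ---->|
    //
    // Both the header and the data are moved forward as needed to satisfy their alignments.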
    fn try_creating_block_in_chunk(
        chunk: &mut Chunk,
        alloc_nbytes: usize,
        alloc_alignment: usize,
    ) -> *mut Block {
        let chunk_start = core::ptr::from_mut(chunk) as *mut u8;
        let chunk_end = unsafe { chunk_start.add(chunk.chunk_nbytes) };

        let data_start = unsafe { chunk.get_mut_data_start().add(chunk.data_cur as usize) };

        let (block_struct_start, block_struct_end) = Self::find_next_suitable_positions(
            data_start,
            BLOCK_STRUCT_NBYTES,
            BLOCK_STRUCT_ALIGNMENT,
        );
        let (block_data_start, block_data_end) =
            Self::find_next_suitable_positions(block_struct_end, alloc_nbytes, alloc_alignment);

        let data_offset = unsafe { block_data_start.offset_from(block_struct_start) };

        assert!(data_offset > 0);

        if block_data_end <= chunk_end {
            // The block fits.
            // Initialize the block
            let block = block_struct_start as *mut Block;

            unsafe {
                (*block).canary_init();
                (*block).next_free_block = core::ptr::null_mut();
                (*block).alloc_nbytes = alloc_nbytes as u32;
                (*block).data_offset = data_offset as u32;
            }

            return block;
        }

        core::ptr::null_mut()
    }

    pub fn alloc(&mut self, alloc_nbytes: usize, alloc_alignment: usize) -> *mut Block {
        // First, check the free list.
        let (pred, mut block) =
            self.check_free_list_for_acceptable_block(alloc_nbytes, alloc_alignment);

        if !block.is_null() {
            // We found a hit off the free list, so we can just return that.
            // But first we update the free list.
            if pred.is_null() {
                // The block was the first element on the list.
                self.next_free_block = unsafe { (*block).next_free_block };
            } else {
                // We can just update the predecessor.
                unsafe {
                    (*pred).next_free_block = (*block).next_free_block;
                }
            }

            let (p, _) = unsafe { (*block).get_block_data_range() };
            assert!(p.align_offset(alloc_alignment) == 0);

            return block;
        }

        // If nothing in the free list, then check if the current chunk can handle the allocation.
        block = Self::try_creating_block_in_chunk(
            unsafe { &mut (*self.first_chunk) },
            alloc_nbytes,
            alloc_alignment,
        );

        if block.is_null() {
            // The chunk didn't have enough capacity; add a fresh chunk and retry.
            self.add_chunk().unwrap();
        }

        block = Self::try_creating_block_in_chunk(
            unsafe { &mut (*self.first_chunk) },
            alloc_nbytes,
            alloc_alignment,
        );

        // If the allocation doesn't fit even in a brand-new chunk, we can't recover; check
        // before dereferencing the block below.
        assert!(!block.is_null());

        let block_p = block as *mut u8;

        let block_end = unsafe {
            let data_offset = (*block).data_offset;
            assert!(data_offset > 0);
            block_p.add(data_offset as usize).add(alloc_nbytes)
        };

        let chunk_p = self.first_chunk as *mut u8;
        unsafe {
            let data_cur = block_end.offset_from(chunk_p);
            (*self.first_chunk).data_cur = data_cur as u32;
        }

        let (p, _) = unsafe { (*block).get_block_data_range() };
        assert!(p.align_offset(alloc_alignment) == 0);

        block
    }

    pub fn dealloc(&mut self, block: *mut Block) {
        if block.is_null() {
            return;
        }

        unsafe {
            (*block).canary_assert();
        }
        let old_block = self.next_free_block;
        unsafe {
            (*block).next_free_block = old_block;
        }
        self.next_free_block = block;
    }

    // PRE: Block was allocated with this allocator
    fn find_chunk(&self, block: *const Block) -> Option<*const Chunk> {
        unsafe {
            (*block).canary_assert();
        }

        if !self.first_chunk.is_null() {
            let mut chunk_to_check = self.first_chunk;

            while !chunk_to_check.is_null() {
                // Safe to deref throughout this block because we checked for null above
                unsafe {
                    (*chunk_to_check).canary_assert();
                }
                let data_start = unsafe { (*chunk_to_check).get_data_start() };
                let data_end = unsafe { (chunk_to_check as *const u8).add(self.chunk_nbytes) };

                // Now we just see if the block is in the range.
                let block_p = block as *const u8;

                if block_p >= data_start && block_p < data_end {
                    return Some(chunk_to_check);
                }

                chunk_to_check = unsafe { (*chunk_to_check).next_chunk };
            }
        }

        None
    }

    // PRE: Block was allocated with this allocator
    pub fn serialize(&self, block: *const Block) -> BlockSerialized {
        unsafe {
            (*block).canary_assert();
        }

        if let Some(chunk) = self.find_chunk(block) {
            let chunk_p = chunk as *const u8;
            let block_p = block as *const u8;
            let offset = unsafe { block_p.offset_from(chunk_p) };
            assert!(offset > 0);

            BlockSerialized {
                chunk_name: unsafe { (*chunk).chunk_name },
                offset,
            }
        } else {
            log_err_and_exit(AllocError::WrongAllocator, None);
        }
    }

    pub fn destruct(&mut self) {
        if !self.first_chunk.is_null() {
            let mut chunk_to_dealloc = self.first_chunk;

            while !chunk_to_dealloc.is_null() {
                // Safe due to check above
                let tmp = unsafe { (*chunk_to_dealloc).next_chunk };

                deallocate_shared_chunk(chunk_to_dealloc);

                chunk_to_dealloc = tmp;
            }

            self.first_chunk = core::ptr::null_mut();
        }
    }
}

const CHUNK_CAPACITY: usize = 64;

#[repr(C)]
#[derive(Debug)]
pub(crate) struct FreelistDeserializer {
    chunks: [*mut Chunk; CHUNK_CAPACITY],
    nmapped_chunks: usize,
    chunk_nbytes: usize,
}

impl FreelistDeserializer {
    pub fn new() -> FreelistDeserializer {
        FreelistDeserializer {
            chunks: [core::ptr::null_mut(); CHUNK_CAPACITY],
            nmapped_chunks: 0,
            chunk_nbytes: CHUNK_NBYTES_DEFAULT,
        }
    }

    fn find_chunk(&self, chunk_name: &PathBuf) -> *mut Chunk {
        for idx in 0..self.nmapped_chunks {
            let chunk = self.chunks[idx];

            // Safe here to deref because we are only checking within the allocated range.
            if unsafe { (*chunk).chunk_name == (*chunk_name) } {
                return chunk;
            }
        }

        core::ptr::null_mut()
    }

    fn map_chunk(&mut self, chunk_name: &PathBuf) -> *mut Chunk {
        let chunk = view_shared_chunk(chunk_name, self.chunk_nbytes);

        if self.nmapped_chunks == CHUNK_CAPACITY {
            // Ran out of chunk slots -- we're going to leak the handle.
        } else {
            self.chunks[self.nmapped_chunks] = chunk;
            self.nmapped_chunks += 1;
        }

        chunk
    }

    pub fn deserialize(&mut self, block_ser: &BlockSerialized) -> *mut Block {
        let mut block_chunk = self.find_chunk(&block_ser.chunk_name);

        if block_chunk.is_null() {
            block_chunk = self.map_chunk(&block_ser.chunk_name);
        }

        let chunk_p = block_chunk as *mut u8;

        assert!(block_ser.offset > 0);
        let block_p = unsafe { chunk_p.add(block_ser.offset as usize) };

        assert!(!block_p.is_null());
        unsafe {
            (*(block_p as *mut Block)).canary_assert();
        };

        block_p as *mut Block
    }
}
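
// A rough smoke-test sketch, not part of the original module: it assumes the crate builds its
// tests with std available and that the test environment can create and map files under
// /dev/shm.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn alloc_serialize_roundtrip() {
        let mut alloc = FreelistAllocator::new();
        alloc.init().unwrap();

        // Allocate space for four u32 values and write to them through the block.
        let block = alloc.alloc(4 * core::mem::size_of::<u32>(), core::mem::align_of::<u32>());
        unsafe { (*block).get_mut_ref::<u32>() }.copy_from_slice(&[1, 2, 3, 4]);

        // Serialize and deserialize in the same process; across processes the other side
        // would re-map the chunk's backing file by name and land on the same bytes.
        let serialized = alloc.serialize(block);
        let mut deserializer = FreelistDeserializer::new();
        let view = deserializer.deserialize(&serialized);
        assert_eq!(unsafe { (*view).get_ref::<u32>() }, &[1u32, 2, 3, 4][..]);

        alloc.dealloc(block);
        alloc.destruct();
    }
}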