use std::cell::RefCell;
use std::collections::HashMap;
use std::ffi::CString;
use std::fmt::Debug;
use std::fs::File;
use std::os::raw::c_void;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::process;

use linux_api::errno::Errno;
use linux_api::mman::MapFlags;
use linux_api::mman::ProtFlags;
use linux_api::posix_types::Pid;
use log::*;
use rustix::fs::FallocateFlags;
use rustix::fs::MemfdFlags;
use shadow_pod::Pod;
use shadow_shim_helper_rs::notnull::*;
use shadow_shim_helper_rs::syscall_types::ForeignPtr;

use crate::host::context::ProcessContext;
use crate::host::context::ThreadContext;
use crate::host::memory_manager::{MemoryManager, page_size};
use crate::host::syscall::types::ForeignArrayPtr;
use crate::utility::interval_map::{Interval, IntervalMap, Mutation};
use crate::utility::proc_maps;
use crate::utility::proc_maps::{MappingPath, Sharing};

const HEAP_PROT: ProtFlags = ProtFlags::PROT_READ.union(ProtFlags::PROT_WRITE);
const STACK_PROT: ProtFlags = ProtFlags::PROT_READ.union(ProtFlags::PROT_WRITE);

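/// A contiguous region of the plugin's address space. `shadow_base` is the
/// address at which the region is mapped into Shadow's own address space, or
/// null if it isn't mapped into Shadow.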
#[derive(Clone, Debug, Eq, PartialEq)]
struct Region {
    shadow_base: *mut c_void,
    prot: ProtFlags,
    sharing: proc_maps::Sharing,
    original_path: Option<proc_maps::MappingPath>,
}

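// SAFETY: `Region` is only `!Send` because it contains a raw pointer. The
// pointer refers to a shared-memory mapping in Shadow's address space, which
// is valid process-wide rather than tied to any one thread, so moving a
// `Region` across threads is sound.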
unsafe impl Send for Region {}

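/// Logs the given regions at the given log level, if that level is enabled.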
#[allow(dead_code)]
fn log_regions<It: Iterator<Item = (Interval, Region)>>(level: log::Level, regions: It) {
    if log::log_enabled!(level) {
        log!(level, "MemoryManager regions:");
        for (interval, mapping) in regions {
            log!(
                level,
                "{:x}-{:x} {:?}",
                interval.start,
                interval.end,
                mapping
            );
        }
    }
}

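/// Maps regions of the plugin's address space (its heap, stack, and private
/// anonymous mappings) onto a shared-memory file, so that Shadow can read and
/// write the plugin's memory directly through its own mapping of that file.
///
/// `regions` tracks what is mapped where, `misses_by_path` counts accesses
/// that couldn't be served through a mapped region, and `heap` is the current
/// extent of the plugin's heap.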
#[derive(Debug)]
pub struct MemoryMapper {
    shm_file: ShmFile,
    regions: IntervalMap<Region>,

    misses_by_path: RefCell<HashMap<String, u32>>,

    heap: Interval,
}

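/// The shared-memory file backing the remapped regions. `shm_file` is
/// Shadow's handle to the file, `shm_plugin_fd` is the descriptor for the
/// same file inside the plugin process, and `len` is the file's current
/// length.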
#[derive(Debug)]
struct ShmFile {
    shm_file: File,
    shm_plugin_fd: i32,
    len: usize,
}

impl ShmFile {
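    /// Grows the backing file, if necessary, so that it can hold `interval`.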
    fn alloc(&mut self, interval: &Interval) {
        let needed_len = interval.end;
        if needed_len > self.len {
            rustix::fs::ftruncate(&self.shm_file, u64::try_from(needed_len).unwrap()).unwrap();
            self.len = needed_len;
        }
    }

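    /// Releases the backing storage for `interval` by punching a hole in the
    /// file, without changing the file's length.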
    fn dealloc(&self, interval: &Interval) {
        trace!("dealloc {:?}", interval);
        rustix::fs::fallocate(
            &self.shm_file,
            FallocateFlags::PUNCH_HOLE | FallocateFlags::KEEP_SIZE,
            u64::try_from(interval.start).unwrap(),
            u64::try_from(interval.len()).unwrap(),
        )
        .unwrap();
    }

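    /// Maps `interval` of the backing file into Shadow's address space and
    /// returns the address of the new mapping. The file offset is the
    /// interval's start address, so a plugin address doubles as an offset into
    /// the file.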
    fn mmap_into_shadow(&self, interval: &Interval, prot: ProtFlags) -> *mut c_void {
        unsafe {
            linux_api::mman::mmap(
                std::ptr::null_mut(),
                interval.len(),
                prot,
                MapFlags::MAP_SHARED,
                self.shm_file.as_raw_fd(),
                interval.start,
            )
        }
        .unwrap()
    }

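    /// Copies `interval` of the plugin's memory into the corresponding part of
    /// the backing file, via `region`'s mapping in Shadow. `interval` must lie
    /// within `region_interval`, and `region` must already be mapped into
    /// Shadow.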
    fn copy_into_file(
        &self,
        memory_manager: &MemoryManager,
        region_interval: &Interval,
        region: &Region,
        interval: &Interval,
    ) {
        if interval.is_empty() {
            return;
        }
        assert!(!region.shadow_base.is_null());
        assert!(region_interval.contains(&interval.start));
        assert!(region_interval.contains(&(interval.end - 1)));
        let offset = interval.start - region_interval.start;
        let dst = unsafe {
            std::slice::from_raw_parts_mut(
                region.shadow_base.add(offset) as *mut u8,
                interval.len(),
            )
        };

        memory_manager
            .copy_from_ptr(
                dst,
                ForeignArrayPtr::new(
                    ForeignPtr::from(interval.start).cast::<u8>(),
                    interval.len(),
                ),
            )
            .unwrap()
    }

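    /// Maps `interval` of the backing file into the plugin's address space at
    /// the same virtual addresses, using `MAP_FIXED` so the new mapping
    /// replaces whatever the plugin had there before.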
    fn mmap_into_plugin(&self, ctx: &ThreadContext, interval: &Interval, prot: ProtFlags) {
        ctx.thread
            .native_mmap(
                &ProcessContext::new(ctx.host, ctx.process),
                ForeignPtr::from(interval.start).cast::<u8>(),
                interval.len(),
                prot,
                MapFlags::MAP_SHARED | MapFlags::MAP_FIXED,
                self.shm_plugin_fd,
                interval.start as i64,
            )
            .unwrap();
    }
}

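/// Reads the plugin's current memory layout from `/proc/<pid>/maps` and
/// converts it into an `IntervalMap` of `Region`s. None of the returned
/// regions are mapped into Shadow yet.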
fn get_regions(pid: Pid) -> IntervalMap<Region> {
    let mut regions = IntervalMap::new();
    for mapping in proc_maps::mappings_for_pid(pid.as_raw_nonzero().get()).unwrap() {
        let mut prot = ProtFlags::empty();
        if mapping.read {
            prot |= ProtFlags::PROT_READ;
        }
        if mapping.write {
            prot |= ProtFlags::PROT_WRITE;
        }
        if mapping.execute {
            prot |= ProtFlags::PROT_EXEC;
        }
        let mutations = regions.insert(
            mapping.begin..mapping.end,
            Region {
                shadow_base: std::ptr::null_mut(),
                prot,
                sharing: mapping.sharing,
                original_path: mapping.path,
            },
        );
        assert_eq!(mutations.len(), 0);
    }
    regions
}

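/// Finds the plugin's heap region, copies its contents into the shared-memory
/// file, and remaps it (in both the plugin and Shadow) so that it is backed by
/// that file. Returns the heap's interval; if the plugin has no heap yet, the
/// returned interval is empty and starts at the current program break.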
fn get_heap(
    ctx: &ThreadContext,
    shm_file: &mut ShmFile,
    memory_manager: &MemoryManager,
    regions: &mut IntervalMap<Region>,
) -> Interval {
    let heap_mapping = {
        let mut it = regions
            .iter()
            .filter(|m| m.1.original_path == Some(proc_maps::MappingPath::Heap));
        let heap_mapping = it.next();
        assert_eq!(it.fuse().next(), None);
        heap_mapping
    };
    if heap_mapping.is_none() {
        let (ctx, thread) = ctx.split_thread();
        let start = usize::from(thread.native_brk(&ctx, ForeignPtr::null()).unwrap());
        return start..start;
    }
    let (heap_interval, heap_region) = heap_mapping.unwrap();

    shm_file.alloc(&heap_interval);
    let mut heap_region = heap_region.clone();
    heap_region.shadow_base = shm_file.mmap_into_shadow(&heap_interval, HEAP_PROT);
    shm_file.copy_into_file(memory_manager, &heap_interval, &heap_region, &heap_interval);
    shm_file.mmap_into_plugin(ctx, &heap_interval, HEAP_PROT);

    {
        let mutations = regions.insert(heap_interval.clone(), heap_region);
        assert!(mutations.len() == 1);
    }

    heap_interval
}

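/// Remaps (most of) the plugin's initial stack onto the shared-memory file.
/// A fixed maximum-size stack region is mapped up front, ending one page below
/// the upper end of the current stack mapping, and any part of the existing
/// stack that falls inside it is copied into the file first.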
fn map_stack(
    memory_manager: &mut MemoryManager,
    ctx: &ThreadContext,
    shm_file: &mut ShmFile,
    regions: &mut IntervalMap<Region>,
) {
    let mut iter = regions
        .iter()
        .filter(|(_i, r)| r.original_path == Some(MappingPath::InitialStack));
    let (current_stack_bounds, region) = iter.next().unwrap();
    assert!(iter.next().is_none());

    let max_stack_size: usize = 8 * (1 << 20);
    let remapped_stack_end = current_stack_bounds.end - page_size();

    let remapped_stack_begin = current_stack_bounds.end - max_stack_size;
    let remapped_stack_bounds = remapped_stack_begin..remapped_stack_end;
    let mut region = region.clone();
    region.shadow_base = shm_file.mmap_into_shadow(&remapped_stack_bounds, STACK_PROT);

    shm_file.alloc(&remapped_stack_bounds);

    let remapped_overlaps_current = current_stack_bounds.start < remapped_stack_bounds.end;

    if remapped_overlaps_current {
        shm_file.copy_into_file(
            memory_manager,
            &remapped_stack_bounds,
            &region,
            &(current_stack_bounds.start..remapped_stack_bounds.end),
        );
    }

    shm_file.mmap_into_plugin(ctx, &remapped_stack_bounds, STACK_PROT);

    let mutations = regions.insert(remapped_stack_bounds, region);
    if remapped_overlaps_current {
        debug_assert_eq!(mutations.len(), 1);
    } else {
        debug_assert_eq!(mutations.len(), 0);
    }
}

impl Drop for MemoryMapper {
    fn drop(&mut self) {
        let misses = self.misses_by_path.borrow();
        if misses.is_empty() {
            debug!("MemoryManager misses: None");
        } else {
            debug!(
                "MemoryManager misses: (consider extending MemoryManager to remap regions with a high miss count)"
            );
            for (path, count) in misses.iter() {
                debug!("\t{} in {}", count, path);
            }
        }

        let mutations = self.regions.clear(usize::MIN..usize::MAX);
        for m in mutations {
            if let Mutation::Removed(interval, region) = m {
                if !region.shadow_base.is_null() {
                    unsafe { linux_api::mman::munmap(region.shadow_base, interval.len()) }
                        .unwrap_or_else(|e| warn!("munmap: {}", e));
                }
            }
        }
    }
}

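/// Merges adjacent intervals whose `Region`s compare equal into a single
/// interval, returning a new map. The input regions must not yet be mapped
/// into Shadow.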
fn coalesce_regions(regions: IntervalMap<Region>) -> IntervalMap<Region> {
    let mut out = IntervalMap::new();
    let mut agg_interval_region: Option<(Interval, Region)> = None;
    for (interval, region) in regions.iter() {
        assert!(region.shadow_base.is_null());
        agg_interval_region = Some(
            if let Some((agg_interval, agg_region)) = agg_interval_region.take() {
                if interval.start == agg_interval.end && region == &agg_region {
                    (agg_interval.start..interval.end, agg_region)
                } else {
                    out.insert(agg_interval, agg_region);
                    (interval, region.clone())
                }
            } else {
                (interval, region.clone())
            },
        );
    }
    if let Some((current_interval, current_region)) = agg_interval_region.take() {
        out.insert(current_interval, current_region);
    }
    out
}

impl MemoryMapper {
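    /// Creates a new `MemoryMapper` for the plugin that `ctx` refers to.
    ///
    /// This creates a memfd-backed shared-memory file, opens that file in the
    /// plugin process via `/proc/<shadow-pid>/fd/<fd>`, takes a snapshot of
    /// the plugin's memory regions, and remaps the plugin's heap and stack so
    /// that they are backed by the shared file.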
    pub fn new(memory_manager: &mut MemoryManager, ctx: &ThreadContext) -> MemoryMapper {
        let shm_name = CString::new(format!(
            "shadow_memory_manager_{}_{:?}_{}",
            process::id(),
            ctx.thread.host_id(),
            u32::from(ctx.process.id())
        ))
        .unwrap();
        let raw_file = rustix::fs::memfd_create(&shm_name, MemfdFlags::CLOEXEC).unwrap();
        let shm_file = File::from(raw_file);

        let shm_path = format!("/proc/{}/fd/{}\0", process::id(), shm_file.as_raw_fd());

        let shm_plugin_fd = {
            let (ctx, thread) = ctx.split_thread();
            let path_buf_foreign_ptr = ForeignArrayPtr::new(
                thread.malloc_foreign_ptr(&ctx, shm_path.len()).unwrap(),
                shm_path.len(),
            );
            memory_manager
                .copy_to_ptr(path_buf_foreign_ptr, shm_path.as_bytes())
                .unwrap();
            let shm_plugin_fd = thread
                .native_open(
                    &ctx,
                    path_buf_foreign_ptr.ptr(),
                    libc::O_RDWR | libc::O_CLOEXEC,
                    0,
                )
                .unwrap();
            thread
                .free_foreign_ptr(&ctx, path_buf_foreign_ptr.ptr(), path_buf_foreign_ptr.len())
                .unwrap();
            shm_plugin_fd
        };

        let mut shm_file = ShmFile {
            shm_file,
            shm_plugin_fd,
            len: 0,
        };
        let regions = get_regions(memory_manager.pid);
        let mut regions = coalesce_regions(regions);
        let heap = get_heap(ctx, &mut shm_file, memory_manager, &mut regions);
        map_stack(memory_manager, ctx, &mut shm_file, &mut regions);

        MemoryMapper {
            shm_file,
            regions,
            misses_by_path: RefCell::new(HashMap::new()),
            heap,
        }
    }

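    /// Processes the mutations returned by an `IntervalMap` operation, undoing
    /// the corresponding Shadow-side state: for every piece of a region that
    /// was removed or trimmed, the matching range is punched out of the
    /// backing file and unmapped from Shadow, and any surviving piece has its
    /// `shadow_base` adjusted to stay consistent.
    ///
    /// This does not modify the plugin's own mappings; the plugin is assumed
    /// to have already changed them (e.g. via `munmap` or `mmap` with
    /// `MAP_FIXED`).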
    fn unmap_mutations(&mut self, mutations: Vec<Mutation<Region>>) {
        for mutation in mutations {
            match mutation {
                Mutation::ModifiedBegin(interval, new_start) => {
                    let (_, region) = self.regions.get_mut(new_start).unwrap();
                    if region.shadow_base.is_null() {
                        continue;
                    }
                    let removed_range = interval.start..new_start;

                    self.shm_file.dealloc(&removed_range);

                    unsafe { linux_api::mman::munmap(region.shadow_base, removed_range.len()) }
                        .unwrap_or_else(|e| warn!("munmap: {}", e));

                    region.shadow_base = unsafe { region.shadow_base.add(removed_range.len()) };
                }
                Mutation::ModifiedEnd(interval, new_end) => {
                    let (_, region) = self.regions.get(interval.start).unwrap();
                    if region.shadow_base.is_null() {
                        continue;
                    }
                    let removed_range = new_end..interval.end;

                    self.shm_file.dealloc(&removed_range);

                    unsafe {
                        linux_api::mman::munmap(
                            region.shadow_base.add((interval.start..new_end).len()),
                            removed_range.len(),
                        )
                    }
                    .unwrap_or_else(|e| warn!("munmap: {}", e));
                }
                Mutation::Split(_original, left, right) => {
                    let (_, left_region) = self.regions.get(left.start).unwrap();
                    let (_, right_region) = self.regions.get(right.start).unwrap();
                    debug_assert_eq!(left_region.shadow_base, right_region.shadow_base);
                    if left_region.shadow_base.is_null() {
                        continue;
                    }
                    let removed_range = left.end..right.start;

                    self.shm_file.dealloc(&removed_range);

                    unsafe {
                        linux_api::mman::munmap(
                            (left_region.shadow_base.add(left.len())) as *mut c_void,
                            removed_range.len(),
                        )
                    }
                    .unwrap_or_else(|e| warn!("munmap: {}", e));

                    let (_, right_region) = self.regions.get_mut(right.start).unwrap();
                    right_region.shadow_base =
                        unsafe { right_region.shadow_base.add(right.start - left.start) };
                }
                Mutation::Removed(interval, region) => {
                    if region.shadow_base.is_null() {
                        continue;
                    }

                    self.shm_file.dealloc(&interval);

                    unsafe { linux_api::mman::munmap(region.shadow_base, interval.len()) }
                        .unwrap_or_else(|e| warn!("munmap: {}", e));
                }
            }
        }
    }

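    /// Updates the mapper's state after the plugin has successfully executed
    /// an `mmap` for `ptr..ptr+len` with the given protection, flags, and file
    /// descriptor. Private anonymous mappings are additionally remapped onto
    /// the shared-memory file so that Shadow can access them directly; other
    /// mappings are only recorded.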
    pub fn handle_mmap_result(
        &mut self,
        ctx: &ThreadContext,
        ptr: ForeignArrayPtr<u8>,
        prot: ProtFlags,
        flags: MapFlags,
        fd: i32,
    ) {
        trace!(
            "Handling mmap result for {:x}..+{}",
            usize::from(ptr.ptr()),
            ptr.len()
        );
        if ptr.is_empty() {
            return;
        }
        let addr = usize::from(ptr.ptr());
        let interval = addr..(addr + ptr.len());
        let is_anonymous = flags.contains(MapFlags::MAP_ANONYMOUS);
        let sharing = if flags.contains(MapFlags::MAP_PRIVATE) {
            Sharing::Private
        } else {
            Sharing::Shared
        };
        let original_path = if is_anonymous {
            None
        } else {
            Some(MappingPath::Path(
                std::fs::read_link(format!(
                    "/proc/{}/fd/{}",
                    ctx.thread.native_pid().as_raw_nonzero().get(),
                    fd
                ))
                .unwrap_or_else(|_| PathBuf::from(format!("bad-fd-{}", fd))),
            ))
        };
        let mut region = Region {
            shadow_base: std::ptr::null_mut(),
            prot,
            sharing,
            original_path,
        };

        let mutations = self.regions.clear(interval.clone());
        self.unmap_mutations(mutations);

        if is_anonymous && sharing == Sharing::Private {
            self.shm_file.alloc(&interval);
            region.shadow_base = self.shm_file.mmap_into_shadow(&interval, prot);
            self.shm_file.mmap_into_plugin(ctx, &interval, prot);
        }

        {
            let mutations = self.regions.insert(interval, region);
            assert!(mutations.is_empty());
        }
    }

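    /// Updates the mapper's state after the plugin has successfully executed a
    /// `munmap` of `addr..addr+length`, removing the affected regions and
    /// releasing any corresponding Shadow-side mappings and file storage.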
    pub fn handle_munmap_result(&mut self, addr: ForeignPtr<u8>, length: usize) {
        trace!("handle_munmap_result({:?}, {})", addr, length);
        if length == 0 {
            return;
        }

        let start = usize::from(addr);
        let end = start + length;
        let mutations = self.regions.clear(start..end);
        self.unmap_mutations(mutations);
    }

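    /// Executes an `mremap` on behalf of the plugin and updates the mapper's
    /// state to match the result. Regions that are mirrored into Shadow are
    /// moved or resized on the Shadow side as well, so that `shadow_base`
    /// remains valid. Returns the mapping's new address, or the error from the
    /// native `mremap`.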
    pub fn handle_mremap(
        &mut self,
        ctx: &ThreadContext,
        old_address: ForeignPtr<u8>,
        old_size: usize,
        new_size: usize,
        flags: i32,
        new_address: ForeignPtr<u8>,
    ) -> Result<ForeignPtr<u8>, Errno> {
        let new_address = {
            let (ctx, thread) = ctx.split_thread();
            thread.native_mremap(&ctx, old_address, old_size, new_size, flags, new_address)?
        };
        let old_interval = usize::from(old_address)..(usize::from(old_address) + old_size);
        let new_interval = usize::from(new_address)..(usize::from(new_address) + new_size);

        // With MREMAP_MAYMOVE and old_size == 0, mremap creates an additional
        // mapping of a shared region rather than moving the old one.
        if (flags & libc::MREMAP_MAYMOVE) != 0 && old_size == 0 {
            let region = {
                let (_, region) = self.regions.get(usize::from(old_address)).unwrap();
                region.clone()
            };
            assert_eq!(region.sharing, Sharing::Shared);
            assert_eq!(region.shadow_base, std::ptr::null_mut());
            let mutations = self.regions.insert(new_interval, region);
            self.unmap_mutations(mutations);
            return Ok(new_address);
        }

        let mut region = {
            let mut mutations = self.regions.clear(old_interval.clone());
            assert_eq!(mutations.len(), 1);
            if let Some(Mutation::Removed(removed_interval, region)) = mutations.pop() {
                assert_eq!(removed_interval, old_interval);
                region
            } else {
                panic!("Unexpected mutation {:?}", mutations[0])
            }
        };

        {
            let mutations = self.regions.clear(new_interval.clone());
            self.unmap_mutations(mutations);
        }

        if !region.shadow_base.is_null() {
            assert_eq!(region.original_path, None);

            if new_interval.start != old_interval.start {
                // The mapping moved: back the new location with space in the
                // file, remap both the plugin and Shadow there, and copy the
                // old contents over.
                assert!(!new_interval.contains(&old_interval.start));
                assert!(!old_interval.contains(&new_interval.start));

                self.shm_file.alloc(&new_interval);

                self.shm_file
                    .mmap_into_plugin(ctx, &new_interval, region.prot);

                let new_shadow_base = self.shm_file.mmap_into_shadow(&new_interval, region.prot);

                unsafe {
                    libc::memcpy(
                        new_shadow_base,
                        region.shadow_base,
                        std::cmp::min(old_size, new_size),
                    )
                };

                unsafe { linux_api::mman::munmap(region.shadow_base, old_size) }
                    .unwrap_or_else(|e| warn!("munmap: {}", e));

                region.shadow_base = new_shadow_base;

                self.shm_file.dealloc(&old_interval);
            } else if new_size < old_size {
                // The mapping shrank in place; release the no-longer-needed tail.
                self.shm_file.dealloc(&(new_interval.end..old_interval.end));

                assert_ne!(
                    unsafe { libc::mremap(region.shadow_base, old_size, new_size, 0) },
                    libc::MAP_FAILED
                );
            } else if new_size > old_size {
                // The mapping grew in place; Shadow's mirror of it is allowed
                // to move within Shadow's own address space.
                self.shm_file.alloc(&new_interval);

                region.shadow_base = unsafe {
                    libc::mremap(region.shadow_base, old_size, new_size, libc::MREMAP_MAYMOVE)
                };
                assert_ne!(region.shadow_base, libc::MAP_FAILED);
            }
        }
        let mutations = self.regions.insert(new_interval, region);
        assert_eq!(mutations.len(), 0);

        Ok(new_address)
    }

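    /// Handles a `brk` syscall from the plugin, growing or shrinking the
    /// remapped heap region to match the requested program break. Requests
    /// below the start of the heap leave it unchanged and return the current
    /// break.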
    pub fn handle_brk(
        &mut self,
        ctx: &ThreadContext,
        ptr: ForeignPtr<u8>,
    ) -> Result<ForeignPtr<u8>, Errno> {
        let requested_brk = usize::from(ptr);

        // Requests below the start of the heap leave it unchanged; report the
        // current break back to the plugin.
        if requested_brk < self.heap.start {
            return Ok(ForeignPtr::from(self.heap.end).cast::<u8>());
        }

        assert!(requested_brk % page_size() == 0);

        if requested_brk == self.heap.end {
            return Ok(ptr);
        }

        let opt_heap_interval_and_region = self.regions.get(self.heap.start);
        let new_heap = self.heap.start..requested_brk;

        if requested_brk > self.heap.end {
            // Grow the heap.
            let shadow_base = match opt_heap_interval_and_region {
                None => {
                    // There's no heap region yet; create one.
                    assert_eq!(self.heap.start, self.heap.end);
                    self.shm_file.alloc(&new_heap);
                    let shadow_base = self.shm_file.mmap_into_shadow(&new_heap, HEAP_PROT);
                    self.shm_file.mmap_into_plugin(ctx, &new_heap, HEAP_PROT);
                    shadow_base
                }
                Some((_, heap_region)) => {
                    // Grow the existing heap region in both the plugin and Shadow.
                    self.shm_file.alloc(&self.heap);
                    let (ctx, thread) = ctx.split_thread();
                    thread
                        .native_mremap(
                            &ctx,
                            ForeignPtr::from(self.heap.start).cast::<u8>(),
                            self.heap.end - self.heap.start,
                            new_heap.end - new_heap.start,
                            0,
                            ForeignPtr::null(),
                        )
                        .unwrap();
                    let shadow_base = unsafe {
                        libc::mremap(
                            heap_region.shadow_base,
                            self.heap.end - self.heap.start,
                            new_heap.end - new_heap.start,
                            libc::MREMAP_MAYMOVE,
                        )
                    };
                    assert_ne!(shadow_base, libc::MAP_FAILED);
                    shadow_base
                }
            };
            self.regions.insert(
                new_heap.clone(),
                Region {
                    shadow_base,
                    prot: HEAP_PROT,
                    sharing: Sharing::Private,
                    original_path: Some(MappingPath::Heap),
                },
            );
        } else {
            // Shrink the heap.
            if new_heap.start == new_heap.end {
                // Shrinking the heap to nothing isn't supported.
                unimplemented!();
            }
            let (_, heap_region) = opt_heap_interval_and_region.unwrap();

            let (ctx, thread) = ctx.split_thread();
            thread
                .native_mremap(
                    &ctx,
                    ForeignPtr::from(self.heap.start).cast::<u8>(),
                    self.heap.len(),
                    new_heap.len(),
                    0,
                    ForeignPtr::null(),
                )
                .unwrap();
            let shadow_base = unsafe {
                libc::mremap(
                    heap_region.shadow_base,
                    self.heap.len(),
                    new_heap.len(),
                    0,
                )
            };
            assert_eq!(shadow_base, heap_region.shadow_base);
            self.regions.clear(new_heap.end..self.heap.end);
            self.shm_file.dealloc(&(new_heap.end..self.heap.end));
        }
        self.heap = new_heap;

        Ok(ForeignPtr::from(requested_brk).cast::<u8>())
    }

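    /// Executes an `mprotect` on behalf of the plugin and updates the
    /// protection recorded for the affected regions, splitting regions when
    /// the new protection covers only part of them. Shadow's own mappings of
    /// those regions are updated to match.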
    pub fn handle_mprotect(
        &mut self,
        ctx: &ThreadContext,
        addr: ForeignPtr<u8>,
        size: usize,
        prot: ProtFlags,
    ) -> Result<(), Errno> {
        let (ctx, thread) = ctx.split_thread();
        trace!("mprotect({:?}, {}, {:?})", addr, size, prot);
        thread.native_mprotect(&ctx, addr, size, prot)?;

        let mutations = self
            .regions
            .clear(usize::from(addr)..(usize::from(addr) + size));
        for mutation in mutations {
            match mutation {
                Mutation::ModifiedBegin(interval, new_start) => {
                    let (_extant_interval, extant_region) =
                        self.regions.get_mut(new_start).unwrap();
                    let modified_interval = interval.start..new_start;
                    let mut modified_region = extant_region.clone();
                    modified_region.prot = prot;
                    if !extant_region.shadow_base.is_null() {
                        extant_region.shadow_base =
                            unsafe { extant_region.shadow_base.add(modified_interval.len()) };
                        unsafe {
                            linux_api::mman::mprotect(
                                modified_region.shadow_base,
                                modified_interval.len(),
                                prot,
                            )
                        }
                        .unwrap_or_else(|e| {
                            warn!(
                                "mprotect({:?}, {:?}, {:?}): {}",
                                modified_region.shadow_base,
                                modified_interval.len(),
                                prot,
                                e
                            );
                        });
                    }
                    assert!(
                        self.regions
                            .insert(modified_interval, modified_region)
                            .is_empty()
                    );
                }
                Mutation::ModifiedEnd(interval, new_end) => {
                    let (extant_interval, extant_region) =
                        self.regions.get_mut(new_end - 1).unwrap();
                    let modified_interval = new_end..interval.end;
                    let mut modified_region = extant_region.clone();
                    modified_region.prot = prot;
                    if !modified_region.shadow_base.is_null() {
                        modified_region.shadow_base =
                            unsafe { modified_region.shadow_base.add(extant_interval.len()) };
                        unsafe {
                            linux_api::mman::mprotect(
                                modified_region.shadow_base,
                                modified_interval.len(),
                                prot,
                            )
                        }
                        .unwrap_or_else(|e| warn!("mprotect: {}", e));
                    }
                    assert!(
                        self.regions
                            .insert(modified_interval, modified_region)
                            .is_empty()
                    );
                }
                Mutation::Split(_original, left_interval, right_interval) => {
                    let right_region = self.regions.get_mut(right_interval.start).unwrap().1;
                    let modified_interval = left_interval.end..right_interval.start;
                    let mut modified_region = right_region.clone();
                    modified_region.prot = prot;
                    if !modified_region.shadow_base.is_null() {
                        modified_region.shadow_base =
                            unsafe { modified_region.shadow_base.add(left_interval.len()) };
                        right_region.shadow_base = unsafe {
                            right_region
                                .shadow_base
                                .add(left_interval.len() + modified_interval.len())
                        };
                        unsafe {
                            linux_api::mman::mprotect(
                                modified_region.shadow_base,
                                modified_interval.len(),
                                prot,
                            )
                        }
                        .unwrap_or_else(|e| warn!("mprotect: {}", e));
                    }
                    assert!(
                        self.regions
                            .insert(modified_interval, modified_region)
                            .is_empty()
                    );
                }
                Mutation::Removed(modified_interval, mut modified_region) => {
                    modified_region.prot = prot;
                    if !modified_region.shadow_base.is_null() {
                        unsafe {
                            linux_api::mman::mprotect(
                                modified_region.shadow_base,
                                modified_interval.len(),
                                prot,
                            )
                        }
                        .unwrap_or_else(|e| warn!("mprotect: {}", e));
                    }
                    assert!(
                        self.regions
                            .insert(modified_interval, modified_region)
                            .is_empty()
                    );
                }
            }
        }
        Ok(())
    }

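    /// Returns a pointer in Shadow's address space corresponding to `src`, or
    /// `None` if `src` is misaligned, isn't in a mapped region, isn't mirrored
    /// into Shadow, or extends past the end of its region. `src` must not be
    /// empty.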
    fn get_mapped_ptr<T: Pod + Debug>(&self, src: ForeignArrayPtr<T>) -> Option<*mut T> {
        assert!(!src.is_empty());

        if usize::from(src.ptr()) % std::mem::align_of::<T>() != 0 {
            trace!("Can't map unaligned pointer {:?}", src);
            return None;
        }

        let (interval, region) = match self.regions.get(usize::from(src.ptr())) {
            Some((i, r)) => (i, r),
            None => {
                if !src.ptr().is_null() {
                    warn!("src {:?} isn't in any mapped region", src);
                }
                return None;
            }
        };
        let shadow_base = if region.shadow_base.is_null() {
            trace!("src {:?} isn't mapped into Shadow", src);
            return None;
        } else {
            region.shadow_base
        };

        if !interval.contains(&(usize::from(src.slice(src.len()..src.len()).ptr()) - 1)) {
            trace!(
                "src {:?} mapped into Shadow, but extends beyond mapped region.",
                src
            );
            return None;
        }

        let offset = usize::from(src.ptr()) - interval.start;
        let ptr = unsafe { shadow_base.add(offset) } as *mut T;

        Some(ptr)
    }

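    /// Like `get_mapped_ptr`, but records a miss when the pointer can't be
    /// mapped, so that frequently missed regions show up in the statistics
    /// logged on drop.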
    fn get_mapped_ptr_and_count<T: Pod + Debug>(&self, src: ForeignArrayPtr<T>) -> Option<*mut T> {
        let res = self.get_mapped_ptr(src);
        if res.is_none() {
            self.inc_misses(src);
        }
        res
    }

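    /// Returns a slice over the plugin memory at `src`, if it is mapped into
    /// Shadow.
    ///
    /// # Safety
    ///
    /// The caller must ensure the plugin does not mutate this memory for the
    /// lifetime of the returned slice.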
    pub unsafe fn get_ref<T: Debug + Pod>(&self, src: ForeignArrayPtr<T>) -> Option<&[T]> {
        if src.is_empty() {
            return Some(&[]);
        }
        let ptr = self.get_mapped_ptr_and_count(src)?;
        Some(unsafe { std::slice::from_raw_parts(notnull_debug(ptr), src.len()) })
    }

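    /// Returns a mutable slice over the plugin memory at `src`, if it is
    /// mapped into Shadow.
    ///
    /// # Safety
    ///
    /// The caller must ensure no other access to this memory (from the plugin
    /// or from Shadow) overlaps the lifetime of the returned slice.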
    pub unsafe fn get_mut<T: Debug + Pod>(&self, src: ForeignArrayPtr<T>) -> Option<&mut [T]> {
        if src.is_empty() {
            return Some(&mut []);
        }
        let ptr = self.get_mapped_ptr_and_count(src)?;
        Some(unsafe { std::slice::from_raw_parts_mut(notnull_mut_debug(ptr), src.len()) })
    }

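    /// Bumps the miss counter for whatever region (if any) contains `src`.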
    fn inc_misses<T: Debug + Pod>(&self, src: ForeignArrayPtr<T>) {
        let key = match self.regions.get(usize::from(src.ptr())) {
            Some((_, original_path)) => format!("{:?}", original_path),
            None => "not found".to_string(),
        };
        let mut misses = self.misses_by_path.borrow_mut();
        let counter = misses.entry(key).or_insert(0);
        *counter += 1;
    }
}

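// The pointer arithmetic in this file treats `*mut c_void` as a byte pointer,
// which is only correct if `c_void` has size 1; this test guards that
// assumption.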
#[cfg(test)]
#[test]
fn test_validate_void_size() {
    assert_eq!(std::mem::size_of::<c_void>(), 1);
}