shadow_rs/host/
host.rs

1//! An emulated Linux system.
2
3use std::cell::{Cell, Ref, RefCell, RefMut, UnsafeCell};
4use std::collections::BTreeMap;
5use std::ffi::{CStr, CString, OsString};
6use std::net::{Ipv4Addr, SocketAddrV4};
7use std::ops::{Deref, DerefMut};
8use std::os::unix::prelude::OsStringExt;
9use std::path::{Path, PathBuf};
10use std::sync::{Arc, Mutex};
11
12use atomic_refcell::AtomicRefCell;
13use linux_api::signal::{Signal, siginfo_t};
14use log::{debug, trace};
15use logger::LogLevel;
16use once_cell::unsync::OnceCell;
17use rand::SeedableRng;
18use rand_xoshiro::Xoshiro256PlusPlus;
19use shadow_shim_helper_rs::HostId;
20use shadow_shim_helper_rs::emulated_time::EmulatedTime;
21use shadow_shim_helper_rs::explicit_drop::ExplicitDropper;
22use shadow_shim_helper_rs::rootedcell::Root;
23use shadow_shim_helper_rs::rootedcell::cell::RootedCell;
24use shadow_shim_helper_rs::rootedcell::rc::RootedRc;
25use shadow_shim_helper_rs::rootedcell::refcell::RootedRefCell;
26use shadow_shim_helper_rs::shim_shmem::{HostShmem, HostShmemProtected, ManagerShmem};
27use shadow_shim_helper_rs::simulation_time::SimulationTime;
28use shadow_shmem::allocator::ShMemBlock;
29use shadow_tsc::Tsc;
30use vasi_sync::scmutex::SelfContainedMutexGuard;
31
32use crate::core::configuration::{ProcessFinalState, QDiscMode};
33use crate::core::sim_config::PcapConfig;
34use crate::core::work::event::{Event, EventData};
35use crate::core::work::event_queue::EventQueue;
36use crate::core::work::task::TaskRef;
37use crate::core::worker::Worker;
38use crate::cshadow;
39use crate::host::descriptor::socket::abstract_unix_ns::AbstractUnixNamespace;
40use crate::host::descriptor::socket::inet::InetSocket;
41use crate::host::futex_table::FutexTable;
42use crate::host::network::interface::{FifoPacketPriority, NetworkInterface, PcapOptions};
43use crate::host::network::namespace::NetworkNamespace;
44use crate::host::process::Process;
45use crate::host::thread::{Thread, ThreadId};
46use crate::network::PacketDevice;
47use crate::network::relay::{RateLimit, Relay};
48use crate::network::router::Router;
49use crate::utility;
50#[cfg(feature = "perf_timers")]
51use crate::utility::perf_timer::PerfTimer;
52
/// Static configuration for a [`Host`], fixed at construction time and
/// consumed by `Host::new`.
pub struct HostParameters {
    pub id: HostId,
    /// Seed for this host's deterministic RNG (`Xoshiro256PlusPlus`).
    pub node_seed: u64,
    // TODO: Remove when we don't need C compatibility.
    // Already storing as a String in HostInfo.
    pub hostname: CString,
    pub node_id: u32,
    /// IPv4 address in network byte order (converted with `u32::from_be` in `Host::new`).
    pub ip_addr: libc::in_addr_t,
    /// Events scheduled at or after this time are dropped by `Host::push_local_event`.
    pub sim_end_time: EmulatedTime,
    /// Downstream bandwidth in bits/s; divided by 8 for the inbound relay's byte rate.
    pub requested_bw_down_bits: u64,
    /// Upstream bandwidth in bits/s; divided by 8 for the outbound relay's byte rate.
    pub requested_bw_up_bits: u64,
    pub cpu_frequency: u64,
    pub cpu_threshold: Option<SimulationTime>,
    pub cpu_precision: Option<SimulationTime>,
    pub log_level: LogLevel,
    /// When `Some`, packet captures are written under the host's data directory.
    pub pcap_config: Option<PcapConfig>,
    pub qdisc: QDiscMode,
    pub init_sock_recv_buf_size: u64,
    pub autotune_recv_buf: bool,
    pub init_sock_send_buf_size: u64,
    pub autotune_send_buf: bool,
    /// Frequency used both for `Tsc` emulation and the shim's shared memory.
    pub native_tsc_frequency: u64,
    pub model_unblocked_syscall_latency: bool,
    pub max_unapplied_cpu_latency: SimulationTime,
    pub unblocked_syscall_latency: SimulationTime,
    pub unblocked_vdso_latency: SimulationTime,
    /// Options for strace-style logging of managed processes, if enabled.
    pub strace_logging_options: Option<FmtOptions>,
    /// Log level used by the shim injected into managed processes.
    pub shim_log_level: LogLevel,
    pub use_new_tcp: bool,
    pub use_mem_mapper: bool,
    pub use_syscall_counters: bool,
}
85
86use super::cpu::Cpu;
87use super::process::ProcessId;
88use super::syscall::formatter::FmtOptions;
89
/// Immutable information about the Host.
#[derive(Debug, Clone)]
pub struct HostInfo {
    pub id: HostId,
    /// Hostname, converted from `HostParameters::hostname`.
    pub name: String,
    /// The default IP of the host's network namespace.
    pub default_ip: Ipv4Addr,
    /// Log level filter derived from the host's configured log level.
    pub log_level: Option<log::LevelFilter>,
}
98
/// A simulated Host.
pub struct Host {
    // Store immutable info in an Arc, that we can safely clone into the
    // ShadowLogger. We can't use a RootedRc here since this needs to be cloned
    // into the logger thread, which doesn't have access to the Host's Root.
    //
    // TODO: Get rid of the enclosing OnceCell and initialize at the point where
    // the necessary data is available.
    info: OnceCell<Arc<HostInfo>>,

    // Inside the Host "object graph", we use the Host's Root for RootedRc and RootedRefCells,
    // giving us atomic-free refcounting and checked borrowing.
    //
    // This makes the Host !Sync.
    root: Root,

    // Time-ordered queue of pending events for this host.
    event_queue: Arc<Mutex<EventQueue>>,

    // Deterministic per-host RNG, seeded from `HostParameters::node_seed`.
    random: RefCell<Xoshiro256PlusPlus>,

    // The upstream router that will queue packets until we can receive them.
    // This only applies to the internet interface; the localhost interface
    // does not receive packets from a router.
    router: RefCell<Router>,

    // Forwards packets out from our internet interface to the router.
    relay_inet_out: Arc<Relay>,
    // Forwards packets from the router in to our internet interface.
    relay_inet_in: Arc<Relay>,
    // Forwards packets from the localhost interface back to itself.
    relay_loopback: Arc<Relay>,

    // map address to futex objects
    futex_table: RefCell<FutexTable>,

    // Measures time spent executing this host's events (see
    // `continue_execution_timer` / `stop_execution_timer`).
    #[cfg(feature = "perf_timers")]
    execution_timer: RefCell<PerfTimer>,

    // Static configuration this host was constructed from.
    pub params: HostParameters,

    // Models CPU load/delay applied to scheduled events (see `Host::execute`).
    cpu: RefCell<Cpu>,

    // Network interfaces, default IP, and the abstract unix namespace.
    net_ns: NetworkNamespace,

    // Store as a CString so that we can return a borrowed pointer to C code
    // instead of having to allocate a new string.
    //
    // TODO: Remove `data_dir_path_cstring` once we can remove `host_getDataPath`. (Or maybe don't
    // store it at all)
    data_dir_path: PathBuf,
    data_dir_path_cstring: CString,

    // virtual process and event id counter
    thread_id_counter: Cell<libc::pid_t>,
    event_id_counter: Cell<u64>,
    packet_id_counter: Cell<u64>,

    // Enables us to sort objects deterministically based on their creation order.
    determinism_sequence_counter: Cell<u64>,

    // track the order in which the application sent us application data
    packet_priority_counter: Cell<FifoPacketPriority>,

    // Owned pointers to processes.
    processes: RefCell<BTreeMap<ProcessId, RootedRc<RootedRefCell<Process>>>>,

    // TSC model, constructed from `HostParameters::native_tsc_frequency`.
    tsc: Tsc,
    // Cached lock for shim_shmem. `[Host::shmem_lock]` uses unsafe code to give it
    // a 'static lifetime.
    // SAFETY:
    // * This field must not outlive `shim_shmem`. We achieve this by:
    //   * Declaring this field before `shim_shmem` so that it's dropped before
    //   it.
    //   * We never expose the guard itself via non-unsafe interfaces. e.g.  our
    //   safe interfaces don't allow access to the guard itself, nor to the
    //   internal data with a lifetime that could outlive `self` (and thereby
    //   `shim_shmem`).
    shim_shmem_lock:
        RefCell<Option<UnsafeCell<SelfContainedMutexGuard<'static, HostShmemProtected>>>>,
    // Shared memory with the shim.
    //
    // SAFETY: The data inside HostShmem::protected aliases shim_shmem_lock when
    // the latter is held.  Even when holding `&mut self` or `self`, if
    // `shim_shmem_lock` is held we must avoid invalidating it, e.g. by
    // `std::mem::replace`.
    //
    // Note though that we're already prevented from creating another reference
    // to the data inside `HostShmem::protected` through this field, since
    // `self.shim_shmem...protected.lock()` will fail if the lock is already
    // held.
    shim_shmem: UnsafeCell<ShMemBlock<'static, HostShmem>>,

    // NOTE(review): appears to flag a pending "socket has packets"
    // notification; confirm semantics at the use sites (not visible in this
    // chunk of the file).
    in_notify_socket_has_packets: RootedCell<bool>,

    /// Paths to be added to LD_PRELOAD of managed processes.
    preload_paths: Arc<Vec<PathBuf>>,
}
196
// Marker impl: a compile-time assertion that `Host` is `Send` (it is
// deliberately `!Sync`; see the comment on the `root` field).
/// Host must be `Send`.
impl crate::utility::IsSend for Host {}
199
200// TODO: use derive(Debug) if/when all fields implement Debug.
201impl std::fmt::Debug for Host {
202    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
203        f.debug_struct("Host")
204            .field("info", &self.info)
205            .finish_non_exhaustive()
206    }
207}
208
209impl Host {
    /// Construct a new `Host` from `params`.
    ///
    /// Creates the host's data directory under `host_root_path`, allocates the
    /// shared-memory block used by the shim, and wires up the network
    /// namespace, upstream router, and rate-limiting relays.
    ///
    /// # Panics
    ///
    /// Panics if the data directory can't be created, or if the configured
    /// pcap capture size doesn't fit the target integer type.
    pub fn new(
        params: HostParameters,
        host_root_path: &Path,
        raw_cpu_freq_khz: u64,
        manager_shmem: &ShMemBlock<ManagerShmem>,
        preload_paths: Arc<Vec<PathBuf>>,
    ) -> Self {
        // Started immediately so that construction time is included; paused
        // again via `stop_execution_timer` just before returning.
        #[cfg(feature = "perf_timers")]
        let execution_timer = RefCell::new(PerfTimer::new_started());

        let root = Root::new();
        let random = RefCell::new(Xoshiro256PlusPlus::seed_from_u64(params.node_seed));
        let cpu = RefCell::new(Cpu::new(
            params.cpu_frequency,
            raw_cpu_freq_khz,
            params.cpu_threshold,
            params.cpu_precision,
        ));
        let data_dir_path = Self::make_data_dir_path(&params.hostname, host_root_path);
        let data_dir_path_cstring = utility::pathbuf_to_nul_term_cstring(data_dir_path.clone());

        // Shared-memory state visible to the shim inside managed processes.
        let host_shmem = HostShmem::new(
            params.id,
            params.model_unblocked_syscall_latency,
            params.max_unapplied_cpu_latency,
            params.unblocked_syscall_latency,
            params.unblocked_vdso_latency,
            nix::unistd::getpid().as_raw(),
            params.native_tsc_frequency,
            params.shim_log_level,
            manager_shmem,
        );
        let shim_shmem = UnsafeCell::new(shadow_shmem::allocator::shmalloc(host_shmem));

        // Process IDs start at 1000
        let thread_id_counter = Cell::new(1000);
        let event_id_counter = Cell::new(0);
        let packet_id_counter = Cell::new(0);
        let determinism_sequence_counter = Cell::new(0);
        // Packet priorities start at 1. "0" is used for control packets.
        let packet_priority_counter = Cell::new(1);
        let tsc = Tsc::new(params.native_tsc_frequency);

        std::fs::create_dir_all(&data_dir_path).unwrap();

        // Register using the param hints.
        // We already checked that the addresses are available, so fail if they are not.

        // `ip_addr` is stored in network byte order.
        let public_ip: Ipv4Addr = u32::from_be(params.ip_addr).into();

        let pcap_options = params.pcap_config.as_ref().map(|x| PcapOptions {
            path: data_dir_path.clone(),
            capture_size_bytes: x.capture_size.try_into().unwrap(),
        });

        let net_ns = NetworkNamespace::new(public_ip, pcap_options, params.qdisc);

        // Packets that are not for localhost or our public ip go to the router.
        // Use `Ipv4Addr::UNSPECIFIED` for the router to encode this for our
        // routing table logic inside of `Host::get_packet_device()`.
        let router = Router::new(Ipv4Addr::UNSPECIFIED);
        // Bandwidth params are bits/s; the relays are configured in bytes/s.
        let relay_inet_out = Relay::new(
            RateLimit::BytesPerSecond(params.requested_bw_up_bits / 8),
            net_ns.internet.borrow().get_address(),
        );
        let relay_inet_in = Relay::new(
            RateLimit::BytesPerSecond(params.requested_bw_down_bits / 8),
            router.get_address(),
        );
        let relay_loopback = Relay::new(
            RateLimit::Unlimited,
            net_ns.localhost.borrow().get_address(),
        );

        let in_notify_socket_has_packets = RootedCell::new(&root, false);

        let res = Self {
            info: OnceCell::new(),
            root,
            event_queue: Arc::new(Mutex::new(EventQueue::new())),
            params,
            router: RefCell::new(router),
            relay_inet_out: Arc::new(relay_inet_out),
            relay_inet_in: Arc::new(relay_inet_in),
            relay_loopback: Arc::new(relay_loopback),
            futex_table: RefCell::new(FutexTable::new()),
            random,
            shim_shmem,
            shim_shmem_lock: RefCell::new(None),
            cpu,
            net_ns,
            data_dir_path,
            data_dir_path_cstring,
            thread_id_counter,
            event_id_counter,
            packet_id_counter,
            packet_priority_counter,
            determinism_sequence_counter,
            tsc,
            processes: RefCell::new(BTreeMap::new()),
            #[cfg(feature = "perf_timers")]
            execution_timer,
            in_notify_socket_has_packets,
            preload_paths,
        };

        res.stop_execution_timer();

        debug!(
            concat!(
                "Setup host id '{:?}'",
                " name '{name}'",
                " with seed {seed},",
                " {bw_up_kiBps} bwUpKiBps,",
                " {bw_down_kiBps} bwDownKiBps,",
                " {init_sock_send_buf_size} initSockSendBufSize,",
                " {init_sock_recv_buf_size} initSockRecvBufSize, ",
                " {cpu_frequency:?} cpuFrequency, ",
                " {cpu_threshold:?} cpuThreshold, ",
                " {cpu_precision:?} cpuPrecision"
            ),
            res.id(),
            name = res.info().name,
            seed = res.params.node_seed,
            bw_up_kiBps = res.bw_up_kiBps(),
            bw_down_kiBps = res.bw_down_kiBps(),
            init_sock_send_buf_size = res.params.init_sock_send_buf_size,
            init_sock_recv_buf_size = res.params.init_sock_recv_buf_size,
            cpu_frequency = res.params.cpu_frequency,
            cpu_threshold = res.params.cpu_threshold,
            cpu_precision = res.params.cpu_precision,
        );

        res
    }
345
    /// The `Root` that tags this host's `RootedRc`/`RootedRefCell` object graph.
    pub fn root(&self) -> &Root {
        &self.root
    }
349
350    fn make_data_dir_path(hostname: &CStr, host_root_path: &Path) -> PathBuf {
351        let hostname: OsString = { OsString::from_vec(hostname.to_bytes().to_vec()) };
352
353        let mut data_dir_path = PathBuf::new();
354        data_dir_path.push(host_root_path);
355        data_dir_path.push(&hostname);
356        data_dir_path
357    }
358
    /// Path of this host's data directory (created in `Host::new`).
    pub fn data_dir_path(&self) -> &Path {
        &self.data_dir_path
    }
362
    /// Schedule the spawn of a managed process at `start_time`, and optionally
    /// schedule `shutdown_signal` to be delivered to it at `shutdown_time`.
    ///
    /// Both times are offsets from `EmulatedTime::SIMULATION_START`.
    /// `shutdown_time`, when present, must be after `start_time` (checked with
    /// a debug assertion only).
    ///
    /// The spawn itself happens inside a scheduled task; that task panics if
    /// the process fails to initialize.
    pub fn add_application(
        &self,
        start_time: SimulationTime,
        shutdown_time: Option<SimulationTime>,
        shutdown_signal: nix::sys::signal::Signal,
        plugin_name: CString,
        plugin_path: CString,
        argv: Vec<CString>,
        envv: Vec<CString>,
        pause_for_debugging: bool,
        expected_final_state: ProcessFinalState,
    ) {
        debug_assert!(shutdown_time.is_none() || shutdown_time.unwrap() > start_time);

        // Schedule spawning the process.
        let task = TaskRef::new(move |host| {
            // We can't move out of these captured variables, since TaskRef takes
            // a Fn, not a FnOnce.
            // TODO: Add support for FnOnce?
            let envv = envv.clone();
            let argv = argv.clone();

            let process = Process::spawn(
                host,
                plugin_name.clone(),
                &plugin_path,
                argv,
                envv,
                pause_for_debugging,
                host.params.strace_logging_options,
                expected_final_state,
            )
            .unwrap_or_else(|e| panic!("Failed to initialize application {plugin_name:?}: {e:?}"));
            // Capture the ids while holding a short-lived borrow; the process
            // itself is moved into the host's process table below.
            let (process_id, thread_id) = {
                let process = process.borrow(host.root());
                (process.id(), process.thread_group_leader_id())
            };
            host.processes.borrow_mut().insert(process_id, process);

            // Optionally schedule delivery of the shutdown signal.
            if let Some(shutdown_time) = shutdown_time {
                let task = TaskRef::new(move |host| {
                    let Some(process) = host.process_borrow(process_id) else {
                        debug!(
                            "Can't send shutdown signal to process {process_id}; it no longer exists"
                        );
                        return;
                    };
                    let process = process.borrow(host.root());
                    // sender pid 1 / uid 0, mirroring a kill from init.
                    let siginfo_t = siginfo_t::new_for_kill(
                        Signal::try_from(shutdown_signal as i32).unwrap(),
                        1,
                        0,
                    );
                    process.signal(host, None, &siginfo_t);
                });
                host.schedule_task_at_emulated_time(
                    task,
                    EmulatedTime::SIMULATION_START + shutdown_time,
                );
            }

            // Start running the new process's thread-group leader.
            host.resume(process_id, thread_id);
        });
        self.schedule_task_at_emulated_time(task, EmulatedTime::SIMULATION_START + start_time);
    }
428
    /// Register an already-forked `process` and schedule it to run via a
    /// zero-delay task.
    ///
    /// NOTE(review): `host` and `&self` look like they're expected to refer to
    /// the same host (the process is inserted into `host`'s table but borrowed
    /// against `self`'s root and scheduled on `self`) — confirm with callers.
    pub fn add_and_schedule_forked_process(
        &self,
        host: &Host,
        process: RootedRc<RootedRefCell<Process>>,
    ) {
        // Capture ids while holding a short-lived borrow.
        let (process_id, thread_id) = {
            let process = process.borrow(&self.root);
            (process.id(), process.thread_group_leader_id())
        };
        host.processes.borrow_mut().insert(process_id, process);
        // Schedule process to run.
        let task = TaskRef::new(move |host| {
            host.resume(process_id, thread_id);
        });
        self.schedule_task_with_delay(task, SimulationTime::ZERO);
    }
445
    /// Run thread `tid` of process `pid` until it blocks or exits, then do
    /// exit bookkeeping: reparent the dead process's children to INIT, and
    /// drop any zombie children (and the process itself) that have no reaper.
    ///
    /// Silently returns if `pid` no longer exists.
    pub fn resume(&self, pid: ProcessId, tid: ThreadId) {
        // Clone the Rc so we don't hold a borrow of the process table while
        // the process runs (it may mutate the table, e.g. by forking).
        let Some(processrc) = self
            .process_borrow(pid)
            .map(|p| RootedRc::clone(&p, &self.root))
        else {
            trace!("{pid:?} doesn't exist");
            return;
        };
        // Ensure the clone is recursively dropped on every exit path.
        let processrc = ExplicitDropper::new(processrc, |p| {
            p.explicit_drop_recursive(&self.root, self);
        });
        let died;
        let is_orphan;
        {
            // Worker tracks the currently-running process for the duration of
            // the `resume` call.
            Worker::set_active_process(&processrc);
            let process = processrc.borrow(self.root());
            process.resume(self, tid);
            Worker::clear_active_process();
            let zombie_state = process.borrow_as_zombie();
            if let Some(zombie) = zombie_state {
                died = true;
                is_orphan = zombie.reaper(self).is_none();
            } else {
                died = false;
                is_orphan = false;
            }
        };

        if !died {
            return;
        }

        // Reparent children, and collect IDs of children that are dead.
        let mut orphaned_zombie_pids: Vec<ProcessId> = self
            .processes
            .borrow()
            .iter()
            .filter_map(|(other_pid, processrc)| {
                let process = processrc.borrow(&self.root);
                if process.parent_id() != pid {
                    // Not a child of the current process
                    return None;
                }
                process.set_parent_id(ProcessId::INIT);
                let Some(z) = process.borrow_as_zombie() else {
                    // Not a zombie
                    return None;
                };
                if z.reaper(self).is_some() {
                    // Not an orphan
                    None
                } else {
                    // Is a zombie orphan child
                    Some(*other_pid)
                }
            })
            .collect();

        // Process we ran is a zombie; is it also an orphan?
        debug_assert!(died);
        if is_orphan {
            orphaned_zombie_pids.push(pid);
        }

        // Free orphaned zombies.
        let mut processes = self.processes.borrow_mut();
        for pid in orphaned_zombie_pids {
            trace!("Dropping orphan zombie process {pid:?}");
            let processrc = processes.remove(&pid).unwrap();
            RootedRc::explicit_drop_recursive(processrc, &self.root, self);
        }
    }
518
    /// Borrow the process with the given `id`, if it exists. The returned
    /// guard holds a borrow of the whole process table.
    #[track_caller]
    pub fn process_borrow(
        &self,
        id: ProcessId,
    ) -> Option<impl Deref<Target = RootedRc<RootedRefCell<Process>>> + '_> {
        Ref::filter_map(self.processes.borrow(), |processes| processes.get(&id)).ok()
    }

    /// Remove the given process from the Host, if it exists.
    #[track_caller]
    pub fn process_remove(&self, id: ProcessId) -> Option<RootedRc<RootedRefCell<Process>>> {
        self.processes.borrow_mut().remove(&id)
    }

    /// Borrow the set of processes. Generally this should only be used to
    /// iterate over the set of processes. e.g. fetching a specific process
    /// should be done via `process_borrow`.
    // TODO: It would be preferable to return an iterator instead of the
    // collection itself. There has to be an intermediate object though since we
    // need both the borrowed map of processes, and an iterator that borrows
    // from that. I suppose we could create an abstract "Iterator factory" and
    // return that here instead of exposing BTreeMap type.
    #[track_caller]
    pub fn processes_borrow(
        &self,
    ) -> impl Deref<Target = BTreeMap<ProcessId, RootedRc<RootedRefCell<Process>>>> + '_ {
        self.processes.borrow()
    }

    /// Borrow the modeled CPU state.
    pub fn cpu_borrow(&self) -> impl Deref<Target = Cpu> + '_ {
        self.cpu.borrow()
    }

    /// Mutably borrow the modeled CPU state.
    pub fn cpu_borrow_mut(&self) -> impl DerefMut<Target = Cpu> + '_ {
        self.cpu.borrow_mut()
    }
555
    /// Information about the Host. Made available as an Arc for cheap cloning
    /// into, e.g. Worker and ShadowLogger. When there's no need to clone the
    /// Arc, generally prefer the top-level `Host` methods for accessing this
    /// information, which are likely to be more stable.
    ///
    /// Lazily initialized on first call.
    pub fn info(&self) -> &Arc<HostInfo> {
        self.info.get_or_init(|| {
            Arc::new(HostInfo {
                id: self.id(),
                name: self.params.hostname.to_str().unwrap().to_owned(),
                default_ip: self.default_ip(),
                log_level: self.log_level(),
            })
        })
    }

    /// This host's unique id.
    pub fn id(&self) -> HostId {
        self.params.id
    }

    /// This host's name (the configured hostname).
    pub fn name(&self) -> &str {
        &self.info().name
    }

    /// The default IP of this host's network namespace.
    pub fn default_ip(&self) -> Ipv4Addr {
        self.net_ns.default_ip
    }

    /// The abstract unix-domain socket namespace, shared via the network
    /// namespace.
    pub fn abstract_unix_namespace(
        &self,
    ) -> impl Deref<Target = Arc<AtomicRefCell<AbstractUnixNamespace>>> + '_ {
        &self.net_ns.unix
    }

    /// Log level filter for this host, converted from the configured C-style
    /// log level.
    pub fn log_level(&self) -> Option<log::LevelFilter> {
        let level = self.params.log_level;
        log_c2rust::c_to_rust_log_level(level).map(|l| l.to_level_filter())
    }
593
    /// Mutably borrow the upstream router, which queues packets inbound to the
    /// internet interface.
    #[track_caller]
    pub fn upstream_router_borrow_mut(&self) -> impl DerefMut<Target = Router> + '_ {
        self.router.borrow_mut()
    }

    /// Borrow this host's network namespace.
    #[track_caller]
    pub fn network_namespace_borrow(&self) -> impl Deref<Target = NetworkNamespace> + '_ {
        &self.net_ns
    }

    /// Borrow the futex table (maps addresses to futex objects).
    #[track_caller]
    pub fn futextable_borrow(&self) -> impl Deref<Target = FutexTable> + '_ {
        self.futex_table.borrow()
    }

    /// Mutably borrow the futex table.
    #[track_caller]
    pub fn futextable_borrow_mut(&self) -> impl DerefMut<Target = FutexTable> + '_ {
        self.futex_table.borrow_mut()
    }

    /// Requested upstream bandwidth in KiB/s.
    #[allow(non_snake_case)]
    pub fn bw_up_kiBps(&self) -> u64 {
        self.params.requested_bw_up_bits / (8 * 1024)
    }

    /// Requested downstream bandwidth in KiB/s.
    #[allow(non_snake_case)]
    pub fn bw_down_kiBps(&self) -> u64 {
        self.params.requested_bw_down_bits / (8 * 1024)
    }

    /// Returns `None` if there is no such interface.
    ///
    /// Panics if we have shut down.
    pub fn interface_borrow_mut(
        &self,
        addr: Ipv4Addr,
    ) -> Option<impl DerefMut<Target = NetworkInterface> + '_> {
        self.net_ns.interface_borrow_mut(addr)
    }

    /// Returns `None` if there is no such interface.
    ///
    /// Panics if we have shut down.
    pub fn interface_borrow(
        &self,
        addr: Ipv4Addr,
    ) -> Option<impl Deref<Target = NetworkInterface> + '_> {
        self.net_ns.interface_borrow(addr)
    }

    /// Mutably borrow this host's deterministic RNG.
    #[track_caller]
    pub fn random_mut(&self) -> impl DerefMut<Target = Xoshiro256PlusPlus> + '_ {
        self.random.borrow_mut()
    }
648
649    pub fn get_new_event_id(&self) -> u64 {
650        let res = self.event_id_counter.get();
651        self.event_id_counter.set(res + 1);
652        res
653    }
654
655    pub fn get_new_thread_id(&self) -> ThreadId {
656        let res = self.thread_id_counter.get();
657        self.thread_id_counter.set(res + 1);
658        res.try_into().unwrap()
659    }
660
661    pub fn get_new_packet_id(&self) -> u64 {
662        let res = self.packet_id_counter.get();
663        self.packet_id_counter.set(res + 1);
664        res
665    }
666
667    pub fn get_next_deterministic_sequence_value(&self) -> u64 {
668        let res = self.determinism_sequence_counter.get();
669        self.determinism_sequence_counter.set(res + 1);
670        res
671    }
672
673    pub fn get_next_packet_priority(&self) -> FifoPacketPriority {
674        let res = self.packet_priority_counter.get();
675        self.packet_priority_counter
676            .set(res.checked_add(1).unwrap());
677        res
678    }
679
    /// Resume the host's execution timer (no-op unless the `perf_timers`
    /// feature is enabled).
    pub fn continue_execution_timer(&self) {
        #[cfg(feature = "perf_timers")]
        self.execution_timer.borrow_mut().start();
    }

    /// Pause the host's execution timer (no-op unless the `perf_timers`
    /// feature is enabled).
    pub fn stop_execution_timer(&self) {
        #[cfg(feature = "perf_timers")]
        self.execution_timer.borrow_mut().stop();
    }

    /// Schedule `task` at absolute emulated time `t`. Returns `false` (and
    /// drops the task) if `t` is at or past the simulation end time.
    pub fn schedule_task_at_emulated_time(&self, task: TaskRef, t: EmulatedTime) -> bool {
        let event = Event::new_local(task, t, self);
        self.push_local_event(event)
    }

    /// Schedule `task` to run `t` after the current time.
    ///
    /// Panics if the `Worker` has no current time set.
    pub fn schedule_task_with_delay(&self, task: TaskRef, t: SimulationTime) -> bool {
        self.schedule_task_at_emulated_time(task, Worker::current_time().unwrap() + t)
    }

    /// This host's event queue.
    pub fn event_queue(&self) -> &Arc<Mutex<EventQueue>> {
        &self.event_queue
    }
702
703    pub fn push_local_event(&self, event: Event) -> bool {
704        if event.time() >= self.params.sim_end_time {
705            return false;
706        }
707        self.event_queue.lock().unwrap().push(event);
708        true
709    }
710
    /// Shut down the host. This should be called while `Worker` has the active host set.
    ///
    /// Panics if any processes are still registered; they must have been
    /// removed first (e.g. via `free_all_applications`).
    pub fn shutdown(&self) {
        self.continue_execution_timer();

        debug!("shutting down host {}", self.name());

        // the network namespace object needs to be cleaned up before it's dropped
        self.net_ns.cleanup();

        assert!(self.processes.borrow().is_empty());

        self.stop_execution_timer();
        #[cfg(feature = "perf_timers")]
        debug!(
            "host '{}' has been shut down, total execution time was {:?}",
            self.name(),
            self.execution_timer.borrow().elapsed()
        );
    }
730
    /// Stop all remaining managed processes and drop them.
    ///
    /// Each process is reparented to `ProcessId::INIT` since its original
    /// parent is dead or about to be.
    pub fn free_all_applications(&self) {
        trace!("start freeing applications for host '{}'", self.name());
        // Take ownership of the whole process table so it can be consumed.
        let processes = std::mem::take(&mut *self.processes.borrow_mut());
        for (_id, processrc) in processes.into_iter() {
            // Guarantee recursive cleanup of the RootedRc at end of scope.
            let processrc = ExplicitDropper::new(processrc, |p| {
                p.explicit_drop_recursive(self.root(), self);
            });
            Worker::set_active_process(&processrc);
            let process = processrc.borrow(self.root());
            process.stop(self);
            Worker::clear_active_process();
            // Reparent to Shadow/INIT, since the original parent is or is
            // about to be dead.
            process.set_parent_id(ProcessId::INIT);
        }
        trace!("done freeing application for host '{}'", self.name());
    }
748
    /// Run all queued events with times earlier than `until`.
    ///
    /// Events may be pushed back (re-queued later) while the modeled CPU is
    /// busy. `Worker`'s current time is set for the duration of each event.
    pub fn execute(&self, until: EmulatedTime) {
        loop {
            // Pop the next event, or stop when the queue is empty or the next
            // event is at/after `until`.
            let mut event = {
                let mut event_queue = self.event_queue.lock().unwrap();
                match event_queue.next_event_time() {
                    Some(t) if t < until => {}
                    _ => break,
                };
                event_queue.pop().unwrap()
            };

            {
                let mut cpu = self.cpu.borrow_mut();
                cpu.update_time(event.time());
                let cpu_delay = cpu.delay();
                if cpu_delay > SimulationTime::ZERO {
                    trace!(
                        "event blocked on CPU, rescheduled for {:?} from now",
                        cpu_delay
                    );

                    // reschedule the event after the CPU delay time
                    event.set_time(event.time() + cpu_delay);
                    self.push_local_event(event);

                    // want to continue pushing back events until we reach the delay time
                    continue;
                }
            }

            // run the event
            Worker::set_current_time(event.time());
            self.continue_execution_timer();
            match event.data() {
                EventData::Packet(data) => {
                    self.upstream_router_borrow_mut()
                        .route_incoming_packet(data.into());
                    self.notify_router_has_packets();
                }
                EventData::Local(data) => TaskRef::from(data).execute(self),
            }
            self.stop_execution_timer();
            Worker::clear_current_time();
        }
    }

    /// Time of the earliest queued event, if any.
    pub fn next_event_time(&self) -> Option<EmulatedTime> {
        self.event_queue.lock().unwrap().next_event_time()
    }
798
    /// The unprotected part of the Host's shared memory.
    ///
    /// Do not try to take the lock of [`HostShmem::protected`] directly.
    /// Instead use [`Host::lock_shmem`], [`Host::shim_shmem_lock_borrow`], and
    /// [`Host::shim_shmem_lock_borrow_mut`].
    pub fn shim_shmem(&self) -> &ShMemBlock<'static, HostShmem> {
        // SAFETY: only a shared reference is handed out; see the field-level
        // SAFETY notes on `shim_shmem` for the aliasing rules with the cached
        // lock.
        unsafe { &*self.shim_shmem.get() }
    }
807
808    /// Returns the specified thread if it exists. If you already have the thread's process,
809    /// [`Process::thread_borrow`] may be more efficient.
810    pub fn thread_cloned_rc(
811        &self,
812        virtual_tid: ThreadId,
813    ) -> Option<RootedRc<RootedRefCell<Thread>>> {
814        for process in self.processes.borrow().values() {
815            let process = process.borrow(self.root());
816            if let Some(thread) = process.thread_borrow(virtual_tid) {
817                return Some(RootedRc::clone(&*thread, self.root()));
818            };
819        }
820
821        None
822    }
823
824    /// Returns `true` if the host has a process that contains the specified thread.
825    pub fn has_thread(&self, virtual_tid: ThreadId) -> bool {
826        for process in self.processes.borrow().values() {
827            let process = process.borrow(self.root());
828            if process.thread_borrow(virtual_tid).is_some() {
829                return true;
830            }
831        }
832
833        false
834    }
835
    /// Locks the Host's shared memory, caching the lock internally.
    ///
    /// Dropping the Host before calling [`Host::unlock_shmem`] will panic.
    ///
    /// Panics if the lock is already cached, i.e. if `lock_shmem` is called
    /// twice without an intervening [`Host::unlock_shmem`].
    ///
    /// TODO: Consider removing this API once we don't need to cache the lock for the C API.
    pub fn lock_shmem(&self) {
        // We're extending this lifetime to extend the lifetime of `lock`, below, without
        // having to `transmute` the type itself.
        //
        // SAFETY:
        // * We ensure that `self.shim_shmem_lock` doesn't outlive `self.shim_shmem`.
        //   See SAFETY requirements on Self::shim_shmem_lock itself.
        // * We never mutate `self.shim_shmem` nor borrow the internals of
        //   `self.shim_shmem.protected` while the lock is held, since that would
        //   conflict with the cached guard's mutable reference.
        // * `ShMemBlock` guarantees that its data doesn't move even if the block does.
        //    So moving `shim_shmem` (e.g. by moving `self`) doesn't invalidate the lock.
        let shim_shmem: &'static ShMemBlock<HostShmem> =
            unsafe { self.shim_shmem.get().as_ref().unwrap() };
        let lock = shim_shmem.protected().lock();
        // Cache the guard; the assert enforces that the lock wasn't already held.
        let prev = self
            .shim_shmem_lock
            .borrow_mut()
            .replace(UnsafeCell::new(lock));
        assert!(prev.is_none());
    }
862
863    /// Panics if there is still an outstanding reference returned by
864    /// `shim_shmem_lock_borrow` or `shim_shmem_lock_borrow_mut`.
865    pub fn unlock_shmem(&self) {
866        let prev = self.shim_shmem_lock.borrow_mut().take();
867        assert!(prev.is_some());
868    }
869
    /// Immutably borrows the protected shared-memory data through the lock
    /// cached by [`Host::lock_shmem`]. Returns `None` if the lock isn't
    /// currently held.
    pub fn shim_shmem_lock_borrow(&self) -> Option<impl Deref<Target = HostShmemProtected> + '_> {
        Ref::filter_map(self.shim_shmem_lock.borrow(), |l| {
            l.as_ref().map(|l| {
                // SAFETY: Returned object holds a checked borrow of the lock;
                // trying to release the lock before the returned object is
                // dropped will result in a panic.
                let guard = unsafe { &*l.get() };
                guard.deref()
            })
        })
        .ok()
    }
882
    /// Mutably borrows the protected shared-memory data through the lock
    /// cached by [`Host::lock_shmem`]. Returns `None` if the lock isn't
    /// currently held.
    pub fn shim_shmem_lock_borrow_mut(
        &self,
    ) -> Option<impl DerefMut<Target = HostShmemProtected> + '_> {
        RefMut::filter_map(self.shim_shmem_lock.borrow_mut(), |l| {
            l.as_ref().map(|l| {
                // SAFETY: Returned object holds a checked borrow of the lock;
                // trying to release the lock before the returned object is
                // dropped will result in a panic.
                let guard = unsafe { &mut *l.get() };
                guard.deref_mut()
            })
        })
        .ok()
    }
897
898    /// Timestamp Counter emulation for this Host. It ticks at the same rate as
899    /// the native Timestamp Counter, if we were able to find it.
900    pub fn tsc(&self) -> &Tsc {
901        &self.tsc
902    }
903
904    /// Get the packet device that handles packets for the given address. This
905    /// could be the source device from which we forward packets, or the device
906    /// that will receive and process packets with a given destination address.
907    /// In the latter case, if the packet destination is not on this host, we
908    /// return the router to route it to the correct host.
909    pub fn get_packet_device(&self, address: Ipv4Addr) -> Ref<dyn PacketDevice> {
910        if address == Ipv4Addr::LOCALHOST {
911            self.net_ns.localhost.borrow()
912        } else if address == self.default_ip() {
913            self.net_ns.internet.borrow()
914        } else {
915            self.router.borrow()
916        }
917    }
918
919    /// Call to trigger the forwarding of packets from the router to the network
920    /// interface.
921    pub fn notify_router_has_packets(&self) {
922        self.relay_inet_in.notify(self);
923    }
924
925    /// Call to trigger the forwarding of packets from the network interface to
926    /// the next hop (either back to the network interface for loopback, or up to
927    /// the router for internet-bound packets).
928    ///
929    /// WARNING: This is not reentrant. Do not allow this to be called recursively. Nothing in
930    /// `add_data_source()` or `notify()` can call back into this method. This includes any socket
931    /// code called in any indirect way from here.
932    pub fn notify_socket_has_packets(&self, addr: Ipv4Addr, socket: &InetSocket) {
933        if self.in_notify_socket_has_packets.replace(&self.root, true) {
934            panic!("Recursively calling host.notify_socket_has_packets()");
935        }
936
937        if let Some(iface) = self.interface_borrow(addr) {
938            iface.add_data_source(socket);
939            match addr {
940                Ipv4Addr::LOCALHOST => self.relay_loopback.notify(self),
941                _ => self.relay_inet_out.notify(self),
942            };
943        }
944
945        self.in_notify_socket_has_packets.set(&self.root, false);
946    }
947
948    /// Returns the Session ID for the given process group ID, if it exists.
949    pub fn process_session_id_of_group_id(&self, group_id: ProcessId) -> Option<ProcessId> {
950        let processes = self.processes.borrow();
951        for processrc in processes.values() {
952            let process = processrc.borrow(&self.root);
953            if process.group_id() == group_id {
954                return Some(process.session_id());
955            }
956        }
957        None
958    }
959
960    /// Paths of libraries that should be preloaded into managed processes.
961    pub fn preload_paths(&self) -> &[PathBuf] {
962        &self.preload_paths
963    }
964}
965
impl Drop for Host {
    // Callers are expected to have called `unlock_shmem` before dropping the
    // Host; this drop check turns a latent soundness hazard into a loud panic.
    fn drop(&mut self) {
        // Validate that the shmem lock isn't held, which would potentially
        // violate the SAFETY argument in `lock_shmem`. (AFAIK Rust makes no formal
        // guarantee about the order in which fields are dropped)
        assert!(self.shim_shmem_lock.borrow().is_none());
    }
}
974
975mod export {
976    use std::{os::raw::c_char, time::Duration};
977
978    use libc::{in_addr_t, in_port_t};
979    use rand::{Rng, RngCore};
980    use shadow_shim_helper_rs::shim_shmem;
981
982    use super::*;
983    use crate::cshadow::{CEmulatedTime, CSimulationTime};
984    use crate::network::packet::IanaProtocol;
985
986    #[unsafe(no_mangle)]
987    pub unsafe extern "C-unwind" fn host_execute(hostrc: *const Host, until: CEmulatedTime) {
988        let hostrc = unsafe { hostrc.as_ref().unwrap() };
989        let until = EmulatedTime::from_c_emutime(until).unwrap();
990        hostrc.execute(until)
991    }
992
993    #[unsafe(no_mangle)]
994    pub unsafe extern "C-unwind" fn host_nextEventTime(hostrc: *const Host) -> CEmulatedTime {
995        let hostrc = unsafe { hostrc.as_ref().unwrap() };
996        EmulatedTime::to_c_emutime(hostrc.next_event_time())
997    }
998
999    #[unsafe(no_mangle)]
1000    pub unsafe extern "C-unwind" fn host_getNewPacketID(hostrc: *const Host) -> u64 {
1001        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1002        hostrc.get_new_packet_id()
1003    }
1004
1005    #[unsafe(no_mangle)]
1006    pub unsafe extern "C-unwind" fn host_freeAllApplications(hostrc: *const Host) {
1007        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1008        hostrc.free_all_applications()
1009    }
1010
1011    #[unsafe(no_mangle)]
1012    pub unsafe extern "C-unwind" fn host_getID(hostrc: *const Host) -> HostId {
1013        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1014        hostrc.id()
1015    }
1016
1017    /// SAFETY: The returned pointer belongs to Host, and is invalidated when
1018    /// `host` is moved or freed.
1019    #[unsafe(no_mangle)]
1020    pub unsafe extern "C-unwind" fn host_getTsc(host: *const Host) -> *const Tsc {
1021        let hostrc = unsafe { host.as_ref().unwrap() };
1022        hostrc.tsc()
1023    }
1024
1025    #[unsafe(no_mangle)]
1026    pub unsafe extern "C-unwind" fn host_getName(hostrc: *const Host) -> *const c_char {
1027        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1028        hostrc.params.hostname.as_ptr()
1029    }
1030
1031    #[unsafe(no_mangle)]
1032    pub unsafe extern "C-unwind" fn host_getDefaultIP(hostrc: *const Host) -> in_addr_t {
1033        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1034        let ip = hostrc.default_ip();
1035        u32::from(ip).to_be()
1036    }
1037
1038    #[unsafe(no_mangle)]
1039    pub unsafe extern "C-unwind" fn host_getNextPacketPriority(
1040        hostrc: *const Host,
1041    ) -> FifoPacketPriority {
1042        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1043        hostrc.get_next_packet_priority()
1044    }
1045
1046    #[unsafe(no_mangle)]
1047    pub unsafe extern "C-unwind" fn host_autotuneReceiveBuffer(hostrc: *const Host) -> bool {
1048        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1049        hostrc.params.autotune_recv_buf
1050    }
1051
1052    #[unsafe(no_mangle)]
1053    pub unsafe extern "C-unwind" fn host_autotuneSendBuffer(hostrc: *const Host) -> bool {
1054        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1055        hostrc.params.autotune_send_buf
1056    }
1057
1058    #[unsafe(no_mangle)]
1059    pub unsafe extern "C-unwind" fn host_getConfiguredRecvBufSize(hostrc: *const Host) -> u64 {
1060        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1061        hostrc.params.init_sock_recv_buf_size
1062    }
1063
1064    #[unsafe(no_mangle)]
1065    pub unsafe extern "C-unwind" fn host_getConfiguredSendBufSize(hostrc: *const Host) -> u64 {
1066        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1067        hostrc.params.init_sock_send_buf_size
1068    }
1069
1070    #[unsafe(no_mangle)]
1071    pub unsafe extern "C-unwind" fn host_getUpstreamRouter(hostrc: *const Host) -> *mut Router {
1072        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1073        &mut *hostrc.upstream_router_borrow_mut()
1074    }
1075
1076    #[unsafe(no_mangle)]
1077    pub unsafe extern "C-unwind" fn host_get_bw_down_kiBps(hostrc: *const Host) -> u64 {
1078        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1079        hostrc.bw_down_kiBps()
1080    }
1081
1082    #[unsafe(no_mangle)]
1083    pub unsafe extern "C-unwind" fn host_get_bw_up_kiBps(hostrc: *const Host) -> u64 {
1084        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1085        hostrc.bw_up_kiBps()
1086    }
1087
1088    /// SAFETY: The returned pointer is owned by the Host, and will be invalidated when
1089    /// the Host is destroyed, and possibly when it is otherwise moved or mutated.
1090    #[unsafe(no_mangle)]
1091    pub unsafe extern "C-unwind" fn host_getDataPath(hostrc: *const Host) -> *const c_char {
1092        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1093        hostrc.data_dir_path_cstring.as_ptr()
1094    }
1095
1096    #[unsafe(no_mangle)]
1097    pub unsafe extern "C-unwind" fn host_disassociateInterface(
1098        hostrc: *const Host,
1099        c_protocol: cshadow::ProtocolType,
1100        bind_ip: in_addr_t,
1101        bind_port: in_port_t,
1102        peer_ip: in_addr_t,
1103        peer_port: in_port_t,
1104    ) {
1105        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1106
1107        let bind_ip = Ipv4Addr::from(u32::from_be(bind_ip));
1108        let peer_ip = Ipv4Addr::from(u32::from_be(peer_ip));
1109        let bind_port = u16::from_be(bind_port);
1110        let peer_port = u16::from_be(peer_port);
1111
1112        let bind_addr = SocketAddrV4::new(bind_ip, bind_port);
1113        let peer_addr = SocketAddrV4::new(peer_ip, peer_port);
1114
1115        let protocol = IanaProtocol::from(c_protocol);
1116
1117        // associate the interfaces corresponding to bind_addr with socket
1118        hostrc
1119            .net_ns
1120            .disassociate_interface(protocol, bind_addr, peer_addr);
1121    }
1122
1123    #[unsafe(no_mangle)]
1124    pub unsafe extern "C-unwind" fn host_getRandomFreePort(
1125        hostrc: *const Host,
1126        c_protocol: cshadow::ProtocolType,
1127        interface_ip: in_addr_t,
1128        peer_ip: in_addr_t,
1129        peer_port: in_port_t,
1130    ) -> in_port_t {
1131        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1132
1133        let interface_ip = Ipv4Addr::from(u32::from_be(interface_ip));
1134        let peer_addr = SocketAddrV4::new(
1135            Ipv4Addr::from(u32::from_be(peer_ip)),
1136            u16::from_be(peer_port),
1137        );
1138
1139        let protocol = IanaProtocol::from(c_protocol);
1140
1141        hostrc
1142            .net_ns
1143            .get_random_free_port(
1144                protocol,
1145                interface_ip,
1146                peer_addr,
1147                hostrc.random.borrow_mut().deref_mut(),
1148            )
1149            .unwrap_or(0)
1150            .to_be()
1151    }
1152
1153    /// Returns a pointer to the Host's FutexTable.
1154    ///
1155    /// SAFETY: The returned pointer belongs to and is synchronized by the Host,
1156    /// and is invalidated when the Host is no longer accessible to the current
1157    /// thread, or something else accesses its FutexTable.
1158    #[unsafe(no_mangle)]
1159    pub unsafe extern "C-unwind" fn host_getFutexTable(hostrc: *const Host) -> *mut FutexTable {
1160        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1161        &mut *hostrc.futextable_borrow_mut()
1162    }
1163
1164    /// Returns the specified process, or NULL if it doesn't exist.
1165    #[unsafe(no_mangle)]
1166    pub unsafe extern "C-unwind" fn host_getProcess(
1167        host: *const Host,
1168        virtual_pid: libc::pid_t,
1169    ) -> *const Process {
1170        let host = unsafe { host.as_ref().unwrap() };
1171        let virtual_pid = ProcessId::try_from(virtual_pid).unwrap();
1172        host.process_borrow(virtual_pid)
1173            .map(|x| std::ptr::from_ref(&*x.borrow(host.root())))
1174            .unwrap_or(std::ptr::null_mut())
1175    }
1176
1177    /// Returns the specified thread, or NULL if it doesn't exist.
1178    /// If you already have the thread's Process*, `process_getThread` may be more
1179    /// efficient.
1180    ///
1181    /// # Safety
1182    ///
1183    /// The pointer should not be accessed from threads other than the calling thread,
1184    /// or after `host` is no longer active on the current thread.
1185    #[unsafe(no_mangle)]
1186    pub unsafe extern "C-unwind" fn host_getThread(
1187        host: *const Host,
1188        virtual_tid: libc::pid_t,
1189    ) -> *const Thread {
1190        let host = unsafe { host.as_ref().unwrap() };
1191        let tid = ThreadId::try_from(virtual_tid).unwrap();
1192        for process in host.processes.borrow().values() {
1193            let process = process.borrow(host.root());
1194            if let Some(thread) = process.thread_borrow(tid) {
1195                // We're returning a pointer to the Thread itself after having
1196                // dropped the borrow. In addition to the requirements noted for the calling code,
1197                // this could cause soundness issues if we were to ever take mutable borrows of
1198                // the RootedRefCell, since it'd be difficult to ensure we didn't have any simultaneous
1199                // additional references from dereferencing a C pointer.
1200                //
1201                // TODO: Add a variant of RootedRefCell that doesn't allow
1202                // mutable borrows, use it for Thread, and name that type
1203                // explicitly here to ensure a compilation error if the type is
1204                // changed again to one that would allow mutable references.
1205                let thread = thread.borrow(host.root());
1206                return std::ptr::from_ref(&*thread);
1207            };
1208        }
1209        std::ptr::null_mut()
1210    }
1211
1212    /// Returns the lock, or panics if the lock isn't held by Shadow.
1213    ///
1214    /// Generally the lock can and should be held when Shadow is running, and *not*
1215    /// held when any of the host's managed threads are running (leaving it available
1216    /// to be taken by the shim). While this can be a little fragile to ensure
1217    /// properly, debug builds detect if we get it wrong (e.g. we try accessing
1218    /// protected data without holding the lock, or the shim tries to take the lock
1219    /// but can't).
1220    ///
1221    /// SAFETY: The returned pointer is invalidated when the memory is unlocked, e.g.
1222    /// via `host_unlockShimShmemLock`.
1223    #[unsafe(no_mangle)]
1224    pub unsafe extern "C-unwind" fn host_getShimShmemLock(
1225        hostrc: *const Host,
1226    ) -> *mut shim_shmem::export::ShimShmemHostLock {
1227        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1228        let mut opt_lock = hostrc.shim_shmem_lock.borrow_mut();
1229        let lock = opt_lock.as_mut().unwrap();
1230        // SAFETY: The caller is responsible for not accessing the returned pointer
1231        // after the lock has been released.
1232        unsafe { lock.get().as_mut().unwrap().deref_mut() }
1233    }
1234
1235    /// Take the host's shared memory lock. See `host_getShimShmemLock`.
1236    #[unsafe(no_mangle)]
1237    pub unsafe extern "C-unwind" fn host_lockShimShmemLock(hostrc: *const Host) {
1238        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1239        hostrc.lock_shmem()
1240    }
1241
1242    /// Release the host's shared memory lock. See `host_getShimShmemLock`.
1243    #[unsafe(no_mangle)]
1244    pub unsafe extern "C-unwind" fn host_unlockShimShmemLock(hostrc: *const Host) {
1245        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1246        hostrc.unlock_shmem()
1247    }
1248
1249    /// Returns the next value and increments our monotonically increasing
1250    /// determinism sequence counter. The resulting values can be sorted to
1251    /// established a deterministic ordering, which can be useful when iterating
1252    /// items that are otherwise inconsistently ordered (e.g. hash table iterators).
1253    #[unsafe(no_mangle)]
1254    pub unsafe extern "C-unwind" fn host_getNextDeterministicSequenceValue(
1255        hostrc: *const Host,
1256    ) -> u64 {
1257        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1258        hostrc.get_next_deterministic_sequence_value()
1259    }
1260
1261    /// Schedule a task for this host at time 'time'.
1262    #[unsafe(no_mangle)]
1263    pub unsafe extern "C-unwind" fn host_scheduleTaskAtEmulatedTime(
1264        hostrc: *const Host,
1265        task: *mut TaskRef,
1266        time: CEmulatedTime,
1267    ) -> bool {
1268        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1269        let task = unsafe { task.as_ref().unwrap().clone() };
1270        let time = EmulatedTime::from_c_emutime(time).unwrap();
1271        hostrc.schedule_task_at_emulated_time(task, time)
1272    }
1273
1274    /// Schedule a task for this host at a time 'nanoDelay' from now,.
1275    #[unsafe(no_mangle)]
1276    pub unsafe extern "C-unwind" fn host_scheduleTaskWithDelay(
1277        hostrc: *const Host,
1278        task: *mut TaskRef,
1279        delay: CSimulationTime,
1280    ) -> bool {
1281        let hostrc = unsafe { hostrc.as_ref().unwrap() };
1282        let task = unsafe { task.as_ref().unwrap().clone() };
1283        let delay = SimulationTime::from_c_simtime(delay).unwrap();
1284        hostrc.schedule_task_with_delay(task, delay)
1285    }
1286
1287    #[unsafe(no_mangle)]
1288    pub unsafe extern "C-unwind" fn host_rngDouble(host: *const Host) -> f64 {
1289        let host = unsafe { host.as_ref().unwrap() };
1290        host.random_mut().random()
1291    }
1292
1293    /// Fills the buffer with pseudo-random bytes.
1294    #[unsafe(no_mangle)]
1295    pub extern "C-unwind" fn host_rngNextNBytes(host: *const Host, buf: *mut u8, len: usize) {
1296        let host = unsafe { host.as_ref().unwrap() };
1297        let buf = unsafe { std::slice::from_raw_parts_mut(buf, len) };
1298        host.random_mut().fill_bytes(buf);
1299    }
1300
1301    #[unsafe(no_mangle)]
1302    pub extern "C-unwind" fn host_paramsCpuFrequencyHz(host: *const Host) -> u64 {
1303        let host = unsafe { host.as_ref().unwrap() };
1304        host.params.cpu_frequency
1305    }
1306
1307    #[unsafe(no_mangle)]
1308    pub extern "C-unwind" fn host_addDelayNanos(host: *const Host, delay_nanos: u64) {
1309        let host = unsafe { host.as_ref().unwrap() };
1310        let delay = Duration::from_nanos(delay_nanos);
1311        host.cpu.borrow_mut().add_delay(delay);
1312    }
1313
1314    #[unsafe(no_mangle)]
1315    pub unsafe extern "C-unwind" fn host_socketWantsToSend(
1316        hostrc: *const Host,
1317        socket: *const InetSocket,
1318        addr: in_addr_t,
1319    ) {
1320        let host = unsafe { hostrc.as_ref().unwrap() };
1321        let socket = unsafe { socket.as_ref().unwrap() };
1322        let addr = u32::from_be(addr).into();
1323        host.notify_socket_has_packets(addr, socket);
1324    }
1325
1326    #[unsafe(no_mangle)]
1327    pub unsafe extern "C-unwind" fn host_continue(
1328        host: *const Host,
1329        pid: libc::pid_t,
1330        tid: libc::pid_t,
1331    ) {
1332        let host = unsafe { host.as_ref().unwrap() };
1333        host.resume(pid.try_into().unwrap(), tid.try_into().unwrap())
1334    }
1335}