Skip to main content

shadow_rs/host/
thread.rs

1//! An emulated Linux thread.
2
3use std::cell::{Cell, RefCell};
4use std::ops::{Deref, DerefMut};
5
6use linux_api::errno::Errno;
7use linux_api::fcntl::DescriptorFlags;
8use linux_api::mman::{MapFlags, ProtFlags};
9use linux_api::posix_types::Pid;
10use linux_api::sched::{Sched, SchedFlags, sched_attr};
11use linux_api::signal::stack_t;
12use shadow_shim_helper_rs::HostId;
13use shadow_shim_helper_rs::explicit_drop::ExplicitDrop;
14use shadow_shim_helper_rs::rootedcell::rc::RootedRc;
15use shadow_shim_helper_rs::rootedcell::refcell::RootedRefCell;
16use shadow_shim_helper_rs::shim_shmem::{HostShmemProtected, ThreadShmem};
17use shadow_shim_helper_rs::syscall_types::{ForeignPtr, SyscallReg};
18use shadow_shim_helper_rs::util::SendPointer;
19use shadow_shmem::allocator::{ShMemBlock, shmalloc};
20
21use super::context::ProcessContext;
22use super::descriptor::descriptor_table::{DescriptorHandle, DescriptorTable};
23use super::host::Host;
24use super::managed_thread::{self, ManagedThread};
25use super::process::{Process, ProcessId};
26use crate::cshadow as c;
27use crate::host::syscall::condition::{SyscallConditionRef, SyscallConditionRefMut};
28use crate::host::syscall::handler::SyscallHandler;
29use crate::utility::callback_queue::CallbackQueue;
30use crate::utility::{IsSend, ObjectCounter, syscall};
31
/// The thread's state after having been allowed to execute some code.
#[derive(Debug)]
#[must_use]
pub enum ResumeResult {
    /// Blocked on a syscall; a syscall condition has been registered to wake
    /// the thread when it is satisfied.
    Blocked,
    /// The thread has exited with the given code.
    ExitedThread(i32),
    /// The whole process has exited.
    ExitedProcess,
}
43
/// A virtual Thread in Shadow. Currently a thin wrapper around the C Thread,
/// which this object owns, and frees on Drop.
pub struct Thread {
    // This thread's emulated thread ID (tid).
    id: ThreadId,
    // ID of the host this thread runs on.
    host_id: HostId,
    // ID of the emulated process this thread belongs to.
    process_id: ProcessId,
    // Scheduling policy, kept in sync with `sched_attr` by `set_sched_attrs`.
    sched_policy: Cell<Sched>,
    // Full scheduling attributes as for sched_setattr(2).
    sched_attr: Cell<sched_attr>,
    // If non-NULL, this address should be cleared and futex-awoken on thread exit.
    // See set_tid_address(2).
    tid_address: Cell<ForeignPtr<libc::pid_t>>,
    // Memory shared with the shim for this thread. Replaced wholesale on exec
    // (see `update_for_exec`).
    shim_shared_memory: ShMemBlock<'static, ThreadShmem>,
    // Per-thread syscall-handler state.
    syscallhandler: RootedRefCell<SyscallHandler>,
    /// Descriptor table; potentially shared with other threads and processes.
    // TODO: Consider using an Arc instead of RootedRc, particularly if this
    // continues to be the only RootedRc member. Cloning this object currently
    // only done when creating a child process or thread, and if we don't have
    // any RootedRc members we could get rid of the requirement to explicitly
    // drop Thread.
    desc_table: Option<RootedRc<RootedRefCell<DescriptorTable>>>,
    // TODO: convert to SyscallCondition (Rust wrapper for c::SysCallCondition).
    // Non-trivial because SyscallCondition is currently not `Send`.
    cond: Cell<SendPointer<c::SysCallCondition>>,
    /// The native, managed thread
    mthread: RefCell<ManagedThread>,
    // Tracks live `Thread` instances for diagnostics/leak accounting.
    _counter: ObjectCounter,
}
71
72impl IsSend for Thread {}
73
74impl Thread {
    /// Minimal wrapper around the native managed thread. Returns a shared
    /// borrow of the inner `RefCell`; don't hold it across a re-entrant call.
    pub fn mthread(&self) -> impl Deref<Target = ManagedThread> + '_ {
        self.mthread.borrow()
    }
79
    /// Update this thread to be the new thread group leader as part of an
    /// `execve` or `execveat` syscall.  Replaces the managed thread with
    /// `mthread` and updates the thread ID.
    ///
    /// Also resets per-exec state: the `set_tid_address` pointer, the
    /// sigaltstack, the syscall handler, CLOEXEC descriptors, and any pending
    /// syscall condition.
    pub fn update_for_exec(&mut self, host: &Host, mthread: ManagedThread, new_tid: ThreadId) {
        // Tear down the old managed thread; the new one replaces it.
        self.mthread.replace(mthread).handle_process_exit();
        self.tid_address.set(ForeignPtr::null());

        // Update shmem
        {
            // We potentially need to update the thread-id. It doesn't currently
            // have interior mutability, and since mutating it is rare, it seems
            // nicer to get a mutable copy of the current  shared memory, update
            // it, and alloc a new block, vs. adding another layer of interior
            // mutability at all the other points we access it.

            let host_shmem_prot = host.shim_shmem_lock_borrow().unwrap();

            let mut thread_shmem =
                ThreadShmem::clone(&self.shim_shared_memory, &host_shmem_prot.root);

            // thread id is updated to make this the new thread group leader.
            thread_shmem.tid = new_tid.into();

            // sigaltstack is reset to disabled.
            unsafe {
                *thread_shmem
                    .protected
                    .borrow_mut(&host_shmem_prot.root)
                    .sigaltstack_mut() = stack_t::new(
                    std::ptr::null_mut(),
                    linux_api::signal::SigAltStackFlags::SS_DISABLE,
                    0,
                )
            };

            // Old block is dropped when the new one is assigned; note this
            // invalidates any raw pointers previously handed out to C.
            self.shim_shared_memory = shmalloc(thread_shmem);
        }

        // Fresh syscall-handler state under the new tid.
        self.syscallhandler = RootedRefCell::new(
            host.root(),
            SyscallHandler::new(
                host.id(),
                self.process_id,
                new_tid,
                host.params.use_syscall_counters,
            ),
        );

        // Update descriptor table
        {
            // Descriptor table is unshared
            let desc_table_rc = self.desc_table.take().unwrap();
            let mut desc_table = DescriptorTable::clone(&desc_table_rc.borrow(host.root()));
            desc_table_rc.explicit_drop_recursive(host.root(), host);

            // Any descriptors with CLOEXEC are closed.
            let to_close: Vec<DescriptorHandle> = desc_table
                .iter()
                .filter_map(|(handle, descriptor)| {
                    if descriptor.flags().contains(DescriptorFlags::FD_CLOEXEC) {
                        Some(*handle)
                    } else {
                        None
                    }
                })
                .collect();

            CallbackQueue::queue_and_run_with_legacy(|q| {
                for handle in to_close {
                    log::trace!("Unregistering FD_CLOEXEC descriptor {handle:?}");
                    if let Some(Err(e)) = desc_table
                        .deregister_descriptor(handle)
                        .unwrap()
                        .close(host, q)
                    {
                        log::debug!("Error closing {handle:?}: {e:?}");
                    };
                }
            });

            self.desc_table = Some(RootedRc::new(
                host.root(),
                RootedRefCell::new(host.root(), desc_table),
            ));
        }

        // Cancel and release any pending syscall condition.
        if let Some(c) = unsafe { self.cond.get_mut().ptr().as_mut() } {
            unsafe { c::syscallcondition_cancel(c) };
            unsafe { c::syscallcondition_unref(c) };
        }
        self.cond = Cell::new(unsafe { SendPointer::new(std::ptr::null_mut()) });

        self.id = new_tid;
    }
174
175    /// Have the plugin thread natively execute the given syscall.
176    fn native_syscall_raw(
177        &self,
178        ctx: &ProcessContext,
179        n: i64,
180        args: &[SyscallReg],
181    ) -> libc::c_long {
182        self.mthread
183            .borrow()
184            .native_syscall(&ctx.with_thread(self), n, args)
185            .into()
186    }
187
188    /// Have the plugin thread natively execute the given syscall.
189    fn native_syscall(
190        &self,
191        ctx: &ProcessContext,
192        n: i64,
193        args: &[SyscallReg],
194    ) -> Result<SyscallReg, Errno> {
195        syscall::raw_return_value_to_result(self.native_syscall_raw(ctx, n, args))
196    }
197
    /// The ID of the emulated process this thread belongs to.
    pub fn process_id(&self) -> ProcessId {
        self.process_id
    }
201
    /// The ID of the host this thread runs on.
    pub fn host_id(&self) -> HostId {
        self.host_id
    }
205
    /// The native (real OS) process ID of the underlying managed thread.
    pub fn native_pid(&self) -> Pid {
        self.mthread.borrow().native_pid()
    }
209
    /// The native (real OS) thread ID of the underlying managed thread.
    pub fn native_tid(&self) -> Pid {
        self.mthread.borrow().native_tid()
    }
213
    /// This thread's emulated thread ID.
    pub fn id(&self) -> ThreadId {
        self.id
    }
217
    /// The thread's current scheduling policy, as set via `set_sched_attrs`.
    pub fn sched_policy(&self) -> Sched {
        self.sched_policy.get()
    }
221
    /// The thread's scheduling priority.
    ///
    /// Panics if the stored priority doesn't fit in a `c_int`.
    pub fn sched_priority(&self) -> std::ffi::c_int {
        self.sched_attr.get().sched_priority.try_into().unwrap()
    }
225
226    pub fn sched_reset_on_fork(&self) -> bool {
227        let flags = self.sched_attr.get().sched_flags;
228        flags == u64::try_from(SchedFlags::SCHED_FLAG_RESET_ON_FORK.bits()).unwrap()
229    }
230
    /// Set the scheduling policy, reset-on-fork flag, and priority, keeping
    /// the `sched_policy` cell and the full `sched_attr` struct in sync.
    pub fn set_sched_attrs(&self, policy: Sched, reset_on_fork: bool, priority: std::ffi::c_int) {
        self.sched_policy.set(policy);

        // Mirror the same values into the full `sched_attr` struct.
        let mut sched_attr = self.sched_attr.get();
        sched_attr.sched_policy = u32::try_from(i32::from(policy)).unwrap();
        sched_attr.sched_flags = if reset_on_fork {
            u64::try_from(SchedFlags::SCHED_FLAG_RESET_ON_FORK.bits()).unwrap()
        } else {
            0
        };
        sched_attr.sched_priority = priority.try_into().unwrap();
        self.sched_attr.set(sched_attr);
    }
244
    /// Returns whether the given thread is its thread group (aka process) leader.
    /// Typically this is true for the first thread created in a process.
    pub fn is_leader(&self) -> bool {
        // The leader's tid equals its process id.
        self.id == self.process_id.into()
    }
250
251    pub fn syscall_condition(&self) -> Option<SyscallConditionRef<'_>> {
252        // We check the for null explicitly here instead of using `as_mut` to
253        // construct and match an `Option<&mut c::SysCallCondition>`, since it's
254        // difficult to ensure we're not breaking any Rust aliasing rules when
255        // constructing a mutable reference.
256        let c = self.cond.get().ptr();
257        if c.is_null() {
258            None
259        } else {
260            Some(unsafe { SyscallConditionRef::borrow_from_c(c) })
261        }
262    }
263
264    pub fn syscall_condition_mut(&self) -> Option<SyscallConditionRefMut<'_>> {
265        // We can't safely use `as_mut` here, since that would construct a mutable reference,
266        // and we can't prove no other reference exists.
267        let c = self.cond.get().ptr();
268        if c.is_null() {
269            None
270        } else {
271            Some(unsafe { SyscallConditionRefMut::borrow_from_c(c) })
272        }
273    }
274
    /// Cancel and release any pending syscall condition, leaving the stored
    /// pointer NULL. Safe to call when no condition is pending.
    pub fn cleanup_syscall_condition(&self) {
        // Swap NULL in first so nothing else can observe the stale pointer.
        if let Some(c) = unsafe {
            self.cond
                .replace(SendPointer::new(std::ptr::null_mut()))
                .ptr()
                .as_mut()
        } {
            unsafe { c::syscallcondition_cancel(c) };
            unsafe { c::syscallcondition_unref(c) };
        }
    }
286
    /// The thread's descriptor table, potentially shared with other threads
    /// and processes. Panics if called after `explicit_drop`.
    pub fn descriptor_table(&self) -> &RootedRc<RootedRefCell<DescriptorTable>> {
        self.desc_table.as_ref().unwrap()
    }
290
    /// Immutably borrow the descriptor table.
    #[track_caller]
    pub fn descriptor_table_borrow<'a>(
        &'a self,
        host: &'a Host,
    ) -> impl Deref<Target = DescriptorTable> + 'a {
        self.desc_table.as_ref().unwrap().borrow(host.root())
    }
298
    /// Mutably borrow the descriptor table.
    #[track_caller]
    pub fn descriptor_table_borrow_mut<'a>(
        &'a self,
        host: &'a Host,
    ) -> impl DerefMut<Target = DescriptorTable> + 'a {
        self.desc_table.as_ref().unwrap().borrow_mut(host.root())
    }
306
307    /// Natively execute munmap(2) on the given thread.
308    pub fn native_munmap(
309        &self,
310        ctx: &ProcessContext,
311        ptr: ForeignPtr<u8>,
312        size: usize,
313    ) -> Result<(), Errno> {
314        self.native_syscall(ctx, libc::SYS_munmap, &[ptr.into(), size.into()])?;
315        Ok(())
316    }
317
318    /// Natively execute mmap(2) on the given thread.
319    pub fn native_mmap(
320        &self,
321        ctx: &ProcessContext,
322        addr: ForeignPtr<u8>,
323        len: usize,
324        prot: ProtFlags,
325        flags: MapFlags,
326        fd: i32,
327        offset: i64,
328    ) -> Result<ForeignPtr<u8>, Errno> {
329        Ok(self
330            .native_syscall(
331                ctx,
332                libc::SYS_mmap,
333                &[
334                    SyscallReg::from(addr),
335                    SyscallReg::from(len),
336                    SyscallReg::from(prot.bits()),
337                    SyscallReg::from(flags.bits()),
338                    SyscallReg::from(fd),
339                    SyscallReg::from(offset),
340                ],
341            )?
342            .into())
343    }
344
345    /// Natively execute mremap(2) on the given thread.
346    pub fn native_mremap(
347        &self,
348        ctx: &ProcessContext,
349        old_addr: ForeignPtr<u8>,
350        old_len: usize,
351        new_len: usize,
352        flags: i32,
353        new_addr: ForeignPtr<u8>,
354    ) -> Result<ForeignPtr<u8>, Errno> {
355        Ok(self
356            .native_syscall(
357                ctx,
358                libc::SYS_mremap,
359                &[
360                    SyscallReg::from(old_addr),
361                    SyscallReg::from(old_len),
362                    SyscallReg::from(new_len),
363                    SyscallReg::from(flags),
364                    SyscallReg::from(new_addr),
365                ],
366            )?
367            .into())
368    }
369
370    /// Natively execute mmap(2) on the given thread.
371    pub fn native_mprotect(
372        &self,
373        ctx: &ProcessContext,
374        addr: ForeignPtr<u8>,
375        len: usize,
376        prot: ProtFlags,
377    ) -> Result<(), Errno> {
378        self.native_syscall(
379            ctx,
380            libc::SYS_mprotect,
381            &[
382                SyscallReg::from(addr),
383                SyscallReg::from(len),
384                SyscallReg::from(prot.bits()),
385            ],
386        )?;
387        Ok(())
388    }
389
390    /// Natively execute open(2) on the given thread.
391    pub fn native_open(
392        &self,
393        ctx: &ProcessContext,
394        pathname: ForeignPtr<u8>,
395        flags: i32,
396        mode: i32,
397    ) -> Result<i32, Errno> {
398        let res = self.native_syscall(
399            ctx,
400            libc::SYS_open,
401            &[
402                SyscallReg::from(pathname),
403                SyscallReg::from(flags),
404                SyscallReg::from(mode),
405            ],
406        );
407        Ok(i32::from(res?))
408    }
409
410    /// Natively execute close(2) on the given thread.
411    pub fn native_close(&self, ctx: &ProcessContext, fd: i32) -> Result<(), Errno> {
412        self.native_syscall(ctx, libc::SYS_close, &[SyscallReg::from(fd)])?;
413        Ok(())
414    }
415
416    /// Natively execute brk(2) on the given thread.
417    pub fn native_brk(
418        &self,
419        ctx: &ProcessContext,
420        addr: ForeignPtr<u8>,
421    ) -> Result<ForeignPtr<u8>, Errno> {
422        let res = self.native_syscall(ctx, libc::SYS_brk, &[SyscallReg::from(addr)])?;
423        Ok(ForeignPtr::from(res))
424    }
425
426    /// Natively execute a chdir(2) syscall on the given thread.
427    pub fn native_chdir(
428        &self,
429        ctx: &ProcessContext,
430        pathname: ForeignPtr<std::ffi::c_char>,
431    ) -> Result<i32, Errno> {
432        let res = self.native_syscall(ctx, libc::SYS_chdir, &[SyscallReg::from(pathname)]);
433        Ok(i32::from(res?))
434    }
435
    /// Allocates some space in the plugin's memory. Use `get_writeable_ptr` to write to it, and
    /// `flush` to ensure that the write is flushed to the plugin's memory.
    ///
    /// Implemented as an anonymous, private, read/write mmap in the plugin.
    pub fn malloc_foreign_ptr(
        &self,
        ctx: &ProcessContext,
        size: usize,
    ) -> Result<ForeignPtr<u8>, Errno> {
        // SAFETY: No pointer specified; can't pass a bad one.
        self.native_mmap(
            ctx,
            ForeignPtr::null(),
            size,
            ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
            MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS,
            -1,
            0,
        )
    }
454
455    /// Frees a pointer previously returned by `malloc_foreign_ptr`
456    pub fn free_foreign_ptr(
457        &self,
458        ctx: &ProcessContext,
459        ptr: ForeignPtr<u8>,
460        size: usize,
461    ) -> Result<(), Errno> {
462        self.native_munmap(ctx, ptr, size)?;
463        Ok(())
464    }
465
    /// Create a new `Thread`, wrapping `mthread`. Intended for use by
    /// syscall handlers such as `clone`.
    ///
    /// The new thread starts with default scheduling attributes
    /// (`SCHED_NORMAL`, priority 0), no `set_tid_address` pointer, a fresh
    /// shim shared-memory block, and the given (possibly shared) descriptor
    /// table.
    pub fn wrap_mthread(
        host: &Host,
        mthread: ManagedThread,
        desc_table: RootedRc<RootedRefCell<DescriptorTable>>,
        pid: ProcessId,
        tid: ThreadId,
    ) -> Result<Thread, Errno> {
        let child = Self {
            mthread: RefCell::new(mthread),
            syscallhandler: RootedRefCell::new(
                host.root(),
                SyscallHandler::new(host.id(), pid, tid, host.params.use_syscall_counters),
            ),
            // No pending syscall condition.
            cond: Cell::new(unsafe { SendPointer::new(std::ptr::null_mut()) }),
            id: tid,
            host_id: host.id(),
            process_id: pid,
            sched_policy: Cell::new(Sched::SCHED_NORMAL),
            // Default scheduling attributes mirroring SCHED_NORMAL.
            sched_attr: Cell::new(sched_attr {
                size: u32::try_from(std::mem::size_of::<sched_attr>()).unwrap(),
                sched_policy: u32::try_from(i32::from(Sched::SCHED_NORMAL)).unwrap(),
                sched_flags: 0,
                sched_nice: 0,
                sched_priority: 0,
                sched_runtime: 0,
                sched_deadline: 0,
                sched_period: 0,
                sched_util_min: 0,
                sched_util_max: 0,
            }),
            tid_address: Cell::new(ForeignPtr::null()),
            shim_shared_memory: shmalloc(ThreadShmem::new(
                &host.shim_shmem_lock_borrow().unwrap(),
                tid.into(),
            )),
            desc_table: Some(desc_table),
            _counter: ObjectCounter::new("Thread"),
        };
        Ok(child)
    }
508
    /// Shared memory for this thread.
    ///
    /// NOTE: the block is replaced on exec (`update_for_exec`), so raw
    /// pointers into it must not be held across an exec.
    pub fn shmem(&self) -> &ShMemBlock<'_, ThreadShmem> {
        &self.shim_shared_memory
    }
513
    /// Let the managed thread execute until it blocks on a syscall condition
    /// or exits, reporting the outcome as a [`ResumeResult`].
    ///
    /// Manages the lifecycle of the C `SysCallCondition`: the previous
    /// condition (if any) is cancelled before resuming and unref'd after, and
    /// a new one returned by the managed thread is stored and waited on.
    pub fn resume(&self, ctx: &ProcessContext) -> ResumeResult {
        // Ensure the condition isn't triggered again, but don't clear it yet.
        // Syscall handler can still access.
        if let Some(c) = unsafe { self.cond.get().ptr().as_mut() } {
            unsafe { c::syscallcondition_cancel(c) };
        }

        let mut syscall_handler = self.syscallhandler.borrow_mut(ctx.host.root());

        let res = self
            .mthread
            .borrow()
            .resume(&ctx.with_thread(self), &mut syscall_handler);

        // Now we're done with old condition.
        if let Some(c) = unsafe {
            self.cond
                .replace(SendPointer::new(std::ptr::null_mut()))
                .ptr()
                .as_mut()
        } {
            unsafe { c::syscallcondition_unref(c) };
        }

        match res {
            managed_thread::ResumeResult::Blocked(cond) => {
                // Wait on new condition.
                let cond = cond.into_inner();
                self.cond.set(unsafe { SendPointer::new(cond) });
                if let Some(cond) = unsafe { cond.as_mut() } {
                    unsafe { c::syscallcondition_waitNonblock(cond, ctx.host, ctx.process, self) }
                }
                ResumeResult::Blocked
            }
            managed_thread::ResumeResult::ExitedThread(c) => ResumeResult::ExitedThread(c),
            managed_thread::ResumeResult::ExitedProcess => ResumeResult::ExitedProcess,
        }
    }
552
    /// Tear down state in response to the whole process exiting: cancels any
    /// pending syscall condition and notifies the managed thread.
    pub fn handle_process_exit(&self) {
        self.cleanup_syscall_condition();
        self.mthread.borrow().handle_process_exit();
    }
557
    /// The thread's exit code, if it has exited.
    pub fn return_code(&self) -> Option<i32> {
        self.mthread.borrow().return_code()
    }
561
    /// Whether the underlying managed thread is still running.
    pub fn is_running(&self) -> bool {
        self.mthread.borrow().is_running()
    }
565
    /// Gets the `clear_child_tid` address, as set by [`Thread::set_tid_address`].
    pub fn get_tid_address(&self) -> ForeignPtr<libc::pid_t> {
        self.tid_address.get()
    }
569
    /// Sets the `clear_child_tid` attribute as for `set_tid_address(2)`. The thread will perform a
    /// futex-wake operation on the given address on termination.
    pub fn set_tid_address(&self, ptr: ForeignPtr<libc::pid_t>) {
        self.tid_address.set(ptr)
    }
575
576    pub fn unblocked_signal_pending(
577        &self,
578        process: &Process,
579        host_shmem: &HostShmemProtected,
580    ) -> bool {
581        debug_assert_eq!(process.id(), self.process_id);
582
583        let thread_shmem_protected = self.shmem().protected.borrow(&host_shmem.root);
584
585        let unblocked_signals = !thread_shmem_protected.blocked_signals;
586        let pending_signals = self
587            .shmem()
588            .protected
589            .borrow(&host_shmem.root)
590            .pending_signals
591            | process
592                .shmem()
593                .protected
594                .borrow(&host_shmem.root)
595                .pending_signals;
596
597        !(pending_signals & unblocked_signals).is_empty()
598    }
599}
600
impl Drop for Thread {
    fn drop(&mut self) {
        // If a syscall condition is still pending, cancel it and release our
        // reference to the C object so it isn't leaked.
        if let Some(c) = unsafe { self.cond.get_mut().ptr().as_mut() } {
            unsafe { c::syscallcondition_cancel(c) };
            unsafe { c::syscallcondition_unref(c) };
        }
    }
}
609
/// Explicit teardown, needed because the descriptor table is a `RootedRc`
/// that must be dropped with access to the host's root.
impl ExplicitDrop for Thread {
    type ExplicitDropParam = Host;
    type ExplicitDropResult = ();

    fn explicit_drop(mut self, host: &Host) {
        // Recursively drop the (possibly shared) descriptor table under the
        // host root before the implicit `Drop` runs.
        if let Some(table) = self.desc_table.take() {
            table.explicit_drop_recursive(host.root(), host);
        }
    }
}
620
/// An emulated thread ID (tid). A thread-group leader's tid equals its
/// process id.
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, Ord, PartialOrd)]
pub struct ThreadId(u32);
623
624impl TryFrom<libc::pid_t> for ThreadId {
625    type Error = <u32 as TryFrom<libc::pid_t>>::Error;
626
627    fn try_from(value: libc::pid_t) -> Result<Self, Self::Error> {
628        Ok(Self(u32::try_from(value)?))
629    }
630}
631
/// A process's leader thread has the same id as the process itself.
impl From<ProcessId> for ThreadId {
    fn from(value: ProcessId) -> Self {
        // A process ID is also a valid thread ID
        ThreadId(value.into())
    }
}
638
impl From<ThreadId> for libc::pid_t {
    fn from(val: ThreadId) -> Self {
        // Panics if the id exceeds `pid_t::MAX`. Ids created via
        // `TryFrom<pid_t>` fit by construction; assumed true for ids from
        // `ProcessId` as well.
        val.0.try_into().unwrap()
    }
}
644
645impl std::fmt::Display for ThreadId {
646    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
647        write!(f, "{}", self.0)
648    }
649}
650
651mod export {
652    use shadow_shim_helper_rs::shim_shmem::export::{ShimShmemHostLock, ShimShmemThread};
653    use shadow_shim_helper_rs::syscall_types::UntypedForeignPtr;
654
655    use super::*;
656    use crate::core::worker::Worker;
657    use crate::host::descriptor::socket::Socket;
658    use crate::host::descriptor::socket::inet::InetSocket;
659    use crate::host::descriptor::{CompatFile, Descriptor, File};
660
    /// Make the requested syscall from within the plugin.
    ///
    /// Does *not* flush or invalidate MemoryManager pointers, such as those
    /// obtained through `process_getReadablePtr` etc.
    ///
    /// Arguments are treated opaquely. e.g. no pointer-marshalling is done.
    ///
    /// The return value is the value returned by the syscall *instruction*.
    /// You can map to a corresponding errno value with syscall_rawReturnValueToErrno.
    //
    // Rust doesn't support declaring a function with varargs (...), but this
    // declaration is ABI compatible with a caller who sees this function declared
    // with arguments `Thread* thread, long n, ...`. We manually generate that declaration
    // in our bindings.
    #[unsafe(no_mangle)]
    unsafe extern "C-unwind" fn thread_nativeSyscall(
        thread: *const Thread,
        n: libc::c_long,
        arg1: SyscallReg,
        arg2: SyscallReg,
        arg3: SyscallReg,
        arg4: SyscallReg,
        arg5: SyscallReg,
        arg6: SyscallReg,
    ) -> libc::c_long {
        // Caller must pass a valid, live `Thread` pointer; panics on NULL.
        let thread = unsafe { thread.as_ref().unwrap() };
        // Panics if there is no active host/process on this worker.
        Worker::with_active_host(|host| {
            Worker::with_active_process(|process| {
                thread.native_syscall_raw(
                    &ProcessContext::new(host, process),
                    n,
                    &[arg1, arg2, arg3, arg4, arg5, arg6],
                )
            })
            .unwrap()
        })
        .unwrap()
    }
699
    /// Returns the emulated thread ID as a `pid_t`.
    #[unsafe(no_mangle)]
    pub unsafe extern "C-unwind" fn thread_getID(thread: *const Thread) -> libc::pid_t {
        // Caller must pass a valid, live `Thread` pointer; panics on NULL.
        let thread = unsafe { thread.as_ref().unwrap() };
        thread.id().into()
    }
705
    /// Gets the `clear_child_tid` attribute, as set by `thread_setTidAddress`.
    #[unsafe(no_mangle)]
    pub unsafe extern "C-unwind" fn thread_getTidAddress(
        thread: *const Thread,
    ) -> UntypedForeignPtr {
        // Caller must pass a valid, live `Thread` pointer; panics on NULL.
        let thread = unsafe { thread.as_ref().unwrap() };
        thread.get_tid_address().cast::<()>()
    }
714
    /// Returns a typed pointer to memory shared with the shim (which is backed by
    /// the block returned by thread_getShMBlock).
    ///
    /// NOTE(review): the shmem block is replaced on exec (`update_for_exec`),
    /// invalidating previously returned pointers — confirm callers re-fetch.
    #[unsafe(no_mangle)]
    pub unsafe extern "C-unwind" fn thread_sharedMem(
        thread: *const Thread,
    ) -> *const ShimShmemThread {
        // Caller must pass a valid, live `Thread` pointer; panics on NULL.
        let thread = unsafe { thread.as_ref().unwrap() };
        &*thread.shim_shared_memory
    }
724
    /// Returns a pointer to the `Process` this thread belongs to.
    ///
    /// NOTE(review): the pointer is derived from a borrow that ends before
    /// this function returns; presumably the process outlives the caller's
    /// use of the pointer — confirm against C callers.
    #[unsafe(no_mangle)]
    pub unsafe extern "C-unwind" fn thread_getProcess(thread: *const Thread) -> *const Process {
        // Caller must pass a valid, live `Thread` pointer; panics on NULL.
        let thread = unsafe { thread.as_ref().unwrap() };
        Worker::with_active_host(|host| {
            let process = host.process_borrow(thread.process_id).unwrap();
            let p: &Process = &process.borrow(host.root());
            std::ptr::from_ref(p)
        })
        .unwrap()
    }
735
    /// Returns a pointer to the worker's active host, asserting that it
    /// matches this thread's host id.
    #[unsafe(no_mangle)]
    pub unsafe extern "C-unwind" fn thread_getHost(thread: *const Thread) -> *const Host {
        // Caller must pass a valid, live `Thread` pointer; panics on NULL.
        let thread = unsafe { thread.as_ref().unwrap() };
        Worker::with_active_host(|host| {
            assert_eq!(host.id(), thread.host_id());
            std::ptr::from_ref(host)
        })
        .unwrap()
    }
745
    /// Cancel and drop this thread's pending syscall condition, if any.
    #[unsafe(no_mangle)]
    pub unsafe extern "C-unwind" fn thread_clearSysCallCondition(thread: *const Thread) {
        // Caller must pass a valid, live `Thread` pointer; panics on NULL.
        let thread = unsafe { thread.as_ref().unwrap() };
        thread.cleanup_syscall_condition();
    }
751
    /// Returns true iff there is an unblocked, unignored signal pending for this
    /// thread (or its process).
    #[unsafe(no_mangle)]
    pub unsafe extern "C-unwind" fn thread_unblockedSignalPending(
        thread: *const Thread,
        host_lock: *const ShimShmemHostLock,
    ) -> bool {
        // Caller must pass valid, live pointers; panics on NULL.
        let thread = unsafe { thread.as_ref().unwrap() };
        let host_lock = unsafe { host_lock.as_ref().unwrap() };

        Worker::with_active_host(|host| {
            let process = host.process_borrow(thread.process_id()).unwrap();
            let process = process.borrow(host.root());
            thread.unblocked_signal_pending(&process, host_lock)
        })
        .unwrap()
    }
769
    /// Register a `Descriptor`. This takes ownership of the descriptor and you must not access it
    /// after.
    ///
    /// Returns the handle (fd number) assigned by the descriptor table.
    #[unsafe(no_mangle)]
    pub extern "C-unwind" fn thread_registerDescriptor(
        thread: *const Thread,
        desc: *mut Descriptor,
    ) -> libc::c_int {
        // Caller must pass valid pointers; panics on NULL.
        let thread = unsafe { thread.as_ref().unwrap() };
        let desc = Descriptor::from_raw(desc).unwrap();

        Worker::with_active_host(|host| {
            thread
                .descriptor_table_borrow_mut(host)
                .register_descriptor(*desc)
                .unwrap()
                .into()
        })
        .unwrap()
    }
789
    /// Get a temporary reference to a descriptor.
    ///
    /// Returns NULL if the handle is invalid (negative) or unregistered.
    /// NOTE(review): pointer is only valid until the table is next mutated —
    /// confirm C callers treat it as transient.
    #[unsafe(no_mangle)]
    pub extern "C-unwind" fn thread_getRegisteredDescriptor(
        thread: *const Thread,
        handle: libc::c_int,
    ) -> *const Descriptor {
        // Caller must pass a valid, live `Thread` pointer; panics on NULL.
        let thread = unsafe { thread.as_ref().unwrap() };

        // Reject handles that don't convert to a DescriptorHandle.
        let handle = match handle.try_into() {
            Ok(i) => i,
            Err(_) => {
                log::debug!("Attempted to get a descriptor with handle {handle}");
                return std::ptr::null();
            }
        };

        Worker::with_active_host(
            |host| match thread.descriptor_table_borrow(host).get(handle) {
                Some(d) => std::ptr::from_ref(d),
                None => std::ptr::null(),
            },
        )
        .unwrap()
    }
814
    /// Get a temporary mutable reference to a descriptor.
    ///
    /// Returns NULL if the handle is invalid (negative) or unregistered.
    /// NOTE(review): pointer is only valid until the table is next accessed —
    /// confirm C callers treat it as transient.
    #[unsafe(no_mangle)]
    pub extern "C-unwind" fn thread_getRegisteredDescriptorMut(
        thread: *const Thread,
        handle: libc::c_int,
    ) -> *mut Descriptor {
        // Caller must pass a valid, live `Thread` pointer; panics on NULL.
        let thread = unsafe { thread.as_ref().unwrap() };

        // Reject handles that don't convert to a DescriptorHandle.
        let handle = match handle.try_into() {
            Ok(i) => i,
            Err(_) => {
                log::debug!("Attempted to get a descriptor with handle {handle}");
                return std::ptr::null_mut();
            }
        };

        Worker::with_active_host(|host| {
            match thread.descriptor_table_borrow_mut(host).get_mut(handle) {
                Some(d) => d as *mut Descriptor,
                None => std::ptr::null_mut(),
            }
        })
        .unwrap()
    }
839
840    /// Get a temporary reference to a legacy file.
841    #[unsafe(no_mangle)]
842    pub unsafe extern "C-unwind" fn thread_getRegisteredLegacyFile(
843        thread: *const Thread,
844        handle: libc::c_int,
845    ) -> *mut c::LegacyFile {
846        let thread = unsafe { thread.as_ref().unwrap() };
847
848        let handle = match handle.try_into() {
849            Ok(i) => i,
850            Err(_) => {
851                log::debug!("Attempted to get a descriptor with handle {handle}");
852                return std::ptr::null_mut();
853            }
854        };
855
856        Worker::with_active_host(|host| {
857        match thread.descriptor_table_borrow(host).get(handle).map(|x| x.file()) {
858            Some(CompatFile::Legacy(file)) => file.ptr(),
859            Some(CompatFile::New(file)) => {
860                // we have a special case for the legacy C TCP objects
861                if let File::Socket(Socket::Inet(InetSocket::LegacyTcp(tcp))) = file.inner_file() {
862                    tcp.borrow().as_legacy_file()
863                } else {
864                    log::warn!(
865                        "A descriptor exists for fd={handle}, but it is not a legacy file. Returning NULL."
866                    );
867                    std::ptr::null_mut()
868                }
869            }
870            None => std::ptr::null_mut(),
871        }
872        }).unwrap()
873    }
874}