shadow_rs/core/work/task.rs
use std::sync::Arc;

use crate::{
    host::host::Host,
    utility::{IsSend, IsSync, Magic, ObjectCounter},
};

/// Mostly for interoperability with C APIs.
/// In Rust code that doesn't need to interact with C, it may make more sense
/// to directly use a `Fn(&Host)` trait object.
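///
/// # Example
///
/// A minimal sketch (not compiled as a doc-test; `host` is assumed to be a
/// `&Host` provided by the simulation, with its lock held):
///
/// ```ignore
/// let task = TaskRef::new(|_host: &Host| {
///     // runs when the task executes, with the host lock held
/// });
/// // Clones are cheap and share the same underlying closure.
/// let clone = task.clone();
/// task.execute(host);
/// ```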
#[derive(Clone)]
pub struct TaskRef {
    magic: Magic<Self>,
    _counter: ObjectCounter,
    inner: Arc<dyn Fn(&Host) + Send + Sync>,
}

impl TaskRef {
    pub fn new<T: 'static + Fn(&Host) + Send + Sync>(f: T) -> Self {
        Self {
            inner: Arc::new(f),
            magic: Magic::new(),
            _counter: ObjectCounter::new("TaskRef"),
        }
    }

    /// Executes the task.
    ///
    /// If the task was created from C, this will panic if the task's host lock isn't held.
    pub fn execute(&self, host: &Host) {
        self.magic.debug_check();
        (self.inner)(host)
    }
}

impl IsSend for TaskRef {}
impl IsSync for TaskRef {}

impl std::fmt::Debug for TaskRef {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.magic.debug_check();
        f.debug_struct("TaskRef")
            .field("magic", &self.magic)
            // `Fn` doesn't have a debug impl, so we'll print the trait object's address
            .field("inner", &Arc::as_ptr(&self.inner))
            .finish()
    }
}

impl PartialEq for TaskRef {
    /// Two `TaskRef`s are equal if they point to the same task object.
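    ///
    /// A sketch of the resulting semantics (not compiled as a doc-test):
    ///
    /// ```ignore
    /// let a = TaskRef::new(|_: &Host| {});
    /// let b = a.clone();
    /// assert_eq!(a, b); // clones point to the same task object
    /// let c = TaskRef::new(|_: &Host| {});
    /// assert_ne!(a, c); // separately-created tasks differ, even with identical closures
    /// ```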
    fn eq(&self, other: &Self) -> bool {
        self.magic.debug_check();
        other.magic.debug_check();
        Arc::ptr_eq(&self.inner, &other.inner)
    }
}

impl Eq for TaskRef {}

pub mod export {
    use shadow_shim_helper_rs::util::SyncSendPointer;
    use shadow_shim_helper_rs::{HostId, notnull::notnull_mut};

    use super::*;
    use crate::utility::HostTreePointer;

    pub type TaskCallbackFunc =
        extern "C-unwind" fn(*const Host, *mut libc::c_void, *mut libc::c_void);
    pub type TaskObjectFreeFunc = Option<extern "C-unwind" fn(*mut libc::c_void)>;
    pub type TaskArgumentFreeFunc = Option<extern "C-unwind" fn(*mut libc::c_void)>;

    /// Compatibility struct for creating a `TaskRef` from function pointers.
    struct CTaskHostTreePtrs {
        callback: TaskCallbackFunc,
        object: HostTreePointer<libc::c_void>,
        argument: HostTreePointer<libc::c_void>,
        object_free: TaskObjectFreeFunc,
        argument_free: TaskArgumentFreeFunc,
    }

    impl CTaskHostTreePtrs {
        /// # Safety
        ///
        /// The callbacks may assume that the host lock is held when they
        /// start executing, but they must not cause `object` or `argument`
        /// to be dereferenced without the host lock held (e.g. by releasing
        /// the host lock, or by exfiltrating the pointers to be dereferenced
        /// by other code that might not hold the lock).
        unsafe fn new(
            callback: TaskCallbackFunc,
            object: HostTreePointer<libc::c_void>,
            argument: HostTreePointer<libc::c_void>,
            object_free: TaskObjectFreeFunc,
            argument_free: TaskArgumentFreeFunc,
        ) -> Self {
            Self {
                callback,
                object,
                argument,
                object_free,
                argument_free,
            }
        }

        /// Panics if the host lock for `object` and `argument` isn't held.
        fn execute(&self, host: *const Host) {
            (self.callback)(host, unsafe { self.object.ptr() }, unsafe {
                self.argument.ptr()
            })
        }
    }

    impl Drop for CTaskHostTreePtrs {
        fn drop(&mut self) {
            if let Some(object_free) = self.object_free {
                let ptr = unsafe { self.object.ptr() };
                object_free(ptr);
            }
            if let Some(argument_free) = self.argument_free {
                let ptr = unsafe { self.argument.ptr() };
                argument_free(ptr);
            }
        }
    }

    /// Compatibility struct for creating a `TaskRef` from function pointers.
    struct CTaskSyncSendPtrs {
        callback: TaskCallbackFunc,
        object: SyncSendPointer<libc::c_void>,
        argument: SyncSendPointer<libc::c_void>,
        object_free: TaskObjectFreeFunc,
        argument_free: TaskArgumentFreeFunc,
    }

    impl CTaskSyncSendPtrs {
        /// # Safety
        ///
        /// The callbacks must be safe to call from another thread with the
        /// given `object` and `argument`. If `object` and/or `argument`
        /// require the host lock to be held by the calling thread for safe
        /// access, use `CTaskHostTreePtrs` instead.
        unsafe fn new(
            callback: TaskCallbackFunc,
            object: SyncSendPointer<libc::c_void>,
            argument: SyncSendPointer<libc::c_void>,
            object_free: TaskObjectFreeFunc,
            argument_free: TaskArgumentFreeFunc,
        ) -> Self {
            Self {
                callback,
                object,
                argument,
                object_free,
                argument_free,
            }
        }

        /// Executes the task's callback with the stored pointers.
        fn execute(&self, host: *const Host) {
            (self.callback)(host, self.object.ptr(), self.argument.ptr())
        }
    }

    impl Drop for CTaskSyncSendPtrs {
        fn drop(&mut self) {
            if let Some(object_free) = self.object_free {
                let ptr = self.object.ptr();
                object_free(ptr);
            }
            if let Some(argument_free) = self.argument_free {
                let ptr = self.argument.ptr();
                argument_free(ptr);
            }
        }
    }

    /// Create a new reference-counted task that can only be executed on the
    /// given host. The callbacks can safely assume that they will only be
    /// called with the lock for the specified host held.
    ///
    /// # Safety
    ///
    /// * `object` and `argument` must meet the requirements for
    ///   `HostTreePointer::new`.
    /// * The callbacks may assume that the host lock is held when they start
    ///   executing, but they must not cause `object` or `argument` to be
    ///   dereferenced without the host lock held (e.g. by releasing the host
    ///   lock, or by exfiltrating the pointers to be dereferenced by other
    ///   code that might not hold the lock).
    ///
    /// There must still be some coordination between the creator of the
    /// `TaskRef` and the callers of `taskref_execute` and `taskref_drop` to
    /// ensure that the callbacks don't conflict with other accesses in the
    /// same thread (e.g. that the caller isn't holding a Rust mutable
    /// reference to one of the pointers while the callback transforms the
    /// pointer into another Rust reference).
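    ///
    /// # Example
    ///
    /// A sketch of a Rust-side caller standing in for C (not compiled as a
    /// doc-test; `host_id` and `object` are assumed to come from the
    /// simulation, with `object` owned by the host identified by `host_id`):
    ///
    /// ```ignore
    /// extern "C-unwind" fn run(
    ///     _host: *const Host,
    ///     object: *mut libc::c_void,
    ///     _argument: *mut libc::c_void,
    /// ) {
    ///     // Dereferencing is safe only because the host lock is held
    ///     // while the task executes.
    ///     unsafe { *(object as *mut u32) += 1 };
    /// }
    ///
    /// let task = unsafe {
    ///     taskref_new_bound(host_id, run, object, std::ptr::null_mut(), None, None)
    /// };
    /// // ... schedule `task` on the host identified by `host_id` ...
    /// // Drop this reference (with the host's lock held).
    /// unsafe { taskref_drop(task) };
    /// ```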
    #[unsafe(no_mangle)]
    pub unsafe extern "C-unwind" fn taskref_new_bound(
        host_id: HostId,
        callback: TaskCallbackFunc,
        object: *mut libc::c_void,
        argument: *mut libc::c_void,
        object_free: TaskObjectFreeFunc,
        argument_free: TaskArgumentFreeFunc,
    ) -> *mut TaskRef {
        let objs = unsafe {
            CTaskHostTreePtrs::new(
                callback,
                HostTreePointer::new_for_host(host_id, object),
                HostTreePointer::new_for_host(host_id, argument),
                object_free,
                argument_free,
            )
        };
        let task = TaskRef::new(move |host: &Host| objs.execute(host));
        // It'd be nice if we could use Arc::into_raw here, avoiding a level of
        // pointer indirection. Unfortunately that doesn't work because of the
        // internal dynamic Trait object, making the resulting pointer non-ABI
        // safe.
        Box::into_raw(Box::new(task))
    }

    /// Create a new reference-counted task that may be executed on any Host.
    ///
    /// # Safety
    ///
    /// * The callbacks must be safe to call with `object` and `argument` on
    ///   *any* Host (e.g. even if the task is expected to execute on another
    ///   Host, it must be safe to execute or free the task from the current
    ///   Host).
    ///
    /// There must still be some coordination between the creator of the
    /// `TaskRef` and the callers of `taskref_execute` and `taskref_drop` to
    /// ensure that the callbacks don't conflict with other accesses in the
    /// same thread (e.g. that the caller isn't holding a Rust mutable
    /// reference to one of the pointers while the callback transforms the
    /// pointer into another Rust reference).
    #[unsafe(no_mangle)]
    pub unsafe extern "C-unwind" fn taskref_new_unbound(
        callback: TaskCallbackFunc,
        object: *mut libc::c_void,
        argument: *mut libc::c_void,
        object_free: TaskObjectFreeFunc,
        argument_free: TaskArgumentFreeFunc,
    ) -> *mut TaskRef {
        let objs = unsafe {
            CTaskSyncSendPtrs::new(
                callback,
                SyncSendPointer::new(object),
                SyncSendPointer::new(argument),
                object_free,
                argument_free,
            )
        };
        let task = TaskRef::new(move |host: &Host| objs.execute(host));
        // It'd be nice if we could use Arc::into_raw here, avoiding a level of
        // pointer indirection. Unfortunately that doesn't work because of the
        // internal dynamic Trait object, making the resulting pointer non-ABI
        // safe.
        Box::into_raw(Box::new(task))
    }

    /// Destroys this reference to the `Task`, dropping the `Task` if no
    /// references remain.
    ///
    /// Panics if the task's Host lock isn't held.
    ///
    /// # Safety
    ///
    /// `task` must be legally dereferenceable.
    #[unsafe(no_mangle)]
    pub unsafe extern "C-unwind" fn taskref_drop(task: *mut TaskRef) {
        drop(unsafe { Box::from_raw(notnull_mut(task)) });
    }
}
273}