// shadow_rs/core/work/task.rs
use std::sync::Arc;
use crate::{
host::host::Host,
utility::{IsSend, IsSync, Magic, ObjectCounter},
};
/// A cloneable, reference-counted handle to a host-bound callback.
///
/// Mostly for interoperability with C APIs.
/// In Rust code that doesn't need to interact with C, it may make more sense
/// to directly use a `Fn(&mut Host)` trait object.
#[derive(Clone)]
pub struct TaskRef {
    // Debug-build sanity marker; `debug_check()` is called before every use
    // (see `execute`, `fmt`, `eq`) — presumably to catch use of a corrupted
    // or freed value. TODO(review): confirm against `Magic`'s definition.
    magic: Magic<Self>,
    // Instance accounting tagged with the label "TaskRef"; presumably used
    // for leak detection/statistics — verify against `ObjectCounter`.
    _counter: ObjectCounter,
    // The actual task body. Shared via `Arc`, so `Clone` is a cheap
    // refcount bump and all clones compare equal (see `PartialEq`).
    inner: Arc<dyn Fn(&Host) + Send + Sync>,
}
impl TaskRef {
    /// Wraps `f` in a new reference-counted task handle.
    pub fn new<T>(f: T) -> Self
    where
        T: 'static + Fn(&Host) + Send + Sync,
    {
        Self {
            magic: Magic::new(),
            _counter: ObjectCounter::new("TaskRef"),
            inner: Arc::new(f),
        }
    }

    /// Executes the task.
    ///
    /// If the task was created from C, will panic if the task's host lock isn't held.
    pub fn execute(&self, host: &Host) {
        // Validate the handle before invoking the stored callback.
        self.magic.debug_check();
        (self.inner)(host)
    }
}
// Marker-trait impls — presumably compile-time assertions that `TaskRef` is
// `Send` and `Sync` (which holds since the `Arc` payload is `Send + Sync`);
// confirm against the `IsSend`/`IsSync` definitions in `utility`.
impl IsSend for TaskRef {}
impl IsSync for TaskRef {}
impl std::fmt::Debug for TaskRef {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.magic.debug_check();
        // The stored `Fn` trait object has no `Debug` impl; the address of
        // its allocation is the most useful identity we can print.
        let callback_addr = Arc::as_ptr(&self.inner);
        f.debug_struct("TaskRef")
            .field("magic", &self.magic)
            .field("inner", &callback_addr)
            .finish()
    }
}
impl PartialEq for TaskRef {
    /// Two `TaskRef`s are equal if they point to the same task object.
    fn eq(&self, other: &Self) -> bool {
        // Validate both handles before comparing them.
        for task_ref in [self, other] {
            task_ref.magic.debug_check();
        }
        // Identity comparison of the shared allocation — not structural
        // equality of the callbacks.
        Arc::ptr_eq(&self.inner, &other.inner)
    }
}
impl Eq for TaskRef {}
/// C-ABI entry points for creating, executing, and dropping `TaskRef`s from C.
pub mod export {
    use shadow_shim_helper_rs::util::SyncSendPointer;
    use shadow_shim_helper_rs::{notnull::notnull_mut, HostId};
    use super::*;
    use crate::utility::HostTreePointer;
    /// The C task callback: receives the host plus the opaque `object` and
    /// `argument` pointers supplied at task creation.
    pub type TaskCallbackFunc =
        extern "C-unwind" fn(*const Host, *mut libc::c_void, *mut libc::c_void);
    /// Optional destructor for the task's `object` pointer.
    pub type TaskObjectFreeFunc = Option<extern "C-unwind" fn(*mut libc::c_void)>;
    /// Optional destructor for the task's `argument` pointer.
    pub type TaskArgumentFreeFunc = Option<extern "C-unwind" fn(*mut libc::c_void)>;
    /// Compatibility struct for creating a `TaskRef` from function pointers.
    /// Host-bound variant: the pointers are wrapped in `HostTreePointer`, so
    /// reading them is `unsafe` and presumably checked against the host lock
    /// — confirm against `HostTreePointer::ptr`.
    struct CTaskHostTreePtrs {
        callback: TaskCallbackFunc,
        object: HostTreePointer<libc::c_void>,
        argument: HostTreePointer<libc::c_void>,
        object_free: TaskObjectFreeFunc,
        argument_free: TaskArgumentFreeFunc,
    }
    impl CTaskHostTreePtrs {
        /// # Safety
        ///
        /// Given that the host lock is held when execution of a callback
        /// starts, they must not cause `object` or `argument` to be
        /// dereferenced without the host lock held. (e.g. by releasing the host
        /// lock or exfiltrating the pointers to be dereferenced by other code
        /// that might not hold the lock).
        unsafe fn new(
            callback: TaskCallbackFunc,
            object: HostTreePointer<libc::c_void>,
            argument: HostTreePointer<libc::c_void>,
            object_free: TaskObjectFreeFunc,
            argument_free: TaskArgumentFreeFunc,
        ) -> Self {
            Self {
                callback,
                object,
                argument,
                object_free,
                argument_free,
            }
        }
        /// Panics if host lock for `object` and `argument` aren't held.
        fn execute(&self, host: *const Host) {
            // SAFETY: `ptr()` enforces (per the doc above) that the host lock
            // is held when the raw pointers are extracted.
            (self.callback)(host, unsafe { self.object.ptr() }, unsafe {
                self.argument.ptr()
            })
        }
    }
    impl Drop for CTaskHostTreePtrs {
        // Runs the C-provided destructors (when given) exactly once, on the
        // last drop of the containing task. `object` is freed before
        // `argument`; C callers may rely on that order.
        fn drop(&mut self) {
            if let Some(object_free) = self.object_free {
                // SAFETY: same host-lock requirement as in `execute` above.
                let ptr = unsafe { self.object.ptr() };
                object_free(ptr);
            }
            if let Some(argument_free) = self.argument_free {
                let ptr = unsafe { self.argument.ptr() };
                argument_free(ptr);
            }
        }
    }
    /// Compatibility struct for creating a `TaskRef` from function pointers.
    /// Unbound variant: the pointers are plain `SyncSendPointer`s, so no
    /// host-lock check is involved when reading them; the creator promises
    /// (see `new`) that the callbacks are safe to call from any thread.
    struct CTaskSyncSendPtrs {
        callback: TaskCallbackFunc,
        object: SyncSendPointer<libc::c_void>,
        argument: SyncSendPointer<libc::c_void>,
        object_free: TaskObjectFreeFunc,
        argument_free: TaskArgumentFreeFunc,
    }
    impl CTaskSyncSendPtrs {
        /// # Safety
        ///
        /// callbacks must be safe to call from another thread, with the given
        /// `object` and `argument`. If `object` and/or `argument` require the
        /// host lock to be held by the calling thread to access safely, use
        /// CTaskHostTreePtrs instead.
        unsafe fn new(
            callback: TaskCallbackFunc,
            object: SyncSendPointer<libc::c_void>,
            argument: SyncSendPointer<libc::c_void>,
            object_free: TaskObjectFreeFunc,
            argument_free: TaskArgumentFreeFunc,
        ) -> Self {
            Self {
                callback,
                object,
                argument,
                object_free,
                argument_free,
            }
        }
        /// Calls the callback with the stored raw pointers. Unlike
        /// `CTaskHostTreePtrs::execute`, no host-lock check is involved here:
        /// safety was promised up front by the caller of `new`.
        fn execute(&self, host: *const Host) {
            (self.callback)(host, self.object.ptr(), self.argument.ptr())
        }
    }
    impl Drop for CTaskSyncSendPtrs {
        // Runs the C-provided destructors (when given) exactly once, on the
        // last drop of the containing task; `object` is freed before
        // `argument`, mirroring `CTaskHostTreePtrs`.
        fn drop(&mut self) {
            if let Some(object_free) = self.object_free {
                let ptr = self.object.ptr();
                object_free(ptr);
            }
            if let Some(argument_free) = self.argument_free {
                let ptr = self.argument.ptr();
                argument_free(ptr);
            }
        }
    }
    /// Create a new reference-counted task that can only be executed on the
    /// given host. The callbacks can safely assume that they will only be called
    /// with the lock for the specified host held.
    ///
    /// # Safety
    ///
    /// * `object` and `argument` must meet the requirements
    ///   for `HostTreePointer::new`.
    /// * Given that the host lock is held when execution of a callback
    ///   starts, they must not cause `object` or `argument` to be dereferenced
    ///   without the host lock held. (e.g. by releasing the host lock or exfiltrating
    ///   the pointers to be dereferenced by other code that might not hold the lock).
    ///
    /// There must still be some coordination between the creator of the TaskRef
    /// and the callers of `taskref_execute` and `taskref_drop` to ensure that
    /// the callbacks don't conflict with other accesses in the same thread
    /// (e.g. that the caller isn't holding a Rust mutable reference to one of
    /// the pointers while the callback transforms the pointer into another Rust
    /// reference).
    #[no_mangle]
    pub unsafe extern "C-unwind" fn taskref_new_bound(
        host_id: HostId,
        callback: TaskCallbackFunc,
        object: *mut libc::c_void,
        argument: *mut libc::c_void,
        object_free: TaskObjectFreeFunc,
        argument_free: TaskArgumentFreeFunc,
    ) -> *mut TaskRef {
        // SAFETY: forwarded from this function's own safety contract.
        let objs = unsafe {
            CTaskHostTreePtrs::new(
                callback,
                HostTreePointer::new_for_host(host_id, object),
                HostTreePointer::new_for_host(host_id, argument),
                object_free,
                argument_free,
            )
        };
        // The closure owns `objs`; the C destructors run when the last
        // `TaskRef` clone is dropped.
        let task = TaskRef::new(move |host: &Host| objs.execute(host));
        // It'd be nice if we could use Arc::into_raw here, avoiding a level of
        // pointer indirection. Unfortunately that doesn't work because of the
        // internal dynamic Trait object, making the resulting pointer non-ABI
        // safe.
        Box::into_raw(Box::new(task))
    }
    /// Create a new reference-counted task that may be executed on any Host.
    ///
    /// # Safety
    ///
    /// * The callbacks must be safe to call with `object` and `argument`
    ///   with *any* Host. (e.g. even if task is expected to execute on another Host,
    ///   must be safe to execute or free the Task from the current Host.)
    ///
    /// There must still be some coordination between the creator of the TaskRef
    /// and the callers of `taskref_execute` and `taskref_drop` to ensure that
    /// the callbacks don't conflict with other accesses in the same thread
    /// (e.g. that the caller isn't holding a Rust mutable reference to one of
    /// the pointers while the callback transforms the pointer into another Rust
    /// reference).
    #[no_mangle]
    pub unsafe extern "C-unwind" fn taskref_new_unbound(
        callback: TaskCallbackFunc,
        object: *mut libc::c_void,
        argument: *mut libc::c_void,
        object_free: TaskObjectFreeFunc,
        argument_free: TaskArgumentFreeFunc,
    ) -> *mut TaskRef {
        // SAFETY: forwarded from this function's own safety contract.
        let objs = unsafe {
            CTaskSyncSendPtrs::new(
                callback,
                SyncSendPointer::new(object),
                SyncSendPointer::new(argument),
                object_free,
                argument_free,
            )
        };
        // The closure owns `objs`; the C destructors run when the last
        // `TaskRef` clone is dropped.
        let task = TaskRef::new(move |host: &Host| objs.execute(host));
        // It'd be nice if we could use Arc::into_raw here, avoiding a level of
        // pointer indirection. Unfortunately that doesn't work because of the
        // internal dynamic Trait object, making the resulting pointer non-ABI
        // safe.
        Box::into_raw(Box::new(task))
    }
    /// Destroys this reference to the `Task`, dropping the `Task` if no references remain.
    ///
    /// Panics if task's Host lock isn't held.
    ///
    /// # Safety
    ///
    /// `task` must be legally dereferencable.
    #[no_mangle]
    pub unsafe extern "C-unwind" fn taskref_drop(task: *mut TaskRef) {
        // SAFETY: `task` came from `Box::into_raw` in one of the constructors
        // above, per this function's safety contract; `notnull_mut` rejects NULL.
        drop(unsafe { Box::from_raw(notnull_mut(task)) });
    }
}