use std::collections::{BTreeSet, HashMap};

use log::*;
use shadow_shim_helper_rs::explicit_drop::ExplicitDrop;
use shadow_shim_helper_rs::syscall_types::SyscallReg;

use crate::host::descriptor::Descriptor;
use crate::host::host::Host;
use crate::utility::ObjectCounter;
use crate::utility::callback_queue::CallbackQueue;

/// POSIX requires fds to be assigned as `libc::c_int`, so we can't allow any fds larger than this.
pub const FD_MAX: u32 = i32::MAX as u32;

/// Map of file handles to file descriptors. Typically owned by a
/// [`Thread`][crate::host::thread::Thread].
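///
/// A rough usage sketch (illustrative only; constructing a [`Descriptor`] normally
/// requires a full host environment, so this is not a doc-test):
/// ```ignore
/// let mut table = DescriptorTable::new();
/// let fd = table.register_descriptor(desc).ok().unwrap();
/// assert!(table.get(fd).is_some());
/// let desc = table.deregister_descriptor(fd).unwrap();
/// ```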
#[derive(Clone)]
pub struct DescriptorTable {
    descriptors: HashMap<DescriptorHandle, Descriptor>,

    // Indices less than `next_index` that are known to be available.
    available_indices: BTreeSet<u32>,

    // Lowest index not in `available_indices` that *might* be available. We still need to verify
    // availability in `descriptors`, though.
    next_index: u32,

    _counter: ObjectCounter,
}

impl DescriptorTable {
    pub fn new() -> Self {
        DescriptorTable {
            descriptors: HashMap::new(),
            available_indices: BTreeSet::new(),
            next_index: 0,
            _counter: ObjectCounter::new("DescriptorTable"),
        }
    }

    /// Add the descriptor at an unused index, and return the index. If the descriptor could not be
    /// added, the descriptor is returned in the `Err`.
    fn add(
        &mut self,
        descriptor: Descriptor,
        min_index: DescriptorHandle,
    ) -> Result<DescriptorHandle, Descriptor> {
        let idx = if let Some(idx) = self.available_indices.range(min_index.val()..).next() {
            // Un-borrow from `available_indices`.
            let idx = *idx;
            // Take from `available_indices`
            trace!("Reusing available index {}", idx);
            self.available_indices.remove(&idx);
            idx
        } else {
            // Start our search at either the next likely available index or the minimum index,
            // whichever is larger.
            let mut idx = std::cmp::max(self.next_index, min_index.val());

            // Check if this index is out of range.
            if idx > FD_MAX {
                return Err(descriptor);
            }

            // Only update next_index if we started at it, otherwise there may be other
            // available indexes lower than idx.
            let should_update_next_index = idx == self.next_index;

            // Skip past any indexes that are in use. This can happen after
            // calling `set` with a value greater than `next_index`.
            while self
                .descriptors
                .contains_key(&DescriptorHandle::new(idx).unwrap())
            {
                trace!("Skipping past in-use index {}", idx);

                // Check if the next index is out of range.
                if idx >= FD_MAX {
                    return Err(descriptor);
                }

                // Won't overflow because of the check above.
                idx += 1;
            }

            if should_update_next_index {
                self.next_index = idx + 1;
            }

            // Take the next index.
            trace!("Using index {}", idx);
            idx
        };

        let idx = DescriptorHandle::new(idx).unwrap();

        let prev = self.descriptors.insert(idx, descriptor);
        assert!(prev.is_none(), "Already a descriptor at {}", idx);

        Ok(idx)
    }

    // Call after inserting to `available_indices`, to free any that are contiguous
    // with `next_index`.
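    //
    // For example (illustrative): if `next_index == 6` and `available_indices == {4, 5}`,
    // two iterations of the loop below merge the tail, leaving `next_index == 4` and the
    // set empty.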
    fn trim_tail(&mut self) {
        while let Some(last_in_available) = self.available_indices.iter().next_back().copied() {
            if (last_in_available + 1) == self.next_index {
                // Last entry in available_indices is adjacent to next_index.
                // We can merge them, freeing an entry in `available_indices`.
                self.next_index -= 1;
                self.available_indices.remove(&last_in_available);
            } else {
                break;
            }
        }
    }

    /// Get the descriptor at `idx`, if any.
    pub fn get(&self, idx: DescriptorHandle) -> Option<&Descriptor> {
        self.descriptors.get(&idx)
    }

    /// Get a mutable reference to the descriptor at `idx`, if any.
    pub fn get_mut(&mut self, idx: DescriptorHandle) -> Option<&mut Descriptor> {
        self.descriptors.get_mut(&idx)
    }

    /// Insert a descriptor at `index`. If a descriptor is already present at that index, it is
    /// unregistered from that index and returned.
    #[must_use]
    fn set(&mut self, index: DescriptorHandle, descriptor: Descriptor) -> Option<Descriptor> {
        // We ensure the index is no longer in `self.available_indices`. We *don't* ensure
        // `self.next_index` is > `index`, since that'd require adding the indices in between to
        // `self.available_indices`. It uses less memory and is no more expensive to iterate when
        // *using* `self.available_indices` instead.
        self.available_indices.remove(&index.val());

        let prev = self.descriptors.insert(index, descriptor);

        if prev.is_some() {
            trace!("Overwriting index {}", index);
        } else {
            trace!("Setting to unused index {}", index);
        }

        prev
    }

    /// Register a descriptor and return its fd handle. Equivalent to
    /// [`register_descriptor_with_min_fd(desc, 0)`][Self::register_descriptor_with_min_fd]. If the
    /// descriptor could not be added, the descriptor is returned in the `Err`.
    pub fn register_descriptor(
        &mut self,
        desc: Descriptor,
    ) -> Result<DescriptorHandle, Descriptor> {
        const ZERO: DescriptorHandle = match DescriptorHandle::new(0) {
            Some(x) => x,
            None => unreachable!(),
        };
        self.add(desc, ZERO)
    }

    /// Register a descriptor and return its fd handle. If the descriptor could not be added, the
    /// descriptor is returned in the `Err`.
    pub fn register_descriptor_with_min_fd(
        &mut self,
        desc: Descriptor,
        min_fd: DescriptorHandle,
    ) -> Result<DescriptorHandle, Descriptor> {
        self.add(desc, min_fd)
    }

    /// Register a descriptor with a given fd handle and return the descriptor that it replaced.
    #[must_use]
    pub fn register_descriptor_with_fd(
        &mut self,
        desc: Descriptor,
        new_fd: DescriptorHandle,
    ) -> Option<Descriptor> {
        self.set(new_fd, desc)
    }

    /// Deregister the descriptor with the given fd handle and return it.
    #[must_use]
    pub fn deregister_descriptor(&mut self, fd: DescriptorHandle) -> Option<Descriptor> {
        let maybe_descriptor = self.descriptors.remove(&fd);
        self.available_indices.insert(fd.val());
        self.trim_tail();
        maybe_descriptor
    }

    /// Remove and return all descriptors.
    pub fn remove_all(&mut self) -> impl Iterator<Item = Descriptor> {
        // reset the descriptor table
        let old_self = std::mem::replace(self, Self::new());
        // return the old descriptors
        old_self.descriptors.into_values()
    }

    /// Remove and return all descriptors in the range. If you want to remove all descriptors, you
    /// should use [`remove_all`](Self::remove_all).
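    ///
    /// A rough sketch of the intended use (illustrative; assumes a `DescriptorTable`
    /// named `table`, e.g. dropping everything above the standard streams):
    /// ```ignore
    /// let lowest = DescriptorHandle::new(3).unwrap();
    /// let removed: Vec<_> = table.remove_range(lowest..).collect();
    /// ```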
    pub fn remove_range(
        &mut self,
        range: impl std::ops::RangeBounds<DescriptorHandle>,
    ) -> impl Iterator<Item = Descriptor> {
        // This code is not very efficient but it shouldn't be called often, so it should be fine
        // for now. If we wanted something more efficient, we'd need to redesign the descriptor
        // table to not use a hash map.

        let fds: Vec<_> = self
            .iter()
            .filter_map(|(fd, _)| range.contains(fd).then_some(*fd))
            .collect();

        let mut descriptors = Vec::with_capacity(fds.len());
        for fd in fds {
            descriptors.push(self.deregister_descriptor(fd).unwrap());
        }

        descriptors.into_iter()
    }

    pub fn iter(&self) -> impl Iterator<Item = (&DescriptorHandle, &Descriptor)> {
        self.descriptors.iter()
    }

    pub fn iter_mut(&mut self) -> impl Iterator<Item = (&DescriptorHandle, &mut Descriptor)> {
        self.descriptors.iter_mut()
    }
}

impl Default for DescriptorTable {
    fn default() -> Self {
        Self::new()
    }
}

impl ExplicitDrop for DescriptorTable {
    type ExplicitDropParam = Host;
    type ExplicitDropResult = ();

    fn explicit_drop(mut self, host: &Host) {
        // Drop all descriptors using a callback queue.
        //
        // Doing this explicitly instead of letting `DescriptorTable`'s `Drop`
        // implementation implicitly close these individually is a performance
        // optimization so that all descriptors are closed before any of their
        // callbacks run.
        let descriptors = self.remove_all();
        CallbackQueue::queue_and_run_with_legacy(|cb_queue| {
            for desc in descriptors {
                desc.close(host, cb_queue);
            }
        });
    }
}

/// A handle for a file descriptor.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct DescriptorHandle(u32);

impl DescriptorHandle {
    /// Returns `Some` if `fd` is no greater than [`FD_MAX`]. Can be used in `const` contexts.
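    ///
    /// A small usage sketch (illustrative; the `STDIN` name is hypothetical):
    /// ```ignore
    /// const STDIN: DescriptorHandle = match DescriptorHandle::new(0) {
    ///     Some(fd) => fd,
    ///     None => unreachable!(),
    /// };
    /// assert!(DescriptorHandle::new(FD_MAX + 1).is_none());
    /// ```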
    pub const fn new(fd: u32) -> Option<Self> {
        if fd > FD_MAX {
            return None;
        }

        Some(DescriptorHandle(fd))
    }

    pub fn val(&self) -> u32 {
        self.0
    }
}

impl std::fmt::Display for DescriptorHandle {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.0.fmt(f)
    }
}

impl From<DescriptorHandle> for u32 {
    fn from(x: DescriptorHandle) -> u32 {
        x.0
    }
}

impl From<DescriptorHandle> for u64 {
    fn from(x: DescriptorHandle) -> u64 {
        x.0.into()
    }
}

impl From<DescriptorHandle> for i32 {
    fn from(x: DescriptorHandle) -> i32 {
        const { assert!(FD_MAX <= i32::MAX as u32) };
        // the constructor makes sure this won't panic
        x.0.try_into().unwrap()
    }
}

impl From<DescriptorHandle> for i64 {
    fn from(x: DescriptorHandle) -> i64 {
        x.0.into()
    }
}

impl From<DescriptorHandle> for SyscallReg {
    fn from(x: DescriptorHandle) -> SyscallReg {
        x.0.into()
    }
}

impl TryFrom<u32> for DescriptorHandle {
    type Error = DescriptorHandleError;
    fn try_from(x: u32) -> Result<Self, Self::Error> {
        DescriptorHandle::new(x).ok_or(DescriptorHandleError())
    }
}

impl TryFrom<u64> for DescriptorHandle {
    // use the same error type as the conversion from u32
    type Error = <DescriptorHandle as TryFrom<u32>>::Error;
    fn try_from(x: u64) -> Result<Self, Self::Error> {
        u32::try_from(x)
            .or(Err(DescriptorHandleError()))?
            .try_into()
    }
}

impl TryFrom<i32> for DescriptorHandle {
    type Error = DescriptorHandleError;
    fn try_from(x: i32) -> Result<Self, Self::Error> {
        x.try_into()
            .ok()
            .and_then(DescriptorHandle::new)
            .ok_or(DescriptorHandleError())
    }
}

impl TryFrom<i64> for DescriptorHandle {
    // use the same error type as the conversion from i32
    type Error = <DescriptorHandle as TryFrom<i32>>::Error;
    fn try_from(x: i64) -> Result<Self, Self::Error> {
        i32::try_from(x)
            .or(Err(DescriptorHandleError()))?
            .try_into()
    }
}

/// The handle is not valid.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct DescriptorHandleError();

impl std::fmt::Display for DescriptorHandleError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Not a valid descriptor handle")
    }
}

impl std::error::Error for DescriptorHandleError {}
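
// A minimal sketch of unit tests for the `DescriptorHandle` conversions above
// (not from the original source; the test names and cases are illustrative).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn handle_round_trips_through_integer_types() {
        let fd = DescriptorHandle::new(7).unwrap();
        assert_eq!(u32::from(fd), 7);
        assert_eq!(u64::from(fd), 7);
        assert_eq!(i32::from(fd), 7);
        assert_eq!(i64::from(fd), 7);
    }

    #[test]
    fn out_of_range_values_are_rejected() {
        // Negative fds and fds above `FD_MAX` are not valid handles.
        assert_eq!(
            DescriptorHandle::try_from(-1i32),
            Err(DescriptorHandleError())
        );
        assert_eq!(
            DescriptorHandle::try_from(u64::MAX),
            Err(DescriptorHandleError())
        );
        assert!(DescriptorHandle::new(FD_MAX).is_some());
        assert!(DescriptorHandle::new(FD_MAX + 1).is_none());
    }
}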