indexmap/map/core/raw.rs

1#![allow(unsafe_code)]
2//! This module encapsulates the `unsafe` access to `hashbrown::raw::RawTable`,
3//! mostly in dealing with its bucket "pointers".
4
5use super::{equivalent, get_hash, Bucket, HashValue, IndexMapCore};
6use hashbrown::raw::RawTable;
7
8type RawBucket = hashbrown::raw::Bucket<usize>;
9
10/// Inserts many entries into a raw table without reallocating.
11///
12/// ***Panics*** if there is not sufficient capacity already.
13pub(super) fn insert_bulk_no_grow<K, V>(indices: &mut RawTable<usize>, entries: &[Bucket<K, V>]) {
14    assert!(indices.capacity() - indices.len() >= entries.len());
15    for entry in entries {
16        // SAFETY: we asserted that sufficient capacity exists for all entries.
17        unsafe {
18            indices.insert_no_grow(entry.hash.get(), indices.len());
19        }
20    }
21}
22
/// Debug adapter that prints the raw index table as a list of `usize` values.
#[cfg(feature = "test_debug")]
pub(super) struct DebugIndices<'a>(pub &'a RawTable<usize>);

#[cfg(feature = "test_debug")]
impl core::fmt::Debug for DebugIndices<'_> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let mut list = f.debug_list();
        // SAFETY: we're not letting any of the buckets escape this function;
        // each bucket is only borrowed long enough to read its `usize` entry.
        unsafe {
            for raw_bucket in self.0.iter() {
                list.entry(raw_bucket.as_ref());
            }
        }
        list.finish()
    }
}
34
impl<K, V> IndexMapCore<K, V> {
    /// Sweep the whole table to erase indices start..end
    ///
    /// Indices in `start..end` are removed from the hash table, and indices at
    /// or above `end` are shifted down by the width of the erased range so they
    /// keep pointing at the right `entries` positions.
    pub(super) fn erase_indices_sweep(&mut self, start: usize, end: usize) {
        // SAFETY: we're not letting any of the buckets escape this function
        unsafe {
            let offset = end - start;
            for bucket in self.indices.iter() {
                let i = bucket.as_mut();
                if *i >= end {
                    *i -= offset;
                } else if *i >= start {
                    // NOTE(review): this erases a bucket yielded by the raw
                    // iterator mid-iteration — presumably permitted by
                    // hashbrown's raw API; confirm against `RawTable::erase`.
                    self.indices.erase(bucket);
                }
            }
        }
    }

    /// Search for a key in the table and return `Ok(entry_index)` if found.
    /// Otherwise, insert the key and return `Err(new_index)`.
    ///
    /// Note that hashbrown may resize the table to reserve space for insertion,
    /// even before checking if it's already present, so this is somewhat biased
    /// towards new items.
    pub(crate) fn find_or_insert(&mut self, hash: HashValue, key: &K) -> Result<usize, usize>
    where
        K: Eq,
    {
        let hash = hash.get();
        let eq = equivalent(key, &self.entries);
        let hasher = get_hash(&self.entries);
        // SAFETY: We're not mutating between find and read/insert.
        unsafe {
            match self.indices.find_or_find_insert_slot(hash, eq, hasher) {
                Ok(raw_bucket) => Ok(*raw_bucket.as_ref()),
                Err(slot) => {
                    // Not found: the new entry will be appended at the end of
                    // `entries`, so its index is the current length.
                    let index = self.indices.len();
                    self.indices.insert_in_slot(hash, slot, index);
                    Err(index)
                }
            }
        }
    }

    /// Find the raw-table entry whose key satisfies `is_match` under `hash`,
    /// or return `Err(self)` (giving the map back) when no entry matches.
    pub(super) fn raw_entry(
        &mut self,
        hash: HashValue,
        mut is_match: impl FnMut(&K) -> bool,
    ) -> Result<RawTableEntry<'_, K, V>, &mut Self> {
        let entries = &*self.entries;
        let eq = move |&i: &usize| is_match(&entries[i].key);
        match self.indices.find(hash.get(), eq) {
            // SAFETY: The bucket is valid because we *just* found it in this map.
            Some(raw_bucket) => Ok(unsafe { RawTableEntry::new(self, raw_bucket) }),
            None => Err(self),
        }
    }

    /// Look up the raw-table entry for the pair already stored at `index` in
    /// `entries`, or `None` if `index` is out of bounds.
    pub(super) fn index_raw_entry(&mut self, index: usize) -> Option<RawTableEntry<'_, K, V>> {
        let hash = self.entries.get(index)?.hash;
        // Match on the stored index itself, not the key, to find this exact slot.
        let raw_bucket = self.indices.find(hash.get(), move |&i| i == index)?;
        // SAFETY: The bucket is valid because we *just* found it in this map.
        Some(unsafe { RawTableEntry::new(self, raw_bucket) })
    }

    /// Iterate over mutable references to all stored indices (in table order).
    pub(super) fn indices_mut(&mut self) -> impl Iterator<Item = &mut usize> {
        // SAFETY: we're not letting any of the buckets escape this function,
        // only the item references that are appropriately bound to `&mut self`.
        unsafe { self.indices.iter().map(|bucket| bucket.as_mut()) }
    }
}
105
/// A view into an occupied raw entry in an `IndexMap`.
///
/// Holds the map exclusively (`&mut`) together with the raw bucket found for
/// one of its entries, so the bucket cannot be invalidated while this exists.
// SAFETY: The lifetime of the map reference also constrains the raw bucket,
// which is essentially a raw pointer into the map indices.
pub(super) struct RawTableEntry<'a, K, V> {
    map: &'a mut IndexMapCore<K, V>,
    raw_bucket: RawBucket,
}

// `hashbrown::raw::Bucket` is only `Send`, not `Sync`.
// SAFETY: `&self` only accesses the bucket to read it.
unsafe impl<K: Sync, V: Sync> Sync for RawTableEntry<'_, K, V> {}
117
impl<'a, K, V> RawTableEntry<'a, K, V> {
    /// The caller must ensure that the `raw_bucket` is valid in the given `map`,
    /// and then we hold the `&mut` reference for exclusive access.
    #[inline]
    unsafe fn new(map: &'a mut IndexMapCore<K, V>, raw_bucket: RawBucket) -> Self {
        Self { map, raw_bucket }
    }

    /// Return the index of the key-value pair
    #[inline]
    pub(super) fn index(&self) -> usize {
        // SAFETY: we have `&mut map` keeping the bucket stable
        unsafe { *self.raw_bucket.as_ref() }
    }

    /// Return a shared reference to this entry's key-value bucket in `entries`.
    #[inline]
    pub(super) fn bucket(&self) -> &Bucket<K, V> {
        &self.map.entries[self.index()]
    }

    /// Return a mutable reference to this entry's key-value bucket in `entries`.
    #[inline]
    pub(super) fn bucket_mut(&mut self) -> &mut Bucket<K, V> {
        let index = self.index();
        &mut self.map.entries[index]
    }

    /// Consume the view, returning a mutable bucket reference that borrows the
    /// map for the full lifetime `'a`.
    #[inline]
    pub(super) fn into_bucket(self) -> &'a mut Bucket<K, V> {
        let index = self.index();
        &mut self.map.entries[index]
    }

    /// Remove the index from indices, leaving the actual entries to the caller.
    pub(super) fn remove_index(self) -> (&'a mut IndexMapCore<K, V>, usize) {
        // SAFETY: This is safe because it can only happen once (self is consumed)
        // and map.indices have not been modified since entry construction
        let (index, _slot) = unsafe { self.map.indices.remove(self.raw_bucket) };
        (self.map, index)
    }

    /// Take no action, just return the index and the original map reference.
    #[inline]
    pub(super) fn into_inner(self) -> (&'a mut IndexMapCore<K, V>, usize) {
        let index = self.index();
        (self.map, index)
    }
}