// crossbeam_utils/atomic/atomic_cell.rs

// Necessary for implementing atomic methods for `AtomicUnit`
#![allow(clippy::unit_arg)]

use crate::primitive::sync::atomic::{self, Ordering};
use crate::CachePadded;
use core::cell::UnsafeCell;
use core::cmp;
use core::fmt;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::ptr;

use super::seq_lock::SeqLock;

/// A thread-safe mutable memory location.
///
/// This type is equivalent to [`Cell`], except it can also be shared among multiple threads.
///
/// Operations on `AtomicCell`s use atomic instructions whenever possible, and synchronize using
/// global locks otherwise. You can call [`AtomicCell::<T>::is_lock_free()`] to check whether
/// atomic instructions or locks will be used.
///
/// Atomic loads use the [`Acquire`] ordering and atomic stores use the [`Release`] ordering.
///
/// [`Cell`]: std::cell::Cell
/// [`AtomicCell::<T>::is_lock_free()`]: AtomicCell::is_lock_free
/// [`Acquire`]: std::sync::atomic::Ordering::Acquire
/// [`Release`]: std::sync::atomic::Ordering::Release
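///
/// # Examples
///
/// A minimal sketch of sharing an `AtomicCell` between threads. `std::thread::scope`
/// (Rust 1.63+) is just one way to share a reference; any other mechanism works the same:
///
/// ```
/// use crossbeam_utils::atomic::AtomicCell;
///
/// let a = AtomicCell::new(0usize);
///
/// std::thread::scope(|s| {
///     for _ in 0..4 {
///         s.spawn(|| {
///             a.fetch_add(1);
///         });
///     }
/// });
///
/// assert_eq!(a.load(), 4);
/// ```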
#[repr(transparent)]
pub struct AtomicCell<T> {
    /// The inner value.
    ///
    /// If this value can be transmuted into a primitive atomic type, it will be treated as such.
    /// Otherwise, all potentially concurrent operations on this data will be protected by a global
    /// lock.
    ///
    /// Using MaybeUninit to prevent code outside the cell from observing partially initialized state:
    /// <https://github.com/crossbeam-rs/crossbeam/issues/833>
    ///
    /// Note:
    /// - we'll never store uninitialized `T` due to our API only using initialized `T`.
    /// - this `MaybeUninit` does *not* fix <https://github.com/crossbeam-rs/crossbeam/issues/315>.
    value: UnsafeCell<MaybeUninit<T>>,
}

unsafe impl<T: Send> Send for AtomicCell<T> {}
unsafe impl<T: Send> Sync for AtomicCell<T> {}

impl<T> UnwindSafe for AtomicCell<T> {}
impl<T> RefUnwindSafe for AtomicCell<T> {}

impl<T> AtomicCell<T> {
    /// Creates a new atomic cell initialized with `val`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// ```
    pub const fn new(val: T) -> AtomicCell<T> {
        AtomicCell {
            value: UnsafeCell::new(MaybeUninit::new(val)),
        }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// let v = a.into_inner();
    ///
    /// assert_eq!(v, 7);
    /// ```
    pub fn into_inner(self) -> T {
        let this = ManuallyDrop::new(self);
        // SAFETY:
        // - passing `self` by value guarantees that no other threads are concurrently
        //   accessing the atomic data
        // - the raw pointer passed in is valid because we got it from an owned value.
        // - `ManuallyDrop` prevents double dropping `T`
        unsafe { this.as_ptr().read() }
    }

    /// Returns `true` if operations on values of this type are lock-free.
    ///
    /// If the compiler or the platform doesn't support the necessary atomic instructions,
    /// `AtomicCell<T>` will use global locks for every potentially concurrent atomic operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// // This type is internally represented as `AtomicUsize` so we can just use atomic
    /// // operations provided by it.
    /// assert_eq!(AtomicCell::<usize>::is_lock_free(), true);
    ///
    /// // A wrapper struct around `isize`.
    /// struct Foo {
    ///     bar: isize,
    /// }
    /// // `AtomicCell<Foo>` will be internally represented as `AtomicUsize`.
    /// assert_eq!(AtomicCell::<Foo>::is_lock_free(), true);
    ///
    /// // Operations on zero-sized types are always lock-free.
    /// assert_eq!(AtomicCell::<()>::is_lock_free(), true);
    ///
    /// // Very large types cannot be represented as any of the standard atomic types, so atomic
    /// // operations on them will have to use global locks for synchronization.
    /// assert_eq!(AtomicCell::<[u8; 1000]>::is_lock_free(), false);
    /// ```
    pub const fn is_lock_free() -> bool {
        atomic_is_lock_free::<T>()
    }

    /// Stores `val` into the atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// a.store(8);
    /// assert_eq!(a.load(), 8);
    /// ```
    pub fn store(&self, val: T) {
        if mem::needs_drop::<T>() {
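            // `T` has a destructor, so the old value must be dropped rather than
            // overwritten in place; swap it out and drop it here.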
            drop(self.swap(val));
        } else {
            unsafe {
                atomic_store(self.as_ptr(), val);
            }
        }
    }

    /// Stores `val` into the atomic cell and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// assert_eq!(a.swap(8), 7);
    /// assert_eq!(a.load(), 8);
    /// ```
    pub fn swap(&self, val: T) -> T {
        unsafe { atomic_swap(self.as_ptr(), val) }
    }

    /// Returns a raw pointer to the underlying data in this atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(5);
    ///
    /// let ptr = a.as_ptr();
    /// ```
    #[inline]
    pub fn as_ptr(&self) -> *mut T {
        self.value.get().cast::<T>()
    }
}

impl<T: Default> AtomicCell<T> {
    /// Takes the value of the atomic cell, leaving `Default::default()` in its place.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(5);
    /// let five = a.take();
    ///
    /// assert_eq!(five, 5);
    /// assert_eq!(a.into_inner(), 0);
    /// ```
    pub fn take(&self) -> T {
        self.swap(Default::default())
    }
}

impl<T: Copy> AtomicCell<T> {
    /// Loads a value from the atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// ```
    pub fn load(&self) -> T {
        unsafe { atomic_load(self.as_ptr()) }
    }
}

impl<T: Copy + Eq> AtomicCell<T> {
    /// If the current value equals `current`, stores `new` into the atomic cell.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(1);
    ///
    /// assert_eq!(a.compare_and_swap(2, 3), 1);
    /// assert_eq!(a.load(), 1);
    ///
    /// assert_eq!(a.compare_and_swap(1, 2), 1);
    /// assert_eq!(a.load(), 2);
    /// ```
    // TODO: remove in the next major version.
    #[deprecated(note = "Use `compare_exchange` instead")]
    pub fn compare_and_swap(&self, current: T, new: T) -> T {
        match self.compare_exchange(current, new) {
            Ok(v) => v,
            Err(v) => v,
        }
    }

    /// If the current value equals `current`, stores `new` into the atomic cell.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(1);
    ///
    /// assert_eq!(a.compare_exchange(2, 3), Err(1));
    /// assert_eq!(a.load(), 1);
    ///
    /// assert_eq!(a.compare_exchange(1, 2), Ok(1));
    /// assert_eq!(a.load(), 2);
    /// ```
    pub fn compare_exchange(&self, current: T, new: T) -> Result<T, T> {
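        // `atomic_compare_exchange_weak` retries spurious failures internally, so this
        // method has strong compare-exchange semantics.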
        unsafe { atomic_compare_exchange_weak(self.as_ptr(), current, new) }
    }

    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
    /// `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been changed from other threads in
    /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
    /// only once to the stored value.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// assert_eq!(a.fetch_update(|_| None), Err(7));
    /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(7));
    /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(8));
    /// assert_eq!(a.load(), 9);
    /// ```
    #[inline]
    pub fn fetch_update<F>(&self, mut f: F) -> Result<T, T>
    where
        F: FnMut(T) -> Option<T>,
    {
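        // A classic CAS loop: apply `f` to the last observed value and try to publish the
        // result, reloading and retrying whenever another thread raced ahead of us.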
        let mut prev = self.load();
        while let Some(next) = f(prev) {
            match self.compare_exchange(prev, next) {
                x @ Ok(_) => return x,
                Err(next_prev) => prev = next_prev,
            }
        }
        Err(prev)
    }
}

// `MaybeUninit` prevents `T` from being dropped, so we need to implement `Drop`
// for `AtomicCell` to avoid leaks of non-`Copy` types.
impl<T> Drop for AtomicCell<T> {
    fn drop(&mut self) {
        if mem::needs_drop::<T>() {
            // SAFETY:
            // - the mutable reference guarantees that no other threads are concurrently accessing the atomic data
            // - the raw pointer passed in is valid because we got it from a reference
            // - `MaybeUninit` prevents double dropping `T`
            unsafe {
                self.as_ptr().drop_in_place();
            }
        }
    }
}

macro_rules! atomic {
    // If values of type `$t` can be transmuted into values of the primitive atomic type `$atomic`,
    // declares variable `$a` of type `$atomic` and executes `$atomic_op`, breaking out of the loop.
    (@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => {
        if can_transmute::<$t, $atomic>() {
            let $a: &$atomic;
            break $atomic_op;
        }
    };

    // If values of type `$t` can be transmuted into values of a primitive atomic type, declares
    // variable `$a` of that type and executes `$atomic_op`. Otherwise, just executes
    // `$fallback_op`.
    ($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => {
        loop {
            atomic!(@check, $t, AtomicUnit, $a, $atomic_op);

            atomic!(@check, $t, atomic::AtomicU8, $a, $atomic_op);
            atomic!(@check, $t, atomic::AtomicU16, $a, $atomic_op);
            atomic!(@check, $t, atomic::AtomicU32, $a, $atomic_op);
            #[cfg(target_has_atomic = "64")]
            atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op);
            // TODO: AtomicU128 is unstable
            // atomic!(@check, $t, atomic::AtomicU128, $a, $atomic_op);

            break $fallback_op;
        }
    };
}
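
// For example, `atomic_load` below invokes this macro as `atomic! { T, a, { ... }, { ... } }`:
// the expansion is a `loop` that breaks with the first branch (with `a` bound to a reference to
// the matching primitive atomic type) if `T` can be transmuted into one, and with the fallback
// branch otherwise. The `loop`/`break` trick lets either branch produce the expression's value.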

macro_rules! impl_arithmetic {
    ($t:ty, fallback, $example:tt) => {
        impl AtomicCell<$t> {
            /// Increments the current value by `val` and returns the previous value.
            ///
            /// The addition wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_add(3), 7);
            /// assert_eq!(a.load(), 10);
            /// ```
            #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
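                // No primitive atomic type is guaranteed for `$t` here, so take the global
                // lock for this address and do a plain read-modify-write under it.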
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = value.wrapping_add(val);
                old
            }

            /// Decrements the current value by `val` and returns the previous value.
            ///
            /// The subtraction wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_sub(3), 7);
            /// assert_eq!(a.load(), 4);
            /// ```
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = value.wrapping_sub(val);
                old
            }

            /// Applies bitwise "and" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_and(3), 7);
            /// assert_eq!(a.load(), 3);
            /// ```
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value &= val;
                old
            }

            /// Applies bitwise "nand" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_nand(3), 7);
            /// assert_eq!(a.load(), !(7 & 3));
            /// ```
            #[inline]
            pub fn fetch_nand(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = !(old & val);
                old
            }

            /// Applies bitwise "or" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_or(16), 7);
            /// assert_eq!(a.load(), 23);
            /// ```
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value |= val;
                old
            }

            /// Applies bitwise "xor" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_xor(2), 7);
            /// assert_eq!(a.load(), 5);
            /// ```
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value ^= val;
                old
            }

            /// Compares and sets the maximum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_max(2), 7);
            /// assert_eq!(a.load(), 7);
            /// ```
            #[inline]
            pub fn fetch_max(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = cmp::max(old, val);
                old
            }

            /// Compares and sets the minimum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_min(2), 7);
            /// assert_eq!(a.load(), 2);
            /// ```
            #[inline]
            pub fn fetch_min(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = cmp::min(old, val);
                old
            }
        }
    };
    ($t:ty, $atomic:ident, $example:tt) => {
        impl AtomicCell<$t> {
            /// Increments the current value by `val` and returns the previous value.
            ///
            /// The addition wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_add(3), 7);
            /// assert_eq!(a.load(), 10);
            /// ```
            #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
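                // If `$t` can be transmuted into a primitive atomic type, use a native
                // atomic read-modify-write instruction; otherwise fall back to the lock.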
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_add(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value = value.wrapping_add(val);
                        old
                    }
                }
            }

            /// Decrements the current value by `val` and returns the previous value.
            ///
            /// The subtraction wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_sub(3), 7);
            /// assert_eq!(a.load(), 4);
            /// ```
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_sub(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value = value.wrapping_sub(val);
                        old
                    }
                }
            }

            /// Applies bitwise "and" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_and(3), 7);
            /// assert_eq!(a.load(), 3);
            /// ```
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_and(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value &= val;
                        old
                    }
                }
            }

            /// Applies bitwise "nand" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_nand(3), 7);
            /// assert_eq!(a.load(), !(7 & 3));
            /// ```
            #[inline]
            pub fn fetch_nand(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_nand(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value = !(old & val);
                        old
                    }
                }
            }

            /// Applies bitwise "or" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_or(16), 7);
            /// assert_eq!(a.load(), 23);
            /// ```
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_or(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value |= val;
                        old
                    }
                }
            }

            /// Applies bitwise "xor" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_xor(2), 7);
            /// assert_eq!(a.load(), 5);
            /// ```
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_xor(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value ^= val;
                        old
                    }
                }
            }

            /// Compares and sets the maximum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_max(9), 7);
            /// assert_eq!(a.load(), 9);
            /// ```
            #[inline]
            pub fn fetch_max(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_max(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value = cmp::max(old, val);
                        old
                    }
                }
            }

            /// Compares and sets the minimum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_min(2), 7);
            /// assert_eq!(a.load(), 2);
            /// ```
            #[inline]
            pub fn fetch_min(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_min(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value = cmp::min(old, val);
                        old
                    }
                }
            }
        }
    };
}

impl_arithmetic!(u8, AtomicU8, "let a = AtomicCell::new(7u8);");
impl_arithmetic!(i8, AtomicI8, "let a = AtomicCell::new(7i8);");
impl_arithmetic!(u16, AtomicU16, "let a = AtomicCell::new(7u16);");
impl_arithmetic!(i16, AtomicI16, "let a = AtomicCell::new(7i16);");

impl_arithmetic!(u32, AtomicU32, "let a = AtomicCell::new(7u32);");
impl_arithmetic!(i32, AtomicI32, "let a = AtomicCell::new(7i32);");

#[cfg(target_has_atomic = "64")]
impl_arithmetic!(u64, AtomicU64, "let a = AtomicCell::new(7u64);");
#[cfg(target_has_atomic = "64")]
impl_arithmetic!(i64, AtomicI64, "let a = AtomicCell::new(7i64);");
#[cfg(not(target_has_atomic = "64"))]
impl_arithmetic!(u64, fallback, "let a = AtomicCell::new(7u64);");
#[cfg(not(target_has_atomic = "64"))]
impl_arithmetic!(i64, fallback, "let a = AtomicCell::new(7i64);");

// TODO: AtomicU128 is unstable
// impl_arithmetic!(u128, AtomicU128, "let a = AtomicCell::new(7u128);");
// impl_arithmetic!(i128, AtomicI128, "let a = AtomicCell::new(7i128);");
impl_arithmetic!(u128, fallback, "let a = AtomicCell::new(7u128);");
impl_arithmetic!(i128, fallback, "let a = AtomicCell::new(7i128);");

impl_arithmetic!(usize, AtomicUsize, "let a = AtomicCell::new(7usize);");
impl_arithmetic!(isize, AtomicIsize, "let a = AtomicCell::new(7isize);");

impl AtomicCell<bool> {
    /// Applies logical "and" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_and(true), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_and(false), true);
    /// assert_eq!(a.load(), false);
    /// ```
    #[inline]
    pub fn fetch_and(&self, val: bool) -> bool {
        atomic! {
            bool, _a,
            {
                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
                a.fetch_and(val, Ordering::AcqRel)
            },
            {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value &= val;
                old
            }
        }
    }

    /// Applies logical "nand" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_nand(false), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_nand(true), true);
    /// assert_eq!(a.load(), false);
    ///
    /// assert_eq!(a.fetch_nand(false), false);
    /// assert_eq!(a.load(), true);
    /// ```
    #[inline]
    pub fn fetch_nand(&self, val: bool) -> bool {
        atomic! {
            bool, _a,
            {
                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
                a.fetch_nand(val, Ordering::AcqRel)
            },
            {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = !(old & val);
                old
            }
        }
    }

    /// Applies logical "or" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(false);
    ///
    /// assert_eq!(a.fetch_or(false), false);
    /// assert_eq!(a.load(), false);
    ///
    /// assert_eq!(a.fetch_or(true), false);
    /// assert_eq!(a.load(), true);
    /// ```
    #[inline]
    pub fn fetch_or(&self, val: bool) -> bool {
        atomic! {
            bool, _a,
            {
                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
                a.fetch_or(val, Ordering::AcqRel)
            },
            {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value |= val;
                old
            }
        }
    }

    /// Applies logical "xor" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_xor(false), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_xor(true), true);
    /// assert_eq!(a.load(), false);
    /// ```
    #[inline]
    pub fn fetch_xor(&self, val: bool) -> bool {
        atomic! {
            bool, _a,
            {
                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
                a.fetch_xor(val, Ordering::AcqRel)
            },
            {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value ^= val;
                old
            }
        }
    }
}

impl<T: Default> Default for AtomicCell<T> {
    fn default() -> AtomicCell<T> {
        AtomicCell::new(T::default())
    }
}

impl<T> From<T> for AtomicCell<T> {
    #[inline]
    fn from(val: T) -> AtomicCell<T> {
        AtomicCell::new(val)
    }
}

impl<T: Copy + fmt::Debug> fmt::Debug for AtomicCell<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("AtomicCell")
            .field("value", &self.load())
            .finish()
    }
}

/// Returns `true` if values of type `A` can be transmuted into values of type `B`.
const fn can_transmute<A, B>() -> bool {
    // Sizes must be equal, and the alignment of `A` must be greater than or equal to that of `B`.
    (mem::size_of::<A>() == mem::size_of::<B>()) & (mem::align_of::<A>() >= mem::align_of::<B>())
}
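
// For example, `can_transmute::<u32, atomic::AtomicU32>()` is true (same size and
// alignment), while `can_transmute::<[u8; 4], atomic::AtomicU32>()` is false: the sizes
// match, but `[u8; 4]` only has alignment 1.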

/// Returns a reference to the global lock associated with the `AtomicCell` at address `addr`.
///
/// This function is used to protect atomic data which doesn't fit into any of the primitive atomic
/// types in `std::sync::atomic`. Operations on such atomics must therefore use a global lock.
///
/// However, there is not only one global lock but an array of many locks, and one of them is
/// picked based on the given address. Having many locks reduces contention and improves
/// scalability.
#[inline]
#[must_use]
fn lock(addr: usize) -> &'static SeqLock {
    // The number of locks is a prime number because we want to make sure `addr % LEN` gets
    // dispersed across all locks.
    //
    // Note that addresses are always aligned to some power of 2, depending on type `T` in
    // `AtomicCell<T>`. If `LEN` was an even number, then `addr % LEN` would be an even number,
    // too, which means only half of the locks would get utilized!
    //
    // It is also possible for addresses to accidentally get aligned to a number that is not a
    // power of 2. Consider this example:
    //
    // ```
    // #[repr(C)]
    // struct Foo {
    //     a: AtomicCell<u8>,
    //     b: u8,
    //     c: u8,
    // }
    // ```
    //
    // Now, if we have a slice of type `&[Foo]`, it is possible that field `a` in all items gets
    // stored at addresses that are multiples of 3. It'd be too bad if `LEN` was divisible by 3.
    // In order to protect from such cases, we simply choose a large prime number for `LEN`.
    const LEN: usize = 67;
    #[allow(clippy::declare_interior_mutable_const)]
    const L: CachePadded<SeqLock> = CachePadded::new(SeqLock::new());
    static LOCKS: [CachePadded<SeqLock>; LEN] = [L; LEN];

    // If the modulus is a constant number, the compiler will use crazy math to transform this into
    // a sequence of cheap arithmetic operations rather than using the slow modulo instruction.
    &LOCKS[addr % LEN]
}

/// An atomic `()`.
///
/// All operations are noops.
struct AtomicUnit;

impl AtomicUnit {
    #[inline]
    fn load(&self, _order: Ordering) {}

    #[inline]
    fn store(&self, _val: (), _order: Ordering) {}

    #[inline]
    fn swap(&self, _val: (), _order: Ordering) {}

    #[inline]
    fn compare_exchange_weak(
        &self,
        _current: (),
        _new: (),
        _success: Ordering,
        _failure: Ordering,
    ) -> Result<(), ()> {
        Ok(())
    }
}

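// Passing `true` as the atomic branch and `false` as the fallback turns the dispatch macro
// into a compile-time check: does `T` fit one of the primitive atomic types?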
/// Returns `true` if operations on `AtomicCell<T>` are lock-free.
const fn atomic_is_lock_free<T>() -> bool {
    atomic! { T, _a, true, false }
}

/// Atomically reads data from `src`.
///
/// This operation uses the `Acquire` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_load<T>(src: *mut T) -> T
where
    T: Copy,
{
    atomic! {
        T, a,
        {
            a = &*(src as *const _ as *const _);
            mem::transmute_copy(&a.load(Ordering::Acquire))
        },
        {
            let lock = lock(src as usize);

            // Try doing an optimistic read first.
            if let Some(stamp) = lock.optimistic_read() {
                // We need a volatile read here because other threads might concurrently modify the
                // value. In theory, data races are *always* UB, even if we use volatile reads and
                // discard the data when a data race is detected. The proper solution would be to
                // do atomic reads and atomic writes, but we can't atomically read and write data
                // of arbitrary size using the primitive atomic types.
                // Load as `MaybeUninit` because we may load a value that is not valid as `T`.
                let val = ptr::read_volatile(src.cast::<MaybeUninit<T>>());

                if lock.validate_read(stamp) {
                    return val.assume_init();
                }
            }

            // Grab a regular write lock so that writers don't starve this load.
            let guard = lock.write();
            let val = ptr::read(src);
            // The value hasn't been changed. Drop the guard without incrementing the stamp.
            guard.abort();
            val
        }
    }
}

/// Atomically writes `val` to `dst`.
///
/// This operation uses the `Release` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_store<T>(dst: *mut T, val: T) {
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            a.store(mem::transmute_copy(&val), Ordering::Release);
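            // The bits of `val` now live inside the atomic, so forget the original to
            // avoid dropping a value that has logically been moved.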
            mem::forget(val);
        },
        {
            let _guard = lock(dst as usize).write();
            ptr::write(dst, val);
        }
    }
}

/// Atomically swaps data at `dst` with `val`.
///
/// This operation uses the `AcqRel` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            let res = mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::AcqRel));
            mem::forget(val);
            res
        },
        {
            let _guard = lock(dst as usize).write();
            ptr::replace(dst, val)
        }
    }
}

/// Atomically compares data at `dst` to `current` and, if equal byte-for-byte, exchanges data at
/// `dst` with `new`.
///
/// Returns the old value on success, or the current value at `dst` on failure.
///
/// This operation uses the `AcqRel` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
#[allow(clippy::let_unit_value)]
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T, mut current: T, new: T) -> Result<T, T>
where
    T: Copy + Eq,
{
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            let mut current_raw = mem::transmute_copy(&current);
            let new_raw = mem::transmute_copy(&new);

            loop {
                match a.compare_exchange_weak(
                    current_raw,
                    new_raw,
                    Ordering::AcqRel,
                    Ordering::Acquire,
                ) {
                    Ok(_) => break Ok(current),
                    Err(previous_raw) => {
                        let previous = mem::transmute_copy(&previous_raw);

                        if !T::eq(&previous, &current) {
                            break Err(previous);
                        }

                        // The compare-exchange operation has failed and didn't store `new`. The
                        // failure is either spurious, or `previous` was semantically equal to
                        // `current` but not byte-equal. Let's retry with `previous` as the new
                        // `current`.
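                        // (The latter can happen for types with padding bytes: two values
                        // may compare equal with `Eq` while their raw bytes differ.)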
                        current = previous;
                        current_raw = previous_raw;
                    }
                }
            }
        },
        {
            let guard = lock(dst as usize).write();

            if T::eq(&*dst, &current) {
                Ok(ptr::replace(dst, new))
            } else {
                let val = ptr::read(dst);
                // The value hasn't been changed. Drop the guard without incrementing the stamp.
                guard.abort();
                Err(val)
            }
        }
    }
}