use core::iter::FromIterator;
use core::ops::{Deref, RangeBounds};
use core::{cmp, fmt, hash, mem, ptr, slice, usize};

use alloc::{
    alloc::{dealloc, Layout},
    borrow::Borrow,
    boxed::Box,
    string::String,
    vec::Vec,
};

use crate::buf::IntoIter;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::Buf;

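/// A cheaply cloneable and sliceable chunk of contiguous memory.
///
/// `Bytes` is a view (`ptr`, `len`) into some underlying storage, plus a
/// vtable that knows how to clone and release that storage. Cloning and
/// slicing never copy the bytes themselves; they only adjust the view and
/// the reference count held behind `data`.
///
/// A minimal usage sketch, doc-test style (assuming this crate is built as
/// `bytes`):
///
/// ```
/// use bytes::Bytes;
///
/// let full = Bytes::from(&b"Hello world"[..]);
/// let a = full.slice(0..5);
///
/// assert_eq!(&a[..], b"Hello");
/// ```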
pub struct Bytes {
    ptr: *const u8,
    len: usize,
    data: AtomicPtr<()>,
    vtable: &'static Vtable,
}

pub(crate) struct Vtable {
    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
    pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}

impl Bytes {
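    /// Creates a new empty `Bytes`.
    ///
    /// This does not allocate; the returned handle points at a static empty
    /// slice. A short sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert_eq!(b.len(), 0);
    /// assert!(b.is_empty());
    /// ```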
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn new() -> Bytes {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    #[cfg(all(loom, test))]
    pub fn new() -> Bytes {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

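    /// Creates a new `Bytes` from a static slice.
    ///
    /// The slice is never copied or freed; the handle simply borrows it for
    /// the `'static` lifetime. Example (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from_static(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```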
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn from_static(bytes: &'static [u8]) -> Bytes {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    #[cfg(all(loom, test))]
    pub fn from_static(bytes: &'static [u8]) -> Bytes {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    #[inline]
    pub const fn len(&self) -> usize {
        self.len
    }

    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

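    /// Creates a new `Bytes` by copying the given slice into an owned
    /// buffer. Example (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::copy_from_slice(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```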
    pub fn copy_from_slice(data: &[u8]) -> Self {
        data.to_vec().into()
    }

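    /// Returns a slice of `self` for the provided range.
    ///
    /// This is an O(1) operation: the returned `Bytes` shares the same
    /// underlying storage and only the view and reference count change.
    ///
    /// # Panics
    ///
    /// Panics if the range is invalid for `self` (start greater than end, or
    /// end out of bounds).
    ///
    /// A short sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(&b"hello world"[..]);
    /// let b = a.slice(2..5);
    ///
    /// assert_eq!(&b[..], b"llo");
    /// ```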
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Bytes {
        use core::ops::Bound;

        let len = self.len();

        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n + 1,
            Bound::Unbounded => 0,
        };

        let end = match range.end_bound() {
            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };

        assert!(
            begin <= end,
            "range start must not be greater than end: {:?} <= {:?}",
            begin,
            end,
        );
        assert!(
            end <= len,
            "range end out of bounds: {:?} <= {:?}",
            end,
            len,
        );

        if end == begin {
            return Bytes::new();
        }

        let mut ret = self.clone();

        ret.len = end - begin;
        ret.ptr = unsafe { ret.ptr.add(begin) };

        ret
    }

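    /// Returns a slice of `self` that is equivalent to the given `subset`,
    /// which must be a sub-slice obtained from `self` (for example via
    /// `as_ref()`), not merely equal bytes stored elsewhere.
    ///
    /// # Panics
    ///
    /// Panics if `subset` does not lie within the memory range of `self`.
    ///
    /// A short sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let bytes = Bytes::from(&b"012345678"[..]);
    /// let as_slice = bytes.as_ref();
    /// let subset = &as_slice[2..6];
    /// let subslice = bytes.slice_ref(subset);
    /// assert_eq!(&subslice[..], b"2345");
    /// ```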
    pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
        if subset.is_empty() {
            return Bytes::new();
        }

        let bytes_p = self.as_ptr() as usize;
        let bytes_len = self.len();

        let sub_p = subset.as_ptr() as usize;
        let sub_len = subset.len();

        assert!(
            sub_p >= bytes_p,
            "subset pointer ({:p}) is smaller than self pointer ({:p})",
            subset.as_ptr(),
            self.as_ptr(),
        );
        assert!(
            sub_p + sub_len <= bytes_p + bytes_len,
            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
            self.as_ptr(),
            bytes_len,
            subset.as_ptr(),
            sub_len,
        );

        let sub_offset = sub_p - bytes_p;

        self.slice(sub_offset..(sub_offset + sub_len))
    }

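    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)` and the returned
    /// `Bytes` contains elements `[at, len)`. This is an O(1) operation that
    /// only bumps the reference count and adjusts the views.
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    ///
    /// A short sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_off(5);
    ///
    /// assert_eq!(&a[..], b"hello");
    /// assert_eq!(&b[..], b" world");
    /// ```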
    #[must_use = "consider Bytes::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> Bytes {
        assert!(
            at <= self.len(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        if at == self.len() {
            return Bytes::new();
        }

        if at == 0 {
            return mem::replace(self, Bytes::new());
        }

        let mut ret = self.clone();

        self.len = at;

        unsafe { ret.inc_start(at) };

        ret
    }

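    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)` and the returned
    /// `Bytes` contains elements `[0, at)`. Like `split_off`, this is O(1).
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    ///
    /// A short sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_to(5);
    ///
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```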
    #[must_use = "consider Bytes::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> Bytes {
        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        if at == self.len() {
            return mem::replace(self, Bytes::new());
        }

        if at == 0 {
            return Bytes::new();
        }

        let mut ret = self.clone();

        unsafe { self.inc_start(at) };

        ret.len = at;
        ret
    }

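    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest. If `len` is greater than the current length, this has no effect.
    ///
    /// The promotable (vec-backed) representations recompute the capacity
    /// from `ptr` and `len` when freeing, so they cannot simply shrink `len`;
    /// instead the tail is split off and dropped, which promotes the buffer
    /// to the shared representation.
    ///
    /// A short sketch (crate name `bytes` assumed):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(&buf[..], b"hello");
    /// ```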
    #[inline]
    pub fn truncate(&mut self, len: usize) {
        if len < self.len {
            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
            {
                drop(self.split_off(len));
            } else {
                self.len = len;
            }
        }
    }

    #[inline]
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    #[inline]
    pub(crate) unsafe fn with_vtable(
        ptr: *const u8,
        len: usize,
        data: AtomicPtr<()>,
        vtable: &'static Vtable,
    ) -> Bytes {
        Bytes {
            ptr,
            len,
            data,
            vtable,
        }
    }

    #[inline]
    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    #[inline]
    unsafe fn inc_start(&mut self, by: usize) {
        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
        self.len -= by;
        self.ptr = self.ptr.add(by);
    }
}

unsafe impl Send for Bytes {}
unsafe impl Sync for Bytes {}

impl Drop for Bytes {
    #[inline]
    fn drop(&mut self) {
        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
    }
}

impl Clone for Bytes {
    #[inline]
    fn clone(&self) -> Bytes {
        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
    }
}

impl Buf for Bytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.len(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.len(),
        );

        unsafe {
            self.inc_start(cnt);
        }
    }

    fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
        if len == self.remaining() {
            core::mem::replace(self, Bytes::new())
        } else {
            let ret = self.slice(..len);
            self.advance(len);
            ret
        }
    }
}

impl Deref for Bytes {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl AsRef<[u8]> for Bytes {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl hash::Hash for Bytes {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        self.as_slice().hash(state);
    }
}

impl Borrow<[u8]> for Bytes {
    fn borrow(&self) -> &[u8] {
        self.as_slice()
    }
}

impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = IntoIter<Bytes>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a Bytes {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().iter()
    }
}

impl FromIterator<u8> for Bytes {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        Vec::from_iter(into_iter).into()
    }
}

impl PartialEq for Bytes {
    fn eq(&self, other: &Bytes) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for Bytes {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Ord for Bytes {
    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for Bytes {}

impl PartialEq<[u8]> for Bytes {
    fn eq(&self, other: &[u8]) -> bool {
        self.as_slice() == other
    }
}

impl PartialOrd<[u8]> for Bytes {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other)
    }
}

impl PartialEq<Bytes> for [u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for [u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for Bytes {
    fn eq(&self, other: &str) -> bool {
        self.as_slice() == other.as_bytes()
    }
}

impl PartialOrd<str> for Bytes {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for Bytes {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<Vec<u8>> for Bytes {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(&other[..])
    }
}

impl PartialEq<Bytes> for Vec<u8> {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for Vec<u8> {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<String> for Bytes {
    fn eq(&self, other: &String) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<String> for Bytes {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for String {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for String {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Bytes> for &[u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &[u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<Bytes> for &str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
where
    Bytes: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
where
    Bytes: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(&**other)
    }
}

impl Default for Bytes {
    #[inline]
    fn default() -> Bytes {
        Bytes::new()
    }
}

impl From<&'static [u8]> for Bytes {
    fn from(slice: &'static [u8]) -> Bytes {
        Bytes::from_static(slice)
    }
}

impl From<&'static str> for Bytes {
    fn from(slice: &'static str) -> Bytes {
        Bytes::from_static(slice.as_bytes())
    }
}

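// Converting from a `Vec<u8>` goes through `Box<[u8]>`. Note that
// `into_boxed_slice` drops any excess capacity (possibly reallocating), so
// the resulting `Bytes` only ever tracks `len` bytes of the original vector.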
impl From<Vec<u8>> for Bytes {
    fn from(vec: Vec<u8>) -> Bytes {
        let slice = vec.into_boxed_slice();
        slice.into()
    }
}

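// A freshly boxed slice is stored without allocating separate shared state:
// the buffer pointer itself is placed in `data`, with its low bit serving as
// the KIND tag. An even pointer has `KIND_VEC` or-ed into bit 0 and uses
// PROMOTABLE_EVEN_VTABLE (which masks the bit off again); an odd pointer
// already has the low bit set, so it is stored as-is and handled by
// PROMOTABLE_ODD_VTABLE.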
impl From<Box<[u8]>> for Bytes {
    fn from(slice: Box<[u8]>) -> Bytes {
        if slice.is_empty() {
            return Bytes::new();
        }

        let len = slice.len();
        let ptr = Box::into_raw(slice) as *mut u8;

        if ptr as usize & 0x1 == 0 {
            let data = ptr_map(ptr, |addr| addr | KIND_VEC);
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(data.cast()),
                vtable: &PROMOTABLE_EVEN_VTABLE,
            }
        } else {
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(ptr.cast()),
                vtable: &PROMOTABLE_ODD_VTABLE,
            }
        }
    }
}

impl From<String> for Bytes {
    fn from(s: String) -> Bytes {
        Bytes::from(s.into_bytes())
    }
}

impl From<Bytes> for Vec<u8> {
    fn from(bytes: Bytes) -> Vec<u8> {
        let bytes = mem::ManuallyDrop::new(bytes);
        unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
    }
}

impl fmt::Debug for Vtable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Vtable")
            .field("clone", &(self.clone as *const ()))
            .field("drop", &(self.drop as *const ()))
            .finish()
    }
}

const STATIC_VTABLE: Vtable = Vtable {
    clone: static_clone,
    to_vec: static_to_vec,
    drop: static_drop,
};

unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let slice = slice::from_raw_parts(ptr, len);
    Bytes::from_static(slice)
}

unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let slice = slice::from_raw_parts(ptr, len);
    slice.to_vec()
}

unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
    // nothing to drop for &'static [u8]
}

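// "Promotable" vtables back a `Bytes` created directly from a `Vec<u8>` or
// `Box<[u8]>`. No shared-state allocation exists yet; the raw buffer pointer
// lives in `data`, tagged as KIND_VEC. The first clone promotes the buffer to
// the `Shared` (reference-counted) representation. EVEN vs ODD refers to the
// parity of the buffer pointer, which decides how the tag bit is encoded (see
// `From<Box<[u8]>>` above).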
static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
    clone: promotable_even_clone,
    to_vec: promotable_even_to_vec,
    drop: promotable_even_drop,
};

static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
    clone: promotable_odd_clone,
    to_vec: promotable_odd_to_vec,
    drop: promotable_odd_drop,
};

unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
        shallow_clone_vec(data, shared, buf, ptr, len)
    }
}

unsafe fn promotable_to_vec(
    data: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
    f: fn(*mut ()) -> *mut u8,
) -> Vec<u8> {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shared_to_vec_impl(shared.cast(), ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);

        let buf = f(shared);

        let cap = (ptr as usize - buf as usize) + len;

        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    }
}

unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| {
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
    })
}

unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);
            let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
            free_boxed_slice(buf, ptr, len);
        }
    });
}

unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared as _, ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        shallow_clone_vec(data, shared, shared.cast(), ptr, len)
    }
}

unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    promotable_to_vec(data, ptr, len, |shared| shared.cast())
}

unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared.cast());
        } else {
            debug_assert_eq!(kind, KIND_VEC);

            free_boxed_slice(shared.cast(), ptr, len);
        }
    });
}

unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
    let cap = (offset as usize - buf as usize) + len;
    dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
}

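// Reference-counted shared state used once a vec-backed `Bytes` has been
// cloned (or when using `SHARED_VTABLE` directly). It owns the original
// allocation (`buf`/`cap`) and frees it when the count reaches zero. The
// compile-time assertion below guarantees `Shared` has an even alignment, so
// a `*mut Shared` stored in `data` never has its low (KIND) bit set.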
struct Shared {
    buf: *mut u8,
    cap: usize,
    ref_cnt: AtomicUsize,
}

impl Drop for Shared {
    fn drop(&mut self) {
        unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
    }
}

const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_clone,
    to_vec: shared_to_vec,
    drop: shared_drop,
};

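// The kind of storage is encoded in the low bit of the `data` pointer:
// KIND_ARC (0) means `data` points at a `Shared` allocation, while KIND_VEC
// (1) means `data` still carries the raw vec/box buffer pointer of a
// promotable `Bytes` that has not been cloned yet.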
const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;

unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed);
    shallow_clone_arc(shared as _, ptr, len)
}

unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
    if (*shared)
        .ref_cnt
        .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
        .is_ok()
    {
        let buf = (*shared).buf;
        let cap = (*shared).cap;

        drop(Box::from_raw(shared as *mut mem::ManuallyDrop<Shared>));

        ptr::copy(ptr, buf, len);

        Vec::from_raw_parts(buf, len, cap)
    } else {
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        v
    }
}

unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
}

unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(shared.cast());
    });
}

unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);

    if old_size > usize::MAX >> 1 {
        crate::abort();
    }

    Bytes {
        ptr,
        len,
        data: AtomicPtr::new(shared as _),
        vtable: &SHARED_VTABLE,
    }
}

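// Promotes a vec-backed `Bytes` to the shared representation on first clone.
// A `Shared` is allocated with `ref_cnt` starting at 2 (one for the existing
// handle, one for the clone being created), and a compare-and-swap installs
// it in `data`. If another thread won the race, the freshly built `Shared` is
// discarded without dropping the buffer it does not own, and the winner's
// state is used instead.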
#[cold]
unsafe fn shallow_clone_vec(
    atom: &AtomicPtr<()>,
    ptr: *const (),
    buf: *mut u8,
    offset: *const u8,
    len: usize,
) -> Bytes {
    let shared = Box::new(Shared {
        buf,
        cap: (offset as usize - buf as usize) + len,
        ref_cnt: AtomicUsize::new(2),
    });

    let shared = Box::into_raw(shared);

    debug_assert!(
        0 == (shared as usize & KIND_MASK),
        "internal: Box<Shared> should have an aligned pointer",
    );

    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
        Ok(actual) => {
            debug_assert!(actual as usize == ptr as usize);
            Bytes {
                ptr: offset,
                len,
                data: AtomicPtr::new(shared as _),
                vtable: &SHARED_VTABLE,
            }
        }
        Err(actual) => {
            let shared = Box::from_raw(shared);
            mem::forget(*shared);

            shallow_clone_arc(actual as _, offset, len)
        }
    }
}

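// Decrements the reference count and frees the shared state once the last
// handle is gone. The decrement uses `Release`; the subsequent `Acquire` load
// acts as a fence so that the thread performing the final drop observes all
// writes made through other handles before deallocating.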
unsafe fn release_shared(ptr: *mut Shared) {
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    (*ptr).ref_cnt.load(Ordering::Acquire);

    drop(Box::from_raw(ptr));
}

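// Applies an address transformation to a pointer. Two variants are provided:
// under Miri the new address is re-derived from the original pointer with
// `wrapping_add`, which keeps pointer provenance intact for the interpreter;
// otherwise a plain integer round-trip is used.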
#[cfg(miri)]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    let diff = new_addr.wrapping_sub(old_addr);
    ptr.wrapping_add(diff)
}

#[cfg(not(miri))]
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    new_addr as *mut u8
}

fn _split_to_must_use() {}

fn _split_off_must_use() {}

#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::Bytes;

    #[test]
    fn bytes_cloning_vec() {
        loom::model(|| {
            let a = Bytes::from(b"abcdefgh".to_vec());
            let addr = a.as_ptr() as usize;

            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}