bytes/bytes_mut.rs
use core::iter::{FromIterator, Iterator};
use core::mem::{self, ManuallyDrop};
use core::ops::{Deref, DerefMut};
use core::ptr::{self, NonNull};
use core::{cmp, fmt, hash, isize, slice, usize};

use alloc::{
    borrow::{Borrow, BorrowMut},
    boxed::Box,
    string::String,
    vec,
    vec::Vec,
};

use crate::buf::{IntoIter, UninitSlice};
use crate::bytes::Vtable;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::{Buf, BufMut, Bytes};

/// A unique reference to a contiguous slice of memory.
///
/// `BytesMut` represents a unique view into a potentially shared memory region.
/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
/// mutate the memory.
///
/// `BytesMut` can be thought of as containing a `buf: Arc<Vec<u8>>`, an offset
/// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the
/// same `buf` overlaps with its slice. That guarantee means that a write lock
/// is not required.
///
/// # Growth
///
/// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as
/// necessary. However, explicitly reserving the required space up-front before
/// a series of inserts will be more efficient.
///
/// # Examples
///
/// ```
/// use bytes::{BytesMut, BufMut};
///
/// let mut buf = BytesMut::with_capacity(64);
///
/// buf.put_u8(b'h');
/// buf.put_u8(b'e');
/// buf.put(&b"llo"[..]);
///
/// assert_eq!(&buf[..], b"hello");
///
/// // Freeze the buffer so that it can be shared
/// let a = buf.freeze();
///
/// // This does not allocate, instead `b` points to the same memory.
/// let b = a.clone();
///
/// assert_eq!(&a[..], b"hello");
/// assert_eq!(&b[..], b"hello");
/// ```
pub struct BytesMut {
    ptr: NonNull<u8>,
    len: usize,
    cap: usize,
    data: *mut Shared,
}

// Thread-safe reference-counted container for the shared storage. This is
// mostly the same as `std::sync::Arc` but without the weak counter. The ref
// counting fns are based on the ones found in `std`.
//
// The main reason to use `Shared` instead of `std::sync::Arc` is that it ends
// up making the overall code simpler and easier to reason about. This is due to
// some of the logic around setting `Inner::arc` and other ways the `arc` field
// is used. Using `Arc` ended up requiring a number of funky transmutes and
// other shenanigans to make it work.
struct Shared {
    vec: Vec<u8>,
    original_capacity_repr: usize,
    ref_count: AtomicUsize,
}

// Buffer storage strategy flags.
const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;

// The max original capacity value. Any `Bytes` allocated with a greater initial
// capacity will default to this.
const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
// The original capacity algorithm will not take effect unless the originally
// allocated capacity was at least 1kb in size.
const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
// The original capacity is stored in powers of 2 starting at 1kb to a max of
// 64kb. Representing it as such requires only 3 bits of storage.
const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
const ORIGINAL_CAPACITY_OFFSET: usize = 2;

// When the storage is in the `Vec` representation, the pointer can be advanced
// by at most this value. This is because the storage available to track the
// offset is a `usize` minus the number of KIND bits and the number of
// ORIGINAL_CAPACITY bits.
const VEC_POS_OFFSET: usize = 5;
const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
const NOT_VEC_POS_MASK: usize = 0b11111;
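
// A sketch of how the `data` field doubles as a bit field in the KIND_VEC
// representation, derived from the constants above (this is an internal
// detail and may change):
//
//   bit 0      : kind flag (1 = KIND_VEC; 0 = KIND_ARC)
//   bit 1      : unused in this scheme
//   bits 2..=4 : original capacity repr (see ORIGINAL_CAPACITY_MASK)
//   bits 5..   : how far the view has advanced into the `Vec` (the "vec pos")
//
// In the KIND_ARC representation, `data` is instead a real `*mut Shared`;
// the allocation is aligned, so its low bit is always 0.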

#[cfg(target_pointer_width = "64")]
const PTR_WIDTH: usize = 64;
#[cfg(target_pointer_width = "32")]
const PTR_WIDTH: usize = 32;

/*
 *
 * ===== BytesMut =====
 *
 */

impl BytesMut {
    /// Creates a new `BytesMut` with the specified capacity.
    ///
    /// The returned `BytesMut` will be able to hold at least `capacity` bytes
    /// without reallocating.
    ///
    /// It is important to note that this function does not specify the length
    /// of the returned `BytesMut`, but only the capacity.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut bytes = BytesMut::with_capacity(64);
    ///
    /// // `bytes` contains no data, even though there is capacity
    /// assert_eq!(bytes.len(), 0);
    ///
    /// bytes.put(&b"hello world"[..]);
    ///
    /// assert_eq!(&bytes[..], b"hello world");
    /// ```
    #[inline]
    pub fn with_capacity(capacity: usize) -> BytesMut {
        BytesMut::from_vec(Vec::with_capacity(capacity))
    }

    /// Creates a new `BytesMut` with default capacity.
    ///
    /// The resulting object has length 0 and unspecified capacity.
    /// This function does not allocate.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut bytes = BytesMut::new();
    ///
    /// assert_eq!(0, bytes.len());
    ///
    /// bytes.reserve(2);
    /// bytes.put_slice(b"xy");
    ///
    /// assert_eq!(&b"xy"[..], &bytes[..]);
    /// ```
    #[inline]
    pub fn new() -> BytesMut {
        BytesMut::with_capacity(0)
    }

    /// Returns the number of bytes contained in this `BytesMut`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::from(&b"hello"[..]);
    /// assert_eq!(b.len(), 5);
    /// ```
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns true if the `BytesMut` has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::with_capacity(64);
    /// assert!(b.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns the number of bytes the `BytesMut` can hold without reallocating.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let b = BytesMut::with_capacity(64);
    /// assert_eq!(b.capacity(), 64);
    /// ```
    #[inline]
    pub fn capacity(&self) -> usize {
        self.cap
    }

    /// Converts `self` into an immutable `Bytes`.
    ///
    /// The conversion is zero cost and is used to indicate that the slice
    /// referenced by the handle will no longer be mutated. Once the conversion
    /// is done, the handle can be cloned and shared across threads.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    /// use std::thread;
    ///
    /// let mut b = BytesMut::with_capacity(64);
    /// b.put(&b"hello world"[..]);
    /// let b1 = b.freeze();
    /// let b2 = b1.clone();
    ///
    /// let th = thread::spawn(move || {
    ///     assert_eq!(&b1[..], b"hello world");
    /// });
    ///
    /// assert_eq!(&b2[..], b"hello world");
    /// th.join().unwrap();
    /// ```
    #[inline]
    pub fn freeze(mut self) -> Bytes {
        if self.kind() == KIND_VEC {
            // Just re-use `Bytes` internal Vec vtable
            unsafe {
                let (off, _) = self.get_vec_pos();
                let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
                mem::forget(self);
                let mut b: Bytes = vec.into();
                b.advance(off);
                b
            }
        } else {
            debug_assert_eq!(self.kind(), KIND_ARC);

            let ptr = self.ptr.as_ptr();
            let len = self.len;
            let data = AtomicPtr::new(self.data.cast());
            mem::forget(self);
            unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
        }
    }

    /// Creates a new `BytesMut` containing `len` zeros.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let zeros = BytesMut::zeroed(42);
    ///
    /// assert_eq!(zeros.len(), 42);
    /// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
    /// ```
    pub fn zeroed(len: usize) -> BytesMut {
        BytesMut::from_vec(vec![0; len])
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned
    /// `BytesMut` contains elements `[at, capacity)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count
    /// and sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let mut b = a.split_off(5);
    ///
    /// a[0] = b'j';
    /// b[0] = b'!';
    ///
    /// assert_eq!(&a[..], b"jello");
    /// assert_eq!(&b[..], b"!world");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > capacity`.
    #[must_use = "consider BytesMut::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> BytesMut {
        assert!(
            at <= self.capacity(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.capacity(),
        );
        unsafe {
            let mut other = self.shallow_clone();
            other.set_start(at);
            self.set_end(at);
            other
        }
    }

    /// Removes the bytes from the current view, returning them in a new
    /// `BytesMut` handle.
    ///
    /// Afterwards, `self` will be empty, but will retain any additional
    /// capacity that it had before the operation. This is identical to
    /// `self.split_to(self.len())`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(1024);
    /// buf.put(&b"hello world"[..]);
    ///
    /// let other = buf.split();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(1013, buf.capacity());
    ///
    /// assert_eq!(other, b"hello world"[..]);
    /// ```
    #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"]
    pub fn split(&mut self) -> BytesMut {
        let len = self.len();
        self.split_to(len)
    }

    /// Splits the buffer into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
    /// contains elements `[0, at)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let mut b = a.split_to(5);
    ///
    /// a[0] = b'!';
    /// b[0] = b'j';
    ///
    /// assert_eq!(&a[..], b"!world");
    /// assert_eq!(&b[..], b"jello");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    #[must_use = "consider BytesMut::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> BytesMut {
        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        unsafe {
            let mut other = self.shallow_clone();
            other.set_end(at);
            self.set_start(at);
            other
        }
    }

    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest.
    ///
    /// If `len` is greater than the buffer's current length, this has no
    /// effect.
    ///
    /// Existing underlying capacity is preserved.
    ///
    /// The [`split_off`] method can emulate `truncate`, but this causes the
    /// excess bytes to be returned instead of dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// ```
    ///
    /// [`split_off`]: #method.split_off
    pub fn truncate(&mut self, len: usize) {
        if len <= self.len() {
            unsafe {
                self.set_len(len);
            }
        }
    }

    /// Clears the buffer, removing all data. Existing capacity is preserved.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello world"[..]);
    /// buf.clear();
    /// assert!(buf.is_empty());
    /// ```
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    /// Resizes the buffer so that `len` is equal to `new_len`.
    ///
    /// If `new_len` is greater than `len`, the buffer is extended by the
    /// difference with each additional byte set to `value`. If `new_len` is
    /// less than `len`, the buffer is simply truncated.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::new();
    ///
    /// buf.resize(3, 0x1);
    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
    ///
    /// buf.resize(2, 0x2);
    /// assert_eq!(&buf[..], &[0x1, 0x1]);
    ///
    /// buf.resize(4, 0x3);
    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
    /// ```
    pub fn resize(&mut self, new_len: usize, value: u8) {
        let len = self.len();
        if new_len > len {
            let additional = new_len - len;
            self.reserve(additional);
            unsafe {
                let dst = self.chunk_mut().as_mut_ptr();
                ptr::write_bytes(dst, value, additional);
                self.set_len(new_len);
            }
        } else {
            self.truncate(new_len);
        }
    }

    /// Sets the length of the buffer.
    ///
    /// This will explicitly set the size of the buffer without actually
    /// modifying the data, so it is up to the caller to ensure that the data
    /// has been initialized.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut b = BytesMut::from(&b"hello world"[..]);
    ///
    /// unsafe {
    ///     b.set_len(5);
    /// }
    ///
    /// assert_eq!(&b[..], b"hello");
    ///
    /// unsafe {
    ///     b.set_len(11);
    /// }
    ///
    /// assert_eq!(&b[..], b"hello world");
    /// ```
    #[inline]
    pub unsafe fn set_len(&mut self, len: usize) {
        debug_assert!(len <= self.cap, "set_len out of bounds");
        self.len = len;
    }

    /// Reserves capacity for at least `additional` more bytes to be inserted
    /// into the given `BytesMut`.
    ///
    /// More than `additional` bytes may be reserved in order to avoid frequent
    /// reallocations. A call to `reserve` may result in an allocation.
    ///
    /// Before allocating new buffer space, the function will attempt to reclaim
    /// space in the existing buffer. If the current handle references a view
    /// into a larger original buffer, and all other handles referencing part
    /// of the same original buffer have been dropped, then the current view
    /// can be copied/shifted to the front of the buffer and the handle can take
    /// ownership of the full buffer, provided that the full buffer is large
    /// enough to fit the requested additional capacity.
    ///
    /// This optimization will only happen if shifting the data from the current
    /// view to the front of the buffer is not too expensive in terms of the
    /// (amortized) time required. The precise condition is subject to change;
    /// as of now, the length of the data being shifted needs to be at least as
    /// large as the distance that it's shifted by. If the current view is empty
    /// and the original buffer is large enough to fit the requested additional
    /// capacity, then reallocations will never happen.
    ///
    /// # Examples
    ///
    /// In the following example, a new buffer is allocated.
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello"[..]);
    /// buf.reserve(64);
    /// assert!(buf.capacity() >= 69);
    /// ```
    ///
    /// In the following example, the existing buffer is reclaimed.
    ///
    /// ```
    /// use bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(128);
    /// buf.put(&[0; 64][..]);
    ///
    /// let ptr = buf.as_ptr();
    /// let other = buf.split();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(buf.capacity(), 64);
    ///
    /// drop(other);
    /// buf.reserve(128);
    ///
    /// assert_eq!(buf.capacity(), 128);
    /// assert_eq!(buf.as_ptr(), ptr);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    #[inline]
    pub fn reserve(&mut self, additional: usize) {
        let len = self.len();
        let rem = self.capacity() - len;

        if additional <= rem {
            // The handle can already store at least `additional` more bytes, so
            // there is no further work needed to be done.
            return;
        }

        self.reserve_inner(additional);
    }

    // In a separate function to allow the short-circuits in `reserve` to
    // be inline-able. Significantly helps performance.
    fn reserve_inner(&mut self, additional: usize) {
        let len = self.len();
        let kind = self.kind();

        if kind == KIND_VEC {
            // If there's enough free space before the start of the buffer, then
            // just copy the data backwards and reuse the already-allocated
            // space.
            //
            // Otherwise, since backed by a vector, use `Vec::reserve`.
            //
            // We need to make sure that this optimization does not kill the
            // amortized runtimes of BytesMut's operations.
            unsafe {
                let (off, prev) = self.get_vec_pos();

                // Only reuse space if we can satisfy the requested additional space.
                //
                // Also check if the value of `off` suggests that enough bytes
                // have been read to account for the overhead of shifting all
                // the data (in an amortized analysis).
                // Hence the condition `off >= self.len()`.
                //
                // This condition also already implies that the buffer is going
                // to be (at least) half-empty in the end; so we do not break
                // the (amortized) runtime with future resizes of the underlying
                // `Vec`.
                //
                // [For more details check issue #524, and PR #525.]
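                //
                // As a rough numeric sketch (values chosen purely for
                // illustration): with `capacity() = 10`, `len() = 3`,
                // `off = 4`, and `additional = 8`, we get
                // `10 - 3 + 4 = 11 >= 8` (the reclaimed buffer is big
                // enough) and `4 >= 3` (enough bytes were consumed to pay
                // for the shift), so the branch below copies the data back
                // and reuses the allocation.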
                if self.capacity() - self.len() + off >= additional && off >= self.len() {
                    // There's enough space, and it's not too much overhead:
                    // reuse the space!
                    //
                    // Just move the pointer back to the start after copying
                    // data back.
                    let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
                    // Since `off >= self.len()`, the two regions don't overlap.
                    ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len);
                    self.ptr = vptr(base_ptr);
                    self.set_vec_pos(0, prev);

                    // Length stays constant, but since we moved backwards we
                    // can gain capacity back.
                    self.cap += off;
                } else {
                    // Not enough space, or reusing might be too much overhead:
                    // allocate more space!
                    let mut v =
                        ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
                    v.reserve(additional);

                    // Update the info
                    self.ptr = vptr(v.as_mut_ptr().add(off));
                    self.len = v.len() - off;
                    self.cap = v.capacity() - off;
                }

                return;
            }
        }

        debug_assert_eq!(kind, KIND_ARC);
        let shared: *mut Shared = self.data;

        // Reserving involves abandoning the currently shared buffer and
        // allocating a new vector with the requested capacity.
        //
        // Compute the new capacity
        let mut new_cap = len.checked_add(additional).expect("overflow");

        let original_capacity;
        let original_capacity_repr;

        unsafe {
            original_capacity_repr = (*shared).original_capacity_repr;
            original_capacity = original_capacity_from_repr(original_capacity_repr);

            // First, try to reclaim the buffer. This is possible if the current
            // handle is the only outstanding handle pointing to the buffer.
            if (*shared).is_unique() {
                // This is the only handle to the buffer. It can be reclaimed.
                // However, before doing the work of copying data, check to make
                // sure that the vector has enough capacity.
                let v = &mut (*shared).vec;

                let v_capacity = v.capacity();
                let ptr = v.as_mut_ptr();

                let offset = offset_from(self.ptr.as_ptr(), ptr);

                // Compare the condition in the `kind == KIND_VEC` case above
                // for more details.
                if v_capacity >= new_cap + offset {
                    self.cap = new_cap;
                    // no copy is necessary
                } else if v_capacity >= new_cap && offset >= len {
                    // The capacity is sufficient, and copying is not too much
                    // overhead: reclaim the buffer!

                    // `offset >= len` means: no overlap
                    ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len);

                    self.ptr = vptr(ptr);
                    self.cap = v.capacity();
                } else {
                    // calculate offset
                    let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize);

                    // new_cap is calculated in terms of `BytesMut`, not the underlying
                    // `Vec`, so it does not take the offset into account.
                    //
                    // Thus we have to manually add it here.
                    new_cap = new_cap.checked_add(off).expect("overflow");

                    // The vector capacity is not sufficient. The reserve request is
                    // asking for more than the initial buffer capacity. Allocate more
                    // than requested if `new_cap` is not much bigger than the current
                    // capacity.
                    //
                    // There are some situations, when using `reserve_exact`, where
                    // the buffer capacity could be below `original_capacity`, so do
                    // a check.
                    let double = v.capacity().checked_shl(1).unwrap_or(new_cap);

                    new_cap = cmp::max(double, new_cap);

                    // No space - allocate more
                    v.reserve(new_cap - v.len());

                    // Update the info
                    self.ptr = vptr(v.as_mut_ptr().add(off));
                    self.cap = v.capacity() - off;
                }

                return;
            } else {
                new_cap = cmp::max(new_cap, original_capacity);
            }
        }

        // Create a new vector to store the data
        let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));

        // Copy the bytes
        v.extend_from_slice(self.as_ref());

        // Release the shared handle. This must be done *after* the bytes are
        // copied.
        unsafe { release_shared(shared) };

        // Update self
        let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
        self.data = invalid_ptr(data);
        self.ptr = vptr(v.as_mut_ptr());
        self.len = v.len();
        self.cap = v.capacity();
    }

    /// Appends the given bytes to this `BytesMut`.
    ///
    /// If this `BytesMut` object does not have enough capacity, it is resized
    /// first.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::with_capacity(0);
    /// buf.extend_from_slice(b"aaabbb");
    /// buf.extend_from_slice(b"cccddd");
    ///
    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
    /// ```
    pub fn extend_from_slice(&mut self, extend: &[u8]) {
        let cnt = extend.len();
        self.reserve(cnt);

        unsafe {
            let dst = self.uninit_slice();
            // Reserved above
            debug_assert!(dst.len() >= cnt);

            ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr(), cnt);
        }

        unsafe {
            self.advance_mut(cnt);
        }
    }

    /// Absorbs a `BytesMut` that was previously split off if they are
    /// contiguous; otherwise, appends its bytes to this `BytesMut`.
    ///
    /// If the two `BytesMut` objects were previously contiguous and not mutated
    /// in a way that causes re-allocation, i.e., if `other` was created by
    /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
    /// that just decreases a reference count and sets a few indices.
    /// Otherwise this method degenerates to
    /// `self.extend_from_slice(other.as_ref())`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::with_capacity(64);
    /// buf.extend_from_slice(b"aaabbbcccddd");
    ///
    /// let split = buf.split_off(6);
    /// assert_eq!(b"aaabbb", &buf[..]);
    /// assert_eq!(b"cccddd", &split[..]);
    ///
    /// buf.unsplit(split);
    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
    /// ```
    pub fn unsplit(&mut self, other: BytesMut) {
        if self.is_empty() {
            *self = other;
            return;
        }

        if let Err(other) = self.try_unsplit(other) {
            self.extend_from_slice(other.as_ref());
        }
    }

    // private

    // For now, use a `Vec` to manage the memory for us, but we may want to
    // change that in the future to some alternate allocator strategy.
    //
    // Thus, we don't expose an easy way to construct from a `Vec` since an
    // internal change could make a simple pattern (`BytesMut::from(vec)`)
    // suddenly a lot more expensive.
    #[inline]
    pub(crate) fn from_vec(mut vec: Vec<u8>) -> BytesMut {
        let ptr = vptr(vec.as_mut_ptr());
        let len = vec.len();
        let cap = vec.capacity();
        mem::forget(vec);

        let original_capacity_repr = original_capacity_to_repr(cap);
        let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;

        BytesMut {
            ptr,
            len,
            cap,
            data: invalid_ptr(data),
        }
    }

    #[inline]
    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
    }

    #[inline]
    fn as_slice_mut(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
    }

    unsafe fn set_start(&mut self, start: usize) {
        // Setting the start to 0 is a no-op, so return early if this is the
        // case.
        if start == 0 {
            return;
        }

        debug_assert!(start <= self.cap, "internal: set_start out of bounds");

        let kind = self.kind();

        if kind == KIND_VEC {
            // Setting the start when in vec representation is a little more
            // complicated. First, we have to track how far ahead the
            // "start" of the byte buffer is from the beginning of the vec. We
            // also have to ensure that we don't exceed the maximum shift.
            let (mut pos, prev) = self.get_vec_pos();
            pos += start;

            if pos <= MAX_VEC_POS {
                self.set_vec_pos(pos, prev);
            } else {
                // The repr must be upgraded to ARC. This will never happen
                // on 64 bit systems and will only happen on 32 bit systems
                // when shifting past 134,217,727 bytes. As such, we don't
                // worry too much about performance here.
                self.promote_to_shared(/*ref_count = */ 1);
            }
        }

        // Updating the start of the view is setting `ptr` to point to the
        // new start and updating the `len` field to reflect the new length
        // of the view.
        self.ptr = vptr(self.ptr.as_ptr().add(start));

        if self.len >= start {
            self.len -= start;
        } else {
            self.len = 0;
        }

        self.cap -= start;
    }

    unsafe fn set_end(&mut self, end: usize) {
        debug_assert_eq!(self.kind(), KIND_ARC);
        assert!(end <= self.cap, "set_end out of bounds");

        self.cap = end;
        self.len = cmp::min(self.len, end);
    }

    /// Absorbs a `BytesMut` that was previously split off.
    ///
    /// If the two `BytesMut` objects were previously contiguous, i.e., if
    /// `other` was created by calling `split_off` on this `BytesMut`, then
    /// this is an `O(1)` operation that just decreases a reference
    /// count and sets a few indices. Otherwise this method returns an error
    /// containing the original `other`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::with_capacity(64);
    /// buf.extend_from_slice(b"aaabbbcccddd");
    ///
    /// let mut split_1 = buf.split_off(3);
    /// let split_2 = split_1.split_off(3);
    /// assert_eq!(b"aaa", &buf[..]);
    /// assert_eq!(b"bbb", &split_1[..]);
    /// assert_eq!(b"cccddd", &split_2[..]);
    ///
    /// let split_2 = buf.try_unsplit(split_2).unwrap_err();
    ///
    /// buf.try_unsplit(split_1).unwrap();
    /// buf.try_unsplit(split_2).unwrap();
    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
    /// ```
    pub fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
        if other.capacity() == 0 {
            return Ok(());
        }

        let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
        if ptr == other.ptr.as_ptr()
            && self.kind() == KIND_ARC
            && other.kind() == KIND_ARC
            && self.data == other.data
        {
            // Contiguous blocks, just combine directly
            self.len += other.len;
            self.cap += other.cap;
            Ok(())
        } else {
            Err(other)
        }
    }

    #[inline]
    fn kind(&self) -> usize {
        self.data as usize & KIND_MASK
    }

    unsafe fn promote_to_shared(&mut self, ref_cnt: usize) {
        debug_assert_eq!(self.kind(), KIND_VEC);
        debug_assert!(ref_cnt == 1 || ref_cnt == 2);

        let original_capacity_repr =
            (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;

        // The vec offset cannot be concurrently mutated, so there
        // should be no danger reading it.
        let off = (self.data as usize) >> VEC_POS_OFFSET;

        // First, allocate a new `Shared` instance containing the
        // `Vec` fields. It's important to note that `ptr`, `len`,
        // and `cap` cannot be mutated without having `&mut self`.
        // This means that these fields will not be concurrently
        // updated and since the buffer hasn't been promoted to an
        // `Arc`, those three fields still are the components of the
        // vector.
        let shared = Box::new(Shared {
            vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off),
            original_capacity_repr,
            ref_count: AtomicUsize::new(ref_cnt),
        });

        let shared = Box::into_raw(shared);

        // The pointer should be aligned, so this assert should
        // always succeed.
        debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);

        self.data = shared;
    }

    /// Makes an exact shallow clone of `self`.
    ///
    /// The kind of `self` doesn't matter, but this is unsafe
    /// because the clone will have the same offsets. You must
    /// ensure that the value returned to the user doesn't allow
    /// two views into the same range.
    #[inline]
    unsafe fn shallow_clone(&mut self) -> BytesMut {
        if self.kind() == KIND_ARC {
            increment_shared(self.data);
            ptr::read(self)
        } else {
            self.promote_to_shared(/*ref_count = */ 2);
            ptr::read(self)
        }
    }

    #[inline]
    unsafe fn get_vec_pos(&mut self) -> (usize, usize) {
        debug_assert_eq!(self.kind(), KIND_VEC);

        let prev = self.data as usize;
        (prev >> VEC_POS_OFFSET, prev)
    }

    #[inline]
    unsafe fn set_vec_pos(&mut self, pos: usize, prev: usize) {
        debug_assert_eq!(self.kind(), KIND_VEC);
        debug_assert!(pos <= MAX_VEC_POS);

        self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK));
    }

    #[inline]
    fn uninit_slice(&mut self) -> &mut UninitSlice {
        unsafe {
            let ptr = self.ptr.as_ptr().add(self.len);
            let len = self.cap - self.len;

            UninitSlice::from_raw_parts_mut(ptr, len)
        }
    }
}

impl Drop for BytesMut {
    fn drop(&mut self) {
        let kind = self.kind();

        if kind == KIND_VEC {
            unsafe {
                let (off, _) = self.get_vec_pos();

                // Vector storage, free the vector
                let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
            }
        } else if kind == KIND_ARC {
            unsafe { release_shared(self.data) };
        }
    }
}

impl Buf for BytesMut {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.remaining(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.remaining(),
        );
        unsafe {
            self.set_start(cnt);
        }
    }

    fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
        self.split_to(len).freeze()
    }
}

unsafe impl BufMut for BytesMut {
    #[inline]
    fn remaining_mut(&self) -> usize {
        usize::MAX - self.len()
    }

    #[inline]
    unsafe fn advance_mut(&mut self, cnt: usize) {
        let new_len = self.len() + cnt;
        assert!(
            new_len <= self.cap,
            "new_len = {}; capacity = {}",
            new_len,
            self.cap
        );
        self.len = new_len;
    }

    #[inline]
    fn chunk_mut(&mut self) -> &mut UninitSlice {
        if self.capacity() == self.len() {
            self.reserve(64);
        }
        self.uninit_slice()
    }

    // Specialize these methods so they can skip checking `remaining_mut`
    // and `advance_mut`.

    fn put<T: crate::Buf>(&mut self, mut src: T)
    where
        Self: Sized,
    {
        while src.has_remaining() {
            let s = src.chunk();
            let l = s.len();
            self.extend_from_slice(s);
            src.advance(l);
        }
    }

    fn put_slice(&mut self, src: &[u8]) {
        self.extend_from_slice(src);
    }

    fn put_bytes(&mut self, val: u8, cnt: usize) {
        self.reserve(cnt);
        unsafe {
            let dst = self.uninit_slice();
            // Reserved above
            debug_assert!(dst.len() >= cnt);

            ptr::write_bytes(dst.as_mut_ptr(), val, cnt);

            self.advance_mut(cnt);
        }
    }
}

impl AsRef<[u8]> for BytesMut {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl Deref for BytesMut {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_ref()
    }
}

impl AsMut<[u8]> for BytesMut {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.as_slice_mut()
    }
}

impl DerefMut for BytesMut {
    #[inline]
    fn deref_mut(&mut self) -> &mut [u8] {
        self.as_mut()
    }
}

impl<'a> From<&'a [u8]> for BytesMut {
    fn from(src: &'a [u8]) -> BytesMut {
        BytesMut::from_vec(src.to_vec())
    }
}

impl<'a> From<&'a str> for BytesMut {
    fn from(src: &'a str) -> BytesMut {
        BytesMut::from(src.as_bytes())
    }
}

impl From<BytesMut> for Bytes {
    fn from(src: BytesMut) -> Bytes {
        src.freeze()
    }
}

impl PartialEq for BytesMut {
    fn eq(&self, other: &BytesMut) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for BytesMut {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Ord for BytesMut {
    fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for BytesMut {}

impl Default for BytesMut {
    #[inline]
    fn default() -> BytesMut {
        BytesMut::new()
    }
}

impl hash::Hash for BytesMut {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        let s: &[u8] = self.as_ref();
        s.hash(state);
    }
}

impl Borrow<[u8]> for BytesMut {
    fn borrow(&self) -> &[u8] {
        self.as_ref()
    }
}

impl BorrowMut<[u8]> for BytesMut {
    fn borrow_mut(&mut self) -> &mut [u8] {
        self.as_mut()
    }
}

impl fmt::Write for BytesMut {
    #[inline]
    fn write_str(&mut self, s: &str) -> fmt::Result {
        if self.remaining_mut() >= s.len() {
            self.put_slice(s.as_bytes());
            Ok(())
        } else {
            Err(fmt::Error)
        }
    }

    #[inline]
    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
        fmt::write(self, args)
    }
}

impl Clone for BytesMut {
    fn clone(&self) -> BytesMut {
        BytesMut::from(&self[..])
    }
}

impl IntoIterator for BytesMut {
    type Item = u8;
    type IntoIter = IntoIter<BytesMut>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a BytesMut {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_ref().iter()
    }
}

impl Extend<u8> for BytesMut {
    fn extend<T>(&mut self, iter: T)
    where
        T: IntoIterator<Item = u8>,
    {
        let iter = iter.into_iter();

        let (lower, _) = iter.size_hint();
        self.reserve(lower);

        // TODO: optimize
        // 1. If self.kind() == KIND_VEC, use Vec::extend
        // 2. Make `reserve` inline-able
        for b in iter {
            self.reserve(1);
            self.put_u8(b);
        }
    }
}

impl<'a> Extend<&'a u8> for BytesMut {
    fn extend<T>(&mut self, iter: T)
    where
        T: IntoIterator<Item = &'a u8>,
    {
        self.extend(iter.into_iter().copied())
    }
}

impl Extend<Bytes> for BytesMut {
    fn extend<T>(&mut self, iter: T)
    where
        T: IntoIterator<Item = Bytes>,
    {
        for bytes in iter {
            self.extend_from_slice(&bytes)
        }
    }
}

impl FromIterator<u8> for BytesMut {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        BytesMut::from_vec(Vec::from_iter(into_iter))
    }
}

impl<'a> FromIterator<&'a u8> for BytesMut {
    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
        BytesMut::from_iter(into_iter.into_iter().copied())
    }
}

/*
 *
 * ===== Inner =====
 *
 */

unsafe fn increment_shared(ptr: *mut Shared) {
    let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);

    if old_size > isize::MAX as usize {
        crate::abort();
    }
}

unsafe fn release_shared(ptr: *mut Shared) {
    // `Shared` storage... follow the drop steps from Arc.
    if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    // This fence is needed to prevent reordering of use of the data and
    // deletion of the data. Because it is marked `Release`, the decreasing
    // of the reference count synchronizes with this `Acquire` fence. This
    // means that use of the data happens before decreasing the reference
    // count, which happens before this fence, which happens before the
    // deletion of the data.
    //
    // As explained in the [Boost documentation][1],
    //
    // > It is important to enforce any possible access to the object in one
    // > thread (through an existing reference) to *happen before* deleting
    // > the object in a different thread. This is achieved by a "release"
    // > operation after dropping a reference (any access to the object
    // > through this reference must obviously happened before), and an
    // > "acquire" operation before deleting the object.
    //
    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
    //
    // Thread sanitizer does not support atomic fences. Use an atomic load
    // instead.
    (*ptr).ref_count.load(Ordering::Acquire);

    // Drop the data
    drop(Box::from_raw(ptr));
}

impl Shared {
    fn is_unique(&self) -> bool {
        // The goal is to check if the current handle is the only handle
        // that currently has access to the buffer. This is done by
        // checking if the `ref_count` is currently 1.
        //
        // The `Acquire` ordering synchronizes with the `Release` as
        // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
        // operation guarantees that any mutations done in other threads
        // are ordered before the `ref_count` is decremented. As such,
        // this `Acquire` will guarantee that those mutations are
        // visible to the current thread.
        self.ref_count.load(Ordering::Acquire) == 1
    }
}

#[inline]
fn original_capacity_to_repr(cap: usize) -> usize {
    let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
    cmp::min(
        width,
        MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
    )
}

fn original_capacity_from_repr(repr: usize) -> usize {
    if repr == 0 {
        return 0;
    }

    1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
}
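
// A few illustrative round trips through the two functions above (a sketch,
// checkable against the formulas; values assume the 64-bit PTR_WIDTH): a
// capacity of 1024 maps to repr 1 and decodes back to 1024; 3000 maps to
// repr 2 and decodes down to 2048 (the encoding rounds down to a power of
// two); anything at or above 64kb saturates at repr 7, which decodes to
// 65536.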

/*
#[test]
fn test_original_capacity_to_repr() {
    assert_eq!(original_capacity_to_repr(0), 0);

    let max_width = 32;

    for width in 1..(max_width + 1) {
        let cap = 1 << width - 1;

        let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
            0
        } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
            width - MIN_ORIGINAL_CAPACITY_WIDTH
        } else {
            MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
        };

        assert_eq!(original_capacity_to_repr(cap), expected);

        if width > 1 {
            assert_eq!(original_capacity_to_repr(cap + 1), expected);
        }

        // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
        if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
            assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
            assert_eq!(original_capacity_to_repr(cap + 76), expected);
        } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
            assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
            assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
        }
    }
}

#[test]
fn test_original_capacity_from_repr() {
    assert_eq!(0, original_capacity_from_repr(0));

    let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;

    assert_eq!(min_cap, original_capacity_from_repr(1));
    assert_eq!(min_cap * 2, original_capacity_from_repr(2));
    assert_eq!(min_cap * 4, original_capacity_from_repr(3));
    assert_eq!(min_cap * 8, original_capacity_from_repr(4));
    assert_eq!(min_cap * 16, original_capacity_from_repr(5));
    assert_eq!(min_cap * 32, original_capacity_from_repr(6));
    assert_eq!(min_cap * 64, original_capacity_from_repr(7));
}
*/

unsafe impl Send for BytesMut {}
unsafe impl Sync for BytesMut {}

/*
 *
 * ===== PartialEq / PartialOrd =====
 *
 */

impl PartialEq<[u8]> for BytesMut {
    fn eq(&self, other: &[u8]) -> bool {
        &**self == other
    }
}

impl PartialOrd<[u8]> for BytesMut {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other)
    }
}

impl PartialEq<BytesMut> for [u8] {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for [u8] {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for BytesMut {
    fn eq(&self, other: &str) -> bool {
        &**self == other.as_bytes()
    }
}

impl PartialOrd<str> for BytesMut {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other.as_bytes())
    }
}

impl PartialEq<BytesMut> for str {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for str {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for BytesMut {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<Vec<u8>> for BytesMut {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        (**self).partial_cmp(&other[..])
    }
}

impl PartialEq<BytesMut> for Vec<u8> {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for Vec<u8> {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<String> for BytesMut {
    fn eq(&self, other: &String) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<String> for BytesMut {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        (**self).partial_cmp(other.as_bytes())
    }
}

impl PartialEq<BytesMut> for String {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for String {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
where
    BytesMut: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
where
    BytesMut: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(*other)
    }
}

impl PartialEq<BytesMut> for &[u8] {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for &[u8] {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<BytesMut> for &str {
    fn eq(&self, other: &BytesMut) -> bool {
        *other == *self
    }
}

impl PartialOrd<BytesMut> for &str {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        other.partial_cmp(self)
    }
}

impl PartialEq<BytesMut> for Bytes {
    fn eq(&self, other: &BytesMut) -> bool {
        other[..] == self[..]
    }
}

impl PartialEq<Bytes> for BytesMut {
    fn eq(&self, other: &Bytes) -> bool {
        other[..] == self[..]
    }
}

impl From<BytesMut> for Vec<u8> {
    fn from(mut bytes: BytesMut) -> Self {
        let kind = bytes.kind();

        let mut vec = if kind == KIND_VEC {
            unsafe {
                let (off, _) = bytes.get_vec_pos();
                rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
            }
        } else if kind == KIND_ARC {
            let shared = bytes.data as *mut Shared;

            if unsafe { (*shared).is_unique() } {
                let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new());

                unsafe { release_shared(shared) };

                vec
            } else {
                return bytes.deref().to_vec();
            }
        } else {
            return bytes.deref().to_vec();
        };

        let len = bytes.len;

        unsafe {
            ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
            vec.set_len(len);
        }

        mem::forget(bytes);

        vec
    }
}

#[inline]
fn vptr(ptr: *mut u8) -> NonNull<u8> {
    if cfg!(debug_assertions) {
        NonNull::new(ptr).expect("Vec pointer should be non-null")
    } else {
        unsafe { NonNull::new_unchecked(ptr) }
    }
}

/// Returns a dangling pointer with the given address. This is used to store
/// integer data in pointer fields.
///
/// It is equivalent to `addr as *mut T`, but that cast fails on miri when
/// strict provenance checking is enabled.
#[inline]
fn invalid_ptr<T>(addr: usize) -> *mut T {
    let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
    debug_assert_eq!(ptr as usize, addr);
    ptr.cast::<T>()
}

/// Precondition: dst >= original
///
/// The following line is equivalent to:
///
/// ```rust,ignore
/// self.ptr.as_ptr().offset_from(ptr) as usize;
/// ```
///
/// But because our minimum supported Rust version is 1.39 and `offset_from`
/// was only stabilized in 1.47, we cannot use it.
#[inline]
fn offset_from(dst: *mut u8, original: *mut u8) -> usize {
    debug_assert!(dst >= original);

    dst as usize - original as usize
}

unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
    let ptr = ptr.offset(-(off as isize));
    len += off;
    cap += off;

    Vec::from_raw_parts(ptr, len, cap)
}

// ===== impl SharedVtable =====

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_v_clone,
    to_vec: shared_v_to_vec,
    drop: shared_v_drop,
};

unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed) as *mut Shared;
    increment_shared(shared);

    let data = AtomicPtr::new(shared as *mut ());
    Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
}

unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let shared: *mut Shared = data.load(Ordering::Relaxed).cast();

    if (*shared).is_unique() {
        let shared = &mut *shared;

        // Drop shared
        let mut vec = mem::replace(&mut shared.vec, Vec::new());
        release_shared(shared);

        // Copy back buffer
        ptr::copy(ptr, vec.as_mut_ptr(), len);
        vec.set_len(len);

        vec
    } else {
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        v
    }
}

unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(*shared as *mut Shared);
    });
}

// compile-fails

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split_to(6);
/// }
/// ```
fn _split_to_must_use() {}

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split_off(6);
/// }
/// ```
fn _split_off_must_use() {}

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split();
/// }
/// ```
fn _split_must_use() {}

// fuzz tests
#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::BytesMut;
    use crate::Bytes;

    #[test]
    fn bytes_mut_cloning_frozen() {
        loom::model(|| {
            let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze();
            let addr = a.as_ptr() as usize;

            // test the Bytes::clone is Sync by putting it in an Arc
            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}