Auto merge of #102991 - Sp00ph:master, r=scottmcm

Update VecDeque implementation to use head+len instead of head+tail

(See #99805)

This changes `alloc::collections::VecDeque`'s internal representation from using head and tail indices to using a head index and a length field. It has a few advantages over the current design:
* It allows the buffer to be of length 0, which means `VecDeque::new` no longer has to allocate and could be changed to a `const fn`
* It allows the `VecDeque` to fill the buffer completely, unlike the old implementation, which always had to leave a free space
* It removes the restriction for the size to be a power of two, allowing it to properly `shrink_to_fit`, unlike the old `VecDeque`
* The above points also combine to allow the `Vec<T> -> VecDeque<T>` conversion to be very cheap and guaranteed O(1). I mention this in the `From<Vec<T>>` impl, but it's not a strong guarantee just yet, as that would likely need some form of API change proposal.

All the tests seem to pass for the new `VecDeque`, with some slight adjustments.

r? `@scottmcm`
This commit is contained in:
bors 2022-11-28 10:39:47 +00:00
commit 69df0f2c2f
14 changed files with 953 additions and 1313 deletions

View File

@ -1,12 +1,12 @@
use core::fmt;
use core::iter::FusedIterator; use core::iter::FusedIterator;
use core::marker::PhantomData; use core::marker::PhantomData;
use core::mem::{self, MaybeUninit}; use core::mem::{self, SizedTypeProperties};
use core::ptr::{self, NonNull}; use core::ptr::NonNull;
use core::{fmt, ptr};
use crate::alloc::{Allocator, Global}; use crate::alloc::{Allocator, Global};
use super::{count, wrap_index, VecDeque}; use super::VecDeque;
/// A draining iterator over the elements of a `VecDeque`. /// A draining iterator over the elements of a `VecDeque`.
/// ///
@ -20,26 +20,70 @@ pub struct Drain<
T: 'a, T: 'a,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> { > {
after_tail: usize, // We can't just use a &mut VecDeque<T, A>, as that would make Drain invariant over T
after_head: usize, // and we want it to be covariant instead
ring: NonNull<[T]>,
tail: usize,
head: usize,
deque: NonNull<VecDeque<T, A>>, deque: NonNull<VecDeque<T, A>>,
_phantom: PhantomData<&'a T>, // drain_start is stored in deque.len
drain_len: usize,
// index into the logical array, not the physical one (always lies in [0..deque.len))
idx: usize,
// number of elements after the drain range
tail_len: usize,
remaining: usize,
// Needed to make Drain covariant over T
_marker: PhantomData<&'a T>,
} }
impl<'a, T, A: Allocator> Drain<'a, T, A> { impl<'a, T, A: Allocator> Drain<'a, T, A> {
pub(super) unsafe fn new( pub(super) unsafe fn new(
after_tail: usize, deque: &'a mut VecDeque<T, A>,
after_head: usize, drain_start: usize,
ring: &'a [MaybeUninit<T>], drain_len: usize,
tail: usize,
head: usize,
deque: NonNull<VecDeque<T, A>>,
) -> Self { ) -> Self {
let ring = unsafe { NonNull::new_unchecked(ring as *const [MaybeUninit<T>] as *mut _) }; let orig_len = mem::replace(&mut deque.len, drain_start);
Drain { after_tail, after_head, ring, tail, head, deque, _phantom: PhantomData } let tail_len = orig_len - drain_start - drain_len;
Drain {
deque: NonNull::from(deque),
drain_len,
idx: drain_start,
tail_len,
remaining: drain_len,
_marker: PhantomData,
}
}
// Only returns pointers to the slices, as that's
// all we need to drop them. May only be called if `self.remaining != 0`.
unsafe fn as_slices(&self) -> (*mut [T], *mut [T]) {
unsafe {
let deque = self.deque.as_ref();
// FIXME: This is doing almost exactly the same thing as the else branch in `VecDeque::slice_ranges`.
// Unfortunately, we can't just call `slice_ranges` here, as the deque's `len` is currently
// just `drain_start`, so the range check would (almost) always panic. Between temporarily
// adjusting the deques `len` to call `slice_ranges`, and just copy pasting the `slice_ranges`
// implementation, this seemed like the less hacky solution, though it might be good to
// find a better one in the future.
// because `self.remaining != 0`, we know that `self.idx < deque.original_len`, so it's a valid
// logical index.
let wrapped_start = deque.to_physical_idx(self.idx);
let head_len = deque.capacity() - wrapped_start;
let (a_range, b_range) = if head_len >= self.remaining {
(wrapped_start..wrapped_start + self.remaining, 0..0)
} else {
let tail_len = self.remaining - head_len;
(wrapped_start..deque.capacity(), 0..tail_len)
};
// SAFETY: the range `self.idx..self.idx+self.remaining` lies strictly inside
// the range `0..deque.original_len`. because of this, and because of the fact
// that we acquire `a_range` and `b_range` exactly like `slice_ranges` would,
// it's guaranteed that `a_range` and `b_range` represent valid ranges into
// the deques buffer.
(deque.buffer_range(a_range), deque.buffer_range(b_range))
}
} }
} }
@ -47,11 +91,10 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> { impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Drain") f.debug_tuple("Drain")
.field(&self.after_tail) .field(&self.drain_len)
.field(&self.after_head) .field(&self.idx)
.field(&self.ring) .field(&self.tail_len)
.field(&self.tail) .field(&self.remaining)
.field(&self.head)
.finish() .finish()
} }
} }
@ -68,57 +111,81 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> { impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
fn drop(&mut self) { fn drop(&mut self) {
self.0.for_each(drop); if self.0.remaining != 0 {
unsafe {
// SAFETY: We just checked that `self.remaining != 0`.
let (front, back) = self.0.as_slices();
ptr::drop_in_place(front);
ptr::drop_in_place(back);
}
}
let source_deque = unsafe { self.0.deque.as_mut() }; let source_deque = unsafe { self.0.deque.as_mut() };
// T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head let drain_start = source_deque.len();
// let drain_len = self.0.drain_len;
// T t h H let drain_end = drain_start + drain_len;
// [. . . o o x x o o . . .]
//
let orig_tail = source_deque.tail;
let drain_tail = source_deque.head;
let drain_head = self.0.after_tail;
let orig_head = self.0.after_head;
let tail_len = count(orig_tail, drain_tail, source_deque.cap()); let orig_len = self.0.tail_len + drain_end;
let head_len = count(drain_head, orig_head, source_deque.cap());
// Restore the original head value if T::IS_ZST {
source_deque.head = orig_head; // no need to copy around any memory if T is a ZST
source_deque.len = orig_len - drain_len;
return;
}
match (tail_len, head_len) { let head_len = drain_start;
let tail_len = self.0.tail_len;
match (head_len, tail_len) {
(0, 0) => { (0, 0) => {
source_deque.head = 0; source_deque.head = 0;
source_deque.tail = 0; source_deque.len = 0;
} }
(0, _) => { (0, _) => {
source_deque.tail = drain_head; source_deque.head = source_deque.to_physical_idx(drain_len);
source_deque.len = orig_len - drain_len;
} }
(_, 0) => { (_, 0) => {
source_deque.head = drain_tail; source_deque.len = orig_len - drain_len;
} }
_ => unsafe { _ => unsafe {
if tail_len <= head_len { if head_len <= tail_len {
source_deque.tail = source_deque.wrap_sub(drain_head, tail_len); source_deque.wrap_copy(
source_deque.wrap_copy(source_deque.tail, orig_tail, tail_len); source_deque.head,
source_deque.to_physical_idx(drain_len),
head_len,
);
source_deque.head = source_deque.to_physical_idx(drain_len);
source_deque.len = orig_len - drain_len;
} else { } else {
source_deque.head = source_deque.wrap_add(drain_tail, head_len); source_deque.wrap_copy(
source_deque.wrap_copy(drain_tail, drain_head, head_len); source_deque.to_physical_idx(head_len + drain_len),
source_deque.to_physical_idx(head_len),
tail_len,
);
source_deque.len = orig_len - drain_len;
} }
}, },
} }
} }
} }
while let Some(item) = self.next() { let guard = DropGuard(self);
let guard = DropGuard(self); if guard.0.remaining != 0 {
drop(item); unsafe {
mem::forget(guard); // SAFETY: We just checked that `self.remaining != 0`.
let (front, back) = guard.0.as_slices();
// since idx is a logical index, we don't need to worry about wrapping.
guard.0.idx += front.len();
guard.0.remaining -= front.len();
ptr::drop_in_place(front);
guard.0.remaining = 0;
ptr::drop_in_place(back);
}
} }
DropGuard(self); // Dropping `guard` handles moving the remaining elements into place.
} }
} }
@ -128,20 +195,18 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
#[inline] #[inline]
fn next(&mut self) -> Option<T> { fn next(&mut self) -> Option<T> {
if self.tail == self.head { if self.remaining == 0 {
return None; return None;
} }
let tail = self.tail; let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx) };
self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len()); self.idx += 1;
// Safety: self.remaining -= 1;
// - `self.tail` in a ring buffer is always a valid index. Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
// - `self.head` and `self.tail` equality is checked above.
unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(tail))) }
} }
#[inline] #[inline]
fn size_hint(&self) -> (usize, Option<usize>) { fn size_hint(&self) -> (usize, Option<usize>) {
let len = count(self.tail, self.head, self.ring.len()); let len = self.remaining;
(len, Some(len)) (len, Some(len))
} }
} }
@ -150,14 +215,12 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> { impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
#[inline] #[inline]
fn next_back(&mut self) -> Option<T> { fn next_back(&mut self) -> Option<T> {
if self.tail == self.head { if self.remaining == 0 {
return None; return None;
} }
self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len()); self.remaining -= 1;
// Safety: let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx + self.remaining) };
// - `self.head` in a ring buffer is always a valid index. Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
// - `self.head` and `self.tail` equality is checked above.
unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(self.head))) }
} }
} }

View File

@ -1,9 +1,6 @@
use core::fmt;
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce}; use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
use core::mem::MaybeUninit;
use core::ops::Try; use core::ops::Try;
use core::{fmt, mem, slice};
use super::{count, wrap_index, RingSlices};
/// An iterator over the elements of a `VecDeque`. /// An iterator over the elements of a `VecDeque`.
/// ///
@ -13,30 +10,20 @@ use super::{count, wrap_index, RingSlices};
/// [`iter`]: super::VecDeque::iter /// [`iter`]: super::VecDeque::iter
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> { pub struct Iter<'a, T: 'a> {
ring: &'a [MaybeUninit<T>], i1: slice::Iter<'a, T>,
tail: usize, i2: slice::Iter<'a, T>,
head: usize,
} }
impl<'a, T> Iter<'a, T> { impl<'a, T> Iter<'a, T> {
pub(super) fn new(ring: &'a [MaybeUninit<T>], tail: usize, head: usize) -> Self { pub(super) fn new(i1: slice::Iter<'a, T>, i2: slice::Iter<'a, T>) -> Self {
Iter { ring, tail, head } Self { i1, i2 }
} }
} }
#[stable(feature = "collection_debug", since = "1.17.0")] #[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> { impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); f.debug_tuple("Iter").field(&self.i1.as_slice()).field(&self.i2.as_slice()).finish()
// Safety:
// - `self.head` and `self.tail` in a ring buffer are always valid indices.
// - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
unsafe {
f.debug_tuple("Iter")
.field(&MaybeUninit::slice_assume_init_ref(front))
.field(&MaybeUninit::slice_assume_init_ref(back))
.finish()
}
} }
} }
@ -44,7 +31,7 @@ impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Iter<'_, T> { impl<T> Clone for Iter<'_, T> {
fn clone(&self) -> Self { fn clone(&self) -> Self {
Iter { ring: self.ring, tail: self.tail, head: self.head } Iter { i1: self.i1.clone(), i2: self.i2.clone() }
} }
} }
@ -54,72 +41,50 @@ impl<'a, T> Iterator for Iter<'a, T> {
#[inline] #[inline]
fn next(&mut self) -> Option<&'a T> { fn next(&mut self) -> Option<&'a T> {
if self.tail == self.head { match self.i1.next() {
return None; Some(val) => Some(val),
None => {
// most of the time, the iterator will either always
// call next(), or always call next_back(). By swapping
// the iterators once the first one is empty, we ensure
// that the first branch is taken as often as possible,
// without sacrificing correctness, as i1 is empty anyways
mem::swap(&mut self.i1, &mut self.i2);
self.i1.next()
}
} }
let tail = self.tail; }
self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
// Safety: fn advance_by(&mut self, n: usize) -> Result<(), usize> {
// - `self.tail` in a ring buffer is always a valid index. let m = match self.i1.advance_by(n) {
// - `self.head` and `self.tail` equality is checked above. Ok(_) => return Ok(()),
unsafe { Some(self.ring.get_unchecked(tail).assume_init_ref()) } Err(m) => m,
};
mem::swap(&mut self.i1, &mut self.i2);
self.i1.advance_by(n - m).map_err(|o| o + m)
} }
#[inline] #[inline]
fn size_hint(&self) -> (usize, Option<usize>) { fn size_hint(&self) -> (usize, Option<usize>) {
let len = count(self.tail, self.head, self.ring.len()); let len = self.len();
(len, Some(len)) (len, Some(len))
} }
fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc fn fold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where where
F: FnMut(Acc, Self::Item) -> Acc, F: FnMut(Acc, Self::Item) -> Acc,
{ {
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); let accum = self.i1.fold(accum, &mut f);
// Safety: self.i2.fold(accum, &mut f)
// - `self.head` and `self.tail` in a ring buffer are always valid indices.
// - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
unsafe {
accum = MaybeUninit::slice_assume_init_ref(front).iter().fold(accum, &mut f);
MaybeUninit::slice_assume_init_ref(back).iter().fold(accum, &mut f)
}
} }
fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where where
Self: Sized,
F: FnMut(B, Self::Item) -> R, F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>, R: Try<Output = B>,
{ {
let (mut iter, final_res); let acc = self.i1.try_fold(init, &mut f)?;
if self.tail <= self.head { self.i2.try_fold(acc, &mut f)
// Safety: single slice self.ring[self.tail..self.head] is initialized.
iter = unsafe { MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]) }
.iter();
final_res = iter.try_fold(init, &mut f);
} else {
// Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
let (front, back) = self.ring.split_at(self.tail);
let mut back_iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
let res = back_iter.try_fold(init, &mut f);
let len = self.ring.len();
self.tail = (self.ring.len() - back_iter.len()) & (len - 1);
iter = unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
final_res = iter.try_fold(res?, &mut f);
}
self.tail = self.head - iter.len();
final_res
}
fn nth(&mut self, n: usize) -> Option<Self::Item> {
if n >= count(self.tail, self.head, self.ring.len()) {
self.tail = self.head;
None
} else {
self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
self.next()
}
} }
#[inline] #[inline]
@ -132,8 +97,12 @@ impl<'a, T> Iterator for Iter<'a, T> {
// Safety: The TrustedRandomAccess contract requires that callers only pass an index // Safety: The TrustedRandomAccess contract requires that callers only pass an index
// that is in bounds. // that is in bounds.
unsafe { unsafe {
let idx = wrap_index(self.tail.wrapping_add(idx), self.ring.len()); let i1_len = self.i1.len();
self.ring.get_unchecked(idx).assume_init_ref() if idx < i1_len {
self.i1.__iterator_get_unchecked(idx)
} else {
self.i2.__iterator_get_unchecked(idx - i1_len)
}
} }
} }
} }
@ -142,63 +111,56 @@ impl<'a, T> Iterator for Iter<'a, T> {
impl<'a, T> DoubleEndedIterator for Iter<'a, T> { impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
#[inline] #[inline]
fn next_back(&mut self) -> Option<&'a T> { fn next_back(&mut self) -> Option<&'a T> {
if self.tail == self.head { match self.i2.next_back() {
return None; Some(val) => Some(val),
None => {
// most of the time, the iterator will either always
// call next(), or always call next_back(). By swapping
// the iterators once the second one is empty, we ensure
// that the first branch is taken as often as possible,
// without sacrificing correctness, as i2 is empty anyways
mem::swap(&mut self.i1, &mut self.i2);
self.i2.next_back()
}
} }
self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
// Safety:
// - `self.head` in a ring buffer is always a valid index.
// - `self.head` and `self.tail` equality is checked above.
unsafe { Some(self.ring.get_unchecked(self.head).assume_init_ref()) }
} }
fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
let m = match self.i2.advance_back_by(n) {
Ok(_) => return Ok(()),
Err(m) => m,
};
mem::swap(&mut self.i1, &mut self.i2);
self.i2.advance_back_by(n - m).map_err(|o| m + o)
}
fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where where
F: FnMut(Acc, Self::Item) -> Acc, F: FnMut(Acc, Self::Item) -> Acc,
{ {
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); let accum = self.i2.rfold(accum, &mut f);
// Safety: self.i1.rfold(accum, &mut f)
// - `self.head` and `self.tail` in a ring buffer are always valid indices.
// - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
unsafe {
accum = MaybeUninit::slice_assume_init_ref(back).iter().rfold(accum, &mut f);
MaybeUninit::slice_assume_init_ref(front).iter().rfold(accum, &mut f)
}
} }
fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
where where
Self: Sized,
F: FnMut(B, Self::Item) -> R, F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>, R: Try<Output = B>,
{ {
let (mut iter, final_res); let acc = self.i2.try_rfold(init, &mut f)?;
if self.tail <= self.head { self.i1.try_rfold(acc, &mut f)
// Safety: single slice self.ring[self.tail..self.head] is initialized.
iter = unsafe {
MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]).iter()
};
final_res = iter.try_rfold(init, &mut f);
} else {
// Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
let (front, back) = self.ring.split_at(self.tail);
let mut front_iter =
unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
let res = front_iter.try_rfold(init, &mut f);
self.head = front_iter.len();
iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
final_res = iter.try_rfold(res?, &mut f);
}
self.head = self.tail + iter.len();
final_res
} }
} }
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Iter<'_, T> { impl<T> ExactSizeIterator for Iter<'_, T> {
fn len(&self) -> usize {
self.i1.len() + self.i2.len()
}
fn is_empty(&self) -> bool { fn is_empty(&self) -> bool {
self.head == self.tail self.i1.is_empty() && self.i2.is_empty()
} }
} }

View File

@ -1,8 +1,6 @@
use core::fmt;
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce}; use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
use core::marker::PhantomData; use core::ops::Try;
use core::{fmt, mem, slice};
use super::{count, wrap_index, RingSlices};
/// A mutable iterator over the elements of a `VecDeque`. /// A mutable iterator over the elements of a `VecDeque`.
/// ///
@ -12,39 +10,20 @@ use super::{count, wrap_index, RingSlices};
/// [`iter_mut`]: super::VecDeque::iter_mut /// [`iter_mut`]: super::VecDeque::iter_mut
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> { pub struct IterMut<'a, T: 'a> {
// Internal safety invariant: the entire slice is dereferenceable. i1: slice::IterMut<'a, T>,
ring: *mut [T], i2: slice::IterMut<'a, T>,
tail: usize,
head: usize,
phantom: PhantomData<&'a mut [T]>,
} }
impl<'a, T> IterMut<'a, T> { impl<'a, T> IterMut<'a, T> {
pub(super) unsafe fn new( pub(super) fn new(i1: slice::IterMut<'a, T>, i2: slice::IterMut<'a, T>) -> Self {
ring: *mut [T], Self { i1, i2 }
tail: usize,
head: usize,
phantom: PhantomData<&'a mut [T]>,
) -> Self {
IterMut { ring, tail, head, phantom }
} }
} }
// SAFETY: we do nothing thread-local and there is no interior mutability,
// so the usual structural `Send`/`Sync` apply.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Send> Send for IterMut<'_, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
#[stable(feature = "collection_debug", since = "1.17.0")] #[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> { impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); f.debug_tuple("IterMut").field(&self.i1.as_slice()).field(&self.i2.as_slice()).finish()
// SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
// The `IterMut` invariant also ensures everything is dereferenceable.
let (front, back) = unsafe { (&*front, &*back) };
f.debug_tuple("IterMut").field(&front).field(&back).finish()
} }
} }
@ -54,44 +33,50 @@ impl<'a, T> Iterator for IterMut<'a, T> {
#[inline] #[inline]
fn next(&mut self) -> Option<&'a mut T> { fn next(&mut self) -> Option<&'a mut T> {
if self.tail == self.head { match self.i1.next() {
return None; Some(val) => Some(val),
None => {
// most of the time, the iterator will either always
// call next(), or always call next_back(). By swapping
// the iterators once the first one is empty, we ensure
// that the first branch is taken as often as possible,
// without sacrificing correctness, as i1 is empty anyways
mem::swap(&mut self.i1, &mut self.i2);
self.i1.next()
}
} }
let tail = self.tail; }
self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
unsafe { fn advance_by(&mut self, n: usize) -> Result<(), usize> {
let elem = self.ring.get_unchecked_mut(tail); let m = match self.i1.advance_by(n) {
Some(&mut *elem) Ok(_) => return Ok(()),
} Err(m) => m,
};
mem::swap(&mut self.i1, &mut self.i2);
self.i1.advance_by(n - m).map_err(|o| o + m)
} }
#[inline] #[inline]
fn size_hint(&self) -> (usize, Option<usize>) { fn size_hint(&self) -> (usize, Option<usize>) {
let len = count(self.tail, self.head, self.ring.len()); let len = self.len();
(len, Some(len)) (len, Some(len))
} }
fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc fn fold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where where
F: FnMut(Acc, Self::Item) -> Acc, F: FnMut(Acc, Self::Item) -> Acc,
{ {
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); let accum = self.i1.fold(accum, &mut f);
// SAFETY: these are the elements we have not handed out yet, so aliasing is fine. self.i2.fold(accum, &mut f)
// The `IterMut` invariant also ensures everything is dereferenceable.
let (front, back) = unsafe { (&mut *front, &mut *back) };
accum = front.iter_mut().fold(accum, &mut f);
back.iter_mut().fold(accum, &mut f)
} }
fn nth(&mut self, n: usize) -> Option<Self::Item> { fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
if n >= count(self.tail, self.head, self.ring.len()) { where
self.tail = self.head; F: FnMut(B, Self::Item) -> R,
None R: Try<Output = B>,
} else { {
self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len()); let acc = self.i1.try_fold(init, &mut f)?;
self.next() self.i2.try_fold(acc, &mut f)
}
} }
#[inline] #[inline]
@ -104,8 +89,12 @@ impl<'a, T> Iterator for IterMut<'a, T> {
// Safety: The TrustedRandomAccess contract requires that callers only pass an index // Safety: The TrustedRandomAccess contract requires that callers only pass an index
// that is in bounds. // that is in bounds.
unsafe { unsafe {
let idx = wrap_index(self.tail.wrapping_add(idx), self.ring.len()); let i1_len = self.i1.len();
&mut *self.ring.get_unchecked_mut(idx) if idx < i1_len {
self.i1.__iterator_get_unchecked(idx)
} else {
self.i2.__iterator_get_unchecked(idx - i1_len)
}
} }
} }
} }
@ -114,34 +103,56 @@ impl<'a, T> Iterator for IterMut<'a, T> {
impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
#[inline] #[inline]
fn next_back(&mut self) -> Option<&'a mut T> { fn next_back(&mut self) -> Option<&'a mut T> {
if self.tail == self.head { match self.i2.next_back() {
return None; Some(val) => Some(val),
} None => {
self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len()); // most of the time, the iterator will either always
// call next(), or always call next_back(). By swapping
unsafe { // the iterators once the first one is empty, we ensure
let elem = self.ring.get_unchecked_mut(self.head); // that the first branch is taken as often as possible,
Some(&mut *elem) // without sacrificing correctness, as i2 is empty anyways
mem::swap(&mut self.i1, &mut self.i2);
self.i2.next_back()
}
} }
} }
fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
let m = match self.i2.advance_back_by(n) {
Ok(_) => return Ok(()),
Err(m) => m,
};
mem::swap(&mut self.i1, &mut self.i2);
self.i2.advance_back_by(n - m).map_err(|o| m + o)
}
fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where where
F: FnMut(Acc, Self::Item) -> Acc, F: FnMut(Acc, Self::Item) -> Acc,
{ {
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); let accum = self.i2.rfold(accum, &mut f);
// SAFETY: these are the elements we have not handed out yet, so aliasing is fine. self.i1.rfold(accum, &mut f)
// The `IterMut` invariant also ensures everything is dereferenceable. }
let (front, back) = unsafe { (&mut *front, &mut *back) };
accum = back.iter_mut().rfold(accum, &mut f); fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
front.iter_mut().rfold(accum, &mut f) where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let acc = self.i2.try_rfold(init, &mut f)?;
self.i1.try_rfold(acc, &mut f)
} }
} }
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IterMut<'_, T> { impl<T> ExactSizeIterator for IterMut<'_, T> {
fn len(&self) -> usize {
self.i1.len() + self.i2.len()
}
fn is_empty(&self) -> bool { fn is_empty(&self) -> bool {
self.head == self.tail self.i1.is_empty() && self.i2.is_empty()
} }
} }

File diff suppressed because it is too large Load Diff

View File

@ -1,67 +0,0 @@
use core::cmp::{self};
use core::mem::replace;
use crate::alloc::Allocator;
use super::VecDeque;
/// PairSlices pairs up equal length slice parts of two deques
///
/// For example, given deques "A" and "B" with the following division into slices:
///
/// A: [0 1 2] [3 4 5]
/// B: [a b] [c d e]
///
/// It produces the following sequence of matching slices:
///
/// ([0 1], [a b])
/// (\[2\], \[c\])
/// ([3 4], [d e])
///
/// and the uneven remainder of either A or B is skipped.
pub struct PairSlices<'a, 'b, T> {
a0: &'a mut [T],
a1: &'a mut [T],
b0: &'b [T],
b1: &'b [T],
}
impl<'a, 'b, T> PairSlices<'a, 'b, T> {
pub fn from<A: Allocator>(to: &'a mut VecDeque<T, A>, from: &'b VecDeque<T, A>) -> Self {
let (a0, a1) = to.as_mut_slices();
let (b0, b1) = from.as_slices();
PairSlices { a0, a1, b0, b1 }
}
pub fn has_remainder(&self) -> bool {
!self.b0.is_empty()
}
pub fn remainder(self) -> impl Iterator<Item = &'b [T]> {
IntoIterator::into_iter([self.b0, self.b1])
}
}
impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> {
type Item = (&'a mut [T], &'b [T]);
fn next(&mut self) -> Option<Self::Item> {
// Get next part length
let part = cmp::min(self.a0.len(), self.b0.len());
if part == 0 {
return None;
}
let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part);
let (q0, q1) = self.b0.split_at(part);
// Move a1 into a0, if it's empty (and b1, b0 the same way).
self.a0 = p1;
self.b0 = q1;
if self.a0.is_empty() {
self.a0 = replace(&mut self.a1, &mut []);
}
if self.b0.is_empty() {
self.b0 = replace(&mut self.b1, &[]);
}
Some((p0, q0))
}
}

View File

@ -1,56 +0,0 @@
use core::ptr::{self};
/// Returns the two slices that cover the `VecDeque`'s valid range
pub trait RingSlices: Sized {
fn slice(self, from: usize, to: usize) -> Self;
fn split_at(self, i: usize) -> (Self, Self);
fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
let contiguous = tail <= head;
if contiguous {
let (empty, buf) = buf.split_at(0);
(buf.slice(tail, head), empty)
} else {
let (mid, right) = buf.split_at(tail);
let (left, _) = mid.split_at(head);
(right, left)
}
}
}
impl<T> RingSlices for &[T] {
fn slice(self, from: usize, to: usize) -> Self {
&self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at(i)
}
}
impl<T> RingSlices for &mut [T] {
fn slice(self, from: usize, to: usize) -> Self {
&mut self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at_mut(i)
}
}
impl<T> RingSlices for *mut [T] {
fn slice(self, from: usize, to: usize) -> Self {
assert!(from <= to && to < self.len());
// Not using `get_unchecked_mut` to keep this a safe operation.
let len = to - from;
ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len)
}
fn split_at(self, mid: usize) -> (Self, Self) {
let len = self.len();
let ptr = self.as_mut_ptr();
assert!(mid <= len);
(
ptr::slice_from_raw_parts_mut(ptr, mid),
ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid),
)
}
}

View File

@ -1,6 +1,6 @@
use crate::alloc::Allocator; use crate::alloc::Allocator;
use crate::vec; use crate::vec;
use core::iter::{ByRefSized, TrustedLen}; use core::iter::TrustedLen;
use core::slice; use core::slice;
use super::VecDeque; use super::VecDeque;
@ -17,19 +17,33 @@ where
default fn spec_extend(&mut self, mut iter: I) { default fn spec_extend(&mut self, mut iter: I) {
// This function should be the moral equivalent of: // This function should be the moral equivalent of:
// //
// for item in iter { // for item in iter {
// self.push_back(item); // self.push_back(item);
// } // }
while let Some(element) = iter.next() {
if self.len() == self.capacity() {
let (lower, _) = iter.size_hint();
self.reserve(lower.saturating_add(1));
}
let head = self.head; // May only be called if `deque.len() < deque.capacity()`
self.head = self.wrap_add(self.head, 1); unsafe fn push_unchecked<T, A: Allocator>(deque: &mut VecDeque<T, A>, element: T) {
unsafe { // SAFETY: Because of the precondition, it's guaranteed that there is space
self.buffer_write(head, element); // in the logical array after the last element.
unsafe { deque.buffer_write(deque.to_physical_idx(deque.len), element) };
// This can't overflow because `deque.len() < deque.capacity() <= usize::MAX`.
deque.len += 1;
}
while let Some(element) = iter.next() {
let (lower, _) = iter.size_hint();
self.reserve(lower.saturating_add(1));
// SAFETY: We just reserved space for at least one element.
unsafe { push_unchecked(self, element) };
// Inner loop to avoid repeatedly calling `reserve`.
while self.len < self.capacity() {
let Some(element) = iter.next() else {
return;
};
// SAFETY: The loop condition guarantees that `self.len() < self.capacity()`.
unsafe { push_unchecked(self, element) };
} }
} }
} }
@ -39,7 +53,7 @@ impl<T, I, A: Allocator> SpecExtend<T, I> for VecDeque<T, A>
where where
I: TrustedLen<Item = T>, I: TrustedLen<Item = T>,
{ {
default fn spec_extend(&mut self, mut iter: I) { default fn spec_extend(&mut self, iter: I) {
// This is the case for a TrustedLen iterator. // This is the case for a TrustedLen iterator.
let (low, high) = iter.size_hint(); let (low, high) = iter.size_hint();
if let Some(additional) = high { if let Some(additional) = high {
@ -51,35 +65,12 @@ where
); );
self.reserve(additional); self.reserve(additional);
struct WrapAddOnDrop<'a, T, A: Allocator> { let written = unsafe {
vec_deque: &'a mut VecDeque<T, A>, self.write_iter_wrapping(self.to_physical_idx(self.len), iter, additional)
written: usize, };
}
impl<'a, T, A: Allocator> Drop for WrapAddOnDrop<'a, T, A> {
fn drop(&mut self) {
self.vec_deque.head =
self.vec_deque.wrap_add(self.vec_deque.head, self.written);
}
}
let mut wrapper = WrapAddOnDrop { vec_deque: self, written: 0 };
let head_room = wrapper.vec_deque.cap() - wrapper.vec_deque.head;
unsafe {
wrapper.vec_deque.write_iter(
wrapper.vec_deque.head,
ByRefSized(&mut iter).take(head_room),
&mut wrapper.written,
);
if additional > head_room {
wrapper.vec_deque.write_iter(0, iter, &mut wrapper.written);
}
}
debug_assert_eq!( debug_assert_eq!(
additional, wrapper.written, additional, written,
"The number of items written to VecDeque doesn't match the TrustedLen size hint" "The number of items written to VecDeque doesn't match the TrustedLen size hint"
); );
} else { } else {
@ -99,8 +90,8 @@ impl<T, A: Allocator> SpecExtend<T, vec::IntoIter<T>> for VecDeque<T, A> {
self.reserve(slice.len()); self.reserve(slice.len());
unsafe { unsafe {
self.copy_slice(self.head, slice); self.copy_slice(self.to_physical_idx(self.len), slice);
self.head = self.wrap_add(self.head, slice.len()); self.len += slice.len();
} }
iterator.forget_remaining_elements(); iterator.forget_remaining_elements();
} }
@ -125,8 +116,8 @@ where
self.reserve(slice.len()); self.reserve(slice.len());
unsafe { unsafe {
self.copy_slice(self.head, slice); self.copy_slice(self.to_physical_idx(self.len), slice);
self.head = self.wrap_add(self.head, slice.len()); self.len += slice.len();
} }
} }
} }

View File

@ -10,7 +10,7 @@ fn bench_push_back_100(b: &mut test::Bencher) {
deq.push_back(i); deq.push_back(i);
} }
deq.head = 0; deq.head = 0;
deq.tail = 0; deq.len = 0;
}) })
} }
@ -22,7 +22,7 @@ fn bench_push_front_100(b: &mut test::Bencher) {
deq.push_front(i); deq.push_front(i);
} }
deq.head = 0; deq.head = 0;
deq.tail = 0; deq.len = 0;
}) })
} }
@ -35,8 +35,8 @@ fn bench_pop_back_100(b: &mut test::Bencher) {
unsafe { deq.ptr().write_bytes(0u8, size + 1) }; unsafe { deq.ptr().write_bytes(0u8, size + 1) };
b.iter(|| { b.iter(|| {
deq.head = size; deq.head = 0;
deq.tail = 0; deq.len = 100;
while !deq.is_empty() { while !deq.is_empty() {
test::black_box(deq.pop_back()); test::black_box(deq.pop_back());
} }
@ -85,8 +85,8 @@ fn bench_pop_front_100(b: &mut test::Bencher) {
unsafe { deq.ptr().write_bytes(0u8, size + 1) }; unsafe { deq.ptr().write_bytes(0u8, size + 1) };
b.iter(|| { b.iter(|| {
deq.head = size; deq.head = 0;
deq.tail = 0; deq.len = 100;
while !deq.is_empty() { while !deq.is_empty() {
test::black_box(deq.pop_front()); test::black_box(deq.pop_front());
} }
@ -105,9 +105,9 @@ fn test_swap_front_back_remove() {
for len in 0..final_len { for len in 0..final_len {
let expected: VecDeque<_> = let expected: VecDeque<_> =
if back { (0..len).collect() } else { (0..len).rev().collect() }; if back { (0..len).collect() } else { (0..len).rev().collect() };
for tail_pos in 0..usable_cap { for head_pos in 0..usable_cap {
tester.tail = tail_pos; tester.head = head_pos;
tester.head = tail_pos; tester.len = 0;
if back { if back {
for i in 0..len * 2 { for i in 0..len * 2 {
tester.push_front(i); tester.push_front(i);
@ -124,8 +124,8 @@ fn test_swap_front_back_remove() {
assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i)); assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i));
} }
} }
assert!(tester.tail < tester.cap()); assert!(tester.head <= tester.capacity());
assert!(tester.head < tester.cap()); assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected); assert_eq!(tester, expected);
} }
} }
@ -150,18 +150,18 @@ fn test_insert() {
for len in minlen..cap { for len in minlen..cap {
// 0, 1, 2, .., len - 1 // 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>(); let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..cap { for head_pos in 0..cap {
for to_insert in 0..len { for to_insert in 0..len {
tester.tail = tail_pos; tester.head = head_pos;
tester.head = tail_pos; tester.len = 0;
for i in 0..len { for i in 0..len {
if i != to_insert { if i != to_insert {
tester.push_back(i); tester.push_back(i);
} }
} }
tester.insert(to_insert, to_insert); tester.insert(to_insert, to_insert);
assert!(tester.tail < tester.cap()); assert!(tester.head <= tester.capacity());
assert!(tester.head < tester.cap()); assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected); assert_eq!(tester, expected);
} }
} }
@ -257,13 +257,14 @@ fn test_swap_panic() {
#[test] #[test]
fn test_reserve_exact() { fn test_reserve_exact() {
let mut tester: VecDeque<i32> = VecDeque::with_capacity(1); let mut tester: VecDeque<i32> = VecDeque::with_capacity(1);
assert!(tester.capacity() == 1); assert_eq!(tester.capacity(), 1);
tester.reserve_exact(50); tester.reserve_exact(50);
assert!(tester.capacity() >= 51); assert_eq!(tester.capacity(), 50);
tester.reserve_exact(40); tester.reserve_exact(40);
assert!(tester.capacity() >= 51); // reserving won't shrink the buffer
assert_eq!(tester.capacity(), 50);
tester.reserve_exact(200); tester.reserve_exact(200);
assert!(tester.capacity() >= 200); assert_eq!(tester.capacity(), 200);
} }
#[test] #[test]
@ -323,6 +324,7 @@ fn test_contains() {
#[test] #[test]
fn test_rotate_left_right() { fn test_rotate_left_right() {
let mut tester: VecDeque<_> = (1..=10).collect(); let mut tester: VecDeque<_> = (1..=10).collect();
tester.reserve(1);
assert_eq!(tester.len(), 10); assert_eq!(tester.len(), 10);
@ -463,7 +465,7 @@ fn test_binary_search_key() {
} }
#[test] #[test]
fn make_contiguous_big_tail() { fn make_contiguous_big_head() {
let mut tester = VecDeque::with_capacity(15); let mut tester = VecDeque::with_capacity(15);
for i in 0..3 { for i in 0..3 {
@ -478,14 +480,14 @@ fn make_contiguous_big_tail() {
assert_eq!(tester.capacity(), 15); assert_eq!(tester.capacity(), 15);
assert_eq!((&[9, 8, 7, 6, 5, 4, 3] as &[_], &[0, 1, 2] as &[_]), tester.as_slices()); assert_eq!((&[9, 8, 7, 6, 5, 4, 3] as &[_], &[0, 1, 2] as &[_]), tester.as_slices());
let expected_start = tester.head; let expected_start = tester.as_slices().1.len();
tester.make_contiguous(); tester.make_contiguous();
assert_eq!(tester.tail, expected_start); assert_eq!(tester.head, expected_start);
assert_eq!((&[9, 8, 7, 6, 5, 4, 3, 0, 1, 2] as &[_], &[] as &[_]), tester.as_slices()); assert_eq!((&[9, 8, 7, 6, 5, 4, 3, 0, 1, 2] as &[_], &[] as &[_]), tester.as_slices());
} }
#[test] #[test]
fn make_contiguous_big_head() { fn make_contiguous_big_tail() {
let mut tester = VecDeque::with_capacity(15); let mut tester = VecDeque::with_capacity(15);
for i in 0..8 { for i in 0..8 {
@ -499,44 +501,46 @@ fn make_contiguous_big_head() {
// 01234567......98 // 01234567......98
let expected_start = 0; let expected_start = 0;
tester.make_contiguous(); tester.make_contiguous();
assert_eq!(tester.tail, expected_start); assert_eq!(tester.head, expected_start);
assert_eq!((&[9, 8, 0, 1, 2, 3, 4, 5, 6, 7] as &[_], &[] as &[_]), tester.as_slices()); assert_eq!((&[9, 8, 0, 1, 2, 3, 4, 5, 6, 7] as &[_], &[] as &[_]), tester.as_slices());
} }
#[test] #[test]
fn make_contiguous_small_free() { fn make_contiguous_small_free() {
let mut tester = VecDeque::with_capacity(15); let mut tester = VecDeque::with_capacity(16);
for i in 'A' as u8..'I' as u8 { for i in b'A'..b'I' {
tester.push_back(i as char); tester.push_back(i as char);
} }
for i in 'I' as u8..'N' as u8 { for i in b'I'..b'N' {
tester.push_front(i as char); tester.push_front(i as char);
} }
assert_eq!(tester, ['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']);
// ABCDEFGH...MLKJI // ABCDEFGH...MLKJI
let expected_start = 0; let expected_start = 0;
tester.make_contiguous(); tester.make_contiguous();
assert_eq!(tester.tail, expected_start); assert_eq!(tester.head, expected_start);
assert_eq!( assert_eq!(
(&['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] as &[_], &[] as &[_]), (&['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] as &[_], &[] as &[_]),
tester.as_slices() tester.as_slices()
); );
tester.clear(); tester.clear();
for i in 'I' as u8..'N' as u8 { for i in b'I'..b'N' {
tester.push_back(i as char); tester.push_back(i as char);
} }
for i in 'A' as u8..'I' as u8 { for i in b'A'..b'I' {
tester.push_front(i as char); tester.push_front(i as char);
} }
// IJKLM...HGFEDCBA // IJKLM...HGFEDCBA
let expected_start = 0; let expected_start = 3;
tester.make_contiguous(); tester.make_contiguous();
assert_eq!(tester.tail, expected_start); assert_eq!(tester.head, expected_start);
assert_eq!( assert_eq!(
(&['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'I', 'J', 'K', 'L', 'M'] as &[_], &[] as &[_]), (&['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'I', 'J', 'K', 'L', 'M'] as &[_], &[] as &[_]),
tester.as_slices() tester.as_slices()
@ -545,16 +549,55 @@ fn make_contiguous_small_free() {
#[test] #[test]
fn make_contiguous_head_to_end() { fn make_contiguous_head_to_end() {
let mut dq = VecDeque::with_capacity(3); let mut tester = VecDeque::with_capacity(16);
dq.push_front('B');
dq.push_front('A'); for i in b'A'..b'L' {
dq.push_back('C'); tester.push_back(i as char);
dq.make_contiguous(); }
let expected_tail = 0;
let expected_head = 3; for i in b'L'..b'Q' {
assert_eq!(expected_tail, dq.tail); tester.push_front(i as char);
assert_eq!(expected_head, dq.head); }
assert_eq!((&['A', 'B', 'C'] as &[_], &[] as &[_]), dq.as_slices());
assert_eq!(
tester,
['P', 'O', 'N', 'M', 'L', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']
);
// ABCDEFGHIJKPONML
let expected_start = 0;
tester.make_contiguous();
assert_eq!(tester.head, expected_start);
assert_eq!(
(
&['P', 'O', 'N', 'M', 'L', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']
as &[_],
&[] as &[_]
),
tester.as_slices()
);
tester.clear();
for i in b'L'..b'Q' {
tester.push_back(i as char);
}
for i in b'A'..b'L' {
tester.push_front(i as char);
}
// LMNOPKJIHGFEDCBA
let expected_start = 0;
tester.make_contiguous();
assert_eq!(tester.head, expected_start);
assert_eq!(
(
&['K', 'J', 'I', 'H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'L', 'M', 'N', 'O', 'P']
as &[_],
&[] as &[_]
),
tester.as_slices()
);
} }
#[test] #[test]
@ -588,10 +631,10 @@ fn test_remove() {
for len in minlen..cap - 1 { for len in minlen..cap - 1 {
// 0, 1, 2, .., len - 1 // 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>(); let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..cap { for head_pos in 0..cap {
for to_remove in 0..=len { for to_remove in 0..=len {
tester.tail = tail_pos; tester.head = head_pos;
tester.head = tail_pos; tester.len = 0;
for i in 0..len { for i in 0..len {
if i == to_remove { if i == to_remove {
tester.push_back(1234); tester.push_back(1234);
@ -602,8 +645,8 @@ fn test_remove() {
tester.push_back(1234); tester.push_back(1234);
} }
tester.remove(to_remove); tester.remove(to_remove);
assert!(tester.tail < tester.cap()); assert!(tester.head <= tester.capacity());
assert!(tester.head < tester.cap()); assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected); assert_eq!(tester, expected);
} }
} }
@ -617,11 +660,11 @@ fn test_range() {
let cap = tester.capacity(); let cap = tester.capacity();
let minlen = if cfg!(miri) { cap - 1 } else { 0 }; // Miri is too slow let minlen = if cfg!(miri) { cap - 1 } else { 0 }; // Miri is too slow
for len in minlen..=cap { for len in minlen..=cap {
for tail in 0..=cap { for head in 0..=cap {
for start in 0..=len { for start in 0..=len {
for end in start..=len { for end in start..=len {
tester.tail = tail; tester.head = head;
tester.head = tail; tester.len = 0;
for i in 0..len { for i in 0..len {
tester.push_back(i); tester.push_back(i);
} }
@ -642,17 +685,17 @@ fn test_range_mut() {
let cap = tester.capacity(); let cap = tester.capacity();
for len in 0..=cap { for len in 0..=cap {
for tail in 0..=cap { for head in 0..=cap {
for start in 0..=len { for start in 0..=len {
for end in start..=len { for end in start..=len {
tester.tail = tail; tester.head = head;
tester.head = tail; tester.len = 0;
for i in 0..len { for i in 0..len {
tester.push_back(i); tester.push_back(i);
} }
let head_was = tester.head; let head_was = tester.head;
let tail_was = tester.tail; let len_was = tester.len;
// Check that we iterate over the correct values // Check that we iterate over the correct values
let range: VecDeque<_> = tester.range_mut(start..end).map(|v| *v).collect(); let range: VecDeque<_> = tester.range_mut(start..end).map(|v| *v).collect();
@ -662,8 +705,8 @@ fn test_range_mut() {
// We shouldn't have changed the capacity or made the // We shouldn't have changed the capacity or made the
// head or tail out of bounds // head or tail out of bounds
assert_eq!(tester.capacity(), cap); assert_eq!(tester.capacity(), cap);
assert_eq!(tester.tail, tail_was);
assert_eq!(tester.head, head_was); assert_eq!(tester.head, head_was);
assert_eq!(tester.len, len_was);
} }
} }
} }
@ -676,11 +719,11 @@ fn test_drain() {
let cap = tester.capacity(); let cap = tester.capacity();
for len in 0..=cap { for len in 0..=cap {
for tail in 0..=cap { for head in 0..cap {
for drain_start in 0..=len { for drain_start in 0..=len {
for drain_end in drain_start..=len { for drain_end in drain_start..=len {
tester.tail = tail; tester.head = head;
tester.head = tail; tester.len = 0;
for i in 0..len { for i in 0..len {
tester.push_back(i); tester.push_back(i);
} }
@ -693,8 +736,8 @@ fn test_drain() {
// We shouldn't have changed the capacity or made the // We shouldn't have changed the capacity or made the
// head or tail out of bounds // head or tail out of bounds
assert_eq!(tester.capacity(), cap); assert_eq!(tester.capacity(), cap);
assert!(tester.tail < tester.cap()); assert!(tester.head <= tester.capacity());
assert!(tester.head < tester.cap()); assert!(tester.len <= tester.capacity());
// We should see the correct values in the VecDeque // We should see the correct values in the VecDeque
let expected: VecDeque<_> = (0..drain_start).chain(drain_end..len).collect(); let expected: VecDeque<_> = (0..drain_start).chain(drain_end..len).collect();
@ -721,17 +764,18 @@ fn test_shrink_to_fit() {
for len in 0..=cap { for len in 0..=cap {
// 0, 1, 2, .., len - 1 // 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>(); let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..=max_cap { for head_pos in 0..=max_cap {
tester.tail = tail_pos; tester.reserve(head_pos);
tester.head = tail_pos; tester.head = head_pos;
tester.len = 0;
tester.reserve(63); tester.reserve(63);
for i in 0..len { for i in 0..len {
tester.push_back(i); tester.push_back(i);
} }
tester.shrink_to_fit(); tester.shrink_to_fit();
assert!(tester.capacity() <= cap); assert!(tester.capacity() <= cap);
assert!(tester.tail < tester.cap()); assert!(tester.head <= tester.capacity());
assert!(tester.head < tester.cap()); assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected); assert_eq!(tester, expected);
} }
} }
@ -758,17 +802,17 @@ fn test_split_off() {
// at, at + 1, .., len - 1 (may be empty) // at, at + 1, .., len - 1 (may be empty)
let expected_other = (at..).take(len - at).collect::<VecDeque<_>>(); let expected_other = (at..).take(len - at).collect::<VecDeque<_>>();
for tail_pos in 0..cap { for head_pos in 0..cap {
tester.tail = tail_pos; tester.head = head_pos;
tester.head = tail_pos; tester.len = 0;
for i in 0..len { for i in 0..len {
tester.push_back(i); tester.push_back(i);
} }
let result = tester.split_off(at); let result = tester.split_off(at);
assert!(tester.tail < tester.cap()); assert!(tester.head <= tester.capacity());
assert!(tester.head < tester.cap()); assert!(tester.len <= tester.capacity());
assert!(result.tail < result.cap()); assert!(result.head <= result.capacity());
assert!(result.head < result.cap()); assert!(result.len <= result.capacity());
assert_eq!(tester, expected_self); assert_eq!(tester, expected_self);
assert_eq!(result, expected_other); assert_eq!(result, expected_other);
} }
@ -785,16 +829,10 @@ fn test_from_vec() {
vec.extend(0..len); vec.extend(0..len);
let vd = VecDeque::from(vec.clone()); let vd = VecDeque::from(vec.clone());
assert!(vd.cap().is_power_of_two());
assert_eq!(vd.len(), vec.len()); assert_eq!(vd.len(), vec.len());
assert!(vd.into_iter().eq(vec)); assert!(vd.into_iter().eq(vec));
} }
} }
let vec = Vec::from([(); MAXIMUM_ZST_CAPACITY - 1]);
let vd = VecDeque::from(vec.clone());
assert!(vd.cap().is_power_of_two());
assert_eq!(vd.len(), vec.len());
} }
#[test] #[test]
@ -846,10 +884,6 @@ fn test_extend_impl(trusted_len: bool) {
} }
assert_eq!(self.test, self.expected); assert_eq!(self.test, self.expected);
let (a1, b1) = self.test.as_slices();
let (a2, b2) = self.expected.as_slices();
assert_eq!(a1, a2);
assert_eq!(b1, b2);
} }
fn drain<R: RangeBounds<usize> + Clone>(&mut self, range: R) { fn drain<R: RangeBounds<usize> + Clone>(&mut self, range: R) {
@ -872,7 +906,7 @@ fn test_extend_impl(trusted_len: bool) {
let mut tester = VecDequeTester::new(trusted_len); let mut tester = VecDequeTester::new(trusted_len);
// Initial capacity // Initial capacity
tester.test_extend(0..tester.remaining_capacity() - 1); tester.test_extend(0..tester.remaining_capacity());
// Grow // Grow
tester.test_extend(1024..2048); tester.test_extend(1024..2048);
@ -880,7 +914,7 @@ fn test_extend_impl(trusted_len: bool) {
// Wrap around // Wrap around
tester.drain(..128); tester.drain(..128);
tester.test_extend(0..tester.remaining_capacity() - 1); tester.test_extend(0..tester.remaining_capacity());
// Continue // Continue
tester.drain(256..); tester.drain(256..);
@ -892,16 +926,6 @@ fn test_extend_impl(trusted_len: bool) {
tester.test_extend(0..32); tester.test_extend(0..32);
} }
#[test]
#[should_panic = "capacity overflow"]
fn test_from_vec_zst_overflow() {
use crate::vec::Vec;
let vec = Vec::from([(); MAXIMUM_ZST_CAPACITY]);
let vd = VecDeque::from(vec.clone()); // no room for +1
assert!(vd.cap().is_power_of_two());
assert_eq!(vd.len(), vec.len());
}
#[test] #[test]
fn test_from_array() { fn test_from_array() {
fn test<const N: usize>() { fn test<const N: usize>() {
@ -917,7 +941,6 @@ fn test_from_array() {
assert_eq!(deq[i], i); assert_eq!(deq[i], i);
} }
assert!(deq.cap().is_power_of_two());
assert_eq!(deq.len(), N); assert_eq!(deq.len(), N);
} }
test::<0>(); test::<0>();
@ -925,11 +948,6 @@ fn test_from_array() {
test::<2>(); test::<2>();
test::<32>(); test::<32>();
test::<35>(); test::<35>();
let array = [(); MAXIMUM_ZST_CAPACITY - 1];
let deq = VecDeque::from(array);
assert!(deq.cap().is_power_of_two());
assert_eq!(deq.len(), MAXIMUM_ZST_CAPACITY - 1);
} }
#[test] #[test]

View File

@ -465,7 +465,6 @@ fn test_drain() {
for i in 6..9 { for i in 6..9 {
d.push_front(i); d.push_front(i);
} }
assert_eq!(d.drain(..).collect::<Vec<_>>(), [8, 7, 6, 0, 1, 2, 3, 4]); assert_eq!(d.drain(..).collect::<Vec<_>>(), [8, 7, 6, 0, 1, 2, 3, 4]);
assert!(d.is_empty()); assert!(d.is_empty());
} }
@ -1142,7 +1141,7 @@ fn test_reserve_exact_2() {
v.push_back(16); v.push_back(16);
v.reserve_exact(16); v.reserve_exact(16);
assert!(v.capacity() >= 48) assert!(v.capacity() >= 33)
} }
#[test] #[test]
@ -1157,7 +1156,7 @@ fn test_try_reserve() {
// * overflow may trigger when adding `len` to `cap` (in number of elements) // * overflow may trigger when adding `len` to `cap` (in number of elements)
// * overflow may trigger when multiplying `new_cap` by size_of::<T> (to get bytes) // * overflow may trigger when multiplying `new_cap` by size_of::<T> (to get bytes)
const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1; const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX; const MAX_USIZE: usize = usize::MAX;
{ {
@ -1248,7 +1247,7 @@ fn test_try_reserve_exact() {
// This is exactly the same as test_try_reserve with the method changed. // This is exactly the same as test_try_reserve with the method changed.
// See that test for comments. // See that test for comments.
const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1; const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX; const MAX_USIZE: usize = usize::MAX;
{ {
@ -1391,7 +1390,8 @@ fn test_rotate_nop() {
#[test] #[test]
fn test_rotate_left_parts() { fn test_rotate_left_parts() {
let mut v: VecDeque<_> = (1..=7).collect(); let mut v: VecDeque<_> = VecDeque::with_capacity(8);
v.extend(1..=7);
v.rotate_left(2); v.rotate_left(2);
assert_eq!(v.as_slices(), (&[3, 4, 5, 6, 7, 1][..], &[2][..])); assert_eq!(v.as_slices(), (&[3, 4, 5, 6, 7, 1][..], &[2][..]));
v.rotate_left(2); v.rotate_left(2);
@ -1410,7 +1410,8 @@ fn test_rotate_left_parts() {
#[test] #[test]
fn test_rotate_right_parts() { fn test_rotate_right_parts() {
let mut v: VecDeque<_> = (1..=7).collect(); let mut v: VecDeque<_> = VecDeque::with_capacity(8);
v.extend(1..=7);
v.rotate_right(2); v.rotate_right(2);
assert_eq!(v.as_slices(), (&[6, 7][..], &[1, 2, 3, 4, 5][..])); assert_eq!(v.as_slices(), (&[6, 7][..], &[1, 2, 3, 4, 5][..]));
v.rotate_right(2); v.rotate_right(2);

View File

@ -144,20 +144,16 @@ class StdVecDequeProvider:
def __init__(self, valobj): def __init__(self, valobj):
self.valobj = valobj self.valobj = valobj
self.head = int(valobj["head"]) self.head = int(valobj["head"])
self.tail = int(valobj["tail"]) self.size = int(valobj["len"])
self.cap = int(valobj["buf"]["cap"]) self.cap = int(valobj["buf"]["cap"])
self.data_ptr = unwrap_unique_or_non_null(valobj["buf"]["ptr"]) self.data_ptr = unwrap_unique_or_non_null(valobj["buf"]["ptr"])
if self.head >= self.tail:
self.size = self.head - self.tail
else:
self.size = self.cap + self.head - self.tail
def to_string(self): def to_string(self):
return "VecDeque(size={})".format(self.size) return "VecDeque(size={})".format(self.size)
def children(self): def children(self):
return _enumerate_array_elements( return _enumerate_array_elements(
(self.data_ptr + ((self.tail + index) % self.cap)) for index in xrange(self.size) (self.data_ptr + ((self.head + index) % self.cap)) for index in xrange(self.size)
) )
@staticmethod @staticmethod

View File

@ -356,7 +356,7 @@ class StdSliceSyntheticProvider:
class StdVecDequeSyntheticProvider: class StdVecDequeSyntheticProvider:
"""Pretty-printer for alloc::collections::vec_deque::VecDeque<T> """Pretty-printer for alloc::collections::vec_deque::VecDeque<T>
struct VecDeque<T> { tail: usize, head: usize, buf: RawVec<T> } struct VecDeque<T> { head: usize, len: usize, buf: RawVec<T> }
""" """
def __init__(self, valobj, dict): def __init__(self, valobj, dict):
@ -373,7 +373,7 @@ class StdVecDequeSyntheticProvider:
def get_child_index(self, name): def get_child_index(self, name):
# type: (str) -> int # type: (str) -> int
index = name.lstrip('[').rstrip(']') index = name.lstrip('[').rstrip(']')
if index.isdigit() and self.tail <= index and (self.tail + index) % self.cap < self.head: if index.isdigit() and int(index) < self.size:
return int(index) return int(index)
else: else:
return -1 return -1
@ -381,20 +381,16 @@ class StdVecDequeSyntheticProvider:
def get_child_at_index(self, index): def get_child_at_index(self, index):
# type: (int) -> SBValue # type: (int) -> SBValue
start = self.data_ptr.GetValueAsUnsigned() start = self.data_ptr.GetValueAsUnsigned()
address = start + ((index + self.tail) % self.cap) * self.element_type_size address = start + ((index + self.head) % self.cap) * self.element_type_size
element = self.data_ptr.CreateValueFromAddress("[%s]" % index, address, self.element_type) element = self.data_ptr.CreateValueFromAddress("[%s]" % index, address, self.element_type)
return element return element
def update(self): def update(self):
# type: () -> None # type: () -> None
self.head = self.valobj.GetChildMemberWithName("head").GetValueAsUnsigned() self.head = self.valobj.GetChildMemberWithName("head").GetValueAsUnsigned()
self.tail = self.valobj.GetChildMemberWithName("tail").GetValueAsUnsigned() self.size = self.valobj.GetChildMemberWithName("len").GetValueAsUnsigned()
self.buf = self.valobj.GetChildMemberWithName("buf") self.buf = self.valobj.GetChildMemberWithName("buf")
self.cap = self.buf.GetChildMemberWithName("cap").GetValueAsUnsigned() self.cap = self.buf.GetChildMemberWithName("cap").GetValueAsUnsigned()
if self.head >= self.tail:
self.size = self.head - self.tail
else:
self.size = self.cap + self.head - self.tail
self.data_ptr = unwrap_unique_or_non_null(self.buf.GetChildMemberWithName("ptr")) self.data_ptr = unwrap_unique_or_non_null(self.buf.GetChildMemberWithName("ptr"))

View File

@ -12,20 +12,19 @@
</Expand> </Expand>
</Type> </Type>
<Type Name="alloc::collections::vec_deque::VecDeque&lt;*&gt;"> <Type Name="alloc::collections::vec_deque::VecDeque&lt;*&gt;">
<DisplayString>{{ len={tail &lt;= head ? head - tail : buf.cap - tail + head} }}</DisplayString> <DisplayString>{{ len={len} }}</DisplayString>
<Expand> <Expand>
<Item Name="[len]" ExcludeView="simple">tail &lt;= head ? head - tail : buf.cap - tail + head</Item> <Item Name="[len]" ExcludeView="simple">len</Item>
<Item Name="[capacity]" ExcludeView="simple">buf.cap</Item> <Item Name="[capacity]" ExcludeView="simple">buf.cap</Item>
<CustomListItems> <CustomListItems>
<Variable Name="i" InitialValue="tail" /> <Variable Name="i" InitialValue="0" />
<Size>len</Size>
<Size>tail &lt;= head ? head - tail : buf.cap - tail + head</Size>
<Loop> <Loop>
<If Condition="i == head"> <If Condition="i == len">
<Break/> <Break/>
</If> </If>
<Item>buf.ptr.pointer.pointer[i]</Item> <Item>buf.ptr.pointer.pointer[(i + head) % buf.cap]</Item>
<Exec>i = (i + 1 == buf.cap ? 0 : i + 1)</Exec> <Exec>i = i + 1</Exec>
</Loop> </Loop>
</CustomListItems> </CustomListItems>
</Expand> </Expand>

View File

@ -138,7 +138,7 @@
// cdb-command: dx vecdeque // cdb-command: dx vecdeque
// cdb-check:vecdeque : { len=0x2 } [Type: alloc::collections::vec_deque::VecDeque<i32,alloc::alloc::Global>] // cdb-check:vecdeque : { len=0x2 } [Type: alloc::collections::vec_deque::VecDeque<i32,alloc::alloc::Global>]
// cdb-check: [<Raw View>] [Type: alloc::collections::vec_deque::VecDeque<i32,alloc::alloc::Global>] // cdb-check: [<Raw View>] [Type: alloc::collections::vec_deque::VecDeque<i32,alloc::alloc::Global>]
// cdb-check: [len] : 0x2 // cdb-check: [len] : 0x2 [Type: unsigned [...]]
// cdb-check: [capacity] : 0x8 [Type: unsigned [...]] // cdb-check: [capacity] : 0x8 [Type: unsigned [...]]
// cdb-check: [0x0] : 90 [Type: int] // cdb-check: [0x0] : 90 [Type: int]
// cdb-check: [0x1] : 20 [Type: int] // cdb-check: [0x1] : 20 [Type: int]
@ -175,7 +175,7 @@ fn main() {
linkedlist.push_front(128); linkedlist.push_front(128);
// VecDeque // VecDeque
let mut vecdeque = VecDeque::new(); let mut vecdeque = VecDeque::with_capacity(8);
vecdeque.push_back(20); vecdeque.push_back(20);
vecdeque.push_front(90); vecdeque.push_front(90);

View File

@ -1,2 +1,2 @@
thread 'main' panicked at 'capacity overflow', $SRC_DIR/alloc/src/collections/vec_deque/mod.rs:LL:COL thread 'main' panicked at 'capacity overflow', library/alloc/src/raw_vec.rs:518:5
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace