diff --git a/library/core/src/array/iter.rs b/library/core/src/array/iter.rs
index 1edade41597..90f76d6d4c7 100644
--- a/library/core/src/array/iter.rs
+++ b/library/core/src/array/iter.rs
@@ -1,38 +1,35 @@
 //! Defines the `IntoIter` owned iterator for arrays.
 
 use crate::intrinsics::transmute_unchecked;
-use crate::iter::{self, FusedIterator, TrustedLen, TrustedRandomAccessNoCoerce};
+use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccessNoCoerce};
 use crate::mem::MaybeUninit;
 use crate::num::NonZero;
-use crate::ops::{IndexRange, Range};
+use crate::ops::{IndexRange, Range, Try};
 use crate::{fmt, ptr};
 
+mod iter_inner;
+
+type InnerSized<T, const N: usize> = iter_inner::PolymorphicIter<[MaybeUninit<T>; N]>;
+type InnerUnsized<T> = iter_inner::PolymorphicIter<[MaybeUninit<T>]>;
+
 /// A by-value [array] iterator.
 #[stable(feature = "array_value_iter", since = "1.51.0")]
 #[rustc_insignificant_dtor]
 #[rustc_diagnostic_item = "ArrayIntoIter"]
+#[derive(Clone)]
 pub struct IntoIter<T, const N: usize> {
-    /// This is the array we are iterating over.
-    ///
-    /// Elements with index `i` where `alive.start <= i < alive.end` have not
-    /// been yielded yet and are valid array entries. Elements with indices `i
-    /// < alive.start` or `i >= alive.end` have been yielded already and must
-    /// not be accessed anymore! Those dead elements might even be in a
-    /// completely uninitialized state!
-    ///
-    /// So the invariants are:
-    /// - `data[alive]` is alive (i.e. contains valid elements)
-    /// - `data[..alive.start]` and `data[alive.end..]` are dead (i.e. the
-    ///   elements were already read and must not be touched anymore!)
-    data: [MaybeUninit<T>; N],
+    inner: InnerSized<T, N>,
+}
 
-    /// The elements in `data` that have not been yielded yet.
-    ///
-    /// Invariants:
-    /// - `alive.end <= N`
-    ///
-    /// (And the `IndexRange` type requires `alive.start <= alive.end`.)
-    alive: IndexRange,
+impl<T, const N: usize> IntoIter<T, N> {
+    #[inline]
+    fn unsize(&self) -> &InnerUnsized<T> {
+        &self.inner
+    }
+    #[inline]
+    fn unsize_mut(&mut self) -> &mut InnerUnsized<T> {
+        &mut self.inner
+    }
 }
 
 // Note: the `#[rustc_skip_during_method_dispatch(array)]` on `trait IntoIterator`
@@ -53,6 +50,7 @@ impl<T, const N: usize> IntoIterator for [T; N] {
     /// 2021 edition -- see the [array] Editions section for more information.
     ///
     /// [array]: prim@array
+    #[inline]
     fn into_iter(self) -> Self::IntoIter {
         // SAFETY: The transmute here is actually safe. The docs of `MaybeUninit`
         // promise:
@@ -68,7 +66,10 @@ impl<T, const N: usize> IntoIterator for [T; N] {
         // FIXME: If normal `transmute` ever gets smart enough to allow this
         // directly, use it instead of `transmute_unchecked`.
         let data: [MaybeUninit<T>; N] = unsafe { transmute_unchecked(self) };
-        IntoIter { data, alive: IndexRange::zero_to(N) }
+        // SAFETY: The original array was entirely initialized and the alive
+        // range we're passing here represents that fact.
+        let inner = unsafe { InnerSized::new_unchecked(IndexRange::zero_to(N), data) };
+        IntoIter { inner }
     }
 }
 
@@ -136,13 +137,16 @@ impl<T, const N: usize> IntoIter<T, N> {
     /// assert_eq!(r.collect::<Vec<_>>(), vec![10, 11, 12, 13, 14, 15]);
     /// ```
     #[unstable(feature = "array_into_iter_constructors", issue = "91583")]
+    #[inline]
     pub const unsafe fn new_unchecked(
         buffer: [MaybeUninit<T>; N],
         initialized: Range<usize>,
     ) -> Self {
         // SAFETY: one of our safety conditions is that the range is canonical.
         let alive = unsafe { IndexRange::new_unchecked(initialized.start, initialized.end) };
-        Self { data: buffer, alive }
+        // SAFETY: one of our safety conditions is that these items are initialized.
+        let inner = unsafe { InnerSized::new_unchecked(alive, buffer) };
+        IntoIter { inner }
     }
 
     /// Creates an iterator over `T` which returns no elements.
@@ -198,172 +202,134 @@ impl<T, const N: usize> IntoIter<T, N> {
     /// assert_eq!(get_bytes(false).collect::<Vec<_>>(), vec![]);
     /// ```
     #[unstable(feature = "array_into_iter_constructors", issue = "91583")]
+    #[inline]
     pub const fn empty() -> Self {
-        let buffer = [const { MaybeUninit::uninit() }; N];
-        let initialized = 0..0;
-
-        // SAFETY: We're telling it that none of the elements are initialized,
-        // which is trivially true. And ∀N: usize, 0 <= N.
-        unsafe { Self::new_unchecked(buffer, initialized) }
+        let inner = InnerSized::empty();
+        IntoIter { inner }
     }
 
     /// Returns an immutable slice of all elements that have not been yielded
     /// yet.
     #[stable(feature = "array_value_iter", since = "1.51.0")]
+    #[inline]
     pub fn as_slice(&self) -> &[T] {
-        // SAFETY: We know that all elements within `alive` are properly initialized.
-        unsafe {
-            let slice = self.data.get_unchecked(self.alive.clone());
-            slice.assume_init_ref()
-        }
+        self.unsize().as_slice()
     }
 
     /// Returns a mutable slice of all elements that have not been yielded yet.
     #[stable(feature = "array_value_iter", since = "1.51.0")]
+    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
-        // SAFETY: We know that all elements within `alive` are properly initialized.
-        unsafe {
-            let slice = self.data.get_unchecked_mut(self.alive.clone());
-            slice.assume_init_mut()
-        }
+        self.unsize_mut().as_mut_slice()
     }
 }
 
 #[stable(feature = "array_value_iter_impls", since = "1.40.0")]
 impl<T, const N: usize> Iterator for IntoIter<T, N> {
     type Item = T;
-    fn next(&mut self) -> Option<Self::Item> {
-        // Get the next index from the front.
-        //
-        // Increasing `alive.start` by 1 maintains the invariant regarding
-        // `alive`. However, due to this change, for a short time, the alive
-        // zone is not `data[alive]` anymore, but `data[idx..alive.end]`.
-        self.alive.next().map(|idx| {
-            // Read the element from the array.
-            // SAFETY: `idx` is an index into the former "alive" region of the
-            // array. Reading this element means that `data[idx]` is regarded as
-            // dead now (i.e. do not touch). As `idx` was the start of the
-            // alive-zone, the alive zone is now `data[alive]` again, restoring
-            // all invariants.
-            unsafe { self.data.get_unchecked(idx).assume_init_read() }
-        })
-    }
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let len = self.len();
-        (len, Some(len))
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        self.unsize_mut().next()
     }
 
     #[inline]
-    fn fold<Acc, Fold>(mut self, init: Acc, mut fold: Fold) -> Acc
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.unsize().size_hint()
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
     where
         Fold: FnMut(Acc, Self::Item) -> Acc,
     {
-        let data = &mut self.data;
-        iter::ByRefSized(&mut self.alive).fold(init, |acc, idx| {
-            // SAFETY: idx is obtained by folding over the `alive` range, which implies the
-            // value is currently considered alive but as the range is being consumed each value
-            // we read here will only be read once and then considered dead.
-            fold(acc, unsafe { data.get_unchecked(idx).assume_init_read() })
-        })
+        self.unsize_mut().fold(init, fold)
     }
 
+    #[inline]
+    fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> R,
+        R: Try<Output = B>,
+    {
+        self.unsize_mut().try_fold(init, f)
+    }
+
+    #[inline]
     fn count(self) -> usize {
         self.len()
     }
 
+    #[inline]
     fn last(mut self) -> Option<Self::Item> {
         self.next_back()
     }
 
+    #[inline]
     fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
-        // This also moves the start, which marks them as conceptually "dropped",
-        // so if anything goes bad then our drop impl won't double-free them.
-        let range_to_drop = self.alive.take_prefix(n);
-        let remaining = n - range_to_drop.len();
-
-        // SAFETY: These elements are currently initialized, so it's fine to drop them.
-        unsafe {
-            let slice = self.data.get_unchecked_mut(range_to_drop);
-            slice.assume_init_drop();
-        }
-
-        NonZero::new(remaining).map_or(Ok(()), Err)
+        self.unsize_mut().advance_by(n)
     }
 
     #[inline]
     unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
         // SAFETY: The caller must provide an idx that is in bound of the remainder.
-        unsafe { self.data.as_ptr().add(self.alive.start()).add(idx).cast::<T>().read() }
+        let elem_ref = unsafe { self.as_mut_slice().get_unchecked_mut(idx) };
+        // SAFETY: We only implement `TrustedRandomAccessNoCoerce` for types
+        // which are actually `Copy`, so cannot have multiple-drop issues.
+        unsafe { ptr::read(elem_ref) }
     }
 }
 
 #[stable(feature = "array_value_iter_impls", since = "1.40.0")]
 impl<T, const N: usize> DoubleEndedIterator for IntoIter<T, N> {
+    #[inline]
     fn next_back(&mut self) -> Option<Self::Item> {
-        // Get the next index from the back.
-        //
-        // Decreasing `alive.end` by 1 maintains the invariant regarding
-        // `alive`. However, due to this change, for a short time, the alive
-        // zone is not `data[alive]` anymore, but `data[alive.start..=idx]`.
-        self.alive.next_back().map(|idx| {
-            // Read the element from the array.
-            // SAFETY: `idx` is an index into the former "alive" region of the
-            // array. Reading this element means that `data[idx]` is regarded as
-            // dead now (i.e. do not touch). As `idx` was the end of the
-            // alive-zone, the alive zone is now `data[alive]` again, restoring
-            // all invariants.
-            unsafe { self.data.get_unchecked(idx).assume_init_read() }
-        })
+        self.unsize_mut().next_back()
    }
 
     #[inline]
-    fn rfold<Acc, Fold>(mut self, init: Acc, mut rfold: Fold) -> Acc
+    fn rfold<Acc, Fold>(mut self, init: Acc, rfold: Fold) -> Acc
     where
         Fold: FnMut(Acc, Self::Item) -> Acc,
     {
-        let data = &mut self.data;
-        iter::ByRefSized(&mut self.alive).rfold(init, |acc, idx| {
-            // SAFETY: idx is obtained by folding over the `alive` range, which implies the
-            // value is currently considered alive but as the range is being consumed each value
-            // we read here will only be read once and then considered dead.
-            rfold(acc, unsafe { data.get_unchecked(idx).assume_init_read() })
-        })
+        self.unsize_mut().rfold(init, rfold)
     }
 
+    #[inline]
+    fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> R,
+        R: Try<Output = B>,
+    {
+        self.unsize_mut().try_rfold(init, f)
+    }
+
+    #[inline]
     fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
-        // This also moves the end, which marks them as conceptually "dropped",
-        // so if anything goes bad then our drop impl won't double-free them.
-        let range_to_drop = self.alive.take_suffix(n);
-        let remaining = n - range_to_drop.len();
-
-        // SAFETY: These elements are currently initialized, so it's fine to drop them.
-        unsafe {
-            let slice = self.data.get_unchecked_mut(range_to_drop);
-            slice.assume_init_drop();
-        }
-
-        NonZero::new(remaining).map_or(Ok(()), Err)
+        self.unsize_mut().advance_back_by(n)
     }
 }
 
 #[stable(feature = "array_value_iter_impls", since = "1.40.0")]
 impl<T, const N: usize> Drop for IntoIter<T, N> {
+    #[inline]
     fn drop(&mut self) {
-        // SAFETY: This is safe: `as_mut_slice` returns exactly the sub-slice
-        // of elements that have not been moved out yet and that remain
-        // to be dropped.
-        unsafe { ptr::drop_in_place(self.as_mut_slice()) }
+        // `inner` now handles this, but it'd technically be a breaking change
+        // to remove this `impl`, even though it's useless.
     }
 }
 
 #[stable(feature = "array_value_iter_impls", since = "1.40.0")]
 impl<T, const N: usize> ExactSizeIterator for IntoIter<T, N> {
+    #[inline]
     fn len(&self) -> usize {
-        self.alive.len()
+        self.inner.len()
     }
+    #[inline]
     fn is_empty(&self) -> bool {
-        self.alive.is_empty()
+        self.inner.len() == 0
     }
 }
 
@@ -396,32 +362,9 @@ where
     const MAY_HAVE_SIDE_EFFECT: bool = false;
 }
 
-#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
-impl<T: Clone, const N: usize> Clone for IntoIter<T, N> {
-    fn clone(&self) -> Self {
-        // Note, we don't really need to match the exact same alive range, so
-        // we can just clone into offset 0 regardless of where `self` is.
-        let mut new =
-            Self { data: [const { MaybeUninit::uninit() }; N], alive: IndexRange::zero_to(0) };
-
-        // Clone all alive elements.
-        for (src, dst) in iter::zip(self.as_slice(), &mut new.data) {
-            // Write a clone into the new array, then update its alive range.
-            // If cloning panics, we'll correctly drop the previous items.
-            dst.write(src.clone());
-            // This addition cannot overflow as we're iterating a slice
-            new.alive = IndexRange::zero_to(new.alive.end() + 1);
-        }
-
-        new
-    }
-}
-
 #[stable(feature = "array_value_iter_impls", since = "1.40.0")]
 impl<T: fmt::Debug, const N: usize> fmt::Debug for IntoIter<T, N> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        // Only print the elements that were not yielded yet: we cannot
-        // access the yielded elements anymore.
-        f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
+        self.unsize().fmt(f)
    }
 }
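The `unsize`/`unsize_mut` helpers above work because a reference to a struct whose last field is `[T; N]` coerces, built into the language, to a reference to the same struct over `[T]`. That coercion is the whole polymorphization trick of this patch: the heavy method bodies live on the slice-backed form and get instantiated once per element type `T`, not once per `(T, N)` pair. A minimal sketch of the pattern with hypothetical names (`Buf`, `remaining` are illustration only), runnable on stable Rust:

```rust
// Sketch of unsized coercion on a struct whose last field unsizes.
struct Buf<D: ?Sized> {
    pos: usize,
    data: D,
}

impl<T> Buf<[T]> {
    // One instantiation per element type, shared by every array length.
    fn remaining(&self) -> &[T] {
        &self.data[self.pos..]
    }
}

fn main() {
    let a = Buf { pos: 1, data: [1_u8, 2, 3] };
    let b = Buf { pos: 0, data: [9_u8; 8] };
    // `&Buf<[u8; 3]>` and `&Buf<[u8; 8]>` both coerce to `&Buf<[u8]>`.
    let (ra, rb): (&Buf<[u8]>, &Buf<[u8]>) = (&a, &b);
    assert_eq!(ra.remaining(), &[2, 3]);
    assert_eq!(rb.remaining().len(), 8);
}
```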
diff --git a/library/core/src/array/iter/iter_inner.rs b/library/core/src/array/iter/iter_inner.rs
new file mode 100644
index 00000000000..3c2343591f8
--- /dev/null
+++ b/library/core/src/array/iter/iter_inner.rs
@@ -0,0 +1,281 @@
+//! Defines the `IntoIter` owned iterator for arrays.
+
+use crate::mem::MaybeUninit;
+use crate::num::NonZero;
+use crate::ops::{IndexRange, NeverShortCircuit, Try};
+use crate::{fmt, iter};
+
+#[allow(private_bounds)]
+trait PartialDrop {
+    /// # Safety
+    /// `self[alive]` are all initialized before the call,
+    /// then are never used (without reinitializing them) after it.
+    unsafe fn partial_drop(&mut self, alive: IndexRange);
+}
+impl<T> PartialDrop for [MaybeUninit<T>] {
+    unsafe fn partial_drop(&mut self, alive: IndexRange) {
+        // SAFETY: We know that all elements within `alive` are properly initialized.
+        unsafe { self.get_unchecked_mut(alive).assume_init_drop() }
+    }
+}
+impl<T, const N: usize> PartialDrop for [MaybeUninit<T>; N] {
+    unsafe fn partial_drop(&mut self, alive: IndexRange) {
+        let slice: &mut [MaybeUninit<T>] = self;
+        // SAFETY: Initialized elements in the array are also initialized in the slice.
+        unsafe { slice.partial_drop(alive) }
+    }
+}
+
+/// The internals of a by-value array iterator.
+///
+/// The real `array::IntoIter<T, N>` stores a `PolymorphicIter<[MaybeUninit<T>; N]>`
+/// which it unsizes to `PolymorphicIter<[MaybeUninit<T>]>` to iterate.
+#[allow(private_bounds)]
+pub(super) struct PolymorphicIter<DATA: ?Sized>
+where
+    DATA: PartialDrop,
+{
+    /// The elements in `data` that have not been yielded yet.
+    ///
+    /// Invariants:
+    /// - `alive.end <= N`
+    ///
+    /// (And the `IndexRange` type requires `alive.start <= alive.end`.)
+    alive: IndexRange,
+
+    /// This is the array we are iterating over.
+    ///
+    /// Elements with index `i` where `alive.start <= i < alive.end` have not
+    /// been yielded yet and are valid array entries. Elements with indices `i
+    /// < alive.start` or `i >= alive.end` have been yielded already and must
+    /// not be accessed anymore! Those dead elements might even be in a
+    /// completely uninitialized state!
+    ///
+    /// So the invariants are:
+    /// - `data[alive]` is alive (i.e. contains valid elements)
+    /// - `data[..alive.start]` and `data[alive.end..]` are dead (i.e. the
+    ///   elements were already read and must not be touched anymore!)
+    data: DATA,
+}
+
+#[allow(private_bounds)]
+impl<DATA: ?Sized> PolymorphicIter<DATA>
+where
+    DATA: PartialDrop,
+{
+    #[inline]
+    pub(super) const fn len(&self) -> usize {
+        self.alive.len()
+    }
+}
+
+#[allow(private_bounds)]
+impl<DATA: ?Sized> Drop for PolymorphicIter<DATA>
+where
+    DATA: PartialDrop,
+{
+    #[inline]
+    fn drop(&mut self) {
+        // SAFETY: by our type invariant `self.alive` is exactly the initialized
+        // items, and this is drop so nothing can use the items afterwards.
+        unsafe { self.data.partial_drop(self.alive.clone()) }
+    }
+}
+
+impl<T, const N: usize> PolymorphicIter<[MaybeUninit<T>; N]> {
+    #[inline]
+    pub(super) const fn empty() -> Self {
+        Self { alive: IndexRange::zero_to(0), data: [const { MaybeUninit::uninit() }; N] }
+    }
+
+    /// # Safety
+    /// `data[alive]` are all initialized.
+    #[inline]
+    pub(super) const unsafe fn new_unchecked(alive: IndexRange, data: [MaybeUninit<T>; N]) -> Self {
+        Self { alive, data }
+    }
+}
+
+impl<T: Clone, const N: usize> Clone for PolymorphicIter<[MaybeUninit<T>; N]> {
+    #[inline]
+    fn clone(&self) -> Self {
+        // Note, we don't really need to match the exact same alive range, so
+        // we can just clone into offset 0 regardless of where `self` is.
+        let mut new = Self::empty();
+
+        fn clone_into_new<U: Clone>(
+            source: &PolymorphicIter<[MaybeUninit<U>]>,
+            target: &mut PolymorphicIter<[MaybeUninit<U>]>,
+        ) {
+            // Clone all alive elements.
+            for (src, dst) in iter::zip(source.as_slice(), &mut target.data) {
+                // Write a clone into the new array, then update its alive range.
+                // If cloning panics, we'll correctly drop the previous items.
+                dst.write(src.clone());
+                // This addition cannot overflow as we're iterating a slice,
+                // the length of which always fits in usize.
+                target.alive = IndexRange::zero_to(target.alive.end() + 1);
+            }
+        }
+
+        clone_into_new(self, &mut new);
+        new
+    }
+}
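Two properties of this `Clone` impl (which the `#[derive(Clone)]` on `IntoIter` now forwards to) are worth noting: the clone re-packs the surviving elements at offset 0 rather than matching the source's alive range, and a panic mid-clone is safe because `target.alive` only grows after each successful write, so the `Drop` impl frees exactly the clones made so far. A quick stable-Rust check of the first property:

```rust
fn main() {
    let mut it = ["a".to_string(), "b".to_string(), "c".to_string()].into_iter();
    assert_eq!(it.next().as_deref(), Some("a"));

    // `it` is alive over 1..3; the clone is alive over 0..2, but both
    // expose the same two un-yielded elements.
    let dup = it.clone();
    assert_eq!(it.as_slice(), dup.as_slice());
    assert_eq!(dup.as_slice(), ["b", "c"]);
}
```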
+impl<T> PolymorphicIter<[MaybeUninit<T>]> {
+    #[inline]
+    pub(super) fn as_slice(&self) -> &[T] {
+        // SAFETY: We know that all elements within `alive` are properly initialized.
+        unsafe {
+            let slice = self.data.get_unchecked(self.alive.clone());
+            slice.assume_init_ref()
+        }
+    }
+
+    #[inline]
+    pub(super) fn as_mut_slice(&mut self) -> &mut [T] {
+        // SAFETY: We know that all elements within `alive` are properly initialized.
+        unsafe {
+            let slice = self.data.get_unchecked_mut(self.alive.clone());
+            slice.assume_init_mut()
+        }
+    }
+}
+
+impl<T: fmt::Debug> fmt::Debug for PolymorphicIter<[MaybeUninit<T>]> {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Only print the elements that were not yielded yet: we cannot
+        // access the yielded elements anymore.
+        f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
+    }
+}
+
+/// Iterator-equivalent methods.
+///
+/// We don't implement the actual iterator traits because we want to implement
+/// things like `try_fold` that require `Self: Sized` (which we're not).
+impl<T> PolymorphicIter<[MaybeUninit<T>]> {
+    #[inline]
+    pub(super) fn next(&mut self) -> Option<T> {
+        // Get the next index from the front.
+        //
+        // Increasing `alive.start` by 1 maintains the invariant regarding
+        // `alive`. However, due to this change, for a short time, the alive
+        // zone is not `data[alive]` anymore, but `data[idx..alive.end]`.
+        self.alive.next().map(|idx| {
+            // Read the element from the array.
+            // SAFETY: `idx` is an index into the former "alive" region of the
+            // array. Reading this element means that `data[idx]` is regarded as
+            // dead now (i.e. do not touch). As `idx` was the start of the
+            // alive-zone, the alive zone is now `data[alive]` again, restoring
+            // all invariants.
+            unsafe { self.data.get_unchecked(idx).assume_init_read() }
+        })
+    }
+
+    #[inline]
+    pub(super) fn size_hint(&self) -> (usize, Option<usize>) {
+        let len = self.len();
+        (len, Some(len))
+    }
+
+    #[inline]
+    pub(super) fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
+        // This also moves the start, which marks them as conceptually "dropped",
+        // so if anything goes bad then our drop impl won't double-free them.
+        let range_to_drop = self.alive.take_prefix(n);
+        let remaining = n - range_to_drop.len();
+
+        // SAFETY: These elements are currently initialized, so it's fine to drop them.
+        unsafe {
+            let slice = self.data.get_unchecked_mut(range_to_drop);
+            slice.assume_init_drop();
+        }
+
+        NonZero::new(remaining).map_or(Ok(()), Err)
+    }
+
+    #[inline]
+    pub(super) fn fold<B>(&mut self, init: B, f: impl FnMut(B, T) -> B) -> B {
+        self.try_fold(init, NeverShortCircuit::wrap_mut_2(f)).0
+    }
+
+    #[inline]
+    pub(super) fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+    where
+        F: FnMut(B, T) -> R,
+        R: Try<Output = B>,
+    {
+        // `alive` is an `IndexRange`, not an arbitrary iterator, so we can
+        // trust that its `try_fold` isn't going to do something weird like
+        // call the fold-er multiple times for the same index.
+        let data = &mut self.data;
+        self.alive.try_fold(init, move |accum, idx| {
+            // SAFETY: `idx` has been removed from the alive range, so we're not
+            // going to drop it (even if `f` panics) and thus it's ok to give
+            // out ownership of that item to `f` to handle.
+            let elem = unsafe { data.get_unchecked(idx).assume_init_read() };
+            f(accum, elem)
+        })
+    }
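The `fold` above funnels into `try_fold` via `NeverShortCircuit`, core's internal adapter that lifts an infallible closure into a `Try`-returning one so both methods share a single loop body. Outside core that adapter isn't nameable, but the same trick can be sketched on stable Rust with `ControlFlow` and an uninhabited break type (the helper name here is illustration only):

```rust
use std::convert::Infallible;
use std::ops::ControlFlow;

// Route an infallible fold through `try_fold`, mirroring what
// `NeverShortCircuit::wrap_mut_2` does inside core.
fn fold_via_try_fold<I: Iterator, B>(
    mut iter: I,
    init: B,
    mut f: impl FnMut(B, I::Item) -> B,
) -> B {
    let folded = iter.try_fold(init, |acc, x| {
        // Never breaks, so the `?` inside `try_fold` never short-circuits.
        ControlFlow::<Infallible, B>::Continue(f(acc, x))
    });
    match folded {
        ControlFlow::Continue(acc) => acc,
        // `Infallible` has no values; this arm is statically unreachable.
        ControlFlow::Break(never) => match never {},
    }
}

fn main() {
    assert_eq!(fold_via_try_fold([1, 2, 3, 4].into_iter(), 0, |a, b| a + b), 10);
}
```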
+
+    #[inline]
+    pub(super) fn next_back(&mut self) -> Option<T> {
+        // Get the next index from the back.
+        //
+        // Decreasing `alive.end` by 1 maintains the invariant regarding
+        // `alive`. However, due to this change, for a short time, the alive
+        // zone is not `data[alive]` anymore, but `data[alive.start..=idx]`.
+        self.alive.next_back().map(|idx| {
+            // Read the element from the array.
+            // SAFETY: `idx` is an index into the former "alive" region of the
+            // array. Reading this element means that `data[idx]` is regarded as
+            // dead now (i.e. do not touch). As `idx` was the end of the
+            // alive-zone, the alive zone is now `data[alive]` again, restoring
+            // all invariants.
+            unsafe { self.data.get_unchecked(idx).assume_init_read() }
+        })
+    }
+
+    #[inline]
+    pub(super) fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
+        // This also moves the end, which marks them as conceptually "dropped",
+        // so if anything goes bad then our drop impl won't double-free them.
+        let range_to_drop = self.alive.take_suffix(n);
+        let remaining = n - range_to_drop.len();
+
+        // SAFETY: These elements are currently initialized, so it's fine to drop them.
+        unsafe {
+            let slice = self.data.get_unchecked_mut(range_to_drop);
+            slice.assume_init_drop();
+        }
+
+        NonZero::new(remaining).map_or(Ok(()), Err)
+    }
+
+    #[inline]
+    pub(super) fn rfold<B>(&mut self, init: B, f: impl FnMut(B, T) -> B) -> B {
+        self.try_rfold(init, NeverShortCircuit::wrap_mut_2(f)).0
+    }
+
+    #[inline]
+    pub(super) fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+    where
+        F: FnMut(B, T) -> R,
+        R: Try<Output = B>,
+    {
+        // `alive` is an `IndexRange`, not an arbitrary iterator, so we can
+        // trust that its `try_rfold` isn't going to do something weird like
+        // call the fold-er multiple times for the same index.
+        let data = &mut self.data;
+        self.alive.try_rfold(init, move |accum, idx| {
+            // SAFETY: `idx` has been removed from the alive range, so we're not
+            // going to drop it (even if `f` panics) and thus it's ok to give
+            // out ownership of that item to `f` to handle.
+            let elem = unsafe { data.get_unchecked(idx).assume_init_read() };
+            f(accum, elem)
+        })
+    }
+}
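The `IndexRange` changes below exist to make these `try_fold`/`try_rfold` loops optimize well: `assume_range` re-asserts the `start <= end` type invariant, which lets the compiler treat the `start != end` loop condition as equivalent to `start < end` and drop wrap-around reasoning. A stable-Rust sketch of that pattern on a hypothetical `OrderedRange` type — `hint::assert_unchecked` is UB if the condition is ever false, so it may only restate an invariant the type genuinely upholds:

```rust
use std::hint;

struct OrderedRange {
    start: usize,
    end: usize, // invariant: start <= end, enforced by the constructor
}

impl OrderedRange {
    fn new(start: usize, end: usize) -> Option<Self> {
        (start <= end).then_some(OrderedRange { start, end })
    }

    fn sum(mut self) -> usize {
        // SAFETY: `new` is the only constructor and it checks `start <= end`.
        unsafe { hint::assert_unchecked(self.start <= self.end) };
        let mut total = 0;
        // Thanks to the assertion, `!=` is as good as `<` here: the optimizer
        // knows `start` cannot step past `end`.
        while self.start != self.end {
            total += self.start;
            self.start += 1;
        }
        total
    }
}

fn main() {
    let r = OrderedRange::new(2, 5).unwrap();
    assert_eq!(r.sum(), 2 + 3 + 4);
}
```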
diff --git a/library/core/src/ops/index_range.rs b/library/core/src/ops/index_range.rs
index b82184b15b2..c645c996eb7 100644
--- a/library/core/src/ops/index_range.rs
+++ b/library/core/src/ops/index_range.rs
@@ -1,5 +1,6 @@
 use crate::iter::{FusedIterator, TrustedLen};
 use crate::num::NonZero;
+use crate::ops::{NeverShortCircuit, Try};
 use crate::ub_checks;
 
 /// Like a `Range<usize>`, but with a safety invariant that `start <= end`.
@@ -112,6 +113,12 @@ impl IndexRange {
         self.end = mid;
         suffix
     }
+
+    #[inline]
+    fn assume_range(&self) {
+        // SAFETY: This is the type invariant
+        unsafe { crate::hint::assert_unchecked(self.start <= self.end) }
+    }
 }
 
 impl Iterator for IndexRange {
@@ -138,6 +145,30 @@ impl Iterator for IndexRange {
         let taken = self.take_prefix(n);
         NonZero::new(n - taken.len()).map_or(Ok(()), Err)
     }
+
+    #[inline]
+    fn fold<B, F: FnMut(B, usize) -> B>(mut self, init: B, f: F) -> B {
+        self.try_fold(init, NeverShortCircuit::wrap_mut_2(f)).0
+    }
+
+    #[inline]
+    fn try_fold<B, F, R>(&mut self, mut accum: B, mut f: F) -> R
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> R,
+        R: Try<Output = B>,
+    {
+        // `Range` needs to check `start < end`, but thanks to our type invariant
+        // we can loop on the stricter `start != end`.
+
+        self.assume_range();
+        while self.start != self.end {
+            // SAFETY: We just checked that the range is non-empty
+            let i = unsafe { self.next_unchecked() };
+            accum = f(accum, i)?;
+        }
+        try { accum }
+    }
 }
 
 impl DoubleEndedIterator for IndexRange {
@@ -156,6 +187,30 @@ impl DoubleEndedIterator for IndexRange {
         let taken = self.take_suffix(n);
         NonZero::new(n - taken.len()).map_or(Ok(()), Err)
     }
+
+    #[inline]
+    fn rfold<B, F: FnMut(B, usize) -> B>(mut self, init: B, f: F) -> B {
+        self.try_rfold(init, NeverShortCircuit::wrap_mut_2(f)).0
+    }
+
+    #[inline]
+    fn try_rfold<B, F, R>(&mut self, mut accum: B, mut f: F) -> R
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> R,
+        R: Try<Output = B>,
+    {
+        // `Range` needs to check `start < end`, but thanks to our type invariant
+        // we can loop on the stricter `start != end`.
+
+        self.assume_range();
+        while self.start != self.end {
+            // SAFETY: We just checked that the range is non-empty
+            let i = unsafe { self.next_back_unchecked() };
+            accum = f(accum, i)?;
+        }
+        try { accum }
+    }
 }
 
 impl ExactSizeIterator for IndexRange {
diff --git a/tests/codegen/issues/issue-101082.rs b/tests/codegen/issues/issue-101082.rs
index 7fb850ca253..96cdff64dda 100644
--- a/tests/codegen/issues/issue-101082.rs
+++ b/tests/codegen/issues/issue-101082.rs
@@ -1,8 +1,16 @@
 //@ compile-flags: -Copt-level=3
-//@ revisions: host x86-64-v3
+//@ revisions: host x86-64 x86-64-v3
 //@ min-llvm-version: 20
 
-// This particular CPU regressed in #131563
+//@[host] ignore-x86_64
+
+// Set the base cpu explicitly, in case the default has been changed.
+//@[x86-64] only-x86_64
+//@[x86-64] compile-flags: -Ctarget-cpu=x86-64
+
+// FIXME(cuviper) x86-64-v3 in particular regressed in #131563, and the workaround
+// at the time still sometimes fails, so only verify it for the power-of-two size
+// - https://github.com/llvm/llvm-project/issues/134735
 //@[x86-64-v3] only-x86_64
 //@[x86-64-v3] compile-flags: -Ctarget-cpu=x86-64-v3
 
@@ -11,7 +19,16 @@
 #[no_mangle]
 pub fn test() -> usize {
     // CHECK-LABEL: @test(
-    // CHECK: ret {{i64|i32}} 165
+    // host: ret {{i64|i32}} 165
+    // x86-64: ret {{i64|i32}} 165
+
+    // FIXME: Now that this autovectorizes via a masked load, it doesn't actually
+    // const-fold for certain widths. The `test_eight` case below shows that, yes,
+    // what we're emitting *can* be const-folded, except that the way LLVM does it
+    // for certain widths doesn't today. We should be able to put this back to
+    // the same check after
+    // x86-64-v3: masked.load
+
     let values = [23, 16, 54, 3, 60, 9];
     let mut acc = 0;
     for item in values {
@@ -19,3 +36,15 @@ pub fn test() -> usize {
     }
     acc
 }
+
+#[no_mangle]
+pub fn test_eight() -> usize {
+    // CHECK-LABEL: @test_eight(
+    // CHECK: ret {{i64|i32}} 220
+    let values = [23, 16, 54, 3, 60, 9, 13, 42];
+    let mut acc = 0;
+    for item in values {
+        acc += item;
+    }
+    acc
+}
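None of this changes user-visible behavior; a stable-Rust smoke test of the paths the patch reroutes through `PolymorphicIter` could look like the following sketch:

```rust
fn main() {
    let mut it = [1_u32, 2, 3, 4].into_iter();
    assert_eq!(it.next(), Some(1));
    assert_eq!(it.as_slice(), &[2, 3, 4]); // un-yielded elements only

    // `try_fold` (and `fold`, which funnels into it) now run through the
    // length-erased inner iterator.
    let sum = it.try_fold(0_u32, u32::checked_add);
    assert_eq!(sum, Some(9));

    // `Debug` likewise prints only what hasn't been yielded.
    let it2 = [7, 8].into_iter();
    assert_eq!(format!("{it2:?}"), "IntoIter([7, 8])");
}
```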