diff --git a/library/core/src/array/iter/iter_inner.rs b/library/core/src/array/iter/iter_inner.rs
index f43d9e7ddd6..3c2343591f8 100644
--- a/library/core/src/array/iter/iter_inner.rs
+++ b/library/core/src/array/iter/iter_inner.rs
@@ -31,9 +31,9 @@ impl<T, const N: usize> PartialDrop for [MaybeUninit<T>; N] {
 /// The real `array::IntoIter<T, N>` stores a `PolymorphicIter<[MaybeUninit<T>; N]>`
 /// which it unsizes to `PolymorphicIter<[MaybeUninit<T>]>` to iterate.
 #[allow(private_bounds)]
-pub(super) struct PolymorphicIter<TAIL: ?Sized>
+pub(super) struct PolymorphicIter<DATA: ?Sized>
 where
-    TAIL: PartialDrop,
+    DATA: PartialDrop,
 {
     /// The elements in `data` that have not been yielded yet.
     ///
@@ -55,13 +55,13 @@ where
     /// - `data[alive]` is alive (i.e. contains valid elements)
     /// - `data[..alive.start]` and `data[alive.end..]` are dead (i.e. the
     ///   elements were already read and must not be touched anymore!)
-    data: TAIL,
+    data: DATA,
 }
 
 #[allow(private_bounds)]
-impl<TAIL: ?Sized> PolymorphicIter<TAIL>
+impl<DATA: ?Sized> PolymorphicIter<DATA>
 where
-    TAIL: PartialDrop,
+    DATA: PartialDrop,
 {
     #[inline]
     pub(super) const fn len(&self) -> usize {
@@ -70,9 +70,9 @@ where
 }
 
 #[allow(private_bounds)]
-impl<TAIL: ?Sized> Drop for PolymorphicIter<TAIL>
+impl<DATA: ?Sized> Drop for PolymorphicIter<DATA>
 where
-    TAIL: PartialDrop,
+    DATA: PartialDrop,
 {
     #[inline]
     fn drop(&mut self) {
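
For context, the doc comment above relies on ordinary unsizing of a struct's last field: a `PolymorphicIter<[MaybeUninit<T>; N]>` coerces to `PolymorphicIter<[MaybeUninit<T>]>` behind a reference, so a single impl over the unsized form serves every `N`. A minimal sketch of that pattern, with illustrative names and `Range<usize>` standing in for the internal `IndexRange`:

    use std::mem::MaybeUninit;
    use std::ops::Range;

    struct Poly<D: ?Sized> {
        alive: Range<usize>, // invariant: `data[alive]` holds live elements
        data: D,             // `[MaybeUninit<T>; N]`, usable as `[MaybeUninit<T>]`
    }

    impl<T> Poly<[MaybeUninit<T>]> {
        fn len(&self) -> usize {
            self.alive.end - self.alive.start
        }
    }

    fn main() {
        let sized: Poly<[MaybeUninit<u8>; 4]> = Poly {
            alive: 0..4,
            // Initialized slots, so the `alive` claim actually holds.
            data: [MaybeUninit::new(0u8); 4],
        };
        // Unsizing coercion: `&Poly<[_; 4]>` -> `&Poly<[_]>`.
        let erased: &Poly<[MaybeUninit<u8>]> = &sized;
        assert_eq!(erased.len(), 4);
    }
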
@@ -209,7 +209,7 @@ impl<T> PolymorphicIter<[MaybeUninit<T>]> {
         R: Try<Output = B>,
     {
         // `alive` is an `IndexRange`, not an arbitrary iterator, so we can
-        // trust that its `try_rfold` isn't going to do something weird like
+        // trust that its `try_fold` isn't going to do something weird like
         // call the fold-er multiple times for the same index.
         let data = &mut self.data;
         self.alive.try_fold(init, move |accum, idx| {
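
The comment fixed here matters because the closure hands out each `data[idx]` by value and then treats that slot as dead; a `try_fold` that invoked the closure twice for one index would double-read a moved-from element. A hedged sketch of that "each index exactly once" contract, with `Option::take` standing in for reading out of a `MaybeUninit` slot:

    fn drain_sum(values: &mut [Option<u32>]) -> Option<u32> {
        // Trust the range to visit each index exactly once: `take` moves the
        // value out, leaving the slot dead (`None`) behind it.
        (0..values.len()).try_fold(0u32, |acc, idx| {
            let v = values[idx].take()?;
            Some(acc + v)
        })
    }

    fn main() {
        let mut vals = [Some(1), Some(2), Some(3)];
        assert_eq!(drain_sum(&mut vals), Some(6));
        // Every slot was read once and is now dead.
        assert!(vals.iter().all(Option::is_none));
    }
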
diff --git a/tests/codegen/issues/issue-101082.rs b/tests/codegen/issues/issue-101082.rs
index 89295da5bd1..96cdff64dda 100644
--- a/tests/codegen/issues/issue-101082.rs
+++ b/tests/codegen/issues/issue-101082.rs
@@ -1,8 +1,16 @@
 //@ compile-flags: -Copt-level=3
-//@ revisions: host x86-64-v3
+//@ revisions: host x86-64 x86-64-v3
 //@ min-llvm-version: 20
 
-// This particular CPU regressed in #131563
+//@[host] ignore-x86_64
+
+// Set the base CPU explicitly, in case the default has been changed.
+//@[x86-64] only-x86_64
+//@[x86-64] compile-flags: -Ctarget-cpu=x86-64
+
+// FIXME(cuviper): x86-64-v3 in particular regressed in #131563, and the workaround
+// at the time still sometimes fails, so we only verify it for the power-of-two size
+// - https://github.com/llvm/llvm-project/issues/134735
 //@[x86-64-v3] only-x86_64
 //@[x86-64-v3] compile-flags: -Ctarget-cpu=x86-64-v3
 
@@ -12,16 +20,14 @@
 pub fn test() -> usize {
     // CHECK-LABEL: @test(
     // host: ret {{i64|i32}} 165
+    // x86-64: ret {{i64|i32}} 165
 
 // FIXME: Now that this autovectorizes via a masked load, it doesn't actually
 // const-fold for certain widths.  The `test_eight` case below shows that, yes,
 // what we're emitting *can* be const-folded; it's just that the way LLVM
 // handles certain widths doesn't const-fold today.  We should be able to put
 // this back to the same check after <https://github.com/llvm/llvm-project/issues/134513>
-    // x86-64-v3: <i64 23, i64 16, i64 54, i64 3>
-    // x86-64-v3: llvm.masked.load
-    // x86-64-v3: %[[R:.+]] = {{.+}}llvm.vector.reduce.add.v4i64
-    // x86-64-v3: ret i64 %[[R]]
+    // x86-64-v3: masked.load
 
     let values = [23, 16, 54, 3, 60, 9];
     let mut acc = 0;
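
For reference, the `165` that the `host` and `x86-64` revisions check is just the constant the whole loop should fold down to. The hunk is truncated here, so the loop below is a reconstruction matching the CHECK lines rather than a quote of the test:

    fn main() {
        let values = [23u64, 16, 54, 3, 60, 9];
        let mut acc = 0;
        for v in values {
            acc += v;
        }
        // 23 + 16 + 54 + 3 + 60 + 9 = 165, the immediate in `ret i64 165`.
        assert_eq!(acc, 165);
    }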