diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index d2f8ef8eaae..46dbbd83d19 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -16,6 +16,7 @@
 #![feature(maybe_uninit_slice)]
 #![feature(min_specialization)]
 #![feature(decl_macro)]
+#![feature(pointer_byte_offsets)]
 #![feature(rustc_attrs)]
 #![cfg_attr(test, feature(test))]
 #![feature(strict_provenance)]
@@ -211,7 +212,7 @@ impl<T> TypedArena<T> {
 
         unsafe {
             if mem::size_of::<T>() == 0 {
-                self.ptr.set((self.ptr.get() as *mut u8).wrapping_offset(1) as *mut T);
+                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                 let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                 // Don't drop the object. This `write` is equivalent to `forget`.
                 ptr::write(ptr, object);
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index 4351548811d..ed049194dd0 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -4,7 +4,6 @@ use crate::alloc::{Allocator, Global};
 use crate::raw_vec::RawVec;
 use core::array;
 use core::fmt;
-use core::intrinsics::arith_offset;
 use core::iter::{
     FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
 };
@@ -154,7 +153,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
             // purposefully don't use 'ptr.offset' because for
             // vectors with 0-size elements this would return the
             // same pointer.
-            self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T };
+            self.ptr = self.ptr.wrapping_byte_add(1);
 
             // Make up a value of this ZST.
             Some(unsafe { mem::zeroed() })
@@ -184,7 +183,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
             // SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
             // effectively results in unsigned pointers representing positions 0..usize::MAX,
             // which is valid for ZSTs.
-            self.ptr = unsafe { arith_offset(self.ptr as *const i8, step_size as isize) as *mut T }
+            self.ptr = self.ptr.wrapping_byte_add(step_size);
         } else {
             // SAFETY: the min() above ensures that step_size is in bounds
             self.ptr = unsafe { self.ptr.add(step_size) };
@@ -217,7 +216,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
                 return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
             }
 
-            self.ptr = unsafe { arith_offset(self.ptr as *const i8, N as isize) as *mut T };
+            self.ptr = self.ptr.wrapping_byte_add(N);
             // Safety: ditto
             return Ok(unsafe { MaybeUninit::array_assume_init(raw_ary) });
         }
@@ -267,7 +266,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
             None
         } else if mem::size_of::<T>() == 0 {
             // See above for why 'ptr.offset' isn't used
-            self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };
+            self.end = self.end.wrapping_byte_sub(1);
 
             // Make up a value of this ZST.
             Some(unsafe { mem::zeroed() })
@@ -283,9 +282,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
         let step_size = self.len().min(n);
         if mem::size_of::<T>() == 0 {
             // SAFETY: same as for advance_by()
-            self.end = unsafe {
-                arith_offset(self.end as *const i8, step_size.wrapping_neg() as isize) as *mut T
-            }
+            self.end = self.end.wrapping_byte_sub(step_size);
         } else {
             // SAFETY: same as for advance_by()
             self.end = unsafe { self.end.sub(step_size) };
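The ZST branches above all step a pointer by whole bytes so the pointer can double as a remaining-length counter: a typed `add(1)` on a zero-sized element type advances by `1 * size_of::<T>() == 0` bytes and would leave the iterator stuck. Below is a minimal standalone sketch, not part of the patch, showing that the new `wrapping_byte_add` spelling computes the same address as the old cast-offset-cast dance. The byte offset methods were gated behind `#![feature(pointer_byte_offsets)]` when this change landed and were later stabilized.

```rust
fn main() {
    let data = [(), (), ()];
    let ptr: *const () = data.as_ptr();

    // Old spelling: cast to a byte pointer, offset, cast back.
    let old = (ptr as *const i8).wrapping_offset(1) as *const ();
    // New spelling: one method call, no casts, same address.
    let new = ptr.wrapping_byte_add(1);
    assert_eq!(old, new);

    // With a typed offset, a ZST pointer never moves: the step is
    // `1 * size_of::<()>() == 0` bytes. This is why the iterator
    // code cannot use `ptr.add(1)` for zero-sized element types.
    assert_eq!(ptr.wrapping_add(1), ptr);
}
```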
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 2c786fd511e..1f19b9e5945 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -59,7 +59,7 @@ use core::cmp::Ordering;
 use core::convert::TryFrom;
 use core::fmt;
 use core::hash::{Hash, Hasher};
-use core::intrinsics::{arith_offset, assume};
+use core::intrinsics::assume;
 use core::iter;
 #[cfg(not(no_global_oom_handling))]
 use core::iter::FromIterator;
@@ -2678,7 +2678,7 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
             let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
             let begin = me.as_mut_ptr();
             let end = if mem::size_of::<T>() == 0 {
-                arith_offset(begin as *const i8, me.len() as isize) as *const T
+                begin.wrapping_byte_add(me.len())
             } else {
                 begin.add(me.len()) as *const T
             };
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index c25b159c533..feba3283e46 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -249,7 +249,7 @@ impl<T: ?Sized> *const T {
         let offset = dest_addr.wrapping_sub(self_addr);
 
         // This is the canonical desugaring of this operation
-        self.cast::<u8>().wrapping_offset(offset).cast::<T>()
+        self.wrapping_byte_offset(offset)
     }
 
     /// Creates a new pointer by mapping `self`'s address to a new one.
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index fff06b458c7..a70f8747916 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -255,7 +255,7 @@ impl<T: ?Sized> *mut T {
         let offset = dest_addr.wrapping_sub(self_addr);
 
         // This is the canonical desugaring of this operation
-        self.cast::<u8>().wrapping_offset(offset).cast::<T>()
+        self.wrapping_byte_offset(offset)
     }
 
     /// Creates a new pointer by mapping `self`'s address to a new one.
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index c05242222dd..47455760a4b 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -64,7 +64,7 @@ macro_rules! iterator {
         // backwards by `n`. `n` must not exceed `self.len()`.
         macro_rules! zst_shrink {
             ($self: ident, $n: ident) => {
-                $self.end = ($self.end as * $raw_mut u8).wrapping_offset(-$n) as * $raw_mut T;
+                $self.end = $self.end.wrapping_byte_offset(-$n);
             }
         }
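In the two `with_addr` hunks the replaced expression and its replacement are the same operation: offset the pointer by the byte difference between the target address and the current one, wrapping, so the original pointer's provenance is preserved. Here is an illustrative sketch of that desugaring as a free function; `with_addr_sketch` is a made-up name (the real method is `<*const T>::with_addr` and routes through `self.addr()`).

```rust
/// Illustrative re-implementation of the `with_addr` desugaring shown
/// in the hunks above. Purely a sketch, not the std implementation.
fn with_addr_sketch<T>(ptr: *const T, addr: usize) -> *const T {
    let self_addr = ptr as usize as isize; // the real code uses `ptr.addr()`
    let dest_addr = addr as isize;
    let offset = dest_addr.wrapping_sub(self_addr);

    // One wrapping byte offset retargets the address while keeping the
    // original pointer's provenance.
    ptr.wrapping_byte_offset(offset)
}

fn main() {
    let x = 5u32;
    let p: *const u32 = &x;
    let q = with_addr_sketch(p, p as usize + 4);
    assert_eq!(q as usize, p as usize + 4);
}
```

The `zst_shrink!` hunk is the same byte-wise idea applied from the other end: the slice iterator's `end` pointer is walked backwards one byte per zero-sized element.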
diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
index 13b12db209a..94b0310603b 100644
--- a/library/core/tests/atomic.rs
+++ b/library/core/tests/atomic.rs
@@ -155,7 +155,7 @@ fn ptr_add_data() {
     assert_eq!(atom.fetch_ptr_sub(1, SeqCst), n.wrapping_add(1));
     assert_eq!(atom.load(SeqCst), n);
 
-    let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();
+    let bytes_from_n = |b| n.wrapping_byte_add(b);
 
     assert_eq!(atom.fetch_byte_add(1, SeqCst), n);
     assert_eq!(atom.load(SeqCst), bytes_from_n(1));
diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
index 12861794c2d..97a36981005 100644
--- a/library/core/tests/ptr.rs
+++ b/library/core/tests/ptr.rs
@@ -650,7 +650,7 @@ fn thin_box() {
                 .unwrap_or_else(|| handle_alloc_error(layout))
                 .cast::<DynMetadata<T>>();
             ptr.as_ptr().write(meta);
-            ptr.cast::<u8>().as_ptr().add(offset).cast::<Value>().write(value);
+            ptr.as_ptr().byte_add(offset).cast::<Value>().write(value);
             Self { ptr, phantom: PhantomData }
         }
     }
diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs
index 292bf4826fd..781ae03ad45 100644
--- a/library/std/src/io/error/repr_bitpacked.rs
+++ b/library/std/src/io/error/repr_bitpacked.rs
@@ -269,10 +269,10 @@ where
         }
         TAG_SIMPLE_MESSAGE => ErrorData::SimpleMessage(&*ptr.cast::<SimpleMessage>().as_ptr()),
         TAG_CUSTOM => {
-            // It would be correct for us to use `ptr::sub` here (see the
+            // It would be correct for us to use `ptr::byte_sub` here (see the
             // comment above the `wrapping_add` call in `new_custom` for why),
             // but it isn't clear that it makes a difference, so we don't.
-            let custom = ptr.as_ptr().cast::<u8>().wrapping_sub(TAG_CUSTOM).cast::<Custom>();
+            let custom = ptr.as_ptr().wrapping_byte_sub(TAG_CUSTOM).cast::<Custom>();
             ErrorData::Custom(make_custom(custom))
         }
         _ => {
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 71bbf4317e0..ba16a5c68ad 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -296,6 +296,7 @@
 #![feature(panic_can_unwind)]
 #![feature(panic_info_message)]
 #![feature(panic_internals)]
+#![feature(pointer_byte_offsets)]
 #![feature(pointer_is_aligned)]
 #![feature(portable_simd)]
 #![feature(prelude_2024)]
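The `repr_bitpacked.rs` hunk is the one place where the byte offset is not about ZSTs: the packed error representation stores a tag in the low pointer bits that the payload's alignment guarantees are zero, and decoding strips the tag with an unscaled, byte-wise subtraction. A typed `wrapping_sub(TAG_CUSTOM)` would scale the offset by the payload's size, which is exactly the bug class the byte methods rule out. Below is a minimal sketch of that tagged-pointer round trip with stand-in names (`TAG_CUSTOM`, `Payload` here are illustrative, not the real std internals).

```rust
// Sketch of the tagged-pointer encode/decode used by the bitpacked
// error repr: the tag lives in low bits that alignment keeps zero.
const TAG_CUSTOM: usize = 0b01;

#[allow(dead_code)]
#[repr(align(4))] // low two bits of any `*mut Payload` are zero
struct Payload(u32);

fn main() {
    let raw: *mut Payload = Box::into_raw(Box::new(Payload(42)));

    // Encode: fold the tag into the known-zero low bits, byte-wise so
    // the offset is not scaled by size_of::<Payload>().
    let tagged = raw.wrapping_byte_add(TAG_CUSTOM);
    assert_eq!(tagged as usize & 0b11, TAG_CUSTOM);

    // Decode: strip the tag again, recovering the original pointer
    // (and, because the ops are provenance-preserving, a pointer that
    // is still valid to dereference).
    let untagged = tagged.wrapping_byte_sub(TAG_CUSTOM);
    assert_eq!(untagged, raw);

    // Re-own and drop so the sketch doesn't leak.
    drop(unsafe { Box::from_raw(untagged) });
}
```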