Replace most uses of pointer::offset with add and sub
This commit is contained in:
parent 6c943bad02
commit e4720e1cf2
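The motivation: `offset` takes an `isize` count, so call sites indexing with a `usize` needed an `as isize` cast, while `add` and `sub` take `usize` directly and make the direction of travel explicit. A minimal standalone sketch of the equivalences the mechanical rewrite relies on (not part of the diff):

    fn main() {
        let xs = [10u8, 20, 30, 40];
        let p = xs.as_ptr();
        unsafe {
            // p.add(n) is equivalent to p.offset(n as isize).
            assert_eq!(*p.add(3), *p.offset(3));
            // p.sub(n) is equivalent to p.offset(-(n as isize)).
            let end = p.add(xs.len());
            assert_eq!(*end.sub(1), *end.offset(-1));
        }
    }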
@@ -217,7 +217,7 @@ impl<T> TypedArena<T> {
         } else {
             let ptr = self.ptr.get();
             // Advance the pointer.
-            self.ptr.set(self.ptr.get().offset(1));
+            self.ptr.set(self.ptr.get().add(1));
             // Write into uninitialized memory.
             ptr::write(ptr, object);
             &mut *ptr
@@ -94,7 +94,7 @@ mod platform {
     struct Header(*mut u8);
     const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
     unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
-        &mut *(ptr as *mut Header).offset(-1)
+        &mut *(ptr as *mut Header).sub(1)
     }
     unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
         let aligned = ptr.add(align - (ptr as usize & (align - 1)));
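The unchanged `align_ptr` context line shows the round-up trick these platform allocators use: `ptr as usize & (align - 1)` is the misalignment within a power-of-two `align`, and adding `align` minus that lands on the next boundary. An already-aligned pointer still moves a full `align` forward, which is what leaves room for the `Header` stored directly before the returned pointer. A minimal sketch of the same arithmetic on plain addresses (hypothetical helper, not from the diff):

    fn align_up_past(addr: usize, align: usize) -> usize {
        // align must be a power of two; always advances by at least one byte,
        // leaving space for a header directly before the result.
        addr + (align - (addr & (align - 1)))
    }

    fn main() {
        assert_eq!(align_up_past(0x1001, 16), 0x1010);
        assert_eq!(align_up_past(0x1000, 16), 0x1010); // already aligned: full step
    }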
@@ -156,7 +156,7 @@ mod platform {
     struct Header(*mut u8);
     const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
     unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
-        &mut *(ptr as *mut Header).offset(-1)
+        &mut *(ptr as *mut Header).sub(1)
     }
     unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
         let aligned = ptr.add(align - (ptr as usize & (align - 1)));
@@ -273,7 +273,7 @@ impl<D: Decoder, T: Decodable<D>> Decodable<D> for Vec<T> {
         unsafe {
             let ptr: *mut T = vec.as_mut_ptr();
             for i in 0..len {
-                std::ptr::write(ptr.offset(i as isize), Decodable::decode(d));
+                std::ptr::write(ptr.add(i), Decodable::decode(d));
             }
             vec.set_len(len);
         }
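This hunk is a typical win for `add`: the loop index `i` is already a `usize`, so the `i as isize` cast existed only to satisfy `offset`. The surrounding pattern, writing into reserved-but-uninitialized capacity and then publishing the length, can be sketched standalone with a hypothetical element source in place of the `Decodable` machinery:

    fn fill_from(mut next: impl FnMut() -> u32, len: usize) -> Vec<u32> {
        let mut vec = Vec::with_capacity(len);
        let ptr: *mut u32 = vec.as_mut_ptr();
        unsafe {
            for i in 0..len {
                // i is a usize, so add(i) needs no cast.
                std::ptr::write(ptr.add(i), next());
            }
            // Only now are the first `len` slots initialized.
            vec.set_len(len);
        }
        vec
    }

    fn main() {
        let mut n = 0;
        assert_eq!(fill_from(|| { n += 1; n }, 3), vec![1, 2, 3]);
    }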
@@ -15,7 +15,7 @@ fn allocate_zeroed() {
         let end = i.add(layout.size());
         while i < end {
             assert_eq!(*i, 0);
-            i = i.offset(1);
+            i = i.add(1);
         }
         Global.deallocate(ptr.as_non_null_ptr(), layout);
     }
@@ -2447,8 +2447,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
                 let mut right_offset = 0;
                 for i in left_edge..right_edge {
                     right_offset = (i - left_edge) % (cap - right_edge);
-                    let src: isize = (right_edge + right_offset) as isize;
-                    ptr::swap(buf.add(i), buf.offset(src));
+                    let src = right_edge + right_offset;
+                    ptr::swap(buf.add(i), buf.add(src));
                 }
                 let n_ops = right_edge - left_edge;
                 left_edge += n_ops;
@@ -1024,7 +1024,7 @@ where
             // Consume the greater side.
             // If equal, prefer the right run to maintain stability.
             unsafe {
-                let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
+                let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
                     decrement_and_get(left)
                 } else {
                     decrement_and_get(right)
@@ -1038,12 +1038,12 @@ where

     unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
         let old = *ptr;
-        *ptr = unsafe { ptr.offset(1) };
+        *ptr = unsafe { ptr.add(1) };
         old
     }

     unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
-        *ptr = unsafe { ptr.offset(-1) };
+        *ptr = unsafe { ptr.sub(1) };
         *ptr
     }
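These two helpers mirror post-increment and pre-decrement: `get_and_increment` hands back the old position and then steps forward, while `decrement_and_get` steps backward first and hands back the new position, so the merge loop can consume elements from the front of one run and the back of another. A standalone sketch of the two access patterns:

    fn main() {
        let mut xs = [1i32, 2, 3];
        let mut p = xs.as_mut_ptr();
        unsafe {
            // Post-increment: yield the old slot, then step forward.
            let front = { let old = p; p = p.add(1); old };
            assert_eq!(*front, 1);
            // Pre-decrement: step backward, then yield the new slot.
            let back = { p = p.sub(1); p };
            assert_eq!(*back, 1);
        }
    }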
@@ -267,7 +267,7 @@ where
         // one slot in the underlying storage will have been freed up and we can immediately
         // write back the result.
         unsafe {
-            let dst = dst_buf.offset(i as isize);
+            let dst = dst_buf.add(i);
             debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
             ptr::write(dst, self.__iterator_get_unchecked(i));
             // Since this executes user code which can panic we have to bump the pointer
@@ -160,7 +160,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
             Some(unsafe { mem::zeroed() })
         } else {
             let old = self.ptr;
-            self.ptr = unsafe { self.ptr.offset(1) };
+            self.ptr = unsafe { self.ptr.add(1) };

             Some(unsafe { ptr::read(old) })
         }
@@ -272,7 +272,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
             // Make up a value of this ZST.
             Some(unsafe { mem::zeroed() })
         } else {
-            self.end = unsafe { self.end.offset(-1) };
+            self.end = unsafe { self.end.sub(1) };

             Some(unsafe { ptr::read(self.end) })
         }
@@ -288,7 +288,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
             }
         } else {
             // SAFETY: same as for advance_by()
-            self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
+            self.end = unsafe { self.end.sub(step_size) };
         }
         let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
         // SAFETY: same as for advance_by()
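This is one of the few places where the old spelling was genuinely oblique: `step_size.wrapping_neg() as isize` leaned on two's-complement wraparound to produce a negative count, and `sub(step_size)` says the same thing directly. The two agree for any count that fits in `isize`:

    fn main() {
        let n: usize = 7;
        // Two's complement: the wrapped negation, reinterpreted as isize,
        // is plain negation whenever n <= isize::MAX.
        assert_eq!(n.wrapping_neg() as isize, -(n as isize));
    }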
@@ -1393,7 +1393,7 @@ impl<T, A: Allocator> Vec<T, A> {
                 if index < len {
                     // Shift everything over to make space. (Duplicating the
                     // `index`th element into two consecutive places.)
-                    ptr::copy(p, p.offset(1), len - index);
+                    ptr::copy(p, p.add(1), len - index);
                 } else if index == len {
                     // No elements need shifting.
                 } else {
@@ -1455,7 +1455,7 @@ impl<T, A: Allocator> Vec<T, A> {
                 ret = ptr::read(ptr);

                 // Shift everything down to fill in that spot.
-                ptr::copy(ptr.offset(1), ptr, len - index - 1);
+                ptr::copy(ptr.add(1), ptr, len - index - 1);
             }
             self.set_len(len - 1);
             ret
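Both `Vec` hunks shift a block of elements one slot with `ptr::copy`, which, unlike `copy_nonoverlapping`, allows the source and destination ranges to overlap (memmove semantics). A minimal standalone sketch of the remove-side shift:

    use std::ptr;

    fn main() {
        let mut xs = [1i32, 2, 3, 4, 5];
        let (len, index) = (xs.len(), 1); // pretend we removed xs[1]
        unsafe {
            let p = xs.as_mut_ptr().add(index);
            // Overlapping copy: slide everything after `index` down one slot.
            ptr::copy(p.add(1), p, len - index - 1);
        }
        // The final slot keeps its stale value; a real Vec would shrink `len`.
        assert_eq!(xs, [1, 3, 4, 5, 5]);
    }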
@@ -2408,7 +2408,7 @@ impl<T, A: Allocator> Vec<T, A> {
                 // Write all elements except the last one
                 for _ in 1..n {
                     ptr::write(ptr, value.next());
-                    ptr = ptr.offset(1);
+                    ptr = ptr.add(1);
                     // Increment the length in every step in case next() panics
                     local_len.increment_len(1);
                 }
@@ -39,7 +39,7 @@ where
         let mut local_len = SetLenOnDrop::new(&mut self.len);
         iterator.for_each(move |element| {
             ptr::write(ptr, element);
-            ptr = ptr.offset(1);
+            ptr = ptr.add(1);
             // Since the loop executes user code which can panic we have to bump the pointer
             // after each step.
             // NB can't overflow since we would have had to alloc the address space
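In both extend loops the pointer and the recorded length are bumped on every iteration because `value.next()` and the closure run arbitrary user code that may panic; `SetLenOnDrop` commits the running count back to the vector's length even during unwinding, so the elements written so far are still owned (and eventually dropped) by the vector rather than leaked. A rough sketch of what such a guard can look like (field layout assumed here, not quoted from the source):

    struct SetLenOnDrop<'a> {
        len: &'a mut usize,
        local_len: usize,
    }

    impl Drop for SetLenOnDrop<'_> {
        fn drop(&mut self) {
            // Runs on normal exit and on panic: commit how far we got.
            *self.len = self.local_len;
        }
    }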
@@ -1010,11 +1010,11 @@ fn test_as_bytes_fail() {
 fn test_as_ptr() {
     let buf = "hello".as_ptr();
     unsafe {
-        assert_eq!(*buf.offset(0), b'h');
-        assert_eq!(*buf.offset(1), b'e');
-        assert_eq!(*buf.offset(2), b'l');
-        assert_eq!(*buf.offset(3), b'l');
-        assert_eq!(*buf.offset(4), b'o');
+        assert_eq!(*buf.add(0), b'h');
+        assert_eq!(*buf.add(1), b'e');
+        assert_eq!(*buf.add(2), b'l');
+        assert_eq!(*buf.add(3), b'l');
+        assert_eq!(*buf.add(4), b'o');
     }
 }
@@ -2921,7 +2921,7 @@ impl<T> [T] {
                 let prev_ptr_write = ptr.add(next_write - 1);
                 if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
                     if next_read != next_write {
-                        let ptr_write = prev_ptr_write.offset(1);
+                        let ptr_write = prev_ptr_write.add(1);
                         mem::swap(&mut *ptr_read, &mut *ptr_write);
                     }
                     next_write += 1;
@@ -326,8 +326,8 @@ where
                 unsafe {
                     // Branchless comparison.
                     *end_l = i as u8;
-                    end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
-                    elem = elem.offset(1);
+                    end_l = end_l.add(!is_less(&*elem, pivot) as usize);
+                    elem = elem.add(1);
                 }
             }
         }
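The partition inner loops are branchless on purpose: the comparison result is cast to an integer and used as the pointer step, so `end_l` advances only when the element needs to move, and the offset buffer fills densely without a conditional jump. Previously the bool was cast `as isize` for `offset`; `as usize` for `add` encodes the same 0-or-1 step. A standalone sketch of the idea:

    fn main() {
        let xs = [5u8, 1, 9, 3, 7];
        let pivot = 4u8;
        let mut offsets = [0u8; 8];
        let mut end = offsets.as_mut_ptr();
        unsafe {
            for (i, &x) in xs.iter().enumerate() {
                *end = i as u8;
                // Branchless: keep the slot only when x is out of place.
                end = end.add(!(x < pivot) as usize);
            }
            let count = end.offset_from(offsets.as_ptr()) as usize;
            assert_eq!(&offsets[..count], &[0, 2, 4]); // indices of 5, 9, 7
        }
    }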
@@ -352,9 +352,9 @@ where
             // Plus, `block_r` was asserted to be less than `BLOCK` and `elem` will therefore at most be pointing to the beginning of the slice.
             unsafe {
                 // Branchless comparison.
-                elem = elem.offset(-1);
+                elem = elem.sub(1);
                 *end_r = i as u8;
-                end_r = end_r.offset(is_less(&*elem, pivot) as isize);
+                end_r = end_r.add(is_less(&*elem, pivot) as usize);
             }
         }
     }
@@ -365,12 +365,12 @@ where
         if count > 0 {
             macro_rules! left {
                 () => {
-                    l.offset(*start_l as isize)
+                    l.add(*start_l as usize)
                 };
             }
             macro_rules! right {
                 () => {
-                    r.offset(-(*start_r as isize) - 1)
+                    r.sub((*start_r as usize) + 1)
                 };
             }
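The `right!` macro is the least mechanical conversion in the commit: `offset(-(x as isize) - 1)` becomes `sub((x as usize) + 1)`, relying on the identity -x - 1 == -(x + 1). A quick standalone check that both spellings land on the same element:

    fn main() {
        let xs = [0u8; 16];
        let r = unsafe { xs.as_ptr().add(xs.len()) }; // one past the end
        let start_r: u8 = 3;
        unsafe {
            assert_eq!(
                r.offset(-(start_r as isize) - 1),
                r.sub(start_r as usize + 1)
            );
        }
    }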
@@ -398,16 +398,16 @@ where
             ptr::copy_nonoverlapping(right!(), left!(), 1);

             for _ in 1..count {
-                start_l = start_l.offset(1);
+                start_l = start_l.add(1);
                 ptr::copy_nonoverlapping(left!(), right!(), 1);
-                start_r = start_r.offset(1);
+                start_r = start_r.add(1);
                 ptr::copy_nonoverlapping(right!(), left!(), 1);
             }

             ptr::copy_nonoverlapping(&tmp, right!(), 1);
             mem::forget(tmp);
-            start_l = start_l.offset(1);
-            start_r = start_r.offset(1);
+            start_l = start_l.add(1);
+            start_r = start_r.add(1);
         }
     }
@@ -420,7 +420,7 @@ where
         // safe. Otherwise, the debug assertions in the `is_done` case guarantee that
         // `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
         // for the smaller number of remaining elements.
-        l = unsafe { l.offset(block_l as isize) };
+        l = unsafe { l.add(block_l) };
     }

     if start_r == end_r {
@@ -428,7 +428,7 @@ where

         // SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide,
         // or `block_r` has been adjusted for the last handful of elements.
-        r = unsafe { r.offset(-(block_r as isize)) };
+        r = unsafe { r.sub(block_r) };
     }

     if is_done {
@@ -457,9 +457,9 @@ where
         // - `offsets_l` contains valid offsets into `v` collected during the partitioning of
         //   the last block, so the `l.offset` calls are valid.
         unsafe {
-            end_l = end_l.offset(-1);
-            ptr::swap(l.offset(*end_l as isize), r.offset(-1));
-            r = r.offset(-1);
+            end_l = end_l.sub(1);
+            ptr::swap(l.add(*end_l as usize), r.sub(1));
+            r = r.sub(1);
         }
     }
     width(v.as_mut_ptr(), r)
@@ -470,9 +470,9 @@ where
     while start_r < end_r {
         // SAFETY: See the reasoning in [remaining-elements-safety].
         unsafe {
-            end_r = end_r.offset(-1);
-            ptr::swap(l, r.offset(-(*end_r as isize) - 1));
-            l = l.offset(1);
+            end_r = end_r.sub(1);
+            ptr::swap(l, r.sub((*end_r as usize) + 1));
+            l = l.add(1);
         }
     }
     width(v.as_mut_ptr(), l)
@@ -216,12 +216,12 @@ pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
             // SAFETY: since `align - index` and `ascii_block_size` are
             // multiples of `usize_bytes`, `block = ptr.add(index)` is
             // always aligned with a `usize` so it's safe to dereference
-            // both `block` and `block.offset(1)`.
+            // both `block` and `block.add(1)`.
             unsafe {
                 let block = ptr.add(index) as *const usize;
                 // break if there is a nonascii byte
                 let zu = contains_nonascii(*block);
-                let zv = contains_nonascii(*block.offset(1));
+                let zv = contains_nonascii(*block.add(1));
                 if zu || zv {
                     break;
                 }
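For context, `contains_nonascii` tests an entire `usize` worth of bytes at once by masking each byte's high bit, which is why the loop reads `block` and `block.add(1)` to cover `2 * size_of::<usize>()` bytes per iteration. A sketch of the mask trick (the constant's spelling here is an assumption, not quoted from the file):

    fn contains_nonascii(x: usize) -> bool {
        // Every byte set to 0x80: the high bit that non-ASCII bytes carry.
        const NONASCII_MASK: usize =
            usize::from_ne_bytes([0x80; std::mem::size_of::<usize>()]);
        (x & NONASCII_MASK) != 0
    }

    fn main() {
        let ascii = usize::from_ne_bytes([b'A'; std::mem::size_of::<usize>()]);
        let non_ascii = usize::from_ne_bytes([0xC3; std::mem::size_of::<usize>()]);
        assert!(!contains_nonascii(ascii));
        assert!(contains_nonascii(non_ascii));
    }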
@@ -42,7 +42,7 @@ pub(crate) unsafe fn android_set_abort_message(payload: *mut &mut dyn BoxMeUp) {
         return; // allocation failure
     }
     copy_nonoverlapping(msg.as_ptr(), buf as *mut u8, msg.len());
-    buf.offset(msg.len() as isize).write(0);
+    buf.add(msg.len()).write(0);

     let func = transmute::<usize, SetAbortMessageType>(func_addr);
     func(buf);
@@ -75,7 +75,7 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result

     let call_site_encoding = reader.read::<u8>();
     let call_site_table_length = reader.read_uleb128();
-    let action_table = reader.ptr.offset(call_site_table_length as isize);
+    let action_table = reader.ptr.add(call_site_table_length as usize);
     let ip = context.ip;

     if !USING_SJLJ_EXCEPTIONS {
@@ -329,7 +329,7 @@ impl SocketAddr {

             crate::ptr::copy_nonoverlapping(
                 namespace.as_ptr(),
-                addr.sun_path.as_mut_ptr().offset(1) as *mut u8,
+                addr.sun_path.as_mut_ptr().add(1) as *mut u8,
                 namespace.len(),
             );
             let len = (sun_path_offset(&addr) + 1 + namespace.len()) as libc::socklen_t;
@@ -17,12 +17,12 @@ fn test_copy_function() {
         dst.copy_from_enclave(&[0u8; 100]);

         // Copy src[0..size] to dst + offset
-        unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().offset(offset), size) };
+        unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().add(offset), size) };

         // Verify copy
         for byte in 0..size {
             unsafe {
-                assert_eq!(*dst.as_ptr().offset(offset + byte as isize), src[byte as usize]);
+                assert_eq!(*dst.as_ptr().add(offset + byte), src[byte as usize]);
             }
         }
     }
@@ -168,7 +168,7 @@ unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 {
             // SAFETY: Because the size and alignment of a header is <= `MIN_ALIGN` and `aligned`
             // is aligned to at least `MIN_ALIGN` and has at least `MIN_ALIGN` bytes of padding before
             // it, it is safe to write a header directly before it.
-            unsafe { ptr::write((aligned as *mut Header).offset(-1), Header(ptr)) };
+            unsafe { ptr::write((aligned as *mut Header).sub(1), Header(ptr)) };

             // SAFETY: The returned pointer does not point to the start of an allocated block,
             // but there is a header readable directly before it containing the location of the start
@@ -213,7 +213,7 @@ unsafe impl GlobalAlloc for System {

             // SAFETY: Because of the contract of `System`, `ptr` is guaranteed to be non-null
             // and have a header readable directly before it.
-            unsafe { ptr::read((ptr as *mut Header).offset(-1)).0 }
+            unsafe { ptr::read((ptr as *mut Header).sub(1)).0 }
         }
     };
@@ -512,7 +512,7 @@ impl File {
                 ));
             }
         };
-        let subst_ptr = path_buffer.offset(subst_off as isize);
+        let subst_ptr = path_buffer.add(subst_off.into());
         let mut subst = slice::from_raw_parts(subst_ptr, subst_len as usize);
         // Absolute paths start with an NT internal namespace prefix `\??\`
         // We should not let it leak through.
@@ -1345,10 +1345,10 @@ fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> {
         let v = br"\??\";
         let v = v.iter().map(|x| *x as u16);
         for c in v.chain(original.as_os_str().encode_wide()) {
-            *buf.offset(i) = c;
+            *buf.add(i) = c;
             i += 1;
         }
-        *buf.offset(i) = 0;
+        *buf.add(i) = 0;
         i += 1;
         (*db).ReparseTag = c::IO_REPARSE_TAG_MOUNT_POINT;
         (*db).ReparseTargetMaximumLength = (i * 2) as c::WORD;
@@ -99,11 +99,11 @@ impl Iterator for Env {
             }
             let p = self.cur as *const u16;
             let mut len = 0;
-            while *p.offset(len) != 0 {
+            while *p.add(len) != 0 {
                 len += 1;
             }
-            let s = slice::from_raw_parts(p, len as usize);
-            self.cur = self.cur.offset(len + 1);
+            let s = slice::from_raw_parts(p, len);
+            self.cur = self.cur.add(len + 1);

             // Windows allows environment variables to start with an equals
             // symbol (in any other position, this is the separator between