std: Standardize (input, output) param orderings
This commit swaps the order of arguments to a few functions that previously took (output, input) parameters but now take (input, output) parameters, in that order. The affected functions are:

* ptr::copy
* ptr::copy_nonoverlapping
* slice::bytes::copy_memory
* intrinsics::copy
* intrinsics::copy_nonoverlapping

Closes #22890
[breaking-change]
This commit is contained in:
parent 14192d6df5
commit acd48a2b3e
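For reference, a minimal sketch of how a call site reads under the new (input, output) ordering. This example is illustrative only, uses made-up buffers, and is not part of the diff below:

    use std::ptr;

    fn main() {
        let src = [1u8, 2, 3, 4];
        let mut dst = [0u8; 4];

        unsafe {
            // Old signature (before this commit): ptr::copy(dst, src, count)
            // New signature (after this commit):  ptr::copy(src, dst, count)
            ptr::copy(src.as_ptr(), dst.as_mut_ptr(), src.len());
        }

        assert_eq!(dst, [1, 2, 3, 4]);
    }

slice::bytes::copy_memory and the copy/copy_nonoverlapping intrinsics follow the same (input, output) convention after this change, as the hunks below show.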
@@ -301,7 +301,7 @@ mod imp {
             libc::realloc(ptr as *mut libc::c_void, size as libc::size_t) as *mut u8
         } else {
             let new_ptr = allocate(size, align);
-            ptr::copy(new_ptr, ptr, cmp::min(size, old_size));
+            ptr::copy(ptr, new_ptr, cmp::min(size, old_size));
             deallocate(ptr, old_size, align);
             new_ptr
         }
@@ -1133,13 +1133,13 @@ impl<K, V> Node<K, V> {
     #[inline]
     unsafe fn insert_kv(&mut self, index: usize, key: K, val: V) -> &mut V {
         ptr::copy(
-            self.keys_mut().as_mut_ptr().offset(index as isize + 1),
             self.keys().as_ptr().offset(index as isize),
+            self.keys_mut().as_mut_ptr().offset(index as isize + 1),
             self.len() - index
         );
         ptr::copy(
-            self.vals_mut().as_mut_ptr().offset(index as isize + 1),
             self.vals().as_ptr().offset(index as isize),
+            self.vals_mut().as_mut_ptr().offset(index as isize + 1),
             self.len() - index
         );

@@ -1155,8 +1155,8 @@ impl<K, V> Node<K, V> {
     #[inline]
     unsafe fn insert_edge(&mut self, index: usize, edge: Node<K, V>) {
         ptr::copy(
-            self.edges_mut().as_mut_ptr().offset(index as isize + 1),
             self.edges().as_ptr().offset(index as isize),
+            self.edges_mut().as_mut_ptr().offset(index as isize + 1),
             self.len() - index
         );
         ptr::write(self.edges_mut().get_unchecked_mut(index), edge);
@@ -1188,13 +1188,13 @@ impl<K, V> Node<K, V> {
         let val = ptr::read(self.vals().get_unchecked(index));

         ptr::copy(
-            self.keys_mut().as_mut_ptr().offset(index as isize),
             self.keys().as_ptr().offset(index as isize + 1),
+            self.keys_mut().as_mut_ptr().offset(index as isize),
             self.len() - index - 1
         );
         ptr::copy(
-            self.vals_mut().as_mut_ptr().offset(index as isize),
             self.vals().as_ptr().offset(index as isize + 1),
+            self.vals_mut().as_mut_ptr().offset(index as isize),
             self.len() - index - 1
         );

@@ -1209,8 +1209,8 @@ impl<K, V> Node<K, V> {
         let edge = ptr::read(self.edges().get_unchecked(index));

         ptr::copy(
-            self.edges_mut().as_mut_ptr().offset(index as isize),
             self.edges().as_ptr().offset(index as isize + 1),
+            self.edges_mut().as_mut_ptr().offset(index as isize),
             // index can be == len+1, so do the +1 first to avoid underflow.
             (self.len() + 1) - index
         );
@@ -1237,19 +1237,19 @@ impl<K, V> Node<K, V> {
         right._len = self.len() / 2;
         let right_offset = self.len() - right.len();
         ptr::copy_nonoverlapping(
-            right.keys_mut().as_mut_ptr(),
             self.keys().as_ptr().offset(right_offset as isize),
+            right.keys_mut().as_mut_ptr(),
             right.len()
         );
         ptr::copy_nonoverlapping(
-            right.vals_mut().as_mut_ptr(),
             self.vals().as_ptr().offset(right_offset as isize),
+            right.vals_mut().as_mut_ptr(),
             right.len()
         );
         if !self.is_leaf() {
             ptr::copy_nonoverlapping(
-                right.edges_mut().as_mut_ptr(),
                 self.edges().as_ptr().offset(right_offset as isize),
+                right.edges_mut().as_mut_ptr(),
                 right.len() + 1
             );
         }
@@ -1278,19 +1278,19 @@ impl<K, V> Node<K, V> {
         ptr::write(self.vals_mut().get_unchecked_mut(old_len), val);

         ptr::copy_nonoverlapping(
-            self.keys_mut().as_mut_ptr().offset(old_len as isize + 1),
             right.keys().as_ptr(),
+            self.keys_mut().as_mut_ptr().offset(old_len as isize + 1),
             right.len()
         );
         ptr::copy_nonoverlapping(
-            self.vals_mut().as_mut_ptr().offset(old_len as isize + 1),
             right.vals().as_ptr(),
+            self.vals_mut().as_mut_ptr().offset(old_len as isize + 1),
             right.len()
         );
         if !self.is_leaf() {
             ptr::copy_nonoverlapping(
-                self.edges_mut().as_mut_ptr().offset(old_len as isize + 1),
                 right.edges().as_ptr(),
+                self.edges_mut().as_mut_ptr().offset(old_len as isize + 1),
                 right.len() + 1
             );
         }
@@ -1320,10 +1320,10 @@ fn insertion_sort<T, F>(v: &mut [T], mut compare: F) where F: FnMut(&T, &T) -> O

         if i != j {
             let tmp = ptr::read(read_ptr);
-            ptr::copy(buf_v.offset(j + 1),
-                      &*buf_v.offset(j),
+            ptr::copy(&*buf_v.offset(j),
+                      buf_v.offset(j + 1),
                       (i - j) as usize);
-            ptr::copy_nonoverlapping(buf_v.offset(j), &tmp, 1);
+            ptr::copy_nonoverlapping(&tmp, buf_v.offset(j), 1);
             mem::forget(tmp);
         }
     }
@@ -1396,10 +1396,10 @@ fn merge_sort<T, F>(v: &mut [T], mut compare: F) where F: FnMut(&T, &T) -> Order
                     // j + 1 could be `len` (for the last `i`), but in
                     // that case, `i == j` so we don't copy. The
                     // `.offset(j)` is always in bounds.
-                    ptr::copy(buf_dat.offset(j + 1),
-                              &*buf_dat.offset(j),
+                    ptr::copy(&*buf_dat.offset(j),
+                              buf_dat.offset(j + 1),
                               i - j as usize);
-                    ptr::copy_nonoverlapping(buf_dat.offset(j), read_ptr, 1);
+                    ptr::copy_nonoverlapping(read_ptr, buf_dat.offset(j), 1);
                 }
             }
         }
@@ -1447,11 +1447,11 @@ fn merge_sort<T, F>(v: &mut [T], mut compare: F) where F: FnMut(&T, &T) -> Order
                 if left == right_start {
                     // the number remaining in this run.
                     let elems = (right_end as usize - right as usize) / mem::size_of::<T>();
-                    ptr::copy_nonoverlapping(out, &*right, elems);
+                    ptr::copy_nonoverlapping(&*right, out, elems);
                     break;
                 } else if right == right_end {
                     let elems = (right_start as usize - left as usize) / mem::size_of::<T>();
-                    ptr::copy_nonoverlapping(out, &*left, elems);
+                    ptr::copy_nonoverlapping(&*left, out, elems);
                     break;
                 }

@@ -1465,7 +1465,7 @@ fn merge_sort<T, F>(v: &mut [T], mut compare: F) where F: FnMut(&T, &T) -> Order
                 } else {
                     step(&mut left)
                 };
-                ptr::copy_nonoverlapping(out, &*to_copy, 1);
+                ptr::copy_nonoverlapping(&*to_copy, out, 1);
                 step(&mut out);
             }
         }
@@ -1479,7 +1479,7 @@ fn merge_sort<T, F>(v: &mut [T], mut compare: F) where F: FnMut(&T, &T) -> Order
     // write the result to `v` in one go, so that there are never two copies
     // of the same object in `v`.
     unsafe {
-        ptr::copy_nonoverlapping(v.as_mut_ptr(), &*buf_dat, len);
+        ptr::copy_nonoverlapping(&*buf_dat, v.as_mut_ptr(), len);
     }

     // increment the pointer, returning the old pointer.
@@ -592,8 +592,8 @@ impl String {
         let ch = self.char_at(idx);
         let next = idx + ch.len_utf8();
         unsafe {
-            ptr::copy(self.vec.as_mut_ptr().offset(idx as isize),
-                      self.vec.as_ptr().offset(next as isize),
+            ptr::copy(self.vec.as_ptr().offset(next as isize),
+                      self.vec.as_mut_ptr().offset(idx as isize),
                       len - next);
             self.vec.set_len(len - (next - idx));
         }
@@ -622,11 +622,11 @@ impl String {
         let amt = ch.encode_utf8(&mut bits).unwrap();

         unsafe {
-            ptr::copy(self.vec.as_mut_ptr().offset((idx + amt) as isize),
-                      self.vec.as_ptr().offset(idx as isize),
+            ptr::copy(self.vec.as_ptr().offset(idx as isize),
+                      self.vec.as_mut_ptr().offset((idx + amt) as isize),
                       len - idx);
-            ptr::copy(self.vec.as_mut_ptr().offset(idx as isize),
-                      bits.as_ptr(),
+            ptr::copy(bits.as_ptr(),
+                      self.vec.as_mut_ptr().offset(idx as isize),
                       amt);
             self.vec.set_len(len + amt);
         }
@@ -260,16 +260,17 @@ impl<T> Vec<T> {

     /// Creates a vector by copying the elements from a raw pointer.
     ///
-    /// This function will copy `elts` contiguous elements starting at `ptr` into a new allocation
-    /// owned by the returned `Vec<T>`. The elements of the buffer are copied into the vector
-    /// without cloning, as if `ptr::read()` were called on them.
+    /// This function will copy `elts` contiguous elements starting at `ptr`
+    /// into a new allocation owned by the returned `Vec<T>`. The elements of
+    /// the buffer are copied into the vector without cloning, as if
+    /// `ptr::read()` were called on them.
     #[inline]
     #[unstable(feature = "collections",
                reason = "may be better expressed via composition")]
     pub unsafe fn from_raw_buf(ptr: *const T, elts: usize) -> Vec<T> {
         let mut dst = Vec::with_capacity(elts);
         dst.set_len(elts);
-        ptr::copy_nonoverlapping(dst.as_mut_ptr(), ptr, elts);
+        ptr::copy_nonoverlapping(ptr, dst.as_mut_ptr(), elts);
         dst
     }

@@ -288,8 +289,9 @@ impl<T> Vec<T> {
         self.cap
     }

-    /// Reserves capacity for at least `additional` more elements to be inserted in the given
-    /// `Vec<T>`. The collection may reserve more space to avoid frequent reallocations.
+    /// Reserves capacity for at least `additional` more elements to be inserted
+    /// in the given `Vec<T>`. The collection may reserve more space to avoid
+    /// frequent reallocations.
     ///
     /// # Panics
     ///
@@ -541,7 +543,7 @@ impl<T> Vec<T> {
                 let p = self.as_mut_ptr().offset(index as isize);
                 // Shift everything over to make space. (Duplicating the
                 // `index`th element into two consecutive places.)
-                ptr::copy(p.offset(1), &*p, len - index);
+                ptr::copy(&*p, p.offset(1), len - index);
                 // Write it in, overwriting the first copy of the `index`th
                 // element.
                 ptr::write(&mut *p, element);
@@ -579,7 +581,7 @@ impl<T> Vec<T> {
                 ret = ptr::read(ptr);

                 // Shift everything down to fill in that spot.
-                ptr::copy(ptr, &*ptr.offset(1), len - index - 1);
+                ptr::copy(&*ptr.offset(1), ptr, len - index - 1);
             }
             self.set_len(len - 1);
             ret
@@ -721,8 +723,8 @@ impl<T> Vec<T> {
         let len = self.len();
         unsafe {
             ptr::copy_nonoverlapping(
-                self.get_unchecked_mut(len),
                 other.as_ptr(),
+                self.get_unchecked_mut(len),
                 other.len());
         }

@@ -1042,8 +1044,8 @@ impl<T> Vec<T> {
             other.set_len(other_len);

             ptr::copy_nonoverlapping(
-                other.as_mut_ptr(),
                 self.as_ptr().offset(at as isize),
+                other.as_mut_ptr(),
                 other.len());
         }
         other
@@ -142,8 +142,8 @@ impl<T> VecDeque<T> {
         debug_assert!(src + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len,
                       self.cap);
         ptr::copy(
-            self.ptr.offset(dst as isize),
             self.ptr.offset(src as isize),
+            self.ptr.offset(dst as isize),
             len);
     }

@@ -155,8 +155,8 @@ impl<T> VecDeque<T> {
         debug_assert!(src + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len,
                       self.cap);
         ptr::copy_nonoverlapping(
-            self.ptr.offset(dst as isize),
             self.ptr.offset(src as isize),
+            self.ptr.offset(dst as isize),
             len);
     }
 }
@@ -1361,21 +1361,21 @@ impl<T> VecDeque<T> {
                 // `at` lies in the first half.
                 let amount_in_first = first_len - at;

-                ptr::copy_nonoverlapping(*other.ptr,
-                                         first_half.as_ptr().offset(at as isize),
+                ptr::copy_nonoverlapping(first_half.as_ptr().offset(at as isize),
+                                         *other.ptr,
                                          amount_in_first);

                 // just take all of the second half.
-                ptr::copy_nonoverlapping(other.ptr.offset(amount_in_first as isize),
-                                         second_half.as_ptr(),
+                ptr::copy_nonoverlapping(second_half.as_ptr(),
+                                         other.ptr.offset(amount_in_first as isize),
                                          second_len);
             } else {
                 // `at` lies in the second half, need to factor in the elements we skipped
                 // in the first half.
                 let offset = at - first_len;
                 let amount_in_second = second_len - offset;
-                ptr::copy_nonoverlapping(*other.ptr,
-                                         second_half.as_ptr().offset(offset as isize),
+                ptr::copy_nonoverlapping(second_half.as_ptr().offset(offset as isize),
+                                         *other.ptr,
                                          amount_in_second);
             }
         }
@@ -316,8 +316,8 @@ pub fn float_to_str_bytes_common<T: Float, U, F>(

     impl<'a> fmt::Write for Filler<'a> {
         fn write_str(&mut self, s: &str) -> fmt::Result {
-            slice::bytes::copy_memory(&mut self.buf[(*self.end)..],
-                                      s.as_bytes());
+            slice::bytes::copy_memory(s.as_bytes(),
+                                      &mut self.buf[(*self.end)..]);
             *self.end += s.len();
             Ok(())
         }
@ -293,9 +293,9 @@ extern "rust-intrinsic" {
|
||||
/// let mut t: T = mem::uninitialized();
|
||||
///
|
||||
/// // Perform the swap, `&mut` pointers never alias
|
||||
/// ptr::copy_nonoverlapping(&mut t, &*x, 1);
|
||||
/// ptr::copy_nonoverlapping(x, &*y, 1);
|
||||
/// ptr::copy_nonoverlapping(y, &t, 1);
|
||||
/// ptr::copy_nonoverlapping(x, &mut t, 1);
|
||||
/// ptr::copy_nonoverlapping(y, x, 1);
|
||||
/// ptr::copy_nonoverlapping(&t, y, 1);
|
||||
///
|
||||
/// // y and t now point to the same thing, but we need to completely forget `tmp`
|
||||
/// // because it's no longer relevant.
|
||||
@ -304,6 +304,12 @@ extern "rust-intrinsic" {
|
||||
/// }
|
||||
/// ```
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[cfg(not(stage0))]
|
||||
pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
|
||||
|
||||
/// dox
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[cfg(stage0)]
|
||||
pub fn copy_nonoverlapping<T>(dst: *mut T, src: *const T, count: usize);
|
||||
|
||||
/// Copies `count * size_of<T>` bytes from `src` to `dst`. The source
|
||||
@ -329,12 +335,18 @@ extern "rust-intrinsic" {
|
||||
/// unsafe fn from_buf_raw<T>(ptr: *const T, elts: usize) -> Vec<T> {
|
||||
/// let mut dst = Vec::with_capacity(elts);
|
||||
/// dst.set_len(elts);
|
||||
/// ptr::copy(dst.as_mut_ptr(), ptr, elts);
|
||||
/// ptr::copy(ptr, dst.as_mut_ptr(), elts);
|
||||
/// dst
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[cfg(not(stage0))]
|
||||
pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
|
||||
|
||||
/// dox
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[cfg(stage0)]
|
||||
pub fn copy<T>(dst: *mut T, src: *const T, count: usize);
|
||||
|
||||
/// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
|
||||
|
@@ -229,9 +229,9 @@ pub fn swap<T>(x: &mut T, y: &mut T) {
         let mut t: T = uninitialized();

         // Perform the swap, `&mut` pointers never alias
-        ptr::copy_nonoverlapping(&mut t, &*x, 1);
-        ptr::copy_nonoverlapping(x, &*y, 1);
-        ptr::copy_nonoverlapping(y, &t, 1);
+        ptr::copy_nonoverlapping(&*x, &mut t, 1);
+        ptr::copy_nonoverlapping(&*y, x, 1);
+        ptr::copy_nonoverlapping(&t, y, 1);

         // y and t now point to the same thing, but we need to completely forget `t`
         // because it's no longer relevant.
@@ -104,11 +104,28 @@ use cmp::Ordering::{self, Less, Equal, Greater};
 // FIXME #19649: intrinsic docs don't render, so these have no docs :(

 #[stable(feature = "rust1", since = "1.0.0")]
+#[cfg(not(stage0))]
 pub use intrinsics::copy_nonoverlapping;

+/// dox
+#[cfg(stage0)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
+    intrinsics::copy_nonoverlapping(dst, src, count)
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
+#[cfg(not(stage0))]
 pub use intrinsics::copy;

+/// dox
+#[cfg(stage0)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
+    intrinsics::copy(dst, src, count)
+}
+

 #[stable(feature = "rust1", since = "1.0.0")]
 pub use intrinsics::write_bytes;

@@ -167,12 +184,11 @@ pub unsafe fn zero_memory<T>(dst: *mut T, count: usize) {
 pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
     // Give ourselves some scratch space to work with
     let mut tmp: T = mem::uninitialized();
-    let t: *mut T = &mut tmp;

     // Perform the swap
-    copy_nonoverlapping(t, &*x, 1);
-    copy(x, &*y, 1); // `x` and `y` may overlap
-    copy_nonoverlapping(y, &*t, 1);
+    copy_nonoverlapping(x, &mut tmp, 1);
+    copy(y, x, 1); // `x` and `y` may overlap
+    copy_nonoverlapping(&tmp, y, 1);

     // y and t now point to the same thing, but we need to completely forget `tmp`
     // because it's no longer relevant.
@@ -208,7 +224,7 @@ pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
 #[stable(feature = "rust1", since = "1.0.0")]
 pub unsafe fn read<T>(src: *const T) -> T {
     let mut tmp: T = mem::uninitialized();
-    copy_nonoverlapping(&mut tmp, src, 1);
+    copy_nonoverlapping(src, &mut tmp, 1);
     tmp
 }

@@ -1577,14 +1577,14 @@ pub mod bytes {
     ///
     /// Panics if the length of `dst` is less than the length of `src`.
     #[inline]
-    pub fn copy_memory(dst: &mut [u8], src: &[u8]) {
+    pub fn copy_memory(src: &[u8], dst: &mut [u8]) {
         let len_src = src.len();
         assert!(dst.len() >= len_src);
         // `dst` is unaliasable, so we know statically it doesn't overlap
         // with `src`.
         unsafe {
-            ptr::copy_nonoverlapping(dst.as_mut_ptr(),
-                                     src.as_ptr(),
+            ptr::copy_nonoverlapping(src.as_ptr(),
+                                     dst.as_mut_ptr(),
                                      len_src);
         }
     }
@@ -35,18 +35,15 @@ fn test() {
         let v0 = vec![32000u16, 32001u16, 32002u16];
         let mut v1 = vec![0u16, 0u16, 0u16];

-        copy(v1.as_mut_ptr().offset(1),
-             v0.as_ptr().offset(1), 1);
+        copy(v0.as_ptr().offset(1), v1.as_mut_ptr().offset(1), 1);
         assert!((v1[0] == 0u16 &&
                  v1[1] == 32001u16 &&
                  v1[2] == 0u16));
-        copy(v1.as_mut_ptr(),
-             v0.as_ptr().offset(2), 1);
+        copy(v0.as_ptr().offset(2), v1.as_mut_ptr(), 1);
         assert!((v1[0] == 32002u16 &&
                  v1[1] == 32001u16 &&
                  v1[2] == 0u16));
-        copy(v1.as_mut_ptr().offset(2),
-             v0.as_ptr(), 1);
+        copy(v0.as_ptr(), v1.as_mut_ptr().offset(2), 1);
         assert!((v1[0] == 32002u16 &&
                  v1[1] == 32001u16 &&
                  v1[2] == 32000u16));
@@ -449,21 +449,21 @@ pub mod reader {
     pub fn doc_as_u16(d: Doc) -> u16 {
         assert_eq!(d.end, d.start + 2);
         let mut b = [0; 2];
-        bytes::copy_memory(&mut b, &d.data[d.start..d.end]);
+        bytes::copy_memory(&d.data[d.start..d.end], &mut b);
         unsafe { (*(b.as_ptr() as *const u16)).to_be() }
     }

     pub fn doc_as_u32(d: Doc) -> u32 {
         assert_eq!(d.end, d.start + 4);
         let mut b = [0; 4];
-        bytes::copy_memory(&mut b, &d.data[d.start..d.end]);
+        bytes::copy_memory(&d.data[d.start..d.end], &mut b);
         unsafe { (*(b.as_ptr() as *const u32)).to_be() }
     }

     pub fn doc_as_u64(d: Doc) -> u64 {
         assert_eq!(d.end, d.start + 8);
         let mut b = [0; 8];
-        bytes::copy_memory(&mut b, &d.data[d.start..d.end]);
+        bytes::copy_memory(&d.data[d.start..d.end], &mut b);
         unsafe { (*(b.as_ptr() as *const u64)).to_be() }
     }

@@ -938,7 +938,7 @@ pub mod writer {
         {
             let last_size_pos = last_size_pos as usize;
             let data = &self.writer.get_ref()[last_size_pos+4..cur_pos as usize];
-            bytes::copy_memory(&mut buf, data);
+            bytes::copy_memory(data, &mut buf);
         }

         // overwrite the size and data and continue
@@ -62,7 +62,7 @@ pub type Cmd<'a> = &'a crate_metadata;

 fn u32_from_be_bytes(bytes: &[u8]) -> u32 {
     let mut b = [0; 4];
-    bytes::copy_memory(&mut b, &bytes[..4]);
+    bytes::copy_memory(&bytes[..4], &mut b);
     unsafe { (*(b.as_ptr() as *const u32)).to_be() }
 }

@@ -139,15 +139,15 @@ impl FixedBuffer for FixedBuffer64 {
             let buffer_remaining = size - self.buffer_idx;
             if input.len() >= buffer_remaining {
                 copy_memory(
-                    &mut self.buffer[self.buffer_idx..size],
-                    &input[..buffer_remaining]);
+                    &input[..buffer_remaining],
+                    &mut self.buffer[self.buffer_idx..size]);
                 self.buffer_idx = 0;
                 func(&self.buffer);
                 i += buffer_remaining;
             } else {
                 copy_memory(
-                    &mut self.buffer[self.buffer_idx..self.buffer_idx + input.len()],
-                    input);
+                    input,
+                    &mut self.buffer[self.buffer_idx..self.buffer_idx + input.len()]);
                 self.buffer_idx += input.len();
                 return;
             }
@@ -165,8 +165,8 @@ impl FixedBuffer for FixedBuffer64 {
             // be empty.
             let input_remaining = input.len() - i;
             copy_memory(
-                &mut self.buffer[..input_remaining],
-                &input[i..]);
+                &input[i..],
+                &mut self.buffer[..input_remaining]);
             self.buffer_idx += input_remaining;
         }

@@ -398,8 +398,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                           false,
                           false,
                           *substs.types.get(FnSpace, 0),
-                          llargs[0],
                           llargs[1],
+                          llargs[0],
                           llargs[2],
                           call_debug_location)
         }
@@ -408,8 +408,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                           true,
                           false,
                           *substs.types.get(FnSpace, 0),
-                          llargs[0],
                           llargs[1],
+                          llargs[0],
                           llargs[2],
                           call_debug_location)
         }
@@ -5417,7 +5417,21 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &ast::ForeignItem) {
                   mutbl: ast::MutImmutable
               }))
           }
-          "copy" | "copy_nonoverlapping" |
+          "copy" | "copy_nonoverlapping" => {
+            (1,
+             vec!(
+                ty::mk_ptr(tcx, ty::mt {
+                    ty: param(ccx, 0),
+                    mutbl: ast::MutImmutable
+                }),
+                ty::mk_ptr(tcx, ty::mt {
+                    ty: param(ccx, 0),
+                    mutbl: ast::MutMutable
+                }),
+                tcx.types.usize,
+             ),
+             ty::mk_nil(tcx))
+          }
           "volatile_copy_memory" | "volatile_copy_nonoverlapping_memory" => {
             (1,
              vec!(
@@ -480,8 +480,8 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> GapThenFull<K, V, M> {
     pub fn shift(mut self) -> Option<GapThenFull<K, V, M>> {
         unsafe {
             *self.gap.raw.hash = mem::replace(&mut *self.full.raw.hash, EMPTY_BUCKET);
-            ptr::copy_nonoverlapping(self.gap.raw.key, self.full.raw.key, 1);
-            ptr::copy_nonoverlapping(self.gap.raw.val, self.full.raw.val, 1);
+            ptr::copy_nonoverlapping(self.full.raw.key, self.gap.raw.key, 1);
+            ptr::copy_nonoverlapping(self.full.raw.val, self.gap.raw.val, 1);
         }

         let FullBucket { raw: prev_raw, idx: prev_idx, .. } = self.full;
@@ -177,8 +177,8 @@ impl<W: Write> BufWriter<W> {
         if written > 0 {
             // NB: would be better expressed as .remove(0..n) if it existed
             unsafe {
-                ptr::copy(self.buf.as_mut_ptr(),
-                          self.buf.as_ptr().offset(written as isize),
+                ptr::copy(self.buf.as_ptr().offset(written as isize),
+                          self.buf.as_mut_ptr(),
                           len - written);
             }
         }
@@ -151,7 +151,7 @@ impl Write for Cursor<Vec<u8>> {
         // there (left), and what will be appended on the end (right)
         let space = self.inner.len() - pos as usize;
         let (left, right) = buf.split_at(cmp::min(space, buf.len()));
-        slice::bytes::copy_memory(&mut self.inner[(pos as usize)..], left);
+        slice::bytes::copy_memory(left, &mut self.inner[(pos as usize)..]);
        self.inner.push_all(right);

        // Bump us forward
@@ -149,7 +149,7 @@ impl<'a> Read for &'a [u8] {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         let amt = cmp::min(buf.len(), self.len());
         let (a, b) = self.split_at(amt);
-        slice::bytes::copy_memory(buf, a);
+        slice::bytes::copy_memory(a, buf);
         *self = b;
         Ok(amt)
     }
@@ -170,7 +170,7 @@ impl<'a> Write for &'a mut [u8] {
     fn write(&mut self, data: &[u8]) -> io::Result<usize> {
         let amt = cmp::min(data.len(), self.len());
         let (a, b) = mem::replace(self, &mut []).split_at_mut(amt);
-        slice::bytes::copy_memory(a, &data[..amt]);
+        slice::bytes::copy_memory(&data[..amt], a);
         *self = b;
         Ok(amt)
     }
@@ -118,7 +118,7 @@ impl<R: Reader> Reader for BufferedReader<R> {
         let nread = {
             let available = try!(self.fill_buf());
             let nread = cmp::min(available.len(), buf.len());
-            slice::bytes::copy_memory(buf, &available[..nread]);
+            slice::bytes::copy_memory(&available[..nread], buf);
             nread
         };
         self.pos += nread;
@@ -225,7 +225,7 @@ impl<W: Writer> Writer for BufferedWriter<W> {
             self.inner.as_mut().unwrap().write_all(buf)
         } else {
             let dst = &mut self.buf[self.pos..];
-            slice::bytes::copy_memory(dst, buf);
+            slice::bytes::copy_memory(buf, dst);
             self.pos += buf.len();
             Ok(())
         }
@@ -91,7 +91,7 @@ impl Reader for ChanReader {
                 Some(src) => {
                     let dst = &mut buf[num_read..];
                     let count = cmp::min(src.len(), dst.len());
-                    bytes::copy_memory(dst, &src[..count]);
+                    bytes::copy_memory(&src[..count], dst);
                     count
                 },
                 None => 0,
@@ -171,7 +171,7 @@ pub fn u64_from_be_bytes(data: &[u8], start: usize, size: usize) -> u64 {
     unsafe {
         let ptr = data.as_ptr().offset(start as isize);
         let out = buf.as_mut_ptr();
-        copy_nonoverlapping(out.offset((8 - size) as isize), ptr, size);
+        copy_nonoverlapping(ptr, out.offset((8 - size) as isize), size);
         (*(out as *const u64)).to_be()
     }
 }
@@ -168,7 +168,7 @@ impl Reader for MemReader {
             let input = &self.buf[self.pos.. self.pos + write_len];
             let output = &mut buf[..write_len];
             assert_eq!(input.len(), output.len());
-            slice::bytes::copy_memory(output, input);
+            slice::bytes::copy_memory(input, output);
         }
         self.pos += write_len;
         assert!(self.pos <= self.buf.len());
@@ -212,7 +212,7 @@ impl<'a> Reader for &'a [u8] {
         {
             let input = &self[..write_len];
             let output = &mut buf[.. write_len];
-            slice::bytes::copy_memory(output, input);
+            slice::bytes::copy_memory(input, output);
         }

         *self = &self[write_len..];
@@ -287,13 +287,13 @@ impl<'a> Writer for BufWriter<'a> {
         let src_len = src.len();

         if dst_len >= src_len {
-            slice::bytes::copy_memory(dst, src);
+            slice::bytes::copy_memory(src, dst);

             self.pos += src_len;

             Ok(())
         } else {
-            slice::bytes::copy_memory(dst, &src[..dst_len]);
+            slice::bytes::copy_memory(&src[..dst_len], dst);

             self.pos += dst_len;

@@ -360,7 +360,7 @@ impl<'a> Reader for BufReader<'a> {
             let input = &self.buf[self.pos.. self.pos + write_len];
             let output = &mut buf[..write_len];
             assert_eq!(input.len(), output.len());
-            slice::bytes::copy_memory(output, input);
+            slice::bytes::copy_memory(input, output);
         }
         self.pos += write_len;
         assert!(self.pos <= self.buf.len());
@@ -344,8 +344,8 @@ impl Wtf8Buf {
             Some((surrogate_pos, _)) => {
                 pos = surrogate_pos + 3;
                 slice::bytes::copy_memory(
+                    UTF8_REPLACEMENT_CHARACTER,
                     &mut self.bytes[surrogate_pos .. pos],
-                    UTF8_REPLACEMENT_CHARACTER
                 );
             },
             None => return unsafe { String::from_utf8_unchecked(self.bytes) }
@@ -126,10 +126,9 @@ impl<'a, W: Writer> RepeatFasta<'a, W> {
         let mut buf = repeat(0).take(alu_len + LINE_LEN).collect::<Vec<_>>();
         let alu: &[u8] = self.alu.as_bytes();

-        copy_memory(&mut buf, alu);
+        copy_memory(alu, &mut buf);
         let buf_len = buf.len();
-        copy_memory(&mut buf[alu_len..buf_len],
-                    &alu[..LINE_LEN]);
+        copy_memory(&alu[..LINE_LEN], &mut buf[alu_len..buf_len]);

         let mut pos = 0;
         let mut bytes;
@@ -181,8 +181,8 @@ fn reverse_complement(seq: &mut [u8], tables: &Tables) {
     let mut i = LINE_LEN;
     while i < len {
         unsafe {
-            copy(seq.as_mut_ptr().offset((i - off + 1) as isize),
-                 seq.as_ptr().offset((i - off) as isize), off);
+            copy(seq.as_ptr().offset((i - off) as isize),
+                 seq.as_mut_ptr().offset((i - off + 1) as isize), off);
             *seq.get_unchecked_mut(i - off) = b'\n';
         }
         i += LINE_LEN + 1;
@@ -26,7 +26,7 @@ trait MyWriter {

 impl<'a> MyWriter for &'a mut [u8] {
     fn my_write(&mut self, buf: &[u8]) -> IoResult<()> {
-        slice::bytes::copy_memory(*self, buf);
+        slice::bytes::copy_memory(buf, *self);

         let write_len = buf.len();
         unsafe {