Move small-copy optimization into <&[u8] as Read>

Based on the discussion in https://github.com/rust-lang/rust/pull/37573,
it is likely better to keep this limited to std::io, instead of
modifying a function which users expect to be a memcpy.
Author: Ruud van Asseldonk
Date:   2016-11-12 15:58:58 +01:00
Parent: 341805288e
Commit: 3be2c3b309
2 changed files with 23 additions and 15 deletions
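
For context, a minimal usage sketch (not part of the commit; names and values are illustrative): reading a single byte from a `&[u8]` through the `Read` trait takes the new single-byte path added below and skips the call to `memcpy`, while larger reads still go through `copy_from_slice`.

use std::io::Read;

fn main() -> std::io::Result<()> {
    let data = [1u8, 2, 3, 4];
    let mut reader: &[u8] = &data;

    // One-byte read: takes the `amt == 1` branch in <&[u8] as Read>::read
    // and assigns the byte directly instead of calling memcpy.
    let mut one = [0u8; 1];
    let n = reader.read(&mut one)?;
    assert_eq!(n, 1);
    assert_eq!(one[0], 1);

    // Larger read: still copied via copy_from_slice, i.e. a memcpy.
    let mut rest = [0u8; 3];
    reader.read_exact(&mut rest)?;
    assert_eq!(rest, [2, 3, 4]);
    Ok(())
}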


@@ -515,19 +515,9 @@ impl<T> SliceExt for [T] {
     fn copy_from_slice(&mut self, src: &[T]) where T: Copy {
         assert!(self.len() == src.len(),
                 "destination and source slices have different lengths");
-        // First check if the amount of elements we want to copy is small:
-        // `copy_nonoverlapping` will do a memcopy, which involves an indirect
-        // function call when `memcpy` is in the dynamically-linked libc. For
-        // small elements (such as a single byte or pointer), the overhead is
-        // significant. If the element is big then the assignment is a memcopy
-        // anyway.
-        if self.len() == 1 {
-            self[0] = src[0];
-        } else {
-            unsafe {
-                ptr::copy_nonoverlapping(
-                    src.as_ptr(), self.as_mut_ptr(), self.len());
-            }
+        unsafe {
+            ptr::copy_nonoverlapping(
+                src.as_ptr(), self.as_mut_ptr(), self.len());
         }
     }
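
The revert above restores the behaviour the commit message refers to. As a hedged sketch (the caller and the name `fill_block` are hypothetical), this is the kind of code that relies on `copy_from_slice` lowering to the length assertion followed by a single `ptr::copy_nonoverlapping`, i.e. a plain memcpy with no small-size special case:

// Hypothetical caller: with this commit the copy below is again an
// unconditional memcpy for any length, including a length of 1.
fn fill_block(dst: &mut [u8; 64], src: &[u8; 64]) {
    dst.copy_from_slice(&src[..]);
}

fn main() {
    let src = [0xABu8; 64];
    let mut dst = [0u8; 64];
    fill_block(&mut dst, &src);
    assert_eq!(&dst[..], &src[..]);
}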


@@ -157,7 +157,16 @@ impl<'a> Read for &'a [u8] {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         let amt = cmp::min(buf.len(), self.len());
         let (a, b) = self.split_at(amt);
-        buf[..amt].copy_from_slice(a);
+
+        // First check if the amount of bytes we want to read is small:
+        // `copy_from_slice` will generally expand to a call to `memcpy`, and
+        // for a single byte the overhead is significant.
+        if amt == 1 {
+            buf[0] = a[0];
+        } else {
+            buf[..amt].copy_from_slice(a);
+        }
+
         *self = b;
         Ok(amt)
     }
@@ -169,7 +178,16 @@ impl<'a> Read for &'a [u8] {
                                   "failed to fill whole buffer"));
         }
         let (a, b) = self.split_at(buf.len());
-        buf.copy_from_slice(a);
+
+        // First check if the amount of bytes we want to read is small:
+        // `copy_from_slice` will generally expand to a call to `memcpy`, and
+        // for a single byte the overhead is significant.
+        if buf.len() == 1 {
+            buf[0] = a[0];
+        } else {
+            buf.copy_from_slice(a);
+        }
+
         *self = b;
         Ok(())
     }