Make [u8]::reverse() 5x faster

Since LLVM doesn't vectorize the loop for us, do unaligned reads
of a larger type and use LLVM's bswap intrinsic to do the
reversing of the actual bytes.  cfg!-restricted to x86 and
x86_64, as I assume it wouldn't help on things like ARMv5.
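
As a standalone illustration (not code from this change): swap_bytes, which LLVM lowers to bswap here, reverses the byte order within one word, so writing each swapped word back at the mirrored offset reverses the slice a whole word at a time.

    // Hypothetical demo only: swap_bytes reverses the bytes of a word,
    // i.e. an eight-byte reversal done in a single register.
    fn main() {
        let word: u64 = 0x0102_0304_0506_0708;
        assert_eq!(word.swap_bytes(), 0x0807_0605_0403_0201);
    }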

Also makes [u16]::reverse() a more modest 1.5x faster by
loading/storing u32 and swapping the u16s with ROT16.
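
Again purely as an illustration (not part of the diff): rotating a u32 by 16 bits exchanges its two u16 halves, which is exactly a two-element u16 reversal performed in one register.

    // Hypothetical demo only: rotate_left(16) swaps the high and low u16 halves.
    fn main() {
        let pair: u32 = 0xAAAA_BBBB;
        assert_eq!(pair.rotate_left(16), 0xBBBB_AAAA);
    }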

Thank you ptr::*_unaligned for making this easy :)
Scott McMurray 2017-05-04 20:28:34 -07:00
parent 06fb4d2564
commit e8fad325fe
3 changed files with 69 additions and 0 deletions


@@ -290,3 +290,24 @@ sort!(sort_unstable, sort_unstable_large_random, gen_random, 10000);
sort!(sort_unstable, sort_unstable_large_big_random, gen_big_random, 10000);
sort!(sort_unstable, sort_unstable_large_strings, gen_strings, 10000);
sort_expensive!(sort_unstable_by, sort_unstable_large_random_expensive, gen_random, 10000);
macro_rules! reverse {
    ($name:ident, $ty:ident) => {
        #[bench]
        fn $name(b: &mut Bencher) {
            // odd length and offset by 1 to be as unaligned as possible
            let n = 0xFFFFF;
            let mut v: Vec<_> =
                (0..1+(n / mem::size_of::<$ty>() as u64))
                .map(|x| x as $ty)
                .collect();
            b.iter(|| black_box(&mut v[1..]).reverse());
            b.bytes = n;
        }
    }
}

reverse!(reverse_u8, u8);
reverse!(reverse_u16, u16);
reverse!(reverse_u32, u32);
reverse!(reverse_u64, u64);


@@ -379,6 +379,16 @@ fn test_reverse() {
    let mut v3 = Vec::<i32>::new();
    v3.reverse();
    assert!(v3.is_empty());

    // check the 1-byte-types path
    let mut v = (-50..51i8).collect::<Vec<_>>();
    v.reverse();
    assert_eq!(v, (-50..51i8).rev().collect::<Vec<_>>());

    // check the 2-byte-types path
    let mut v = (-50..51i16).collect::<Vec<_>>();
    v.reverse();
    assert_eq!(v, (-50..51i16).rev().collect::<Vec<_>>());
}
#[test]


@@ -539,6 +539,44 @@ impl<T> SliceExt for [T] {
    fn reverse(&mut self) {
        let mut i: usize = 0;
        let ln = self.len();

        let fast_unaligned =
            cfg!(any(target_arch = "x86", target_arch = "x86_64"));

        if fast_unaligned && mem::size_of::<T>() == 1 {
            // Single-byte read & write are comparatively slow. Instead,
            // work in usize chunks and get bswap to do the hard work.
            let chunk = mem::size_of::<usize>();
            while i + chunk - 1 < ln / 2 {
                unsafe {
                    let pa: *mut T = self.get_unchecked_mut(i);
                    let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
                    let va = ptr::read_unaligned(pa as *mut usize);
                    let vb = ptr::read_unaligned(pb as *mut usize);
                    ptr::write_unaligned(pa as *mut usize, vb.swap_bytes());
                    ptr::write_unaligned(pb as *mut usize, va.swap_bytes());
                }
                i += chunk;
            }
        }

        if fast_unaligned && mem::size_of::<T>() == 2 {
            // Not quite as good as the above, but still helpful.
            // Same general idea, read bigger and do the swap in a register.
            let chunk = mem::size_of::<u32>() / 2;
            while i + chunk - 1 < ln / 2 {
                unsafe {
                    let pa: *mut T = self.get_unchecked_mut(i);
                    let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
                    let va = ptr::read_unaligned(pa as *mut u32);
                    let vb = ptr::read_unaligned(pb as *mut u32);
                    ptr::write_unaligned(pa as *mut u32, vb.rotate_left(16));
                    ptr::write_unaligned(pb as *mut u32, va.rotate_left(16));
                }
                i += chunk;
            }
        }

        while i < ln / 2 {
            // Unsafe swap to avoid the bounds check in safe swap.
            unsafe {