From f6f96e2a8757717ca8d701d26d37b067e95bb584 Mon Sep 17 00:00:00 2001 From: Tyson Nottingham Date: Fri, 2 Oct 2020 19:34:01 -0700 Subject: [PATCH 1/4] perf: buffer SipHasher128 --- compiler/rustc_data_structures/src/lib.rs | 1 + compiler/rustc_data_structures/src/sip128.rs | 491 +++++++++++------- .../rustc_data_structures/src/sip128/tests.rs | 45 ++ 3 files changed, 345 insertions(+), 192 deletions(-) diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs index 90b0f254751..20c68227bd9 100644 --- a/compiler/rustc_data_structures/src/lib.rs +++ b/compiler/rustc_data_structures/src/lib.rs @@ -28,6 +28,7 @@ #![feature(const_panic)] #![feature(min_const_generics)] #![feature(once_cell)] +#![feature(maybe_uninit_uninit_array)] #![allow(rustc::default_hash_types)] #[macro_use] diff --git a/compiler/rustc_data_structures/src/sip128.rs b/compiler/rustc_data_structures/src/sip128.rs index 2c4eff618c6..4acb0e69e99 100644 --- a/compiler/rustc_data_structures/src/sip128.rs +++ b/compiler/rustc_data_structures/src/sip128.rs @@ -1,21 +1,24 @@ //! This is a copy of `core::hash::sip` adapted to providing 128 bit hashes. -use std::cmp; use std::hash::Hasher; -use std::mem; +use std::mem::{self, MaybeUninit}; use std::ptr; #[cfg(test)] mod tests; +const BUFFER_SIZE_ELEMS: usize = 8; +const BUFFER_SIZE_BYTES: usize = BUFFER_SIZE_ELEMS * mem::size_of::(); +const BUFFER_SIZE_ELEMS_SPILL: usize = BUFFER_SIZE_ELEMS + 1; +const BUFFER_SIZE_BYTES_SPILL: usize = BUFFER_SIZE_ELEMS_SPILL * mem::size_of::(); +const BUFFER_SPILL_INDEX: usize = BUFFER_SIZE_ELEMS_SPILL - 1; + #[derive(Debug, Clone)] pub struct SipHasher128 { - k0: u64, - k1: u64, - length: usize, // how many bytes we've processed - state: State, // hash State - tail: u64, // unprocessed bytes le - ntail: usize, // how many bytes in tail are valid + nbuf: usize, // how many bytes in buf are valid + buf: [MaybeUninit; BUFFER_SIZE_ELEMS_SPILL], // unprocessed bytes le + state: State, // hash State + processed: usize, // how many bytes we've processed } #[derive(Debug, Clone, Copy)] @@ -51,178 +54,317 @@ macro_rules! compress { }}; } -/// Loads an integer of the desired type from a byte stream, in LE order. Uses -/// `copy_nonoverlapping` to let the compiler generate the most efficient way -/// to load it from a possibly unaligned address. -/// -/// Unsafe because: unchecked indexing at i..i+size_of(int_ty) -macro_rules! load_int_le { - ($buf:expr, $i:expr, $int_ty:ident) => {{ - debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len()); - let mut data = 0 as $int_ty; - ptr::copy_nonoverlapping( - $buf.get_unchecked($i), - &mut data as *mut _ as *mut u8, - mem::size_of::<$int_ty>(), - ); - data.to_le() - }}; -} - -/// Loads a u64 using up to 7 bytes of a byte slice. It looks clumsy but the -/// `copy_nonoverlapping` calls that occur (via `load_int_le!`) all have fixed -/// sizes and avoid calling `memcpy`, which is good for speed. -/// -/// Unsafe because: unchecked indexing at start..start+len +// Copies up to 8 bytes from source to destination. This may be faster than +// calling `ptr::copy_nonoverlapping` with an arbitrary count, since all of +// the copies have fixed sizes and thus avoid calling memcpy. 
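// For example, a count of 7 is performed as a 4-byte, a 2-byte, and a 1-byte
// copy, each of which the compiler can lower to a single load and store.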
#[inline] -unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 { - debug_assert!(len < 8); - let mut i = 0; // current byte index (from LSB) in the output u64 - let mut out = 0; - if i + 3 < len { - out = load_int_le!(buf, start + i, u32) as u64; +unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) { + debug_assert!(count <= 8); + + if count == 8 { + ptr::copy_nonoverlapping(src, dst, 8); + return; + } + + let mut i = 0; + if i + 3 < count { + ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4); i += 4; } - if i + 1 < len { - out |= (load_int_le!(buf, start + i, u16) as u64) << (i * 8); + + if i + 1 < count { + ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2); i += 2 } - if i < len { - out |= (*buf.get_unchecked(start + i) as u64) << (i * 8); + + if i < count { + *dst.add(i) = *src.add(i); i += 1; } - debug_assert_eq!(i, len); - out + + debug_assert_eq!(i, count); } +// Implementation +// +// This implementation uses buffering to reduce the hashing cost for inputs +// consisting of many small integers. Buffering simplifies the integration of +// integer input--the integer write function typically just appends to the +// buffer with a statically sized write, updates metadata, and returns. +// +// Buffering also prevents alternating between writes that do and do not trigger +// the hashing process. Only when the entire buffer is full do we transition +// into hashing. This allows us to keep the hash state in registers for longer, +// instead of loading and storing it before and after processing each element. +// +// When a write fills the buffer, a buffer processing function is invoked to +// hash all of the buffered input. The buffer processing functions are marked +// #[inline(never)] so that they aren't inlined into the append functions, which +// ensures the more frequently called append functions remain inlineable and +// don't include register pushing/popping that would only be made necessary by +// inclusion of the complex buffer processing path which uses those registers. +// +// The buffer includes a "spill"--an extra element at the end--which simplifies +// the integer write buffer processing path. The value that fills the buffer can +// be written with a statically sized write that may spill over into the spill. +// After the buffer is processed, the part of the value that spilled over can +// written from the spill to the beginning of the buffer with another statically +// sized write. Due to static sizes, this scheme performs better than copying +// the exact number of bytes needed into the end and beginning of the buffer. +// +// The buffer is uninitialized, which improves performance, but may preclude +// efficient implementation of alternative approaches. The improvement is not so +// large that an alternative approach should be disregarded because it cannot be +// efficiently implemented with an uninitialized buffer. On the other hand, an +// uninitialized buffer may become more important should a larger one be used. +// +// Platform Dependence +// +// The SipHash algorithm operates on byte sequences. It parses the input stream +// as 8-byte little-endian integers. Therefore, given the same byte sequence, it +// produces the same result on big- and little-endian hardware. +// +// However, the Hasher trait has methods which operate on multi-byte integers. +// How they are converted into byte sequences can be endian-dependent (by using +// native byte order) or independent (by consistently using either LE or BE byte +// order). 
It can also be `isize` and `usize` size dependent (by using the +// native size), or independent (by converting to a common size), supposing the +// values can be represented in 32 bits. +// +// In order to make SipHasher128 consistent with SipHasher in libstd, we choose +// to do the integer to byte sequence conversion in the platform-dependent way. +// Clients can achieve (nearly) platform-independent hashing by widening `isize` +// and `usize` integers to 64 bits on 32-bit systems and byte-swapping integers +// on big-endian systems before passing them to the writing functions. This +// causes the input byte sequence to look identical on big- and little- endian +// systems (supposing `isize` and `usize` values can be represented in 32 bits), +// which ensures platform-independent results. impl SipHasher128 { #[inline] pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 { - let mut state = SipHasher128 { - k0: key0, - k1: key1, - length: 0, - state: State { v0: 0, v1: 0, v2: 0, v3: 0 }, - tail: 0, - ntail: 0, + let mut hasher = SipHasher128 { + nbuf: 0, + buf: MaybeUninit::uninit_array(), + state: State { + v0: key0 ^ 0x736f6d6570736575, + // The XOR with 0xee is only done on 128-bit algorithm version. + v1: key1 ^ (0x646f72616e646f6d ^ 0xee), + v2: key0 ^ 0x6c7967656e657261, + v3: key1 ^ 0x7465646279746573, + }, + processed: 0, }; - state.reset(); - state - } - #[inline] - fn reset(&mut self) { - self.length = 0; - self.state.v0 = self.k0 ^ 0x736f6d6570736575; - self.state.v1 = self.k1 ^ 0x646f72616e646f6d; - self.state.v2 = self.k0 ^ 0x6c7967656e657261; - self.state.v3 = self.k1 ^ 0x7465646279746573; - self.ntail = 0; + unsafe { + // Initialize spill because we read from it in short_write_process_buffer. + *hasher.buf.get_unchecked_mut(BUFFER_SPILL_INDEX) = MaybeUninit::zeroed(); + } - // This is only done in the 128 bit version: - self.state.v1 ^= 0xee; + hasher } // A specialized write function for values with size <= 8. - // - // The input must be zero-extended to 64-bits by the caller. This extension - // isn't hashed, but the implementation requires it for correctness. - // - // This function, given the same integer size and value, has the same effect - // on both little- and big-endian hardware. It operates on values without - // depending on their sequence in memory, so is independent of endianness. - // - // However, we want SipHasher128 to be platform-dependent, in order to be - // consistent with the platform-dependent SipHasher in libstd. In other - // words, we want: - // - // - little-endian: `write_u32(0xDDCCBBAA)` == `write([0xAA, 0xBB, 0xCC, 0xDD])` - // - big-endian: `write_u32(0xDDCCBBAA)` == `write([0xDD, 0xCC, 0xBB, 0xAA])` - // - // Therefore, in order to produce endian-dependent results, SipHasher128's - // `write_xxx` Hasher trait methods byte-swap `x` prior to zero-extending. - // - // If clients of SipHasher128 itself want platform-independent results, they - // *also* must byte-swap integer inputs before invoking the `write_xxx` - // methods on big-endian hardware (that is, two byte-swaps must occur--one - // in the client, and one in SipHasher128). Additionally, they must extend - // `usize` and `isize` types to 64 bits on 32-bit systems. 
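// Illustration (not part of the patch, assuming `std::hash::Hasher` is in
// scope): the contract described above is preserved by the new buffered
// implementation, and is what `test_fill_buffer` exercises -- an integer
// write hashes the integer's native-endian bytes:
//
//     let x = 0xDDCCBBAA_u32;
//     let mut h1 = SipHasher128::new_with_keys(0, 0);
//     h1.write_u32(x);
//     let mut h2 = SipHasher128::new_with_keys(0, 0);
//     h2.write(&x.to_ne_bytes()); // [0xAA, 0xBB, 0xCC, 0xDD] on little-endian
//     assert_eq!(h1.finish128(), h2.finish128());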
#[inline] - fn short_write(&mut self, _x: T, x: u64) { + fn short_write(&mut self, x: T) { let size = mem::size_of::(); - self.length += size; + let nbuf = self.nbuf; + debug_assert!(size <= 8); + debug_assert!(nbuf < BUFFER_SIZE_BYTES); + debug_assert!(nbuf + size < BUFFER_SIZE_BYTES_SPILL); - // The original number must be zero-extended, not sign-extended. - debug_assert!(if size < 8 { x >> (8 * size) == 0 } else { true }); + if nbuf + size < BUFFER_SIZE_BYTES { + unsafe { + // The memcpy call is optimized away because the size is known. + let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); + ptr::copy_nonoverlapping(&x as *const _ as *const u8, dst, size); + } - // The number of bytes needed to fill `self.tail`. - let needed = 8 - self.ntail; + self.nbuf = nbuf + size; - // SipHash parses the input stream as 8-byte little-endian integers. - // Inputs are put into `self.tail` until 8 bytes of data have been - // collected, and then that word is processed. - // - // For example, imagine that `self.tail` is 0x0000_00EE_DDCC_BBAA, - // `self.ntail` is 5 (because 5 bytes have been put into `self.tail`), - // and `needed` is therefore 3. - // - // - Scenario 1, `self.write_u8(0xFF)`: we have already zero-extended - // the input to 0x0000_0000_0000_00FF. We now left-shift it five - // bytes, giving 0x0000_FF00_0000_0000. We then bitwise-OR that value - // into `self.tail`, resulting in 0x0000_FFEE_DDCC_BBAA. - // (Zero-extension of the original input is critical in this scenario - // because we don't want the high two bytes of `self.tail` to be - // touched by the bitwise-OR.) `self.tail` is not yet full, so we - // return early, after updating `self.ntail` to 6. - // - // - Scenario 2, `self.write_u32(0xIIHH_GGFF)`: we have already - // zero-extended the input to 0x0000_0000_IIHH_GGFF. We now - // left-shift it five bytes, giving 0xHHGG_FF00_0000_0000. We then - // bitwise-OR that value into `self.tail`, resulting in - // 0xHHGG_FFEE_DDCC_BBAA. `self.tail` is now full, and we can use it - // to update `self.state`. (As mentioned above, this assumes a - // little-endian machine; on a big-endian machine we would have - // byte-swapped 0xIIHH_GGFF in the caller, giving 0xFFGG_HHII, and we - // would then end up bitwise-ORing 0xGGHH_II00_0000_0000 into - // `self.tail`). - // - self.tail |= x << (8 * self.ntail); - if size < needed { - self.ntail += size; return; } - // `self.tail` is full, process it. - self.state.v3 ^= self.tail; - Sip24Rounds::c_rounds(&mut self.state); - self.state.v0 ^= self.tail; + unsafe { self.short_write_process_buffer(x) } + } - // Continuing scenario 2: we have one byte left over from the input. We - // set `self.ntail` to 1 and `self.tail` to `0x0000_0000_IIHH_GGFF >> - // 8*3`, which is 0x0000_0000_0000_00II. (Or on a big-endian machine - // the prior byte-swapping would leave us with 0x0000_0000_0000_00FF.) - // - // The `if` is needed to avoid shifting by 64 bits, which Rust - // complains about. - self.ntail = size - needed; - self.tail = if needed < 8 { x >> (8 * needed) } else { 0 }; + // A specialized write function for values with size <= 8 that should only + // be called when the write would cause the buffer to fill. + // + // SAFETY: the write of x into self.buf starting at byte offset self.nbuf + // must cause self.buf to become fully initialized (and not overflow) if it + // wasn't already. 
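// For example, if `self.nbuf` is 60 and a `u64` is written, bytes 60..64 fill
// the last buffer element and bytes 64..68 land in the spill. After the 8 full
// elements are hashed, 7 bytes are copied from the spill back to the start of
// the buffer, of which only the first 4 are meaningful (`self.nbuf` becomes 4).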
+ #[inline(never)] + unsafe fn short_write_process_buffer(&mut self, x: T) { + let size = mem::size_of::(); + let nbuf = self.nbuf; + debug_assert!(size <= 8); + debug_assert!(nbuf < BUFFER_SIZE_BYTES); + debug_assert!(nbuf + size >= BUFFER_SIZE_BYTES); + debug_assert!(nbuf + size < BUFFER_SIZE_BYTES_SPILL); + + // Copy first part of input into end of buffer, possibly into spill + // element. The memcpy call is optimized away because the size is known. + let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); + ptr::copy_nonoverlapping(&x as *const _ as *const u8, dst, size); + + // Process buffer. + for i in 0..BUFFER_SIZE_ELEMS { + let elem = self.buf.get_unchecked(i).assume_init().to_le(); + self.state.v3 ^= elem; + Sip24Rounds::c_rounds(&mut self.state); + self.state.v0 ^= elem; + } + + // Copy remaining input into start of buffer by copying size - 1 + // elements from spill (at most size - 1 bytes could have overflowed + // into the spill). The memcpy call is optimized away because the size + // is known. And the whole copy is optimized away for size == 1. + let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8; + ptr::copy_nonoverlapping(src, self.buf.as_mut_ptr() as *mut u8, size - 1); + + // This function should only be called when the write fills the buffer. + // Therefore, when size == 1, the new self.nbuf must be zero. The size + // is statically known, so the branch is optimized away. + self.nbuf = if size == 1 { 0 } else { nbuf + size - BUFFER_SIZE_BYTES }; + self.processed += BUFFER_SIZE_BYTES; + } + + // A write function for byte slices. + #[inline] + fn slice_write(&mut self, msg: &[u8]) { + let length = msg.len(); + let nbuf = self.nbuf; + debug_assert!(nbuf < BUFFER_SIZE_BYTES); + + if nbuf + length < BUFFER_SIZE_BYTES { + unsafe { + let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); + + if length < 8 { + copy_nonoverlapping_small(msg.as_ptr(), dst, length); + } else { + // This memcpy is *not* optimized away. + ptr::copy_nonoverlapping(msg.as_ptr(), dst, length); + } + } + + self.nbuf = nbuf + length; + + return; + } + + unsafe { self.slice_write_process_buffer(msg) } + } + + // A write function for byte slices that should only be called when the + // write would cause the buffer to fill. + // + // SAFETY: self.buf must be initialized up to the byte offset self.nbuf, and + // msg must contain enough bytes to initialize the rest of the element + // containing the byte offset self.nbuf. + #[inline(never)] + unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) { + let length = msg.len(); + let nbuf = self.nbuf; + debug_assert!(nbuf < BUFFER_SIZE_BYTES); + debug_assert!(nbuf + length >= BUFFER_SIZE_BYTES); + + // Always copy first part of input into current element of buffer. + // This function should only be called when the write fills the buffer, + // so we know that there is enough input to fill the current element. + let valid_in_elem = nbuf & 0x7; + let needed_in_elem = 8 - valid_in_elem; + + let src = msg.as_ptr(); + let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); + copy_nonoverlapping_small(src, dst, needed_in_elem); + + // Process buffer. + + // Using nbuf / 8 + 1 rather than (nbuf + needed_in_elem) / 8 to show + // the compiler that this loop's upper bound is > 0. We know that is + // true, because last step ensured we have a full element in the buffer. 
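// For example, with nbuf == 13: valid_in_elem == 5, needed_in_elem == 3, and
// last == 13 / 8 + 1 == 2, so elements 0 and 1 (16 bytes) are hashed.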
+ let last = nbuf / 8 + 1; + + for i in 0..last { + let elem = self.buf.get_unchecked(i).assume_init().to_le(); + self.state.v3 ^= elem; + Sip24Rounds::c_rounds(&mut self.state); + self.state.v0 ^= elem; + } + + // Process the remaining u64-sized chunks of input. + let mut processed = needed_in_elem; + let input_left = length - processed; + let u64s_left = input_left / 8; + let u8s_left = input_left & 0x7; + + for _ in 0..u64s_left { + let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le(); + self.state.v3 ^= elem; + Sip24Rounds::c_rounds(&mut self.state); + self.state.v0 ^= elem; + processed += 8; + } + + // Copy remaining input into start of buffer. + let src = msg.as_ptr().add(processed); + let dst = self.buf.as_mut_ptr() as *mut u8; + copy_nonoverlapping_small(src, dst, u8s_left); + + self.nbuf = u8s_left; + self.processed += nbuf + processed; } #[inline] pub fn finish128(mut self) -> (u64, u64) { - let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail; + debug_assert!(self.nbuf < BUFFER_SIZE_BYTES); - self.state.v3 ^= b; - Sip24Rounds::c_rounds(&mut self.state); - self.state.v0 ^= b; + // Process full elements in buffer. + let last = self.nbuf / 8; - self.state.v2 ^= 0xee; - Sip24Rounds::d_rounds(&mut self.state); - let _0 = self.state.v0 ^ self.state.v1 ^ self.state.v2 ^ self.state.v3; + // Since we're consuming self, avoid updating members for a potential + // performance gain. + let mut state = self.state; + + for i in 0..last { + let elem = unsafe { self.buf.get_unchecked(i).assume_init().to_le() }; + state.v3 ^= elem; + Sip24Rounds::c_rounds(&mut state); + state.v0 ^= elem; + } + + // Get remaining partial element. + let elem = if self.nbuf % 8 != 0 { + unsafe { + // Ensure element is initialized by writing zero bytes. At most + // seven are required given the above check. It's safe to write + // this many because we have the spill element and we maintain + // self.nbuf such that this write will start before the spill. + let dst = (self.buf.as_mut_ptr() as *mut u8).add(self.nbuf); + ptr::write_bytes(dst, 0, 7); + self.buf.get_unchecked(last).assume_init().to_le() + } + } else { + 0 + }; + + // Finalize the hash. 
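// Per the SipHash specification, the final block `b` packs the total input
// length (mod 256) into the top byte, with the last partial element in the
// low bytes. The 0xee and 0xdd constants below are specific to the 128-bit
// variant and make the two 64-bit output halves distinct.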
+ let length = self.processed + self.nbuf; + let b: u64 = ((length as u64 & 0xff) << 56) | elem; + + state.v3 ^= b; + Sip24Rounds::c_rounds(&mut state); + state.v0 ^= b; + + state.v2 ^= 0xee; + Sip24Rounds::d_rounds(&mut state); + let _0 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3; + + state.v1 ^= 0xdd; + Sip24Rounds::d_rounds(&mut state); + let _1 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3; - self.state.v1 ^= 0xdd; - Sip24Rounds::d_rounds(&mut self.state); - let _1 = self.state.v0 ^ self.state.v1 ^ self.state.v2 ^ self.state.v3; (_0, _1) } } @@ -230,92 +372,57 @@ impl SipHasher128 { impl Hasher for SipHasher128 { #[inline] fn write_u8(&mut self, i: u8) { - self.short_write(i, i as u64); + self.short_write(i); } #[inline] fn write_u16(&mut self, i: u16) { - self.short_write(i, i.to_le() as u64); + self.short_write(i); } #[inline] fn write_u32(&mut self, i: u32) { - self.short_write(i, i.to_le() as u64); + self.short_write(i); } #[inline] fn write_u64(&mut self, i: u64) { - self.short_write(i, i.to_le() as u64); + self.short_write(i); } #[inline] fn write_usize(&mut self, i: usize) { - self.short_write(i, i.to_le() as u64); + self.short_write(i); } #[inline] fn write_i8(&mut self, i: i8) { - self.short_write(i, i as u8 as u64); + self.short_write(i as u8); } #[inline] fn write_i16(&mut self, i: i16) { - self.short_write(i, (i as u16).to_le() as u64); + self.short_write(i as u16); } #[inline] fn write_i32(&mut self, i: i32) { - self.short_write(i, (i as u32).to_le() as u64); + self.short_write(i as u32); } #[inline] fn write_i64(&mut self, i: i64) { - self.short_write(i, (i as u64).to_le() as u64); + self.short_write(i as u64); } #[inline] fn write_isize(&mut self, i: isize) { - self.short_write(i, (i as usize).to_le() as u64); + self.short_write(i as usize); } #[inline] fn write(&mut self, msg: &[u8]) { - let length = msg.len(); - self.length += length; - - let mut needed = 0; - - if self.ntail != 0 { - needed = 8 - self.ntail; - self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << (8 * self.ntail); - if length < needed { - self.ntail += length; - return; - } else { - self.state.v3 ^= self.tail; - Sip24Rounds::c_rounds(&mut self.state); - self.state.v0 ^= self.tail; - self.ntail = 0; - } - } - - // Buffered tail is now flushed, process new input. - let len = length - needed; - let left = len & 0x7; - - let mut i = needed; - while i < len - left { - let mi = unsafe { load_int_le!(msg, i, u64) }; - - self.state.v3 ^= mi; - Sip24Rounds::c_rounds(&mut self.state); - self.state.v0 ^= mi; - - i += 8; - } - - self.tail = unsafe { u8to64_le(msg, i, left) }; - self.ntail = left; + self.slice_write(msg); } fn finish(&self) -> u64 { diff --git a/compiler/rustc_data_structures/src/sip128/tests.rs b/compiler/rustc_data_structures/src/sip128/tests.rs index 2e2274a7b77..eda7ddc4f6d 100644 --- a/compiler/rustc_data_structures/src/sip128/tests.rs +++ b/compiler/rustc_data_structures/src/sip128/tests.rs @@ -450,3 +450,48 @@ fn test_short_write_works() { assert_eq!(h1_hash, h2_hash); } + +macro_rules! test_fill_buffer { + ($type:ty, $write_method:ident) => {{ + // Test filling and overfilling the buffer from all possible offsets + // for a given integer type and its corresponding write method. 
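// For example, with `u32` the prefix lengths BUFFER_SIZE_BYTES - 4 through
// BUFFER_SIZE_BYTES - 1 are used, so the integer write either exactly fills
// the buffer or spills 1 to 3 bytes past it.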
+ const SIZE: usize = std::mem::size_of::<$type>(); + let input = [42; BUFFER_SIZE_BYTES]; + let x = 0x01234567_89ABCDEF_76543210_FEDCBA98_u128 as $type; + let x_bytes = &x.to_ne_bytes(); + + for i in 1..=SIZE { + let s = &input[..BUFFER_SIZE_BYTES - i]; + + let mut h1 = SipHasher128::new_with_keys(7, 13); + h1.write(s); + h1.$write_method(x); + + let mut h2 = SipHasher128::new_with_keys(7, 13); + h2.write(s); + h2.write(x_bytes); + + let h1_hash = h1.finish128(); + let h2_hash = h2.finish128(); + + assert_eq!(h1_hash, h2_hash); + } + }}; +} + +#[test] +fn test_fill_buffer() { + test_fill_buffer!(u8, write_u8); + test_fill_buffer!(u16, write_u16); + test_fill_buffer!(u32, write_u32); + test_fill_buffer!(u64, write_u64); + test_fill_buffer!(u128, write_u128); + test_fill_buffer!(usize, write_usize); + + test_fill_buffer!(i8, write_i8); + test_fill_buffer!(i16, write_i16); + test_fill_buffer!(i32, write_i32); + test_fill_buffer!(i64, write_i64); + test_fill_buffer!(i128, write_i128); + test_fill_buffer!(isize, write_isize); +} From b86161ad9c2ba27d8b2c899a7adead7d4ebd54bd Mon Sep 17 00:00:00 2001 From: Tyson Nottingham Date: Sun, 4 Oct 2020 19:39:17 -0700 Subject: [PATCH 2/4] SipHasher128: use more named constants, update comments --- compiler/rustc_data_structures/src/sip128.rs | 104 ++++++++++--------- 1 file changed, 54 insertions(+), 50 deletions(-) diff --git a/compiler/rustc_data_structures/src/sip128.rs b/compiler/rustc_data_structures/src/sip128.rs index 4acb0e69e99..8b91407acff 100644 --- a/compiler/rustc_data_structures/src/sip128.rs +++ b/compiler/rustc_data_structures/src/sip128.rs @@ -7,10 +7,11 @@ use std::ptr; #[cfg(test)] mod tests; +const ELEM_SIZE: usize = mem::size_of::(); const BUFFER_SIZE_ELEMS: usize = 8; -const BUFFER_SIZE_BYTES: usize = BUFFER_SIZE_ELEMS * mem::size_of::(); +const BUFFER_SIZE_BYTES: usize = BUFFER_SIZE_ELEMS * ELEM_SIZE; const BUFFER_SIZE_ELEMS_SPILL: usize = BUFFER_SIZE_ELEMS + 1; -const BUFFER_SIZE_BYTES_SPILL: usize = BUFFER_SIZE_ELEMS_SPILL * mem::size_of::(); +const BUFFER_SIZE_BYTES_SPILL: usize = BUFFER_SIZE_ELEMS_SPILL * ELEM_SIZE; const BUFFER_SPILL_INDEX: usize = BUFFER_SIZE_ELEMS_SPILL - 1; #[derive(Debug, Clone)] @@ -54,15 +55,16 @@ macro_rules! compress { }}; } -// Copies up to 8 bytes from source to destination. This may be faster than -// calling `ptr::copy_nonoverlapping` with an arbitrary count, since all of -// the copies have fixed sizes and thus avoid calling memcpy. +// Copies up to 8 bytes from source to destination. This performs better than +// `ptr::copy_nonoverlapping` on microbenchmarks and may perform better on real +// workloads since all of the copies have fixed sizes and avoid calling memcpy. #[inline] unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) { - debug_assert!(count <= 8); + const COUNT_MAX: usize = 8; + debug_assert!(count <= COUNT_MAX); - if count == 8 { - ptr::copy_nonoverlapping(src, dst, 8); + if count == COUNT_MAX { + ptr::copy_nonoverlapping(src, dst, COUNT_MAX); return; } @@ -85,7 +87,7 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) debug_assert_eq!(i, count); } -// Implementation +// # Implementation // // This implementation uses buffering to reduce the hashing cost for inputs // consisting of many small integers. 
Buffering simplifies the integration of @@ -99,10 +101,11 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) // // When a write fills the buffer, a buffer processing function is invoked to // hash all of the buffered input. The buffer processing functions are marked -// #[inline(never)] so that they aren't inlined into the append functions, which -// ensures the more frequently called append functions remain inlineable and -// don't include register pushing/popping that would only be made necessary by -// inclusion of the complex buffer processing path which uses those registers. +// `#[inline(never)]` so that they aren't inlined into the append functions, +// which ensures the more frequently called append functions remain inlineable +// and don't include register pushing/popping that would only be made necessary +// by inclusion of the complex buffer processing path which uses those +// registers. // // The buffer includes a "spill"--an extra element at the end--which simplifies // the integer write buffer processing path. The value that fills the buffer can @@ -118,7 +121,7 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) // efficiently implemented with an uninitialized buffer. On the other hand, an // uninitialized buffer may become more important should a larger one be used. // -// Platform Dependence +// # Platform Dependence // // The SipHash algorithm operates on byte sequences. It parses the input stream // as 8-byte little-endian integers. Therefore, given the same byte sequence, it @@ -131,14 +134,14 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) // native size), or independent (by converting to a common size), supposing the // values can be represented in 32 bits. // -// In order to make SipHasher128 consistent with SipHasher in libstd, we choose -// to do the integer to byte sequence conversion in the platform-dependent way. -// Clients can achieve (nearly) platform-independent hashing by widening `isize` -// and `usize` integers to 64 bits on 32-bit systems and byte-swapping integers -// on big-endian systems before passing them to the writing functions. This -// causes the input byte sequence to look identical on big- and little- endian -// systems (supposing `isize` and `usize` values can be represented in 32 bits), -// which ensures platform-independent results. +// In order to make `SipHasher128` consistent with `SipHasher` in libstd, we +// choose to do the integer to byte sequence conversion in the platform- +// dependent way. Clients can achieve (nearly) platform-independent hashing by +// widening `isize` and `usize` integers to 64 bits on 32-bit systems and +// byte-swapping integers on big-endian systems before passing them to the +// writing functions. This causes the input byte sequence to look identical on +// big- and little- endian systems (supposing `isize` and `usize` values can be +// represented in 32 bits), which ensures platform-independent results. impl SipHasher128 { #[inline] pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 { @@ -156,7 +159,7 @@ impl SipHasher128 { }; unsafe { - // Initialize spill because we read from it in short_write_process_buffer. + // Initialize spill because we read from it in `short_write_process_buffer`. 
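// (That read copies `size - 1` bytes from the spill even when fewer bytes
// actually spilled over, so the spill must be initialized for the read to
// be defined.)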
*hasher.buf.get_unchecked_mut(BUFFER_SPILL_INDEX) = MaybeUninit::zeroed(); } @@ -190,9 +193,9 @@ impl SipHasher128 { // A specialized write function for values with size <= 8 that should only // be called when the write would cause the buffer to fill. // - // SAFETY: the write of x into self.buf starting at byte offset self.nbuf - // must cause self.buf to become fully initialized (and not overflow) if it - // wasn't already. + // SAFETY: the write of `x` into `self.buf` starting at byte offset + // `self.nbuf` must cause `self.buf` to become fully initialized (and not + // overflow) if it wasn't already. #[inline(never)] unsafe fn short_write_process_buffer(&mut self, x: T) { let size = mem::size_of::(); @@ -223,7 +226,7 @@ impl SipHasher128 { ptr::copy_nonoverlapping(src, self.buf.as_mut_ptr() as *mut u8, size - 1); // This function should only be called when the write fills the buffer. - // Therefore, when size == 1, the new self.nbuf must be zero. The size + // Therefore, when size == 1, the new `self.nbuf` must be zero. The size // is statically known, so the branch is optimized away. self.nbuf = if size == 1 { 0 } else { nbuf + size - BUFFER_SIZE_BYTES }; self.processed += BUFFER_SIZE_BYTES; @@ -240,7 +243,7 @@ impl SipHasher128 { unsafe { let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); - if length < 8 { + if length <= 8 { copy_nonoverlapping_small(msg.as_ptr(), dst, length); } else { // This memcpy is *not* optimized away. @@ -259,9 +262,9 @@ impl SipHasher128 { // A write function for byte slices that should only be called when the // write would cause the buffer to fill. // - // SAFETY: self.buf must be initialized up to the byte offset self.nbuf, and - // msg must contain enough bytes to initialize the rest of the element - // containing the byte offset self.nbuf. + // SAFETY: `self.buf` must be initialized up to the byte offset `self.nbuf`, + // and `msg` must contain enough bytes to initialize the rest of the element + // containing the byte offset `self.nbuf`. #[inline(never)] unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) { let length = msg.len(); @@ -272,8 +275,8 @@ impl SipHasher128 { // Always copy first part of input into current element of buffer. // This function should only be called when the write fills the buffer, // so we know that there is enough input to fill the current element. - let valid_in_elem = nbuf & 0x7; - let needed_in_elem = 8 - valid_in_elem; + let valid_in_elem = nbuf % ELEM_SIZE; + let needed_in_elem = ELEM_SIZE - valid_in_elem; let src = msg.as_ptr(); let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); @@ -281,10 +284,11 @@ impl SipHasher128 { // Process buffer. - // Using nbuf / 8 + 1 rather than (nbuf + needed_in_elem) / 8 to show - // the compiler that this loop's upper bound is > 0. We know that is - // true, because last step ensured we have a full element in the buffer. - let last = nbuf / 8 + 1; + // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) / + // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0. + // We know that is true, because last step ensured we have a full + // element in the buffer. + let last = nbuf / ELEM_SIZE + 1; for i in 0..last { let elem = self.buf.get_unchecked(i).assume_init().to_le(); @@ -293,26 +297,26 @@ impl SipHasher128 { self.state.v0 ^= elem; } - // Process the remaining u64-sized chunks of input. + // Process the remaining element-sized chunks of input. 
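// These chunks are hashed directly from `msg` via unaligned reads rather
// than being staged through the buffer.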
let mut processed = needed_in_elem; let input_left = length - processed; - let u64s_left = input_left / 8; - let u8s_left = input_left & 0x7; + let elems_left = input_left / ELEM_SIZE; + let extra_bytes_left = input_left % ELEM_SIZE; - for _ in 0..u64s_left { + for _ in 0..elems_left { let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le(); self.state.v3 ^= elem; Sip24Rounds::c_rounds(&mut self.state); self.state.v0 ^= elem; - processed += 8; + processed += ELEM_SIZE; } // Copy remaining input into start of buffer. let src = msg.as_ptr().add(processed); let dst = self.buf.as_mut_ptr() as *mut u8; - copy_nonoverlapping_small(src, dst, u8s_left); + copy_nonoverlapping_small(src, dst, extra_bytes_left); - self.nbuf = u8s_left; + self.nbuf = extra_bytes_left; self.processed += nbuf + processed; } @@ -321,7 +325,7 @@ impl SipHasher128 { debug_assert!(self.nbuf < BUFFER_SIZE_BYTES); // Process full elements in buffer. - let last = self.nbuf / 8; + let last = self.nbuf / ELEM_SIZE; // Since we're consuming self, avoid updating members for a potential // performance gain. @@ -335,14 +339,14 @@ impl SipHasher128 { } // Get remaining partial element. - let elem = if self.nbuf % 8 != 0 { + let elem = if self.nbuf % ELEM_SIZE != 0 { unsafe { // Ensure element is initialized by writing zero bytes. At most - // seven are required given the above check. It's safe to write - // this many because we have the spill element and we maintain - // self.nbuf such that this write will start before the spill. + // `ELEM_SIZE - 1` are required given the above check. It's safe + // to write this many because we have the spill and we maintain + // `self.nbuf` such that this write will start before the spill. let dst = (self.buf.as_mut_ptr() as *mut u8).add(self.nbuf); - ptr::write_bytes(dst, 0, 7); + ptr::write_bytes(dst, 0, ELEM_SIZE - 1); self.buf.get_unchecked(last).assume_init().to_le() } } else { From 581cc4abf5dd2802914cf4fee832cda2ae7a89a0 Mon Sep 17 00:00:00 2001 From: Tyson Nottingham Date: Mon, 5 Oct 2020 00:47:44 -0700 Subject: [PATCH 3/4] SipHasher128: use specific struct layout --- compiler/rustc_data_structures/src/sip128.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/compiler/rustc_data_structures/src/sip128.rs b/compiler/rustc_data_structures/src/sip128.rs index 8b91407acff..5bbc53945ed 100644 --- a/compiler/rustc_data_structures/src/sip128.rs +++ b/compiler/rustc_data_structures/src/sip128.rs @@ -15,7 +15,13 @@ const BUFFER_SIZE_BYTES_SPILL: usize = BUFFER_SIZE_ELEMS_SPILL * ELEM_SIZE; const BUFFER_SPILL_INDEX: usize = BUFFER_SIZE_ELEMS_SPILL - 1; #[derive(Debug, Clone)] +#[repr(C)] pub struct SipHasher128 { + // The access pattern during hashing consists of accesses to `nbuf` and + // `buf` until the buffer is full, followed by accesses to `state` and + // `processed`, and then repetition of that pattern until hashing is done. + // This is the basis for the ordering of fields below. However, in practice + // the cache miss-rate for data access is extremely low regardless of order. 
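// `#[repr(C)]` guarantees that the declared field order is the order used in
// memory; with the default representation the compiler would be free to
// reorder the fields.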
nbuf: usize, // how many bytes in buf are valid buf: [MaybeUninit; BUFFER_SIZE_ELEMS_SPILL], // unprocessed bytes le state: State, // hash State From a602d155f06bb3fb7129c036f372e1cb4595ab01 Mon Sep 17 00:00:00 2001 From: Tyson Nottingham Date: Sun, 11 Oct 2020 23:16:01 -0700 Subject: [PATCH 4/4] SipHasher128: improve constant names and add more comments --- compiler/rustc_data_structures/src/sip128.rs | 102 +++++++++++------- .../rustc_data_structures/src/sip128/tests.rs | 4 +- 2 files changed, 68 insertions(+), 38 deletions(-) diff --git a/compiler/rustc_data_structures/src/sip128.rs b/compiler/rustc_data_structures/src/sip128.rs index 5bbc53945ed..53062b9c20d 100644 --- a/compiler/rustc_data_structures/src/sip128.rs +++ b/compiler/rustc_data_structures/src/sip128.rs @@ -7,12 +7,34 @@ use std::ptr; #[cfg(test)] mod tests; +// The SipHash algorithm operates on 8-byte chunks. const ELEM_SIZE: usize = mem::size_of::(); -const BUFFER_SIZE_ELEMS: usize = 8; -const BUFFER_SIZE_BYTES: usize = BUFFER_SIZE_ELEMS * ELEM_SIZE; -const BUFFER_SIZE_ELEMS_SPILL: usize = BUFFER_SIZE_ELEMS + 1; -const BUFFER_SIZE_BYTES_SPILL: usize = BUFFER_SIZE_ELEMS_SPILL * ELEM_SIZE; -const BUFFER_SPILL_INDEX: usize = BUFFER_SIZE_ELEMS_SPILL - 1; + +// Size of the buffer in number of elements, not including the spill. +// +// The selection of this size was guided by rustc-perf benchmark comparisons of +// different buffer sizes. It should be periodically reevaluated as the compiler +// implementation and input characteristics change. +// +// Using the same-sized buffer for everything we hash is a performance versus +// complexity tradeoff. The ideal buffer size, and whether buffering should even +// be used, depends on what is being hashed. It may be worth it to size the +// buffer appropriately (perhaps by making SipHasher128 generic over the buffer +// size) or disable buffering depending on what is being hashed. But at this +// time, we use the same buffer size for everything. +const BUFFER_CAPACITY: usize = 8; + +// Size of the buffer in bytes, not including the spill. +const BUFFER_SIZE: usize = BUFFER_CAPACITY * ELEM_SIZE; + +// Size of the buffer in number of elements, including the spill. +const BUFFER_WITH_SPILL_CAPACITY: usize = BUFFER_CAPACITY + 1; + +// Size of the buffer in bytes, including the spill. +const BUFFER_WITH_SPILL_SIZE: usize = BUFFER_WITH_SPILL_CAPACITY * ELEM_SIZE; + +// Index of the spill element in the buffer. +const BUFFER_SPILL_INDEX: usize = BUFFER_WITH_SPILL_CAPACITY - 1; #[derive(Debug, Clone)] #[repr(C)] @@ -22,10 +44,10 @@ pub struct SipHasher128 { // `processed`, and then repetition of that pattern until hashing is done. // This is the basis for the ordering of fields below. However, in practice // the cache miss-rate for data access is extremely low regardless of order. - nbuf: usize, // how many bytes in buf are valid - buf: [MaybeUninit; BUFFER_SIZE_ELEMS_SPILL], // unprocessed bytes le - state: State, // hash State - processed: usize, // how many bytes we've processed + nbuf: usize, // how many bytes in buf are valid + buf: [MaybeUninit; BUFFER_WITH_SPILL_CAPACITY], // unprocessed bytes le + state: State, // hash State + processed: usize, // how many bytes we've processed } #[derive(Debug, Clone, Copy)] @@ -64,13 +86,18 @@ macro_rules! compress { // Copies up to 8 bytes from source to destination. 
This performs better than // `ptr::copy_nonoverlapping` on microbenchmarks and may perform better on real // workloads since all of the copies have fixed sizes and avoid calling memcpy. +// +// This is specifically designed for copies of up to 8 bytes, because that's the +// maximum of number bytes needed to fill an 8-byte-sized element on which +// SipHash operates. Note that for variable-sized copies which are known to be +// less than 8 bytes, this function will perform more work than necessary unless +// the compiler is able to optimize the extra work away. #[inline] unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) { - const COUNT_MAX: usize = 8; - debug_assert!(count <= COUNT_MAX); + debug_assert!(count <= 8); - if count == COUNT_MAX { - ptr::copy_nonoverlapping(src, dst, COUNT_MAX); + if count == 8 { + ptr::copy_nonoverlapping(src, dst, 8); return; } @@ -116,10 +143,13 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) // The buffer includes a "spill"--an extra element at the end--which simplifies // the integer write buffer processing path. The value that fills the buffer can // be written with a statically sized write that may spill over into the spill. -// After the buffer is processed, the part of the value that spilled over can +// After the buffer is processed, the part of the value that spilled over can be // written from the spill to the beginning of the buffer with another statically -// sized write. Due to static sizes, this scheme performs better than copying -// the exact number of bytes needed into the end and beginning of the buffer. +// sized write. This write may copy more bytes than actually spilled over, but +// we maintain the metadata such that any extra copied bytes will be ignored by +// subsequent processing. Due to the static sizes, this scheme performs better +// than copying the exact number of bytes needed into the end and beginning of +// the buffer. // // The buffer is uninitialized, which improves performance, but may preclude // efficient implementation of alternative approaches. The improvement is not so @@ -142,12 +172,12 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) // // In order to make `SipHasher128` consistent with `SipHasher` in libstd, we // choose to do the integer to byte sequence conversion in the platform- -// dependent way. Clients can achieve (nearly) platform-independent hashing by -// widening `isize` and `usize` integers to 64 bits on 32-bit systems and -// byte-swapping integers on big-endian systems before passing them to the -// writing functions. This causes the input byte sequence to look identical on -// big- and little- endian systems (supposing `isize` and `usize` values can be -// represented in 32 bits), which ensures platform-independent results. +// dependent way. Clients can achieve platform-independent hashing by widening +// `isize` and `usize` integers to 64 bits on 32-bit systems and byte-swapping +// integers on big-endian systems before passing them to the writing functions. +// This causes the input byte sequence to look identical on big- and little- +// endian systems (supposing `isize` and `usize` values can be represented in 32 +// bits), which ensures platform-independent results. 
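// As a sketch of the byte-swapping described above (not part of this patch;
// `write_u64_stable` is a hypothetical helper, and `std::hash::Hasher` is
// assumed to be in scope):
//
//     fn write_u64_stable(h: &mut SipHasher128, i: u64) {
//         // `to_le` byte-swaps on big-endian targets and is a no-op on
//         // little-endian ones, so the native-endian bytes hashed by
//         // `write_u64` are always the little-endian bytes of `i`.
//         h.write_u64(i.to_le());
//     }
//
// `usize` and `isize` values would first be widened with `i as u64`.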
impl SipHasher128 { #[inline] pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 { @@ -178,10 +208,10 @@ impl SipHasher128 { let size = mem::size_of::(); let nbuf = self.nbuf; debug_assert!(size <= 8); - debug_assert!(nbuf < BUFFER_SIZE_BYTES); - debug_assert!(nbuf + size < BUFFER_SIZE_BYTES_SPILL); + debug_assert!(nbuf < BUFFER_SIZE); + debug_assert!(nbuf + size < BUFFER_WITH_SPILL_SIZE); - if nbuf + size < BUFFER_SIZE_BYTES { + if nbuf + size < BUFFER_SIZE { unsafe { // The memcpy call is optimized away because the size is known. let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); @@ -207,9 +237,9 @@ impl SipHasher128 { let size = mem::size_of::(); let nbuf = self.nbuf; debug_assert!(size <= 8); - debug_assert!(nbuf < BUFFER_SIZE_BYTES); - debug_assert!(nbuf + size >= BUFFER_SIZE_BYTES); - debug_assert!(nbuf + size < BUFFER_SIZE_BYTES_SPILL); + debug_assert!(nbuf < BUFFER_SIZE); + debug_assert!(nbuf + size >= BUFFER_SIZE); + debug_assert!(nbuf + size < BUFFER_WITH_SPILL_SIZE); // Copy first part of input into end of buffer, possibly into spill // element. The memcpy call is optimized away because the size is known. @@ -217,7 +247,7 @@ impl SipHasher128 { ptr::copy_nonoverlapping(&x as *const _ as *const u8, dst, size); // Process buffer. - for i in 0..BUFFER_SIZE_ELEMS { + for i in 0..BUFFER_CAPACITY { let elem = self.buf.get_unchecked(i).assume_init().to_le(); self.state.v3 ^= elem; Sip24Rounds::c_rounds(&mut self.state); @@ -234,8 +264,8 @@ impl SipHasher128 { // This function should only be called when the write fills the buffer. // Therefore, when size == 1, the new `self.nbuf` must be zero. The size // is statically known, so the branch is optimized away. - self.nbuf = if size == 1 { 0 } else { nbuf + size - BUFFER_SIZE_BYTES }; - self.processed += BUFFER_SIZE_BYTES; + self.nbuf = if size == 1 { 0 } else { nbuf + size - BUFFER_SIZE }; + self.processed += BUFFER_SIZE; } // A write function for byte slices. @@ -243,9 +273,9 @@ impl SipHasher128 { fn slice_write(&mut self, msg: &[u8]) { let length = msg.len(); let nbuf = self.nbuf; - debug_assert!(nbuf < BUFFER_SIZE_BYTES); + debug_assert!(nbuf < BUFFER_SIZE); - if nbuf + length < BUFFER_SIZE_BYTES { + if nbuf + length < BUFFER_SIZE { unsafe { let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); @@ -275,8 +305,8 @@ impl SipHasher128 { unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) { let length = msg.len(); let nbuf = self.nbuf; - debug_assert!(nbuf < BUFFER_SIZE_BYTES); - debug_assert!(nbuf + length >= BUFFER_SIZE_BYTES); + debug_assert!(nbuf < BUFFER_SIZE); + debug_assert!(nbuf + length >= BUFFER_SIZE); // Always copy first part of input into current element of buffer. // This function should only be called when the write fills the buffer, @@ -328,7 +358,7 @@ impl SipHasher128 { #[inline] pub fn finish128(mut self) -> (u64, u64) { - debug_assert!(self.nbuf < BUFFER_SIZE_BYTES); + debug_assert!(self.nbuf < BUFFER_SIZE); // Process full elements in buffer. let last = self.nbuf / ELEM_SIZE; diff --git a/compiler/rustc_data_structures/src/sip128/tests.rs b/compiler/rustc_data_structures/src/sip128/tests.rs index eda7ddc4f6d..5fe967c4158 100644 --- a/compiler/rustc_data_structures/src/sip128/tests.rs +++ b/compiler/rustc_data_structures/src/sip128/tests.rs @@ -456,12 +456,12 @@ macro_rules! test_fill_buffer { // Test filling and overfilling the buffer from all possible offsets // for a given integer type and its corresponding write method. 
const SIZE: usize = std::mem::size_of::<$type>(); - let input = [42; BUFFER_SIZE_BYTES]; + let input = [42; BUFFER_SIZE]; let x = 0x01234567_89ABCDEF_76543210_FEDCBA98_u128 as $type; let x_bytes = &x.to_ne_bytes(); for i in 1..=SIZE { - let s = &input[..BUFFER_SIZE_BYTES - i]; + let s = &input[..BUFFER_SIZE - i]; let mut h1 = SipHasher128::new_with_keys(7, 13); h1.write(s);