Mirror of https://github.com/rust-lang/rust.git, synced 2024-11-22 06:44:35 +00:00
Auto merge of #77476 - tgnottingham:buffered_siphasher128, r=nnethercote
perf: buffer SipHasher128

This is an attempt to improve SipHasher128 performance by buffering input. Although it reduces instruction count, I'm not confident that the effect on wall times, or lack thereof, is worth the change.

---

Additional notes not reflected in source comments:

* Implementation choices were guided by a combination of results from rustc-perf and micro-benchmarks, mostly the former.
* ~~I tried a couple of different struct layouts that might be more cache friendly, with no obvious effect.~~ Update: a particular struct layout was chosen, but it's not critical to performance. See comments in source and discussion below.
* I suspect that buffering would be important to a SIMD-accelerated algorithm, but from what I've read and my own tests, SipHash does not seem very amenable to SIMD acceleration, at least by SSE.
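To make the approach concrete, here is a minimal sketch of the buffering idea described above. The type and method names are hypothetical and simplified, not the code from this commit; the real implementation follows in the diff below.

```rust
// Minimal sketch of the buffering idea (illustrative only, not rustc code):
// small integer writes append to a fixed buffer with a statically sized copy,
// and the SipHash rounds only run when the buffer fills, so the hash state can
// stay in registers for longer.
struct BufferedSip {
    buf: [u64; 8], // buffered 8-byte elements
    nbuf: usize,   // number of valid bytes in `buf`
    // ... the four SipHash state words v0..v3 would live here ...
}

impl BufferedSip {
    #[inline]
    fn write_u64(&mut self, x: u64) {
        let capacity = self.buf.len() * 8;
        if self.nbuf + 8 <= capacity {
            // Fast path: append to the buffer; no hashing, no state access.
            self.buf[self.nbuf / 8] = x.to_le();
            self.nbuf += 8;
        } else {
            // Slow path, kept out of line in the real code: hash every
            // buffered element with the SipHash compression rounds, then
            // start refilling the buffer with the new value.
            self.process_buffer();
            self.buf[0] = x.to_le();
            self.nbuf = 8;
        }
    }

    #[inline(never)]
    fn process_buffer(&mut self) {
        // SipHash c-rounds over each buffered element would go here.
        self.nbuf = 0;
    }
}
```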
This commit is contained in commit 5171cc76c2.
@@ -28,6 +28,7 @@
#![feature(const_panic)]
#![feature(min_const_generics)]
#![feature(once_cell)]
#![feature(maybe_uninit_uninit_array)]
#![allow(rustc::default_hash_types)]

#[macro_use]
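The newly enabled `maybe_uninit_uninit_array` feature gate is what allows the uninitialized buffer introduced below. A minimal sketch of the (then-unstable) API it provides, for readers unfamiliar with it:

```rust
#![feature(maybe_uninit_uninit_array)]
use std::mem::MaybeUninit;

fn main() {
    // Creates an array of uninitialized elements without zeroing the memory;
    // the length is inferred from the annotated type.
    let buf: [MaybeUninit<u64>; 9] = MaybeUninit::uninit_array();
    // Each element must be initialized before `assume_init` may be called on it.
    let _ = buf;
}
```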
@@ -1,21 +1,53 @@
//! This is a copy of `core::hash::sip` adapted to providing 128 bit hashes.

use std::cmp;
use std::hash::Hasher;
use std::mem;
use std::mem::{self, MaybeUninit};
use std::ptr;

#[cfg(test)]
mod tests;

// The SipHash algorithm operates on 8-byte chunks.
const ELEM_SIZE: usize = mem::size_of::<u64>();

// Size of the buffer in number of elements, not including the spill.
//
// The selection of this size was guided by rustc-perf benchmark comparisons of
// different buffer sizes. It should be periodically reevaluated as the compiler
// implementation and input characteristics change.
//
// Using the same-sized buffer for everything we hash is a performance versus
// complexity tradeoff. The ideal buffer size, and whether buffering should even
// be used, depends on what is being hashed. It may be worth it to size the
// buffer appropriately (perhaps by making SipHasher128 generic over the buffer
// size) or disable buffering depending on what is being hashed. But at this
// time, we use the same buffer size for everything.
const BUFFER_CAPACITY: usize = 8;

// Size of the buffer in bytes, not including the spill.
const BUFFER_SIZE: usize = BUFFER_CAPACITY * ELEM_SIZE;

// Size of the buffer in number of elements, including the spill.
const BUFFER_WITH_SPILL_CAPACITY: usize = BUFFER_CAPACITY + 1;

// Size of the buffer in bytes, including the spill.
const BUFFER_WITH_SPILL_SIZE: usize = BUFFER_WITH_SPILL_CAPACITY * ELEM_SIZE;

// Index of the spill element in the buffer.
const BUFFER_SPILL_INDEX: usize = BUFFER_WITH_SPILL_CAPACITY - 1;

#[derive(Debug, Clone)]
#[repr(C)]
pub struct SipHasher128 {
k0: u64,
k1: u64,
length: usize, // how many bytes we've processed
state: State, // hash State
tail: u64, // unprocessed bytes le
ntail: usize, // how many bytes in tail are valid
// The access pattern during hashing consists of accesses to `nbuf` and
// `buf` until the buffer is full, followed by accesses to `state` and
// `processed`, and then repetition of that pattern until hashing is done.
// This is the basis for the ordering of fields below. However, in practice
// the cache miss-rate for data access is extremely low regardless of order.
nbuf: usize, // how many bytes in buf are valid
buf: [MaybeUninit<u64>; BUFFER_WITH_SPILL_CAPACITY], // unprocessed bytes le
state: State, // hash State
processed: usize, // how many bytes we've processed
}

#[derive(Debug, Clone, Copy)]
@@ -51,178 +83,328 @@ macro_rules! compress {
}};
}

/// Loads an integer of the desired type from a byte stream, in LE order. Uses
/// `copy_nonoverlapping` to let the compiler generate the most efficient way
/// to load it from a possibly unaligned address.
///
/// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
macro_rules! load_int_le {
($buf:expr, $i:expr, $int_ty:ident) => {{
debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
let mut data = 0 as $int_ty;
ptr::copy_nonoverlapping(
$buf.get_unchecked($i),
&mut data as *mut _ as *mut u8,
mem::size_of::<$int_ty>(),
);
data.to_le()
}};
}

/// Loads a u64 using up to 7 bytes of a byte slice. It looks clumsy but the
/// `copy_nonoverlapping` calls that occur (via `load_int_le!`) all have fixed
/// sizes and avoid calling `memcpy`, which is good for speed.
///
/// Unsafe because: unchecked indexing at start..start+len
// Copies up to 8 bytes from source to destination. This performs better than
// `ptr::copy_nonoverlapping` on microbenchmarks and may perform better on real
// workloads since all of the copies have fixed sizes and avoid calling memcpy.
//
// This is specifically designed for copies of up to 8 bytes, because that's the
// maximum of number bytes needed to fill an 8-byte-sized element on which
// SipHash operates. Note that for variable-sized copies which are known to be
// less than 8 bytes, this function will perform more work than necessary unless
// the compiler is able to optimize the extra work away.
#[inline]
unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
debug_assert!(len < 8);
let mut i = 0; // current byte index (from LSB) in the output u64
let mut out = 0;
if i + 3 < len {
out = load_int_le!(buf, start + i, u32) as u64;
unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) {
debug_assert!(count <= 8);

if count == 8 {
ptr::copy_nonoverlapping(src, dst, 8);
return;
}

let mut i = 0;
if i + 3 < count {
ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
i += 4;
}
if i + 1 < len {
out |= (load_int_le!(buf, start + i, u16) as u64) << (i * 8);

if i + 1 < count {
ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
i += 2
}
if i < len {
out |= (*buf.get_unchecked(start + i) as u64) << (i * 8);

if i < count {
*dst.add(i) = *src.add(i);
i += 1;
}
debug_assert_eq!(i, len);
out

debug_assert_eq!(i, count);
}

// # Implementation
//
// This implementation uses buffering to reduce the hashing cost for inputs
// consisting of many small integers. Buffering simplifies the integration of
// integer input--the integer write function typically just appends to the
// buffer with a statically sized write, updates metadata, and returns.
//
// Buffering also prevents alternating between writes that do and do not trigger
// the hashing process. Only when the entire buffer is full do we transition
// into hashing. This allows us to keep the hash state in registers for longer,
// instead of loading and storing it before and after processing each element.
//
// When a write fills the buffer, a buffer processing function is invoked to
// hash all of the buffered input. The buffer processing functions are marked
// `#[inline(never)]` so that they aren't inlined into the append functions,
// which ensures the more frequently called append functions remain inlineable
// and don't include register pushing/popping that would only be made necessary
// by inclusion of the complex buffer processing path which uses those
// registers.
//
// The buffer includes a "spill"--an extra element at the end--which simplifies
// the integer write buffer processing path. The value that fills the buffer can
// be written with a statically sized write that may spill over into the spill.
// After the buffer is processed, the part of the value that spilled over can be
// written from the spill to the beginning of the buffer with another statically
// sized write. This write may copy more bytes than actually spilled over, but
// we maintain the metadata such that any extra copied bytes will be ignored by
// subsequent processing. Due to the static sizes, this scheme performs better
// than copying the exact number of bytes needed into the end and beginning of
// the buffer.
//
// The buffer is uninitialized, which improves performance, but may preclude
// efficient implementation of alternative approaches. The improvement is not so
// large that an alternative approach should be disregarded because it cannot be
// efficiently implemented with an uninitialized buffer. On the other hand, an
// uninitialized buffer may become more important should a larger one be used.
//
// # Platform Dependence
//
// The SipHash algorithm operates on byte sequences. It parses the input stream
// as 8-byte little-endian integers. Therefore, given the same byte sequence, it
// produces the same result on big- and little-endian hardware.
//
// However, the Hasher trait has methods which operate on multi-byte integers.
// How they are converted into byte sequences can be endian-dependent (by using
// native byte order) or independent (by consistently using either LE or BE byte
// order). It can also be `isize` and `usize` size dependent (by using the
// native size), or independent (by converting to a common size), supposing the
// values can be represented in 32 bits.
//
// In order to make `SipHasher128` consistent with `SipHasher` in libstd, we
// choose to do the integer to byte sequence conversion in the platform-
// dependent way. Clients can achieve platform-independent hashing by widening
// `isize` and `usize` integers to 64 bits on 32-bit systems and byte-swapping
// integers on big-endian systems before passing them to the writing functions.
// This causes the input byte sequence to look identical on big- and little-
// endian systems (supposing `isize` and `usize` values can be represented in 32
// bits), which ensures platform-independent results.
impl SipHasher128 {
#[inline]
pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 {
let mut state = SipHasher128 {
k0: key0,
k1: key1,
length: 0,
state: State { v0: 0, v1: 0, v2: 0, v3: 0 },
tail: 0,
ntail: 0,
let mut hasher = SipHasher128 {
nbuf: 0,
buf: MaybeUninit::uninit_array(),
state: State {
v0: key0 ^ 0x736f6d6570736575,
// The XOR with 0xee is only done on 128-bit algorithm version.
v1: key1 ^ (0x646f72616e646f6d ^ 0xee),
v2: key0 ^ 0x6c7967656e657261,
v3: key1 ^ 0x7465646279746573,
},
processed: 0,
};
state.reset();
state
}

#[inline]
fn reset(&mut self) {
self.length = 0;
self.state.v0 = self.k0 ^ 0x736f6d6570736575;
self.state.v1 = self.k1 ^ 0x646f72616e646f6d;
self.state.v2 = self.k0 ^ 0x6c7967656e657261;
self.state.v3 = self.k1 ^ 0x7465646279746573;
self.ntail = 0;
unsafe {
// Initialize spill because we read from it in `short_write_process_buffer`.
*hasher.buf.get_unchecked_mut(BUFFER_SPILL_INDEX) = MaybeUninit::zeroed();
}

// This is only done in the 128 bit version:
self.state.v1 ^= 0xee;
hasher
}

// A specialized write function for values with size <= 8.
//
// The input must be zero-extended to 64-bits by the caller. This extension
// isn't hashed, but the implementation requires it for correctness.
//
// This function, given the same integer size and value, has the same effect
// on both little- and big-endian hardware. It operates on values without
// depending on their sequence in memory, so is independent of endianness.
//
// However, we want SipHasher128 to be platform-dependent, in order to be
// consistent with the platform-dependent SipHasher in libstd. In other
// words, we want:
//
// - little-endian: `write_u32(0xDDCCBBAA)` == `write([0xAA, 0xBB, 0xCC, 0xDD])`
// - big-endian: `write_u32(0xDDCCBBAA)` == `write([0xDD, 0xCC, 0xBB, 0xAA])`
//
// Therefore, in order to produce endian-dependent results, SipHasher128's
// `write_xxx` Hasher trait methods byte-swap `x` prior to zero-extending.
//
// If clients of SipHasher128 itself want platform-independent results, they
// *also* must byte-swap integer inputs before invoking the `write_xxx`
// methods on big-endian hardware (that is, two byte-swaps must occur--one
// in the client, and one in SipHasher128). Additionally, they must extend
// `usize` and `isize` types to 64 bits on 32-bit systems.
#[inline]
fn short_write<T>(&mut self, _x: T, x: u64) {
fn short_write<T>(&mut self, x: T) {
let size = mem::size_of::<T>();
self.length += size;
let nbuf = self.nbuf;
debug_assert!(size <= 8);
debug_assert!(nbuf < BUFFER_SIZE);
debug_assert!(nbuf + size < BUFFER_WITH_SPILL_SIZE);

// The original number must be zero-extended, not sign-extended.
debug_assert!(if size < 8 { x >> (8 * size) == 0 } else { true });
if nbuf + size < BUFFER_SIZE {
unsafe {
// The memcpy call is optimized away because the size is known.
let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
ptr::copy_nonoverlapping(&x as *const _ as *const u8, dst, size);
}

// The number of bytes needed to fill `self.tail`.
let needed = 8 - self.ntail;
self.nbuf = nbuf + size;

// SipHash parses the input stream as 8-byte little-endian integers.
// Inputs are put into `self.tail` until 8 bytes of data have been
// collected, and then that word is processed.
//
// For example, imagine that `self.tail` is 0x0000_00EE_DDCC_BBAA,
// `self.ntail` is 5 (because 5 bytes have been put into `self.tail`),
// and `needed` is therefore 3.
//
// - Scenario 1, `self.write_u8(0xFF)`: we have already zero-extended
// the input to 0x0000_0000_0000_00FF. We now left-shift it five
// bytes, giving 0x0000_FF00_0000_0000. We then bitwise-OR that value
// into `self.tail`, resulting in 0x0000_FFEE_DDCC_BBAA.
// (Zero-extension of the original input is critical in this scenario
// because we don't want the high two bytes of `self.tail` to be
// touched by the bitwise-OR.) `self.tail` is not yet full, so we
// return early, after updating `self.ntail` to 6.
//
// - Scenario 2, `self.write_u32(0xIIHH_GGFF)`: we have already
// zero-extended the input to 0x0000_0000_IIHH_GGFF. We now
// left-shift it five bytes, giving 0xHHGG_FF00_0000_0000. We then
// bitwise-OR that value into `self.tail`, resulting in
// 0xHHGG_FFEE_DDCC_BBAA. `self.tail` is now full, and we can use it
// to update `self.state`. (As mentioned above, this assumes a
// little-endian machine; on a big-endian machine we would have
// byte-swapped 0xIIHH_GGFF in the caller, giving 0xFFGG_HHII, and we
// would then end up bitwise-ORing 0xGGHH_II00_0000_0000 into
// `self.tail`).
//
self.tail |= x << (8 * self.ntail);
if size < needed {
self.ntail += size;
return;
}

// `self.tail` is full, process it.
self.state.v3 ^= self.tail;
Sip24Rounds::c_rounds(&mut self.state);
self.state.v0 ^= self.tail;
unsafe { self.short_write_process_buffer(x) }
}

// Continuing scenario 2: we have one byte left over from the input. We
// set `self.ntail` to 1 and `self.tail` to `0x0000_0000_IIHH_GGFF >>
// 8*3`, which is 0x0000_0000_0000_00II. (Or on a big-endian machine
// the prior byte-swapping would leave us with 0x0000_0000_0000_00FF.)
//
// The `if` is needed to avoid shifting by 64 bits, which Rust
// complains about.
self.ntail = size - needed;
self.tail = if needed < 8 { x >> (8 * needed) } else { 0 };
// A specialized write function for values with size <= 8 that should only
// be called when the write would cause the buffer to fill.
//
// SAFETY: the write of `x` into `self.buf` starting at byte offset
// `self.nbuf` must cause `self.buf` to become fully initialized (and not
// overflow) if it wasn't already.
#[inline(never)]
unsafe fn short_write_process_buffer<T>(&mut self, x: T) {
let size = mem::size_of::<T>();
let nbuf = self.nbuf;
debug_assert!(size <= 8);
debug_assert!(nbuf < BUFFER_SIZE);
debug_assert!(nbuf + size >= BUFFER_SIZE);
debug_assert!(nbuf + size < BUFFER_WITH_SPILL_SIZE);

// Copy first part of input into end of buffer, possibly into spill
// element. The memcpy call is optimized away because the size is known.
let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
ptr::copy_nonoverlapping(&x as *const _ as *const u8, dst, size);

// Process buffer.
for i in 0..BUFFER_CAPACITY {
let elem = self.buf.get_unchecked(i).assume_init().to_le();
self.state.v3 ^= elem;
Sip24Rounds::c_rounds(&mut self.state);
self.state.v0 ^= elem;
}

// Copy remaining input into start of buffer by copying size - 1
// elements from spill (at most size - 1 bytes could have overflowed
// into the spill). The memcpy call is optimized away because the size
// is known. And the whole copy is optimized away for size == 1.
let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
ptr::copy_nonoverlapping(src, self.buf.as_mut_ptr() as *mut u8, size - 1);

// This function should only be called when the write fills the buffer.
// Therefore, when size == 1, the new `self.nbuf` must be zero. The size
// is statically known, so the branch is optimized away.
self.nbuf = if size == 1 { 0 } else { nbuf + size - BUFFER_SIZE };
self.processed += BUFFER_SIZE;
}

// A write function for byte slices.
#[inline]
fn slice_write(&mut self, msg: &[u8]) {
let length = msg.len();
let nbuf = self.nbuf;
debug_assert!(nbuf < BUFFER_SIZE);

if nbuf + length < BUFFER_SIZE {
unsafe {
let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);

if length <= 8 {
copy_nonoverlapping_small(msg.as_ptr(), dst, length);
} else {
// This memcpy is *not* optimized away.
ptr::copy_nonoverlapping(msg.as_ptr(), dst, length);
}
}

self.nbuf = nbuf + length;

return;
}

unsafe { self.slice_write_process_buffer(msg) }
}

// A write function for byte slices that should only be called when the
// write would cause the buffer to fill.
//
// SAFETY: `self.buf` must be initialized up to the byte offset `self.nbuf`,
// and `msg` must contain enough bytes to initialize the rest of the element
// containing the byte offset `self.nbuf`.
#[inline(never)]
unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) {
let length = msg.len();
let nbuf = self.nbuf;
debug_assert!(nbuf < BUFFER_SIZE);
debug_assert!(nbuf + length >= BUFFER_SIZE);

// Always copy first part of input into current element of buffer.
// This function should only be called when the write fills the buffer,
// so we know that there is enough input to fill the current element.
let valid_in_elem = nbuf % ELEM_SIZE;
let needed_in_elem = ELEM_SIZE - valid_in_elem;

let src = msg.as_ptr();
let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
copy_nonoverlapping_small(src, dst, needed_in_elem);

// Process buffer.

// Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
// ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
// We know that is true, because last step ensured we have a full
// element in the buffer.
let last = nbuf / ELEM_SIZE + 1;

for i in 0..last {
let elem = self.buf.get_unchecked(i).assume_init().to_le();
self.state.v3 ^= elem;
Sip24Rounds::c_rounds(&mut self.state);
self.state.v0 ^= elem;
}

// Process the remaining element-sized chunks of input.
let mut processed = needed_in_elem;
let input_left = length - processed;
let elems_left = input_left / ELEM_SIZE;
let extra_bytes_left = input_left % ELEM_SIZE;

for _ in 0..elems_left {
let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
self.state.v3 ^= elem;
Sip24Rounds::c_rounds(&mut self.state);
self.state.v0 ^= elem;
processed += ELEM_SIZE;
}

// Copy remaining input into start of buffer.
let src = msg.as_ptr().add(processed);
let dst = self.buf.as_mut_ptr() as *mut u8;
copy_nonoverlapping_small(src, dst, extra_bytes_left);

self.nbuf = extra_bytes_left;
self.processed += nbuf + processed;
}

#[inline]
pub fn finish128(mut self) -> (u64, u64) {
let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail;
debug_assert!(self.nbuf < BUFFER_SIZE);

self.state.v3 ^= b;
Sip24Rounds::c_rounds(&mut self.state);
self.state.v0 ^= b;
// Process full elements in buffer.
let last = self.nbuf / ELEM_SIZE;

self.state.v2 ^= 0xee;
Sip24Rounds::d_rounds(&mut self.state);
let _0 = self.state.v0 ^ self.state.v1 ^ self.state.v2 ^ self.state.v3;
// Since we're consuming self, avoid updating members for a potential
// performance gain.
let mut state = self.state;

for i in 0..last {
let elem = unsafe { self.buf.get_unchecked(i).assume_init().to_le() };
state.v3 ^= elem;
Sip24Rounds::c_rounds(&mut state);
state.v0 ^= elem;
}

// Get remaining partial element.
let elem = if self.nbuf % ELEM_SIZE != 0 {
unsafe {
// Ensure element is initialized by writing zero bytes. At most
// `ELEM_SIZE - 1` are required given the above check. It's safe
// to write this many because we have the spill and we maintain
// `self.nbuf` such that this write will start before the spill.
let dst = (self.buf.as_mut_ptr() as *mut u8).add(self.nbuf);
ptr::write_bytes(dst, 0, ELEM_SIZE - 1);
self.buf.get_unchecked(last).assume_init().to_le()
}
} else {
0
};

// Finalize the hash.
let length = self.processed + self.nbuf;
let b: u64 = ((length as u64 & 0xff) << 56) | elem;

state.v3 ^= b;
Sip24Rounds::c_rounds(&mut state);
state.v0 ^= b;

state.v2 ^= 0xee;
Sip24Rounds::d_rounds(&mut state);
let _0 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;

state.v1 ^= 0xdd;
Sip24Rounds::d_rounds(&mut state);
let _1 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;

self.state.v1 ^= 0xdd;
Sip24Rounds::d_rounds(&mut self.state);
let _1 = self.state.v0 ^ self.state.v1 ^ self.state.v2 ^ self.state.v3;
(_0, _1)
}
}
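As a side note, the spill bookkeeping in `short_write_process_buffer` can be made concrete with a small worked example using the constants introduced above. The numbers are illustrative only and this snippet is not part of the commit:

```rust
// Worked example of the spill path, mirroring the arithmetic above.
const ELEM_SIZE: usize = 8;
const BUFFER_CAPACITY: usize = 8;
const BUFFER_SIZE: usize = BUFFER_CAPACITY * ELEM_SIZE; // 64 bytes
const BUFFER_WITH_SPILL_SIZE: usize = (BUFFER_CAPACITY + 1) * ELEM_SIZE; // 72 bytes

fn main() {
    let nbuf = 60; // 60 bytes already buffered
    let size = 8; // an incoming `u64` write

    // The write fills the buffer, and the statically sized 8-byte copy is
    // allowed to run past the end because the spill element absorbs it.
    assert!(nbuf + size >= BUFFER_SIZE);
    assert!(nbuf + size < BUFFER_WITH_SPILL_SIZE);

    // After the 8 full elements are hashed, `size - 1` bytes are copied from
    // the spill back to the front of the buffer (a statically sized copy),
    // but only this many of them are meaningful; the extra copied bytes are
    // ignored by later writes and by finish128.
    let new_nbuf = nbuf + size - BUFFER_SIZE;
    assert_eq!(new_nbuf, 4);
}
```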
@@ -230,92 +412,57 @@ impl SipHasher128 {
impl Hasher for SipHasher128 {
#[inline]
fn write_u8(&mut self, i: u8) {
self.short_write(i, i as u64);
self.short_write(i);
}

#[inline]
fn write_u16(&mut self, i: u16) {
self.short_write(i, i.to_le() as u64);
self.short_write(i);
}

#[inline]
fn write_u32(&mut self, i: u32) {
self.short_write(i, i.to_le() as u64);
self.short_write(i);
}

#[inline]
fn write_u64(&mut self, i: u64) {
self.short_write(i, i.to_le() as u64);
self.short_write(i);
}

#[inline]
fn write_usize(&mut self, i: usize) {
self.short_write(i, i.to_le() as u64);
self.short_write(i);
}

#[inline]
fn write_i8(&mut self, i: i8) {
self.short_write(i, i as u8 as u64);
self.short_write(i as u8);
}

#[inline]
fn write_i16(&mut self, i: i16) {
self.short_write(i, (i as u16).to_le() as u64);
self.short_write(i as u16);
}

#[inline]
fn write_i32(&mut self, i: i32) {
self.short_write(i, (i as u32).to_le() as u64);
self.short_write(i as u32);
}

#[inline]
fn write_i64(&mut self, i: i64) {
self.short_write(i, (i as u64).to_le() as u64);
self.short_write(i as u64);
}

#[inline]
fn write_isize(&mut self, i: isize) {
self.short_write(i, (i as usize).to_le() as u64);
self.short_write(i as usize);
}

#[inline]
fn write(&mut self, msg: &[u8]) {
let length = msg.len();
self.length += length;

let mut needed = 0;

if self.ntail != 0 {
needed = 8 - self.ntail;
self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << (8 * self.ntail);
if length < needed {
self.ntail += length;
return;
} else {
self.state.v3 ^= self.tail;
Sip24Rounds::c_rounds(&mut self.state);
self.state.v0 ^= self.tail;
self.ntail = 0;
}
}

// Buffered tail is now flushed, process new input.
let len = length - needed;
let left = len & 0x7;

let mut i = needed;
while i < len - left {
let mi = unsafe { load_int_le!(msg, i, u64) };

self.state.v3 ^= mi;
Sip24Rounds::c_rounds(&mut self.state);
self.state.v0 ^= mi;

i += 8;
}

self.tail = unsafe { u8to64_le(msg, i, left) };
self.ntail = left;
self.slice_write(msg);
}

fn finish(&self) -> u64 {
@@ -450,3 +450,48 @@ fn test_short_write_works()

assert_eq!(h1_hash, h2_hash);
}

macro_rules! test_fill_buffer {
($type:ty, $write_method:ident) => {{
// Test filling and overfilling the buffer from all possible offsets
// for a given integer type and its corresponding write method.
const SIZE: usize = std::mem::size_of::<$type>();
let input = [42; BUFFER_SIZE];
let x = 0x01234567_89ABCDEF_76543210_FEDCBA98_u128 as $type;
let x_bytes = &x.to_ne_bytes();

for i in 1..=SIZE {
let s = &input[..BUFFER_SIZE - i];

let mut h1 = SipHasher128::new_with_keys(7, 13);
h1.write(s);
h1.$write_method(x);

let mut h2 = SipHasher128::new_with_keys(7, 13);
h2.write(s);
h2.write(x_bytes);

let h1_hash = h1.finish128();
let h2_hash = h2.finish128();

assert_eq!(h1_hash, h2_hash);
}
}};
}

#[test]
fn test_fill_buffer() {
test_fill_buffer!(u8, write_u8);
test_fill_buffer!(u16, write_u16);
test_fill_buffer!(u32, write_u32);
test_fill_buffer!(u64, write_u64);
test_fill_buffer!(u128, write_u128);
test_fill_buffer!(usize, write_usize);

test_fill_buffer!(i8, write_i8);
test_fill_buffer!(i16, write_i16);
test_fill_buffer!(i32, write_i32);
test_fill_buffer!(i64, write_i64);
test_fill_buffer!(i128, write_i128);
test_fill_buffer!(isize, write_isize);
}
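A closing aside on the "Platform Dependence" comments in the new code: they imply a client-side recipe for platform-independent results. The helper below is a hypothetical illustration of that recipe (not part of this commit), assuming `std::hash::Hasher` is in scope so `write_u64` is available:

```rust
// Hypothetical client-side helper, following the recipe in the comments above:
// widen `usize` to 64 bits and byte-swap on big-endian targets before handing
// the value to SipHasher128, so the byte stream SipHash sees is the same on
// every platform.
fn write_usize_platform_independent(hasher: &mut SipHasher128, x: usize) {
    // `to_le()` is a no-op on little-endian hardware and a byte-swap on
    // big-endian hardware; combined with SipHasher128's own native-endian
    // write, the hashed byte sequence ends up little-endian everywhere.
    hasher.write_u64((x as u64).to_le());
}
```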