//! This is a copy of `core::hash::sip` adapted to providing 128-bit hashes.
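//!
//! A minimal usage sketch (illustrative only; the key and input values below
//! are made up and are not test vectors):
//!
//! ```ignore
//! use std::hash::Hasher;
//!
//! let mut hasher = SipHasher128::new_with_keys(0, 0);
//! hasher.write_u32(0xdead_beef);
//! hasher.write(b"some bytes");
//! let (h1, h2) = hasher.finish128(); // `h1` and `h2` form the 128-bit hash.
//! ```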

use std::hash::Hasher;
use std::mem::{self, MaybeUninit};
use std::ptr;

#[cfg(test)]
mod tests;

const BUFFER_SIZE_ELEMS: usize = 8;
const BUFFER_SIZE_BYTES: usize = BUFFER_SIZE_ELEMS * mem::size_of::<u64>();
const BUFFER_SIZE_ELEMS_SPILL: usize = BUFFER_SIZE_ELEMS + 1;
const BUFFER_SIZE_BYTES_SPILL: usize = BUFFER_SIZE_ELEMS_SPILL * mem::size_of::<u64>();
const BUFFER_SPILL_INDEX: usize = BUFFER_SIZE_ELEMS_SPILL - 1;

#[derive(Debug, Clone)]
pub struct SipHasher128 {
    nbuf: usize, // how many bytes in buf are valid
    buf: [MaybeUninit<u64>; BUFFER_SIZE_ELEMS_SPILL], // unprocessed bytes, little-endian
    state: State, // hash State
    processed: usize, // how many bytes we've processed
}

#[derive(Debug, Clone, Copy)]
#[repr(C)]
struct State {
    // v0, v2 and v1, v3 show up in pairs in the algorithm,
    // and simd implementations of SipHash will use vectors
    // of v02 and v13. By placing them in this order in the struct,
    // the compiler can pick up on just a few simd optimizations by itself.
    v0: u64,
    v2: u64,
    v1: u64,
    v3: u64,
}
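
// One invocation of `compress!` is a single SipRound (the SipHash round
// function). Two rounds per message word plus four finalization rounds
// (see `Sip24Rounds` below) make this SipHash-2-4.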
macro_rules! compress {
    ($state:expr) => {{ compress!($state.v0, $state.v1, $state.v2, $state.v3) }};
    ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {{
        $v0 = $v0.wrapping_add($v1);
        $v1 = $v1.rotate_left(13);
        $v1 ^= $v0;
        $v0 = $v0.rotate_left(32);
        $v2 = $v2.wrapping_add($v3);
        $v3 = $v3.rotate_left(16);
        $v3 ^= $v2;
        $v0 = $v0.wrapping_add($v3);
        $v3 = $v3.rotate_left(21);
        $v3 ^= $v0;
        $v2 = $v2.wrapping_add($v1);
        $v1 = $v1.rotate_left(17);
        $v1 ^= $v2;
        $v2 = $v2.rotate_left(32);
    }};
}

// Copies up to 8 bytes from source to destination. This may be faster than
// calling `ptr::copy_nonoverlapping` with an arbitrary count, since all of
// the copies have fixed sizes and thus avoid calling memcpy.
#[inline]
unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) {
    debug_assert!(count <= 8);

    if count == 8 {
        ptr::copy_nonoverlapping(src, dst, 8);
        return;
    }

    let mut i = 0;
    if i + 3 < count {
        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
        i += 4;
    }

    if i + 1 < count {
        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
        i += 2;
    }

    if i < count {
        *dst.add(i) = *src.add(i);
        i += 1;
    }

    debug_assert_eq!(i, count);
}

// Implementation
//
// This implementation uses buffering to reduce the hashing cost for inputs
// consisting of many small integers. Buffering simplifies the integration of
// integer input--the integer write function typically just appends to the
// buffer with a statically sized write, updates metadata, and returns.
//
// Buffering also prevents alternating between writes that do and do not trigger
// the hashing process. Only when the entire buffer is full do we transition
// into hashing. This allows us to keep the hash state in registers for longer,
// instead of loading and storing it before and after processing each element.
//
// When a write fills the buffer, a buffer processing function is invoked to
// hash all of the buffered input. The buffer processing functions are marked
// #[inline(never)] so that they aren't inlined into the append functions, which
// ensures the more frequently called append functions remain inlineable and
// don't include register pushing/popping that would only be made necessary by
// inclusion of the complex buffer processing path which uses those registers.
//
// The buffer includes a "spill"--an extra element at the end--which simplifies
// the integer write buffer processing path. The value that fills the buffer can
// be written with a statically sized write that may spill over into the spill.
// After the buffer is processed, the part of the value that spilled over can be
// written from the spill to the beginning of the buffer with another statically
// sized write. Due to static sizes, this scheme performs better than copying
// the exact number of bytes needed into the end and beginning of the buffer.
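//
// For example (illustrative numbers only): with the 64-byte buffer defined
// above, suppose nbuf == 62 when a u64 is written. The statically sized 8-byte
// copy puts 2 bytes into the last buffer element and 6 bytes into the spill.
// After the full buffer is hashed, 7 (i.e. size - 1) bytes are copied from the
// spill back to the start of the buffer, and nbuf becomes 62 + 8 - 64 == 6.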
//
// The buffer is uninitialized, which improves performance, but may preclude
// efficient implementation of alternative approaches. The improvement is not so
// large that an alternative approach should be disregarded because it cannot be
// efficiently implemented with an uninitialized buffer. On the other hand, an
// uninitialized buffer may become more important should a larger one be used.
//
// Platform Dependence
//
// The SipHash algorithm operates on byte sequences. It parses the input stream
// as 8-byte little-endian integers. Therefore, given the same byte sequence, it
// produces the same result on big- and little-endian hardware.
//
// However, the Hasher trait has methods which operate on multi-byte integers.
// How they are converted into byte sequences can be endian-dependent (by using
// native byte order) or independent (by consistently using either LE or BE byte
// order). It can also be `isize` and `usize` size dependent (by using the
// native size), or independent (by converting to a common size), supposing the
// values can be represented in 32 bits.
//
// In order to make SipHasher128 consistent with SipHasher in libstd, we choose
// to do the integer-to-byte-sequence conversion in the platform-dependent way.
// Clients can achieve (nearly) platform-independent hashing by widening `isize`
// and `usize` integers to 64 bits on 32-bit systems and byte-swapping integers
// on big-endian systems before passing them to the writing functions. This
// causes the input byte sequence to look identical on big- and little-endian
// systems (supposing `isize` and `usize` values can be represented in 32 bits),
// which ensures platform-independent results.
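//
// For illustration only, a caller could normalize a `usize` before writing it
// along these lines (hypothetical helper, not part of this module):
//
//     fn write_usize_platform_independent(hasher: &mut SipHasher128, x: usize) {
//         // Widen to 64 bits, and byte-swap on big-endian targets, so the
//         // buffered byte sequence is identical on every platform.
//         hasher.write_u64((x as u64).to_le());
//     }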

impl SipHasher128 {
    #[inline]
    pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 {
        let mut hasher = SipHasher128 {
            nbuf: 0,
            buf: MaybeUninit::uninit_array(),
            // The initial state is the standard SipHash constants
            // ("somepseudorandomlygeneratedbytes") XORed with the keys.
            state: State {
                v0: key0 ^ 0x736f6d6570736575,
                // The XOR with 0xee is only done in the 128-bit version of the
                // algorithm.
                v1: key1 ^ (0x646f72616e646f6d ^ 0xee),
                v2: key0 ^ 0x6c7967656e657261,
                v3: key1 ^ 0x7465646279746573,
            },
            processed: 0,
        };

        unsafe {
            // Initialize spill because we read from it in short_write_process_buffer.
            *hasher.buf.get_unchecked_mut(BUFFER_SPILL_INDEX) = MaybeUninit::zeroed();
        }

        hasher
    }

    // A specialized write function for values with size <= 8.
    #[inline]
    fn short_write<T>(&mut self, x: T) {
        let size = mem::size_of::<T>();
        let nbuf = self.nbuf;
        debug_assert!(size <= 8);
        debug_assert!(nbuf < BUFFER_SIZE_BYTES);
        debug_assert!(nbuf + size < BUFFER_SIZE_BYTES_SPILL);

        if nbuf + size < BUFFER_SIZE_BYTES {
            unsafe {
                // The memcpy call is optimized away because the size is known.
                let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
                ptr::copy_nonoverlapping(&x as *const _ as *const u8, dst, size);
            }

            self.nbuf = nbuf + size;

            return;
        }

        unsafe { self.short_write_process_buffer(x) }
    }

    // A specialized write function for values with size <= 8 that should only
    // be called when the write would cause the buffer to fill.
    //
    // SAFETY: the write of x into self.buf starting at byte offset self.nbuf
    // must cause self.buf to become fully initialized (and not overflow) if it
    // wasn't already.
    #[inline(never)]
    unsafe fn short_write_process_buffer<T>(&mut self, x: T) {
        let size = mem::size_of::<T>();
        let nbuf = self.nbuf;
        debug_assert!(size <= 8);
        debug_assert!(nbuf < BUFFER_SIZE_BYTES);
        debug_assert!(nbuf + size >= BUFFER_SIZE_BYTES);
        debug_assert!(nbuf + size < BUFFER_SIZE_BYTES_SPILL);

        // Copy first part of input into end of buffer, possibly into spill
        // element. The memcpy call is optimized away because the size is known.
        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
        ptr::copy_nonoverlapping(&x as *const _ as *const u8, dst, size);

        // Process buffer.
        for i in 0..BUFFER_SIZE_ELEMS {
            let elem = self.buf.get_unchecked(i).assume_init().to_le();
            self.state.v3 ^= elem;
            Sip24Rounds::c_rounds(&mut self.state);
            self.state.v0 ^= elem;
        }

        // Copy remaining input into start of buffer by copying size - 1
        // bytes from the spill (at most size - 1 bytes could have overflowed
        // into the spill). The memcpy call is optimized away because the size
        // is known. And the whole copy is optimized away for size == 1.
        let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
        ptr::copy_nonoverlapping(src, self.buf.as_mut_ptr() as *mut u8, size - 1);

        // This function should only be called when the write fills the buffer.
        // Therefore, when size == 1, the new self.nbuf must be zero. The size
        // is statically known, so the branch is optimized away.
        self.nbuf = if size == 1 { 0 } else { nbuf + size - BUFFER_SIZE_BYTES };
        self.processed += BUFFER_SIZE_BYTES;
    }

    // A write function for byte slices.
    #[inline]
    fn slice_write(&mut self, msg: &[u8]) {
        let length = msg.len();
        let nbuf = self.nbuf;
        debug_assert!(nbuf < BUFFER_SIZE_BYTES);

        if nbuf + length < BUFFER_SIZE_BYTES {
            unsafe {
                let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);

                if length < 8 {
                    copy_nonoverlapping_small(msg.as_ptr(), dst, length);
                } else {
                    // This memcpy is *not* optimized away.
                    ptr::copy_nonoverlapping(msg.as_ptr(), dst, length);
                }
            }

            self.nbuf = nbuf + length;

            return;
        }

        unsafe { self.slice_write_process_buffer(msg) }
    }

    // A write function for byte slices that should only be called when the
    // write would cause the buffer to fill.
    //
    // SAFETY: self.buf must be initialized up to the byte offset self.nbuf, and
    // msg must contain enough bytes to initialize the rest of the element
    // containing the byte offset self.nbuf.
    #[inline(never)]
    unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) {
        let length = msg.len();
        let nbuf = self.nbuf;
        debug_assert!(nbuf < BUFFER_SIZE_BYTES);
        debug_assert!(nbuf + length >= BUFFER_SIZE_BYTES);
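
        // Worked example (illustrative numbers only): with nbuf == 5 and a
        // 70-byte msg, 3 bytes complete buffer element 0, which is then hashed.
        // The next 64 bytes are hashed as eight unaligned u64 reads, and the
        // final 3 bytes are copied to the start of the buffer, leaving nbuf == 3.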

        // Always copy first part of input into current element of buffer.
        // This function should only be called when the write fills the buffer,
        // so we know that there is enough input to fill the current element.
        let valid_in_elem = nbuf & 0x7;
        let needed_in_elem = 8 - valid_in_elem;

        let src = msg.as_ptr();
        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
        copy_nonoverlapping_small(src, dst, needed_in_elem);

        // Process buffer.

        // Using nbuf / 8 + 1 rather than (nbuf + needed_in_elem) / 8 to show
        // the compiler that this loop's upper bound is > 0. We know that is
        // true, because the last step ensured we have a full element in the buffer.
        let last = nbuf / 8 + 1;

        for i in 0..last {
            let elem = self.buf.get_unchecked(i).assume_init().to_le();
            self.state.v3 ^= elem;
            Sip24Rounds::c_rounds(&mut self.state);
            self.state.v0 ^= elem;
        }

        // Process the remaining u64-sized chunks of input.
        let mut processed = needed_in_elem;
        let input_left = length - processed;
        let u64s_left = input_left / 8;
        let u8s_left = input_left & 0x7;

        for _ in 0..u64s_left {
            let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
            self.state.v3 ^= elem;
            Sip24Rounds::c_rounds(&mut self.state);
            self.state.v0 ^= elem;
            processed += 8;
        }

        // Copy remaining input into start of buffer.
        let src = msg.as_ptr().add(processed);
        let dst = self.buf.as_mut_ptr() as *mut u8;
        copy_nonoverlapping_small(src, dst, u8s_left);

        self.nbuf = u8s_left;
        self.processed += nbuf + processed;
    }

    #[inline]
    pub fn finish128(mut self) -> (u64, u64) {
        debug_assert!(self.nbuf < BUFFER_SIZE_BYTES);

        // Process full elements in buffer.
        let last = self.nbuf / 8;

        // Since we're consuming self, avoid updating members for a potential
        // performance gain.
        let mut state = self.state;

        for i in 0..last {
            let elem = unsafe { self.buf.get_unchecked(i).assume_init().to_le() };
            state.v3 ^= elem;
            Sip24Rounds::c_rounds(&mut state);
            state.v0 ^= elem;
        }

        // Get remaining partial element.
        let elem = if self.nbuf % 8 != 0 {
            unsafe {
                // Ensure element is initialized by writing zero bytes. At most
                // seven are required given the above check. It's safe to write
                // this many because we have the spill element and we maintain
                // self.nbuf such that this write will start before the spill.
                let dst = (self.buf.as_mut_ptr() as *mut u8).add(self.nbuf);
                ptr::write_bytes(dst, 0, 7);
                self.buf.get_unchecked(last).assume_init().to_le()
            }
        } else {
            0
        };

        // Finalize the hash. Per SipHash, the top byte of the final word is the
        // total input length mod 256; the low bytes are the remaining partial
        // element.
        let length = self.processed + self.nbuf;
        let b: u64 = ((length as u64 & 0xff) << 56) | elem;

        state.v3 ^= b;
        Sip24Rounds::c_rounds(&mut state);
        state.v0 ^= b;

        state.v2 ^= 0xee;
        Sip24Rounds::d_rounds(&mut state);
        let _0 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;

        state.v1 ^= 0xdd;
        Sip24Rounds::d_rounds(&mut state);
        let _1 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;

        (_0, _1)
    }
}

impl Hasher for SipHasher128 {
    #[inline]
    fn write_u8(&mut self, i: u8) {
        self.short_write(i);
    }

    #[inline]
    fn write_u16(&mut self, i: u16) {
        self.short_write(i);
    }

    #[inline]
    fn write_u32(&mut self, i: u32) {
        self.short_write(i);
    }

    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.short_write(i);
    }

    #[inline]
    fn write_usize(&mut self, i: usize) {
        self.short_write(i);
    }

    #[inline]
    fn write_i8(&mut self, i: i8) {
        self.short_write(i as u8);
    }

    #[inline]
    fn write_i16(&mut self, i: i16) {
        self.short_write(i as u16);
    }

    #[inline]
    fn write_i32(&mut self, i: i32) {
        self.short_write(i as u32);
    }

    #[inline]
    fn write_i64(&mut self, i: i64) {
        self.short_write(i as u64);
    }

    #[inline]
    fn write_isize(&mut self, i: isize) {
        self.short_write(i as usize);
    }

    #[inline]
    fn write(&mut self, msg: &[u8]) {
        self.slice_write(msg);
    }

    fn finish(&self) -> u64 {
        panic!("SipHasher128 cannot provide valid 64 bit hashes")
    }
}

#[derive(Debug, Clone, Default)]
struct Sip24Rounds;

impl Sip24Rounds {
    #[inline]
    fn c_rounds(state: &mut State) {
        compress!(state);
        compress!(state);
    }

    #[inline]
    fn d_rounds(state: &mut State) {
        compress!(state);
        compress!(state);
        compress!(state);
        compress!(state);
    }
}