//! This is a copy of `core::hash::sip` adapted to providing 128 bit hashes.
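//!
//! A minimal usage sketch (illustrative only; the keys and inputs below are
//! arbitrary, and the `Hasher` trait must be in scope for the `write_*` calls):
//!
//! ```ignore
//! let mut hasher = SipHasher128::new_with_keys(0, 0);
//! hasher.write_u32(0xDDCC_BBAA);
//! hasher.write(b"some more bytes");
//! let (h1, h2) = hasher.finish128();
//! ```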

use std::cmp;
use std::hash::Hasher;
use std::mem;
use std::ptr;

#[cfg(test)]
mod tests;

#[derive(Debug, Clone)]
pub struct SipHasher128 {
    k0: u64,
    k1: u64,
    length: usize, // how many bytes we've processed
    state: State,  // hash State
    tail: u64,     // unprocessed bytes le
    ntail: usize,  // how many bytes in tail are valid
}

#[derive(Debug, Clone, Copy)]
#[repr(C)]
struct State {
    // v0, v2 and v1, v3 show up in pairs in the algorithm,
    // and simd implementations of SipHash will use vectors
    // of v02 and v13. By placing them in this order in the struct,
    // the compiler can pick up on just a few simd optimizations by itself.
    v0: u64,
    v2: u64,
    v1: u64,
    v3: u64,
}

macro_rules! compress {
    ($state:expr) => {{ compress!($state.v0, $state.v1, $state.v2, $state.v3) }};
    ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {{
        $v0 = $v0.wrapping_add($v1);
        $v1 = $v1.rotate_left(13);
        $v1 ^= $v0;
        $v0 = $v0.rotate_left(32);
        $v2 = $v2.wrapping_add($v3);
        $v3 = $v3.rotate_left(16);
        $v3 ^= $v2;
        $v0 = $v0.wrapping_add($v3);
        $v3 = $v3.rotate_left(21);
        $v3 ^= $v0;
        $v2 = $v2.wrapping_add($v1);
        $v1 = $v1.rotate_left(17);
        $v1 ^= $v2;
        $v2 = $v2.rotate_left(32);
    }};
}

/// Loads an integer of the desired type from a byte stream, in LE order. Uses
/// `copy_nonoverlapping` to let the compiler generate the most efficient way
/// to load it from a possibly unaligned address.
///
/// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
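///
/// For example (illustrative, and only valid inside an `unsafe` block because
/// of the unchecked indexing): `load_int_le!(&[0xAA, 0xBB, 0xCC, 0xDD], 0, u32)`
/// evaluates to `0xDDCC_BBAA` on both little- and big-endian targets, since the
/// bytes are always interpreted in little-endian order.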
macro_rules! load_int_le {
    ($buf:expr, $i:expr, $int_ty:ident) => {{
        debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
        let mut data = 0 as $int_ty;
        ptr::copy_nonoverlapping(
            $buf.get_unchecked($i),
            &mut data as *mut _ as *mut u8,
            mem::size_of::<$int_ty>(),
        );
        data.to_le()
    }};
}

/// Loads a u64 using up to 7 bytes of a byte slice. It looks clumsy but the
/// `copy_nonoverlapping` calls that occur (via `load_int_le!`) all have fixed
/// sizes and avoid calling `memcpy`, which is good for speed.
///
/// Unsafe because: unchecked indexing at start..start+len
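///
/// For example (illustrative): with `buf = [0xAA, 0xBB, 0xCC]`, `start = 0` and
/// `len = 3`, the result is `0x0000_0000_00CC_BBAA` -- the bytes are assembled
/// from the least-significant end of the returned u64.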
#[inline]
unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
    debug_assert!(len < 8);
    let mut i = 0; // current byte index (from LSB) in the output u64
    let mut out = 0;
    if i + 3 < len {
        out = load_int_le!(buf, start + i, u32) as u64;
        i += 4;
    }
    if i + 1 < len {
        out |= (load_int_le!(buf, start + i, u16) as u64) << (i * 8);
        i += 2
    }
    if i < len {
        out |= (*buf.get_unchecked(start + i) as u64) << (i * 8);
        i += 1;
    }
    debug_assert_eq!(i, len);
    out
}

impl SipHasher128 {
    #[inline]
    pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 {
        let mut state = SipHasher128 {
            k0: key0,
            k1: key1,
            length: 0,
            state: State { v0: 0, v1: 0, v2: 0, v3: 0 },
            tail: 0,
            ntail: 0,
        };
        state.reset();
        state
    }

    #[inline]
    fn reset(&mut self) {
        self.length = 0;
        self.state.v0 = self.k0 ^ 0x736f6d6570736575;
        self.state.v1 = self.k1 ^ 0x646f72616e646f6d;
        self.state.v2 = self.k0 ^ 0x6c7967656e657261;
        self.state.v3 = self.k1 ^ 0x7465646279746573;
        self.ntail = 0;

        // This is only done in the 128 bit version:
        self.state.v1 ^= 0xee;
    }

    // A specialized write function for values with size <= 8.
    //
    // The input must be zero-extended to 64-bits by the caller. This extension
    // isn't hashed, but the implementation requires it for correctness.
    //
    // This function, given the same integer size and value, has the same effect
    // on both little- and big-endian hardware. It operates on values without
    // depending on their sequence in memory, so is independent of endianness.
    //
    // However, we want SipHasher128 to be platform-dependent, in order to be
    // consistent with the platform-dependent SipHasher in libstd. In other
    // words, we want:
    //
    // - little-endian: `write_u32(0xDDCCBBAA)` == `write([0xAA, 0xBB, 0xCC, 0xDD])`
    // - big-endian: `write_u32(0xDDCCBBAA)` == `write([0xDD, 0xCC, 0xBB, 0xAA])`
    //
    // Therefore, in order to produce endian-dependent results, SipHasher128's
    // `write_xxx` Hasher trait methods byte-swap `x` prior to zero-extending.
    //
    // If clients of SipHasher128 itself want platform-independent results, they
    // *also* must byte-swap integer inputs before invoking the `write_xxx`
    // methods on big-endian hardware (that is, two byte-swaps must occur--one
    // in the client, and one in SipHasher128). Additionally, they must extend
    // `usize` and `isize` types to 64 bits on 32-bit systems.
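    //
    // (A small illustrative `#[cfg(test)]` sketch checking this equivalence
    // appears right after this `impl` block.)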
    #[inline]
    fn short_write<T>(&mut self, _x: T, x: u64) {
        let size = mem::size_of::<T>();
        self.length += size;

        // The original number must be zero-extended, not sign-extended.
        debug_assert!(if size < 8 { x >> (8 * size) == 0 } else { true });

        // The number of bytes needed to fill `self.tail`.
        let needed = 8 - self.ntail;

        // SipHash parses the input stream as 8-byte little-endian integers.
        // Inputs are put into `self.tail` until 8 bytes of data have been
        // collected, and then that word is processed.
        //
        // For example, imagine that `self.tail` is 0x0000_00EE_DDCC_BBAA,
        // `self.ntail` is 5 (because 5 bytes have been put into `self.tail`),
        // and `needed` is therefore 3.
        //
        // - Scenario 1, `self.write_u8(0xFF)`: we have already zero-extended
        //   the input to 0x0000_0000_0000_00FF. We now left-shift it five
        //   bytes, giving 0x0000_FF00_0000_0000. We then bitwise-OR that value
        //   into `self.tail`, resulting in 0x0000_FFEE_DDCC_BBAA.
        //   (Zero-extension of the original input is critical in this scenario
        //   because we don't want the high two bytes of `self.tail` to be
        //   touched by the bitwise-OR.) `self.tail` is not yet full, so we
        //   return early, after updating `self.ntail` to 6.
        //
        // - Scenario 2, `self.write_u32(0xIIHH_GGFF)`: we have already
        //   zero-extended the input to 0x0000_0000_IIHH_GGFF. We now
        //   left-shift it five bytes, giving 0xHHGG_FF00_0000_0000. We then
        //   bitwise-OR that value into `self.tail`, resulting in
        //   0xHHGG_FFEE_DDCC_BBAA. `self.tail` is now full, and we can use it
        //   to update `self.state`. (As mentioned above, this assumes a
        //   little-endian machine; on a big-endian machine we would have
        //   byte-swapped 0xIIHH_GGFF in the caller, giving 0xFFGG_HHII, and we
        //   would then end up bitwise-ORing 0xGGHH_II00_0000_0000 into
        //   `self.tail`).
        //
        self.tail |= x << (8 * self.ntail);
        if size < needed {
            self.ntail += size;
            return;
        }

        // `self.tail` is full, process it.
        self.state.v3 ^= self.tail;
        Sip24Rounds::c_rounds(&mut self.state);
        self.state.v0 ^= self.tail;

        // Continuing scenario 2: we have one byte left over from the input. We
        // set `self.ntail` to 1 and `self.tail` to `0x0000_0000_IIHH_GGFF >>
        // 8*3`, which is 0x0000_0000_0000_00II. (Or on a big-endian machine
        // the prior byte-swapping would leave us with 0x0000_0000_0000_00FF.)
        //
        // The `if` is needed to avoid shifting by 64 bits, which Rust
        // complains about.
        self.ntail = size - needed;
        self.tail = if needed < 8 { x >> (8 * needed) } else { 0 };
    }

    #[inline]
    pub fn finish128(mut self) -> (u64, u64) {
        let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail;

        self.state.v3 ^= b;
        Sip24Rounds::c_rounds(&mut self.state);
        self.state.v0 ^= b;

        self.state.v2 ^= 0xee;
        Sip24Rounds::d_rounds(&mut self.state);
        let _0 = self.state.v0 ^ self.state.v1 ^ self.state.v2 ^ self.state.v3;

        self.state.v1 ^= 0xdd;
        Sip24Rounds::d_rounds(&mut self.state);
        let _1 = self.state.v0 ^ self.state.v1 ^ self.state.v2 ^ self.state.v3;
        (_0, _1)
    }
}
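
// A minimal sketch, not part of the original `tests` module: it exercises the
// endian-dependence documented above `short_write`, namely that hashing an
// integer is equivalent to hashing its native-endian bytes. The module and
// test names here are illustrative additions.
#[cfg(test)]
mod endianness_sketch {
    use super::SipHasher128;
    use std::hash::Hasher;

    #[test]
    fn write_u32_matches_native_endian_bytes() {
        let x = 0xDDCC_BBAAu32;

        let mut a = SipHasher128::new_with_keys(1, 2);
        a.write_u32(x);

        let mut b = SipHasher128::new_with_keys(1, 2);
        b.write(&x.to_ne_bytes());

        assert_eq!(a.finish128(), b.finish128());
    }
}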

impl Hasher for SipHasher128 {
    #[inline]
    fn write_u8(&mut self, i: u8) {
        self.short_write(i, i as u64);
    }

    #[inline]
    fn write_u16(&mut self, i: u16) {
        self.short_write(i, i.to_le() as u64);
    }

    #[inline]
    fn write_u32(&mut self, i: u32) {
        self.short_write(i, i.to_le() as u64);
    }

    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.short_write(i, i.to_le() as u64);
    }

    #[inline]
    fn write_usize(&mut self, i: usize) {
        self.short_write(i, i.to_le() as u64);
    }

    #[inline]
    fn write_i8(&mut self, i: i8) {
        self.short_write(i, i as u8 as u64);
    }

    #[inline]
    fn write_i16(&mut self, i: i16) {
        self.short_write(i, (i as u16).to_le() as u64);
    }

    #[inline]
    fn write_i32(&mut self, i: i32) {
        self.short_write(i, (i as u32).to_le() as u64);
    }

    #[inline]
    fn write_i64(&mut self, i: i64) {
        self.short_write(i, (i as u64).to_le() as u64);
    }

    #[inline]
    fn write_isize(&mut self, i: isize) {
        self.short_write(i, (i as usize).to_le() as u64);
    }

    #[inline]
    fn write(&mut self, msg: &[u8]) {
        let length = msg.len();
        self.length += length;

        let mut needed = 0;

        if self.ntail != 0 {
            needed = 8 - self.ntail;
            self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << (8 * self.ntail);
            if length < needed {
                self.ntail += length;
                return;
            } else {
                self.state.v3 ^= self.tail;
                Sip24Rounds::c_rounds(&mut self.state);
                self.state.v0 ^= self.tail;
                self.ntail = 0;
            }
        }

        // Buffered tail is now flushed, process new input.
        let len = length - needed;
        let left = len & 0x7;

        let mut i = needed;
        while i < len - left {
            let mi = unsafe { load_int_le!(msg, i, u64) };

            self.state.v3 ^= mi;
            Sip24Rounds::c_rounds(&mut self.state);
            self.state.v0 ^= mi;

            i += 8;
        }

        self.tail = unsafe { u8to64_le(msg, i, left) };
        self.ntail = left;
    }

    fn finish(&self) -> u64 {
        panic!("SipHasher128 cannot provide valid 64 bit hashes")
    }
}

#[derive(Debug, Clone, Default)]
struct Sip24Rounds;

impl Sip24Rounds {
    #[inline]
    fn c_rounds(state: &mut State) {
        compress!(state);
        compress!(state);
    }

    #[inline]
    fn d_rounds(state: &mut State) {
        compress!(state);
        compress!(state);
        compress!(state);
        compress!(state);
    }
}