#include <cstddef>   // size_t
#include <cstdint>   // uint64_t, uint8_t
#include <cstring>   // memcpy, memset

// Set to 1 on platforms where unaligned 64-bit reads are safe and cheap.
#define ALLOW_UNALIGNED_READS 1
// Left-rotate a 64-bit value by k bits (k must be in 1..63).
static inline uint64_t Rot64(uint64_t x, int k) {
    return (x << k) | (x >> (64 - k));
}
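// A minimal illustration, not part of the original source: rotating the top
// bit left by one wraps it around into bit 0, i.e. the shift is circular.
// Note k must stay within 1..63; k == 0 would shift right by 64, which is
// undefined behaviour (all call sites below use in-range constants).
static inline uint64_t Rot64Example(void) {
    return Rot64(0x8000000000000000ULL, 1);  // yields 1
}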
// The core mixing function: folds one 96-byte block of data into the twelve
// 64-bit state variables s0..s11.
static inline void Mix(const uint64_t *data,
                       uint64_t &s0, uint64_t &s1, uint64_t &s2, uint64_t &s3,
                       uint64_t &s4, uint64_t &s5, uint64_t &s6, uint64_t &s7,
                       uint64_t &s8, uint64_t &s9, uint64_t &s10, uint64_t &s11) {
    // ...
    s10 = Rot64(s10, 22);
    // ...
    s11 = Rot64(s11, 46);
    // ...
}
// One pass of the finalization mix over the twelve state variables h0..h11.
static inline void EndPartial(uint64_t &h0, uint64_t &h1, uint64_t &h2, uint64_t &h3,
                              uint64_t &h4, uint64_t &h5, uint64_t &h6, uint64_t &h7,
                              uint64_t &h8, uint64_t &h9, uint64_t &h10, uint64_t &h11) {
    // ...
    h10 = Rot64(h10, 53);
    // ...
    h11 = Rot64(h11, 42);
    // ...
}
// Final mixing: absorb the last block, then run three passes of EndPartial.
static inline void End(const uint64_t *data,
                       uint64_t &h0, uint64_t &h1, uint64_t &h2, uint64_t &h3,
                       uint64_t &h4, uint64_t &h5, uint64_t &h6, uint64_t &h7,
                       uint64_t &h8, uint64_t &h9, uint64_t &h10, uint64_t &h11) {
    // ...
    EndPartial(h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
    EndPartial(h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
    EndPartial(h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
}
// The mixing function for the short-message path (four-word state).
static inline void ShortMix(uint64_t &h0, uint64_t &h1, uint64_t &h2, uint64_t &h3) {
    // ...
}
// The finalization mix for the short-message path.
static inline void ShortEnd(uint64_t &h0, uint64_t &h1, uint64_t &h2, uint64_t &h3) {
    // ...
}
// Number of 64-bit words of internal state.
static const size_t sc_numVars = 12;

// Size of one internal block: 12 words * 8 bytes = 96 bytes.
static const size_t sc_blockSize = sc_numVars * 8;

// Size of the internal buffer, and the cutoff below which the short-hash
// path is used: two blocks, 192 bytes.
static const size_t sc_bufSize = 2 * sc_blockSize;

// An arbitrary odd constant with an irregular bit pattern; no other special
// mathematical properties are required of it.
static const uint64_t sc_const = 0xdeadbeefdeadbeefULL;
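// A small sanity sketch, not part of the original source, spelling out the
// derived sizes implied by the constants above.
static_assert(sc_blockSize == 96, "one block is 12 * 8 = 96 bytes");
static_assert(sc_bufSize == 192, "the short/long cutoff is two blocks");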
// Hash messages shorter than sc_bufSize; *hash1/*hash2 carry the two 64-bit
// seeds in and the two halves of the 128-bit result out.
static void Short128(const void *data, size_t length, uint64_t *hash1, uint64_t *hash2) {
    uint64_t buf[2 * sc_numVars];
    // ...
    // u is a union giving byte- and 64-bit-word views of the read position.
    u.p8 = (const uint8_t *)data;
    // ...
    // If unaligned reads are not allowed and the input is misaligned,
    // work from a copy in the aligned buffer instead.
    memcpy(buf, data, length);
    // ...
    size_t remainder = length % 32;
    uint64_t a = *hash1;
    uint64_t b = *hash2;
    uint64_t c = sc_const;
    uint64_t d = sc_const;
    // ...
    const uint64_t *end = u.p64 + (length / 32) * 4;
    // ...
    // Consume all whole 32-byte groups of input.
    for (; u.p64 < end; u.p64 += 4) {
        // ...
        ShortMix(a, b, c, d);
        // ...
    }
    // Handle 16 or more remaining bytes.
    if (remainder >= 16) {
        // ...
        ShortMix(a, b, c, d);
        // ...
    }
    // Fold the message length (top byte of d) and the trailing bytes into c
    // and d; in the full source a switch on remainder selects only the bytes
    // that are actually present.
    d += ((uint64_t)length) << 56;
    // ...
    d += ((uint64_t)u.p8[14]) << 48;
    d += ((uint64_t)u.p8[13]) << 40;
    d += ((uint64_t)u.p8[12]) << 32;
    // ...
    d += ((uint64_t)u.p8[10]) << 16;
    d += ((uint64_t)u.p8[9]) << 8;
    d += (uint64_t)u.p8[8];
    // ...
    c += ((uint64_t)u.p8[6]) << 48;
    c += ((uint64_t)u.p8[5]) << 40;
    c += ((uint64_t)u.p8[4]) << 32;
    // ...
    c += ((uint64_t)u.p8[2]) << 16;
    c += ((uint64_t)u.p8[1]) << 8;
    c += (uint64_t)u.p8[0];
    // ...
    ShortEnd(a, b, c, d);
    *hash1 = a;
    *hash2 = b;
}
// 64-bit convenience wrapper around the short hash.
static uint64_t Short64(const void *data, size_t length, uint64_t seed) {
    uint64_t hash1 = seed;
    Short128(data, length, &hash1, &seed);
    return hash1;
}
// Hash a message of any length; *hash1/*hash2 carry the seeds in and the
// 128-bit result out.
static void Hash128(const void *data, size_t length, uint64_t *hash1, uint64_t *hash2) {
    // Short messages take the dedicated short-hash path.
    if (length < sc_bufSize) {
        Short128(data, length, hash1, hash2);
        return;
    }
    uint64_t h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11;
    uint64_t buf[sc_numVars];
    // ... (end, remainder, and the pointer-view union u are declared here)

    // Initialize the twelve state words from the two seeds and sc_const.
    h0 = h3 = h6 = h9 = *hash1;
    h1 = h4 = h7 = h10 = *hash2;
    h2 = h5 = h8 = h11 = sc_const;
    u.p8 = (const uint8_t *)data;
    end = u.p64 + (length / sc_blockSize) * sc_numVars;
    // ...
    // Whole blocks, mixed in place when unaligned reads are allowed or the
    // input happens to be 8-byte aligned:
    while (u.p64 < end) {
        Mix(u.p64, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
        u.p64 += sc_numVars;
    }
    // ...
    // Otherwise, copy each block into the aligned buffer before mixing:
    while (u.p64 < end) {
        memcpy(buf, u.p64, sc_blockSize);
        Mix(buf, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
        u.p64 += sc_numVars;
    }
    // Pad the last partial block with zeros, record its length in the final
    // byte, then run the final mix.
    remainder = (length - ((const uint8_t *)end - (const uint8_t *)data));
    memcpy(buf, end, remainder);
    memset(((uint8_t *)buf) + remainder, 0, sc_blockSize - remainder);
    ((uint8_t *)buf)[sc_blockSize - 1] = remainder;
    End(buf, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11);
    *hash1 = h0;
    *hash2 = h1;
}
// 64-bit convenience wrapper around Hash128.
static uint64_t Hash64(const void *data, size_t length, uint64_t seed) {
    uint64_t hash1 = seed;
    Hash128(data, length, &hash1, &seed);
    return hash1;
}
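// A minimal usage sketch, not part of the original source; it assumes the
// functions above are visible in this translation unit and uses arbitrary
// example seeds and an arbitrary example message.
static void HashUsageExample(void) {
    const char msg[] = "The quick brown fox jumps over the lazy dog";
    size_t len = sizeof(msg) - 1;  // hash the bytes, not the trailing NUL

    // 64-bit hash with a caller-chosen seed.
    uint64_t h = Hash64(msg, len, 0x123456789abcdef0ULL);

    // 128-bit hash: seeds in, both halves of the result out.
    uint64_t lo = 1, hi = 2;
    Hash128(msg, len, &lo, &hi);

    (void)h; (void)lo; (void)hi;
}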