| Field | Value | Date |
|---|---|---|
| author | Srinivasarao P <spathi@codeaurora.org> | 2018-08-28 17:27:24 +0530 |
| committer | Srinivasarao P <spathi@codeaurora.org> | 2018-08-28 17:28:39 +0530 |
| commit | b87d31674ac9e8203f3a30b53f6a697bf5cf36b6 (patch) | |
| tree | 6b9b8869b7d60d93b4c78b5ab2cd03b3f4a5a2e6 /lib/xxhash.c | |
| parent | 40eed1f50062df731c0bf79af27c463b7d2003a1 (diff) | |
| parent | 5e24b4e4d3724968a1c106e9dae091ef4dfc578e (diff) | |
Merge android-4.4.153 (5e24b4e) into msm-4.4
* refs/heads/tmp-5e24b4e
Linux 4.4.153
ovl: warn instead of error if d_type is not supported
ovl: Do d_type check only if work dir creation was successful
ovl: Ensure upper filesystem supports d_type
x86/mm: Fix use-after-free of ldt_struct
x86/mm/pat: Fix L1TF stable backport for CPA
ANDROID: x86_64_cuttlefish_defconfig: Enable lz4 compression for zram
UPSTREAM: drivers/block/zram/zram_drv.c: fix bug storing backing_dev
BACKPORT: zram: introduce zram memory tracking
BACKPORT: zram: record accessed second
BACKPORT: zram: mark incompressible page as ZRAM_HUGE
UPSTREAM: zram: correct flag name of ZRAM_ACCESS
UPSTREAM: zram: Delete gendisk before cleaning up the request queue
UPSTREAM: drivers/block/zram/zram_drv.c: make zram_page_end_io() static
BACKPORT: zram: set BDI_CAP_STABLE_WRITES once
UPSTREAM: zram: fix null dereference of handle
UPSTREAM: zram: add config and doc file for writeback feature
BACKPORT: zram: read page from backing device
BACKPORT: zram: write incompressible pages to backing device
BACKPORT: zram: identify asynchronous IO's return value
BACKPORT: zram: add free space management in backing device
UPSTREAM: zram: add interface to specif backing device
UPSTREAM: zram: rename zram_decompress_page to __zram_bvec_read
UPSTREAM: zram: inline zram_compress
UPSTREAM: zram: clean up duplicated codes in __zram_bvec_write
Linux 4.4.152
reiserfs: fix broken xattr handling (heap corruption, bad retval)
i2c: imx: Fix race condition in dma read
PCI: pciehp: Fix use-after-free on unplug
PCI: Skip MPS logic for Virtual Functions (VFs)
PCI: hotplug: Don't leak pci_slot on registration failure
parisc: Remove unnecessary barriers from spinlock.h
bridge: Propagate vlan add failure to user
packet: refine ring v3 block size test to hold one frame
netfilter: conntrack: dccp: treat SYNC/SYNCACK as invalid if no prior state
xfrm_user: prevent leaking 2 bytes of kernel memory
parisc: Remove ordered stores from syscall.S
ext4: fix spectre gadget in ext4_mb_regular_allocator()
KVM: irqfd: fix race between EPOLLHUP and irq_bypass_register_consumer
staging: android: ion: check for kref overflow
tcp: identify cryptic messages as TCP seq # bugs
net: qca_spi: Fix log level if probe fails
net: qca_spi: Make sure the QCA7000 reset is triggered
net: qca_spi: Avoid packet drop during initial sync
net: usb: rtl8150: demote allmulti message to dev_dbg()
net/ethernet/freescale/fman: fix cross-build error
drm/nouveau/gem: off by one bugs in nouveau_gem_pushbuf_reloc_apply()
tcp: remove DELAYED ACK events in DCTCP
qlogic: check kstrtoul() for errors
packet: reset network header if packet shorter than ll reserved space
ixgbe: Be more careful when modifying MAC filters
ARM: dts: am3517.dtsi: Disable reference to OMAP3 OTG controller
ARM: 8780/1: ftrace: Only set kernel memory back to read-only after boot
perf llvm-utils: Remove bashism from kernel include fetch script
bnxt_en: Fix for system hang if request_irq fails
drm/armada: fix colorkey mode property
ieee802154: fakelb: switch from BUG_ON() to WARN_ON() on problem
ieee802154: at86rf230: use __func__ macro for debug messages
ieee802154: at86rf230: switch from BUG_ON() to WARN_ON() on problem
ARM: pxa: irq: fix handling of ICMR registers in suspend/resume
netfilter: x_tables: set module owner for icmp(6) matches
smsc75xx: Add workaround for gigabit link up hardware errata.
kasan: fix shadow_size calculation error in kasan_module_alloc
tracing: Use __printf markup to silence compiler
ARM: imx_v4_v5_defconfig: Select ULPI support
ARM: imx_v6_v7_defconfig: Select ULPI support
HID: wacom: Correct touch maximum XY of 2nd-gen Intuos
m68k: fix "bad page state" oops on ColdFire boot
bnx2x: Fix receiving tx-timeout in error or recovery state.
drm/exynos: decon5433: Fix WINCONx reset value
drm/exynos: decon5433: Fix per-plane global alpha for XRGB modes
drm/exynos: gsc: Fix support for NV16/61, YUV420/YVU420 and YUV422 modes
md/raid10: fix that replacement cannot complete recovery after reassemble
dmaengine: k3dma: Off by one in k3_of_dma_simple_xlate()
ARM: dts: da850: Fix interrups property for gpio
selftests/x86/sigreturn/64: Fix spurious failures on AMD CPUs
perf report powerpc: Fix crash if callchain is empty
perf test session topology: Fix test on s390
usb: xhci: increase CRS timeout value
ARM: dts: am437x: make edt-ft5x06 a wakeup source
brcmfmac: stop watchdog before detach and free everything
cxgb4: when disabling dcb set txq dcb priority to 0
Smack: Mark inode instant in smack_task_to_inode
ipv6: mcast: fix unsolicited report interval after receiving querys
locking/lockdep: Do not record IRQ state within lockdep code
net: davinci_emac: match the mdio device against its compatible if possible
ARC: Enable machine_desc->init_per_cpu for !CONFIG_SMP
net: propagate dev_get_valid_name return code
net: hamradio: use eth_broadcast_addr
enic: initialize enic->rfs_h.lock in enic_probe
qed: Add sanity check for SIMD fastpath handler.
arm64: make secondary_start_kernel() notrace
scsi: xen-scsifront: add error handling for xenbus_printf
usb: gadget: dwc2: fix memory leak in gadget_init()
usb: gadget: composite: fix delayed_status race condition when set_interface
usb: dwc2: fix isoc split in transfer with no data
ARM: dts: Cygnus: Fix I2C controller interrupt type
selftests: sync: add config fragment for testing sync framework
selftests: zram: return Kselftest Skip code for skipped tests
selftests: user: return Kselftest Skip code for skipped tests
selftests: static_keys: return Kselftest Skip code for skipped tests
selftests: pstore: return Kselftest Skip code for skipped tests
netfilter: ipv6: nf_defrag: reduce struct net memory waste
ARC: Explicitly add -mmedium-calls to CFLAGS
ANDROID: x86_64_cuttlefish_defconfig: Enable zram and zstd
BACKPORT: crypto: zstd - Add zstd support
UPSTREAM: zram: add zstd to the supported algorithms list
UPSTREAM: lib: Add zstd modules
UPSTREAM: lib: Add xxhash module
UPSTREAM: zram: rework copy of compressor name in comp_algorithm_store()
UPSTREAM: zram: constify attribute_group structures.
UPSTREAM: zram: count same page write as page_stored
UPSTREAM: zram: reduce load operation in page_same_filled
UPSTREAM: zram: use zram_free_page instead of open-coded
UPSTREAM: zram: introduce zram data accessor
UPSTREAM: zram: remove zram_meta structure
UPSTREAM: zram: use zram_slot_lock instead of raw bit_spin_lock op
BACKPORT: zram: partial IO refactoring
BACKPORT: zram: handle multiple pages attached bio's bvec
UPSTREAM: zram: fix operator precedence to get offset
BACKPORT: zram: extend zero pages to same element pages
BACKPORT: zram: remove waitqueue for IO done
UPSTREAM: zram: remove obsolete sysfs attrs
UPSTREAM: zram: support BDI_CAP_STABLE_WRITES
UPSTREAM: zram: revalidate disk under init_lock
BACKPORT: mm: support anonymous stable page
UPSTREAM: zram: use __GFP_MOVABLE for memory allocation
UPSTREAM: zram: drop gfp_t from zcomp_strm_alloc()
UPSTREAM: zram: add more compression algorithms
UPSTREAM: zram: delete custom lzo/lz4
UPSTREAM: zram: cosmetic: cleanup documentation
UPSTREAM: zram: use crypto api to check alg availability
BACKPORT: zram: switch to crypto compress API
UPSTREAM: zram: rename zstrm find-release functions
UPSTREAM: zram: introduce per-device debug_stat sysfs node
UPSTREAM: zram: remove max_comp_streams internals
UPSTREAM: zram: user per-cpu compression streams
BACKPORT: zsmalloc: require GFP in zs_malloc()
UPSTREAM: zram/zcomp: do not zero out zcomp private pages
UPSTREAM: zram: pass gfp from zcomp frontend to backend
UPSTREAM: socket: close race condition between sock_close() and sockfs_setattr()
ANDROID: Refresh x86_64_cuttlefish_defconfig
Linux 4.4.151
isdn: Disable IIOCDBGVAR
Bluetooth: avoid killing an already killed socket
x86/mm: Simplify p[g4um]d_page() macros
serial: 8250_dw: always set baud rate in dw8250_set_termios
ACPI / PM: save NVS memory for ASUS 1025C laptop
ACPI: save NVS memory for Lenovo G50-45
USB: option: add support for DW5821e
USB: serial: sierra: fix potential deadlock at close
ALSA: vxpocket: Fix invalid endian conversions
ALSA: memalloc: Don't exceed over the requested size
ALSA: hda: Correct Asrock B85M-ITX power_save blacklist entry
ALSA: cs5535audio: Fix invalid endian conversion
ALSA: virmidi: Fix too long output trigger loop
ALSA: vx222: Fix invalid endian conversions
ALSA: hda - Turn CX8200 into D3 as well upon reboot
ALSA: hda - Sleep for 10ms after entering D3 on Conexant codecs
net_sched: fix NULL pointer dereference when delete tcindex filter
vsock: split dwork to avoid reinitializations
net_sched: Fix missing res info when create new tc_index filter
llc: use refcount_inc_not_zero() for llc_sap_find()
l2tp: use sk_dst_check() to avoid race on sk->sk_dst_cache
dccp: fix undefined behavior with 'cwnd' shift in ccid2_cwnd_restart()
Conflicts:
drivers/block/zram/zram_drv.c
drivers/staging/android/ion/ion.c
include/linux/swap.h
mm/zsmalloc.c
Change-Id: I1c437ac5133503a939d06d51ec778b65371df6d1
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Diffstat (limited to 'lib/xxhash.c')

| Mode | Path | Lines |
|---|---|---|
| -rw-r--r-- | lib/xxhash.c | 500 |

1 file changed, 500 insertions, 0 deletions
diff --git a/lib/xxhash.c b/lib/xxhash.c
new file mode 100644
index 000000000000..aa61e2a3802f
--- /dev/null
+++ b/lib/xxhash.c
@@ -0,0 +1,500 @@
+/*
+ * xxHash - Extremely Fast Hash algorithm
+ * Copyright (C) 2012-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following disclaimer
+ *     in the documentation and/or other materials provided with the
+ *     distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at:
+ * - xxHash homepage: http://cyan4973.github.io/xxHash/
+ * - xxHash source repository: https://github.com/Cyan4973/xxHash
+ */
+
+#include <asm/unaligned.h>
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/xxhash.h>
+
+/*-*************************************
+ * Macros
+ **************************************/
+#define xxh_rotl32(x, r) ((x << r) | (x >> (32 - r)))
+#define xxh_rotl64(x, r) ((x << r) | (x >> (64 - r)))
+
+#ifdef __LITTLE_ENDIAN
+# define XXH_CPU_LITTLE_ENDIAN 1
+#else
+# define XXH_CPU_LITTLE_ENDIAN 0
+#endif
+
+/*-*************************************
+ * Constants
+ **************************************/
+static const uint32_t PRIME32_1 = 2654435761U;
+static const uint32_t PRIME32_2 = 2246822519U;
+static const uint32_t PRIME32_3 = 3266489917U;
+static const uint32_t PRIME32_4 = 668265263U;
+static const uint32_t PRIME32_5 = 374761393U;
+
+static const uint64_t PRIME64_1 = 11400714785074694791ULL;
+static const uint64_t PRIME64_2 = 14029467366897019727ULL;
+static const uint64_t PRIME64_3 = 1609587929392839161ULL;
+static const uint64_t PRIME64_4 = 9650029242287828579ULL;
+static const uint64_t PRIME64_5 = 2870177450012600261ULL;
+
+/*-**************************
+ * Utils
+ ***************************/
+void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+EXPORT_SYMBOL(xxh32_copy_state);
+
+void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+EXPORT_SYMBOL(xxh64_copy_state);
+
+/*-***************************
+ * Simple Hash Functions
+ ****************************/
+static uint32_t xxh32_round(uint32_t seed, const uint32_t input)
+{
+	seed += input * PRIME32_2;
+	seed = xxh_rotl32(seed, 13);
+	seed *= PRIME32_1;
+	return seed;
+}
+
+uint32_t xxh32(const void *input, const size_t len, const uint32_t seed)
+{
+	const uint8_t *p = (const uint8_t *)input;
+	const uint8_t *b_end = p + len;
+	uint32_t h32;
+
+	if (len >= 16) {
+		const uint8_t *const limit = b_end - 16;
+		uint32_t v1 = seed + PRIME32_1 + PRIME32_2;
+		uint32_t v2 = seed + PRIME32_2;
+		uint32_t v3 = seed + 0;
+		uint32_t v4 = seed - PRIME32_1;
+
+		do {
+			v1 = xxh32_round(v1, get_unaligned_le32(p));
+			p += 4;
+			v2 = xxh32_round(v2, get_unaligned_le32(p));
+			p += 4;
+			v3 = xxh32_round(v3, get_unaligned_le32(p));
+			p += 4;
+			v4 = xxh32_round(v4, get_unaligned_le32(p));
+			p += 4;
+		} while (p <= limit);
+
+		h32 = xxh_rotl32(v1, 1) + xxh_rotl32(v2, 7) +
+			xxh_rotl32(v3, 12) + xxh_rotl32(v4, 18);
+	} else {
+		h32 = seed + PRIME32_5;
+	}
+
+	h32 += (uint32_t)len;
+
+	while (p + 4 <= b_end) {
+		h32 += get_unaligned_le32(p) * PRIME32_3;
+		h32 = xxh_rotl32(h32, 17) * PRIME32_4;
+		p += 4;
+	}
+
+	while (p < b_end) {
+		h32 += (*p) * PRIME32_5;
+		h32 = xxh_rotl32(h32, 11) * PRIME32_1;
+		p++;
+	}
+
+	h32 ^= h32 >> 15;
+	h32 *= PRIME32_2;
+	h32 ^= h32 >> 13;
+	h32 *= PRIME32_3;
+	h32 ^= h32 >> 16;
+
+	return h32;
+}
+EXPORT_SYMBOL(xxh32);
+
+static uint64_t xxh64_round(uint64_t acc, const uint64_t input)
+{
+	acc += input * PRIME64_2;
+	acc = xxh_rotl64(acc, 31);
+	acc *= PRIME64_1;
+	return acc;
+}
+
+static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val)
+{
+	val = xxh64_round(0, val);
+	acc ^= val;
+	acc = acc * PRIME64_1 + PRIME64_4;
+	return acc;
+}
+
+uint64_t xxh64(const void *input, const size_t len, const uint64_t seed)
+{
+	const uint8_t *p = (const uint8_t *)input;
+	const uint8_t *const b_end = p + len;
+	uint64_t h64;
+
+	if (len >= 32) {
+		const uint8_t *const limit = b_end - 32;
+		uint64_t v1 = seed + PRIME64_1 + PRIME64_2;
+		uint64_t v2 = seed + PRIME64_2;
+		uint64_t v3 = seed + 0;
+		uint64_t v4 = seed - PRIME64_1;
+
+		do {
+			v1 = xxh64_round(v1, get_unaligned_le64(p));
+			p += 8;
+			v2 = xxh64_round(v2, get_unaligned_le64(p));
+			p += 8;
+			v3 = xxh64_round(v3, get_unaligned_le64(p));
+			p += 8;
+			v4 = xxh64_round(v4, get_unaligned_le64(p));
+			p += 8;
+		} while (p <= limit);
+
+		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
+			xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
+		h64 = xxh64_merge_round(h64, v1);
+		h64 = xxh64_merge_round(h64, v2);
+		h64 = xxh64_merge_round(h64, v3);
+		h64 = xxh64_merge_round(h64, v4);
+
+	} else {
+		h64 = seed + PRIME64_5;
+	}
+
+	h64 += (uint64_t)len;
+
+	while (p + 8 <= b_end) {
+		const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
+
+		h64 ^= k1;
+		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
+		p += 8;
+	}
+
+	if (p + 4 <= b_end) {
+		h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
+		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+		p += 4;
+	}
+
+	while (p < b_end) {
+		h64 ^= (*p) * PRIME64_5;
+		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
+		p++;
+	}
+
+	h64 ^= h64 >> 33;
+	h64 *= PRIME64_2;
+	h64 ^= h64 >> 29;
+	h64 *= PRIME64_3;
+	h64 ^= h64 >> 32;
+
+	return h64;
+}
+EXPORT_SYMBOL(xxh64);
+
+/*-**************************************************
+ * Advanced Hash Functions
+ ***************************************************/
+void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed)
+{
+	/* use a local state for memcpy() to avoid strict-aliasing warnings */
+	struct xxh32_state state;
+
+	memset(&state, 0, sizeof(state));
+	state.v1 = seed + PRIME32_1 + PRIME32_2;
+	state.v2 = seed + PRIME32_2;
+	state.v3 = seed + 0;
+	state.v4 = seed - PRIME32_1;
+	memcpy(statePtr, &state, sizeof(state));
+}
+EXPORT_SYMBOL(xxh32_reset);
+
+void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed)
+{
+	/* use a local state for memcpy() to avoid strict-aliasing warnings */
+	struct xxh64_state state;
+
+	memset(&state, 0, sizeof(state));
+	state.v1 = seed + PRIME64_1 + PRIME64_2;
+	state.v2 = seed + PRIME64_2;
+	state.v3 = seed + 0;
+	state.v4 = seed - PRIME64_1;
+	memcpy(statePtr, &state, sizeof(state));
+}
+EXPORT_SYMBOL(xxh64_reset);
+
+int xxh32_update(struct xxh32_state *state, const void *input, const size_t len)
+{
+	const uint8_t *p = (const uint8_t *)input;
+	const uint8_t *const b_end = p + len;
+
+	if (input == NULL)
+		return -EINVAL;
+
+	state->total_len_32 += (uint32_t)len;
+	state->large_len |= (len >= 16) | (state->total_len_32 >= 16);
+
+	if (state->memsize + len < 16) { /* fill in tmp buffer */
+		memcpy((uint8_t *)(state->mem32) + state->memsize, input, len);
+		state->memsize += (uint32_t)len;
+		return 0;
+	}
+
+	if (state->memsize) { /* some data left from previous update */
+		const uint32_t *p32 = state->mem32;
+
+		memcpy((uint8_t *)(state->mem32) + state->memsize, input,
+			16 - state->memsize);
+
+		state->v1 = xxh32_round(state->v1, get_unaligned_le32(p32));
+		p32++;
+		state->v2 = xxh32_round(state->v2, get_unaligned_le32(p32));
+		p32++;
+		state->v3 = xxh32_round(state->v3, get_unaligned_le32(p32));
+		p32++;
+		state->v4 = xxh32_round(state->v4, get_unaligned_le32(p32));
+		p32++;
+
+		p += 16-state->memsize;
+		state->memsize = 0;
+	}
+
+	if (p <= b_end - 16) {
+		const uint8_t *const limit = b_end - 16;
+		uint32_t v1 = state->v1;
+		uint32_t v2 = state->v2;
+		uint32_t v3 = state->v3;
+		uint32_t v4 = state->v4;
+
+		do {
+			v1 = xxh32_round(v1, get_unaligned_le32(p));
+			p += 4;
+			v2 = xxh32_round(v2, get_unaligned_le32(p));
+			p += 4;
+			v3 = xxh32_round(v3, get_unaligned_le32(p));
+			p += 4;
+			v4 = xxh32_round(v4, get_unaligned_le32(p));
+			p += 4;
+		} while (p <= limit);
+
+		state->v1 = v1;
+		state->v2 = v2;
+		state->v3 = v3;
+		state->v4 = v4;
+	}
+
+	if (p < b_end) {
+		memcpy(state->mem32, p, (size_t)(b_end-p));
+		state->memsize = (uint32_t)(b_end-p);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(xxh32_update);
+
+uint32_t xxh32_digest(const struct xxh32_state *state)
+{
+	const uint8_t *p = (const uint8_t *)state->mem32;
+	const uint8_t *const b_end = (const uint8_t *)(state->mem32) +
+		state->memsize;
+	uint32_t h32;
+
+	if (state->large_len) {
+		h32 = xxh_rotl32(state->v1, 1) + xxh_rotl32(state->v2, 7) +
+			xxh_rotl32(state->v3, 12) + xxh_rotl32(state->v4, 18);
+	} else {
+		h32 = state->v3 /* == seed */ + PRIME32_5;
+	}
+
+	h32 += state->total_len_32;
+
+	while (p + 4 <= b_end) {
+		h32 += get_unaligned_le32(p) * PRIME32_3;
+		h32 = xxh_rotl32(h32, 17) * PRIME32_4;
+		p += 4;
+	}
+
+	while (p < b_end) {
+		h32 += (*p) * PRIME32_5;
+		h32 = xxh_rotl32(h32, 11) * PRIME32_1;
+		p++;
+	}
+
+	h32 ^= h32 >> 15;
+	h32 *= PRIME32_2;
+	h32 ^= h32 >> 13;
+	h32 *= PRIME32_3;
+	h32 ^= h32 >> 16;
+
+	return h32;
+}
+EXPORT_SYMBOL(xxh32_digest);
+
+int xxh64_update(struct xxh64_state *state, const void *input, const size_t len)
+{
+	const uint8_t *p = (const uint8_t *)input;
+	const uint8_t *const b_end = p + len;
+
+	if (input == NULL)
+		return -EINVAL;
+
+	state->total_len += len;
+
+	if (state->memsize + len < 32) { /* fill in tmp buffer */
+		memcpy(((uint8_t *)state->mem64) + state->memsize, input, len);
+		state->memsize += (uint32_t)len;
+		return 0;
+	}
+
+	if (state->memsize) { /* tmp buffer is full */
+		uint64_t *p64 = state->mem64;
+
+		memcpy(((uint8_t *)p64) + state->memsize, input,
+			32 - state->memsize);
+
+		state->v1 = xxh64_round(state->v1, get_unaligned_le64(p64));
+		p64++;
+		state->v2 = xxh64_round(state->v2, get_unaligned_le64(p64));
+		p64++;
+		state->v3 = xxh64_round(state->v3, get_unaligned_le64(p64));
+		p64++;
+		state->v4 = xxh64_round(state->v4, get_unaligned_le64(p64));
+
+		p += 32 - state->memsize;
+		state->memsize = 0;
+	}
+
+	if (p + 32 <= b_end) {
+		const uint8_t *const limit = b_end - 32;
+		uint64_t v1 = state->v1;
+		uint64_t v2 = state->v2;
+		uint64_t v3 = state->v3;
+		uint64_t v4 = state->v4;
+
+		do {
+			v1 = xxh64_round(v1, get_unaligned_le64(p));
+			p += 8;
+			v2 = xxh64_round(v2, get_unaligned_le64(p));
+			p += 8;
+			v3 = xxh64_round(v3, get_unaligned_le64(p));
+			p += 8;
+			v4 = xxh64_round(v4, get_unaligned_le64(p));
+			p += 8;
+		} while (p <= limit);
+
+		state->v1 = v1;
+		state->v2 = v2;
+		state->v3 = v3;
+		state->v4 = v4;
+	}
+
+	if (p < b_end) {
+		memcpy(state->mem64, p, (size_t)(b_end-p));
+		state->memsize = (uint32_t)(b_end - p);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(xxh64_update);
+
+uint64_t xxh64_digest(const struct xxh64_state *state)
+{
+	const uint8_t *p = (const uint8_t *)state->mem64;
+	const uint8_t *const b_end = (const uint8_t *)state->mem64 +
+		state->memsize;
+	uint64_t h64;
+
+	if (state->total_len >= 32) {
+		const uint64_t v1 = state->v1;
+		const uint64_t v2 = state->v2;
+		const uint64_t v3 = state->v3;
+		const uint64_t v4 = state->v4;
+
+		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
+			xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
+		h64 = xxh64_merge_round(h64, v1);
+		h64 = xxh64_merge_round(h64, v2);
+		h64 = xxh64_merge_round(h64, v3);
+		h64 = xxh64_merge_round(h64, v4);
+	} else {
+		h64 = state->v3 + PRIME64_5;
+	}
+
+	h64 += (uint64_t)state->total_len;
+
+	while (p + 8 <= b_end) {
+		const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
+
+		h64 ^= k1;
+		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
+		p += 8;
+	}
+
+	if (p + 4 <= b_end) {
+		h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
+		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+		p += 4;
+	}
+
+	while (p < b_end) {
+		h64 ^= (*p) * PRIME64_5;
+		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
+		p++;
+	}
+
+	h64 ^= h64 >> 33;
+	h64 *= PRIME64_2;
+	h64 ^= h64 >> 29;
+	h64 *= PRIME64_3;
+	h64 ^= h64 >> 32;
+
+	return h64;
+}
+EXPORT_SYMBOL(xxh64_digest);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("xxHash");
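The one-shot helpers (xxh32()/xxh64()) and the streaming reset/update/digest pairs above are the entire public surface of this file; in this series the xxhash module is pulled in as a prerequisite of the zstd library added alongside it in the log. The sketch below is a minimal, hypothetical self-test module showing how a caller might exercise both interfaces. It is not part of this merge: the module name xxhash_demo is made up for illustration, and it assumes a kernel built with the <linux/xxhash.h> API added by this patch series.

```c
/*
 * Hypothetical demo module (not part of this merge): hashes the same buffer
 * with the one-shot xxh64() helper and with the streaming
 * xxh64_reset()/xxh64_update()/xxh64_digest() API, then checks they agree.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/xxhash.h>

static int __init xxhash_demo_init(void)
{
	static const char msg[] = "hello, xxhash";
	const uint64_t seed = 0;
	struct xxh64_state state;
	uint64_t one_shot, streamed;

	/* One-shot API: hash the whole buffer in a single call. */
	one_shot = xxh64(msg, strlen(msg), seed);

	/* Streaming API: feed the buffer in two chunks, then finalize. */
	xxh64_reset(&state, seed);
	xxh64_update(&state, msg, 5);
	xxh64_update(&state, msg + 5, strlen(msg) - 5);
	streamed = xxh64_digest(&state);

	pr_info("xxhash_demo: one-shot %llx, streamed %llx (%s)\n",
		(unsigned long long)one_shot,
		(unsigned long long)streamed,
		one_shot == streamed ? "match" : "MISMATCH");
	return 0;
}

static void __exit xxhash_demo_exit(void)
{
}

module_init(xxhash_demo_init);
module_exit(xxhash_demo_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("xxhash demo (illustrative only)");
```

The streaming form is what crypto consumers such as the zstd code typically need, since input arrives in pieces; both paths must produce the same digest for the same seed and byte sequence.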
