Diffstat (limited to 'src')
-rw-r--r--   src/Kbuild                                       |   6
-rw-r--r--   src/Makefile                                     |   6
-rw-r--r--   src/compat/Kbuild.include                        |  11
-rw-r--r--   src/compat/compat.h (renamed from src/compat.h)  |  14
-rw-r--r--   src/compat/dst_cache/dst_cache.c                 | 177
-rw-r--r--   src/compat/dst_cache/include/net/dst_cache.h     |  97
-rw-r--r--   src/compat/siphash/include/linux/siphash.h       | 140
-rw-r--r--   src/compat/siphash/siphash.c                     | 551
-rw-r--r--   src/crypto/siphash.c                             | 204
-rw-r--r--   src/crypto/siphash.h                             |  92
-rw-r--r--   src/hashtables.c                                 |  11
-rw-r--r--   src/hashtables.h                                 |   2
-rw-r--r--   src/main.c                                       |   3
-rw-r--r--   src/peer.h                                       |   2
14 files changed, 990 insertions(+), 326 deletions(-)
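For orientation (not part of the patch): the relocated header defines siphash_key_t as a struct wrapping two u64 words rather than the old bare array type, so callers generate the key into the struct and pass it by pointer, which is what the hashtables.c hunk below converts to. A minimal sketch of that usage, with hypothetical example_* names:

/* Sketch only; example_* names are hypothetical, nbuckets is a power of two. */
#include <linux/random.h>
#include <linux/siphash.h>

static siphash_key_t example_key __read_mostly;

static void example_key_init(void)
{
	get_random_bytes(&example_key, sizeof(example_key));
}

static unsigned int example_bucket(const u8 *pubkey, size_t len, unsigned int nbuckets)
{
	/* A keyed 64-bit PRF; since its bits are uniformly distributed,
	 * masking off the low bits yields an attacker-opaque bucket index. */
	return siphash(pubkey, len, &example_key) & (nbuckets - 1);
}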
diff --git a/src/Kbuild b/src/Kbuild
--- a/src/Kbuild
+++ b/src/Kbuild
@@ -1,9 +1,9 @@
 ccflags-y := -O3 -fvisibility=hidden
 ccflags-$(CONFIG_WIREGUARD_DEBUG) := -DDEBUG -g
 ccflags-y += -Wframe-larger-than=8192
-ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' -include $(src)/compat.h
+ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
 wireguard-y := main.o noise.o device.o peer.o timers.o data.o send.o receive.o socket.o config.o hashtables.o routingtable.o ratelimiter.o cookie.o
-wireguard-y += crypto/curve25519.o crypto/chacha20poly1305.o crypto/blake2s.o crypto/siphash.o
+wireguard-y += crypto/curve25519.o crypto/chacha20poly1305.o crypto/blake2s.o
 ifeq ($(CONFIG_X86_64),y)
 wireguard-y += crypto/chacha20-ssse3-x86_64.o crypto/poly1305-sse2-x86_64.o
 avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1$(comma)4)$(comma)%ymm2,yes,no)
@@ -12,6 +12,8 @@ ifeq ($(avx2_supported),yes)
 endif
 endif
 
+include $(src)/compat/Kbuild.include
+
 ifneq ($(KBUILD_EXTMOD),)
 CONFIG_WIREGUARD := m
 ifeq ($(CONFIG_WIREGUARD_PARALLEL),)
diff --git a/src/Makefile b/src/Makefile
index 7873007..c054825 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -42,10 +42,12 @@ install:
 DKMS_TOP_LEVEL := Makefile Kbuild Kconfig $(filter-out wireguard.mod.c, $(wildcard *.c)) $(filter-out version.h, $(wildcard *.h)) version.h dkms.conf
 DKMS_SELFTEST_LEVEL := $(wildcard selftest/*.c) $(wildcard selftest/*.h)
 DKMS_CRYPTO_LEVEL := $(wildcard crypto/*.c) $(wildcard crypto/*.h) $(wildcard crypto/*.S)
-dkms-install: $(DKMS_TOP_LEVEL) $(DKMS_SELFTEST_LEVEL) $(DKMS_CRYPTO_LEVEL)
+DKMS_COMPAT_LEVEL := $(shell find compat/ -name '*.c' -o -name '*.h' -o -name '*.include')
+dkms-install: $(DKMS_TOP_LEVEL) $(DKMS_SELFTEST_LEVEL) $(DKMS_CRYPTO_LEVEL) $(DKMS_COMPAT_LEVEL)
 	@install -v -m0644 -D -t$(DESTDIR)$(DKMSDIR) $(DKMS_TOP_LEVEL)
 	@install -v -m0644 -D -t$(DESTDIR)$(DKMSDIR)/selftest $(DKMS_SELFTEST_LEVEL)
 	@install -v -m0644 -D -t$(DESTDIR)$(DKMSDIR)/crypto $(DKMS_CRYPTO_LEVEL)
+	@for file in $(DKMS_COMPAT_LEVEL); do install -v -m0644 -D $$file $(DESTDIR)$(DKMSDIR)/$$file; done
 
 tools:
 	$(MAKE) -C tools
@@ -55,7 +57,7 @@ check:
 	$(MAKE) -C tools check
 
 cloc: clean
-	cloc $(filter-out compat.h, $(wildcard *.c) $(wildcard *.h))
+	cloc $(wildcard *.c) $(wildcard *.h)
 
 -include tests/debug.mk
diff --git a/src/compat/Kbuild.include b/src/compat/Kbuild.include
new file mode 100644
index 0000000..905e268
--- /dev/null
+++ b/src/compat/Kbuild.include
@@ -0,0 +1,11 @@
+ccflags-y += -include $(src)/compat/compat.h
+
+ifeq ($(wildcard $(srctree)/include/linux/siphash.h),)
+ccflags-y += -I$(src)/compat/siphash/include
+wireguard-y += compat/siphash/siphash.o
+endif
+
+ifeq ($(wildcard $(srctree)/include/net/dst_cache.h),)
+ccflags-y += -I$(src)/compat/dst_cache/include
+wireguard-y += compat/dst_cache/dst_cache.o
+endif
diff --git a/src/compat.h b/src/compat/compat.h
index 7564da1..6a9bfd9 100644
--- a/src/compat.h
+++ b/src/compat/compat.h
@@ -153,20 +153,6 @@ __attribute__((unused)) static inline int udp_sock_create_new(struct net *net, s
 #define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a)
 #endif
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
-struct dst_cache { };
-static inline struct dst_entry *dst_cache_get(struct dst_cache *dst_cache) { return NULL; }
-static inline struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr) { return NULL; }
-static inline void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst, __be32 saddr) { }
-#if IS_ENABLED(CONFIG_IPV6)
-static inline void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst, const struct in6_addr *addr) { }
-static inline struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache, struct in6_addr *saddr) { return NULL; }
-#endif
-static inline void dst_cache_reset(struct dst_cache *dst_cache) { }
-static inline int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp) { return 0; }
-static inline void dst_cache_destroy(struct dst_cache *dst_cache) { }
-#endif
-
 /* https://lkml.org/lkml/2015/6/12/415 */
 #include <linux/netdevice.h>
 static inline struct net_device *netdev_pub(void *dev)
diff --git a/src/compat/dst_cache/dst_cache.c b/src/compat/dst_cache/dst_cache.c
new file mode 100644
index 0000000..145f112
--- /dev/null
+++ b/src/compat/dst_cache/dst_cache.c
@@ -0,0 +1,177 @@
+/*
+ * net/core/dst_cache.c - dst entry cache
+ *
+ * Copyright (c) 2016 Paolo Abeni <pabeni@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <net/dst_cache.h>
+#include <net/route.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ip6_fib.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)
+static inline u32 rt6_get_cookie(const struct rt6_info *rt)
+{
+	if ((unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
+		rt = (struct rt6_info *)(rt->dst.from);
+
+	return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+}
+#endif
+#endif
+#include <uapi/linux/in.h>
+
+struct dst_cache_pcpu {
+	unsigned long refresh_ts;
+	struct dst_entry *dst;
+	u32 cookie;
+	union {
+		struct in_addr in_saddr;
+		struct in6_addr in6_saddr;
+	};
+};
+
+static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache,
+				      struct dst_entry *dst, u32 cookie)
+{
+	dst_release(dst_cache->dst);
+	if (dst)
+		dst_hold(dst);
+
+	dst_cache->cookie = cookie;
+	dst_cache->dst = dst;
+}
+
+static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache,
+					       struct dst_cache_pcpu *idst)
+{
+	struct dst_entry *dst;
+
+	dst = idst->dst;
+	if (!dst)
+		goto fail;
+
+	/* the cache already holds a dst reference; it can't go away */
+	dst_hold(dst);
+
+	if (unlikely(!time_after(idst->refresh_ts, dst_cache->reset_ts) ||
+		     (dst->obsolete && !dst->ops->check(dst, idst->cookie)))) {
+		dst_cache_per_cpu_dst_set(idst, NULL, 0);
+		dst_release(dst);
+		goto fail;
+	}
+	return dst;
+
+fail:
+	idst->refresh_ts = jiffies;
+	return NULL;
+}
+
+struct dst_entry *dst_cache_get(struct dst_cache *dst_cache)
+{
+	if (!dst_cache->cache)
+		return NULL;
+
+	return dst_cache_per_cpu_get(dst_cache, this_cpu_ptr(dst_cache->cache));
+}
+EXPORT_SYMBOL_GPL(dst_cache_get);
+
+struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr)
+{
+	struct dst_cache_pcpu *idst;
+	struct dst_entry *dst;
+
+	if (!dst_cache->cache)
+		return NULL;
+
+	idst = this_cpu_ptr(dst_cache->cache);
+	dst = dst_cache_per_cpu_get(dst_cache, idst);
+	if (!dst)
+		return NULL;
+
+	*saddr = idst->in_saddr.s_addr;
+	return container_of(dst, struct rtable, dst);
+}
+EXPORT_SYMBOL_GPL(dst_cache_get_ip4);
+
+void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst,
+		       __be32 saddr)
+{
+	struct dst_cache_pcpu *idst;
+
+	if (!dst_cache->cache)
+		return;
+
+	idst = this_cpu_ptr(dst_cache->cache);
+	dst_cache_per_cpu_dst_set(idst, dst, 0);
+	idst->in_saddr.s_addr = saddr;
+}
+EXPORT_SYMBOL_GPL(dst_cache_set_ip4);
+
+#if IS_ENABLED(CONFIG_IPV6)
+void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
+		       const struct in6_addr *addr)
+{
+	struct dst_cache_pcpu *idst;
+
+	if (!dst_cache->cache)
+		return;
+
+	idst = this_cpu_ptr(dst_cache->cache);
+	dst_cache_per_cpu_dst_set(this_cpu_ptr(dst_cache->cache), dst,
+				  rt6_get_cookie((struct rt6_info *)dst));
+	idst->in6_saddr = *addr;
+}
+EXPORT_SYMBOL_GPL(dst_cache_set_ip6);
+
+struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache,
+				    struct in6_addr *saddr)
+{
+	struct dst_cache_pcpu *idst;
+	struct dst_entry *dst;
+
+	if (!dst_cache->cache)
+		return NULL;
+
+	idst = this_cpu_ptr(dst_cache->cache);
+	dst = dst_cache_per_cpu_get(dst_cache, idst);
+	if (!dst)
+		return NULL;
+
+	*saddr = idst->in6_saddr;
+	return dst;
+}
+EXPORT_SYMBOL_GPL(dst_cache_get_ip6);
+#endif
+
+int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp)
+{
+	dst_cache->cache = alloc_percpu_gfp(struct dst_cache_pcpu,
+					    gfp | __GFP_ZERO);
+	if (!dst_cache->cache)
+		return -ENOMEM;
+
+	dst_cache_reset(dst_cache);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dst_cache_init);
+
+void dst_cache_destroy(struct dst_cache *dst_cache)
+{
+	int i;
+
+	if (!dst_cache->cache)
+		return;
+
+	for_each_possible_cpu(i)
+		dst_release(per_cpu_ptr(dst_cache->cache, i)->dst);
+
+	free_percpu(dst_cache->cache);
+}
+EXPORT_SYMBOL_GPL(dst_cache_destroy);
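A sketch of the caller pattern this file supports (illustrative, not part of the patch; my_fib_lookup is a hypothetical stand-in for a real routing lookup). Because the slots are per-CPU, local BH must stay disabled across the get/set pair, as the header below documents:

#include <net/dst_cache.h>

struct rtable *my_fib_lookup(__be32 *saddr); /* hypothetical slow-path lookup */

static struct rtable *example_get_route(struct dst_cache *cache, __be32 *saddr)
{
	struct rtable *rt = dst_cache_get_ip4(cache, saddr); /* fast path */

	if (rt)
		return rt;
	rt = my_fib_lookup(saddr);
	if (!IS_ERR(rt))
		dst_cache_set_ip4(cache, &rt->dst, *saddr); /* reuse on next xmit */
	return rt;
}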
diff --git a/src/compat/dst_cache/include/net/dst_cache.h b/src/compat/dst_cache/include/net/dst_cache.h
new file mode 100644
index 0000000..151acca
--- /dev/null
+++ b/src/compat/dst_cache/include/net/dst_cache.h
@@ -0,0 +1,97 @@
+#ifndef _NET_DST_CACHE_H
+#define _NET_DST_CACHE_H
+
+#include <linux/jiffies.h>
+#include <net/dst.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ip6_fib.h>
+#endif
+
+struct dst_cache {
+	struct dst_cache_pcpu __percpu *cache;
+	unsigned long reset_ts;
+};
+
+/**
+ * dst_cache_get - perform cache lookup
+ * @dst_cache: the cache
+ *
+ * The caller should use dst_cache_get_ip4() if it needs to retrieve the
+ * source address to be used when xmitting to the cached dst.
+ * local BH must be disabled.
+ */
+struct dst_entry *dst_cache_get(struct dst_cache *dst_cache);
+
+/**
+ * dst_cache_get_ip4 - perform cache lookup and fetch ipv4 source address
+ * @dst_cache: the cache
+ * @saddr: return value for the retrieved source address
+ *
+ * local BH must be disabled.
+ */
+struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr);
+
+/**
+ * dst_cache_set_ip4 - store the ipv4 dst into the cache
+ * @dst_cache: the cache
+ * @dst: the entry to be cached
+ * @saddr: the source address to be stored inside the cache
+ *
+ * local BH must be disabled.
+ */
+void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst,
+		       __be32 saddr);
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+/**
+ * dst_cache_set_ip6 - store the ipv6 dst into the cache
+ * @dst_cache: the cache
+ * @dst: the entry to be cached
+ * @addr: the source address to be stored inside the cache
+ *
+ * local BH must be disabled.
+ */
+void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
+		       const struct in6_addr *addr);
+
+/**
+ * dst_cache_get_ip6 - perform cache lookup and fetch ipv6 source address
+ * @dst_cache: the cache
+ * @saddr: return value for the retrieved source address
+ *
+ * local BH must be disabled.
+ */
+struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache,
+				    struct in6_addr *saddr);
+#endif
+
+/**
+ * dst_cache_reset - invalidate the cache contents
+ * @dst_cache: the cache
+ *
+ * This does not free the cached dst, to avoid races and contentions.
+ * The dst will be freed on a later cache lookup.
+ */
+static inline void dst_cache_reset(struct dst_cache *dst_cache)
+{
+	dst_cache->reset_ts = jiffies;
+}
+
+/**
+ * dst_cache_init - initialize the cache, allocating the required storage
+ * @dst_cache: the cache
+ * @gfp: allocation flags
+ */
+int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp);
+
+/**
+ * dst_cache_destroy - empty the cache and free the allocated storage
+ * @dst_cache: the cache
+ *
+ * No synchronization is enforced: it must be called only when the cache
+ * is unused.
+ */
+void dst_cache_destroy(struct dst_cache *dst_cache);
+
+#endif
diff --git a/src/compat/siphash/include/linux/siphash.h b/src/compat/siphash/include/linux/siphash.h
new file mode 100644
index 0000000..fa7a6b9
--- /dev/null
+++ b/src/compat/siphash/include/linux/siphash.h
@@ -0,0 +1,140 @@
+/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#ifndef _LINUX_SIPHASH_H
+#define _LINUX_SIPHASH_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#define SIPHASH_ALIGNMENT __alignof__(u64)
+typedef struct {
+	u64 key[2];
+} siphash_key_t;
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
+#endif
+
+u64 siphash_1u64(const u64 a, const siphash_key_t *key);
+u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
+u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
+		 const siphash_key_t *key);
+u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
+		 const siphash_key_t *key);
+u64 siphash_1u32(const u32 a, const siphash_key_t *key);
+u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
+		 const siphash_key_t *key);
+
+static inline u64 siphash_2u32(const u32 a, const u32 b,
+			       const siphash_key_t *key)
+{
+	return siphash_1u64((u64)b << 32 | a, key);
+}
+static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c,
+			       const u32 d, const siphash_key_t *key)
+{
+	return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
+}
+
+static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
+				     const siphash_key_t *key)
+{
+	if (__builtin_constant_p(len) && len == 4)
+		return siphash_1u32(le32_to_cpup((const __le32 *)data), key);
+	if (__builtin_constant_p(len) && len == 8)
+		return siphash_1u64(le64_to_cpu(data[0]), key);
+	if (__builtin_constant_p(len) && len == 16)
+		return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+				    key);
+	if (__builtin_constant_p(len) && len == 24)
+		return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+				    le64_to_cpu(data[2]), key);
+	if (__builtin_constant_p(len) && len == 32)
+		return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+				    le64_to_cpu(data[2]), le64_to_cpu(data[3]),
+				    key);
+	return __siphash_aligned(data, len, key);
+}
+
+/**
+ * siphash - compute 64-bit siphash PRF value
+ * @data: buffer to hash
+ * @len: length of @data
+ * @key: the siphash key
+ */
+static inline u64 siphash(const void *data, size_t len,
+			  const siphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+		return __siphash_unaligned(data, len, key);
+#endif
+	return ___siphash_aligned(data, len, key);
+}
+
+#define HSIPHASH_ALIGNMENT __alignof__(unsigned long)
+typedef struct {
+	unsigned long key[2];
+} hsiphash_key_t;
+
+u32 __hsiphash_aligned(const void *data, size_t len,
+		       const hsiphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+			 const hsiphash_key_t *key);
+#endif
+
+u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
+u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
+u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
+		  const hsiphash_key_t *key);
+u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
+		  const hsiphash_key_t *key);
+
+static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
+				      const hsiphash_key_t *key)
+{
+	if (__builtin_constant_p(len) && len == 4)
+		return hsiphash_1u32(le32_to_cpu(data[0]), key);
+	if (__builtin_constant_p(len) && len == 8)
+		return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+				     key);
+	if (__builtin_constant_p(len) && len == 12)
+		return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+				     le32_to_cpu(data[2]), key);
+	if (__builtin_constant_p(len) && len == 16)
+		return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+				     le32_to_cpu(data[2]), le32_to_cpu(data[3]),
+				     key);
+	return __hsiphash_aligned(data, len, key);
+}
+
+/**
+ * hsiphash - compute 32-bit hsiphash PRF value
+ * @data: buffer to hash
+ * @len: length of @data
+ * @key: the hsiphash key
+ */
+static inline u32 hsiphash(const void *data, size_t len,
+			   const hsiphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+		return __hsiphash_unaligned(data, len, key);
+#endif
+	return ___hsiphash_aligned(data, len, key);
+}
+
+#endif /* _LINUX_SIPHASH_H */
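One detail worth noting in the header above: ___siphash_aligned() tests __builtin_constant_p(len), so hashing a fixed-size, u64-aligned struct collapses at compile time to one of the fixed-width helpers instead of the generic byte loop in siphash.c below. A sketch with hypothetical names:

#include <linux/siphash.h>

struct conn_id {	/* hypothetical 16-byte, u64-aligned key material */
	u64 local;
	u64 remote;
};

static u64 conn_id_hash(const struct conn_id *id, const siphash_key_t *key)
{
	/* sizeof(*id) == 16 is a compile-time constant, so on the aligned
	 * path this becomes a direct call to siphash_2u64(). */
	return siphash(id, sizeof(*id), key);
}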
diff --git a/src/compat/siphash/siphash.c b/src/compat/siphash/siphash.c
new file mode 100644
index 0000000..3ae58b4
--- /dev/null
+++ b/src/compat/siphash/siphash.c
@@ -0,0 +1,551 @@
+/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#include <linux/siphash.h>
+#include <asm/unaligned.h>
+
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+#include <linux/dcache.h>
+#include <asm/word-at-a-time.h>
+#endif
+
+#define SIPROUND \
+	do { \
+	v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
+	v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
+	v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
+	v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
+	} while (0)
+
+#define PREAMBLE(len) \
+	u64 v0 = 0x736f6d6570736575ULL; \
+	u64 v1 = 0x646f72616e646f6dULL; \
+	u64 v2 = 0x6c7967656e657261ULL; \
+	u64 v3 = 0x7465646279746573ULL; \
+	u64 b = ((u64)(len)) << 56; \
+	v3 ^= key->key[1]; \
+	v2 ^= key->key[0]; \
+	v1 ^= key->key[1]; \
+	v0 ^= key->key[0];
+
+#define POSTAMBLE \
+	v3 ^= b; \
+	SIPROUND; \
+	SIPROUND; \
+	v0 ^= b; \
+	v2 ^= 0xff; \
+	SIPROUND; \
+	SIPROUND; \
+	SIPROUND; \
+	SIPROUND; \
+	return (v0 ^ v1) ^ (v2 ^ v3);
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
+{
+	const u8 *end = data + len - (len % sizeof(u64));
+	const u8 left = len & (sizeof(u64) - 1);
+	u64 m;
+	PREAMBLE(len)
+	for (; data != end; data += sizeof(u64)) {
+		m = le64_to_cpup(data);
+		v3 ^= m;
+		SIPROUND;
+		SIPROUND;
+		v0 ^= m;
+	}
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+	if (left)
+		b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+						  bytemask_from_count(left)));
+#else
+	switch (left) {
+	case 7: b |= ((u64)end[6]) << 48;
+	case 6: b |= ((u64)end[5]) << 40;
+	case 5: b |= ((u64)end[4]) << 32;
+	case 4: b |= le32_to_cpup(data); break;
+	case 3: b |= ((u64)end[2]) << 16;
+	case 2: b |= le16_to_cpup(data); break;
+	case 1: b |= end[0];
+	}
+#endif
+	POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
+{
+	const u8 *end = data + len - (len % sizeof(u64));
+	const u8 left = len & (sizeof(u64) - 1);
+	u64 m;
+	PREAMBLE(len)
+	for (; data != end; data += sizeof(u64)) {
+		m = get_unaligned_le64(data);
+		v3 ^= m;
+		SIPROUND;
+		SIPROUND;
+		v0 ^= m;
+	}
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+	if (left)
+		b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+						  bytemask_from_count(left)));
+#else
+	switch (left) {
+	case 7: b |= ((u64)end[6]) << 48;
+	case 6: b |= ((u64)end[5]) << 40;
+	case 5: b |= ((u64)end[4]) << 32;
+	case 4: b |= get_unaligned_le32(end); break;
+	case 3: b |= ((u64)end[2]) << 16;
+	case 2: b |= get_unaligned_le16(end); break;
+	case 1: b |= end[0];
+	}
+#endif
+	POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_unaligned);
+#endif
+
+/**
+ * siphash_1u64 - compute 64-bit siphash PRF value of a u64
+ * @first: first u64
+ * @key: the siphash key
+ */
+u64 siphash_1u64(const u64 first, const siphash_key_t *key)
+{
+	PREAMBLE(8)
+	v3 ^= first;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= first;
+	POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u64);
+
+/**
+ * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64
+ * @first: first u64
+ * @second: second u64
+ * @key: the siphash key
+ */
+u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
+{
+	PREAMBLE(16)
+	v3 ^= first;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= first;
+	v3 ^= second;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= second;
+	POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_2u64);
+
+/**
+ * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @key: the siphash key
+ */
+u64 siphash_3u64(const u64 first, const u64 second, const u64 third,
+		 const siphash_key_t *key)
+{
+	PREAMBLE(24)
+	v3 ^= first;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= first;
+	v3 ^= second;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= second;
+	v3 ^= third;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= third;
+	POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u64);
+
+/**
+ * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @forth: fourth u64
+ * @key: the siphash key
+ */
+u64 siphash_4u64(const u64 first, const u64 second, const u64 third,
+		 const u64 forth, const siphash_key_t *key)
+{
+	PREAMBLE(32)
+	v3 ^= first;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= first;
+	v3 ^= second;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= second;
+	v3 ^= third;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= third;
+	v3 ^= forth;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= forth;
+	POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_4u64);
+
+u64 siphash_1u32(const u32 first, const siphash_key_t *key)
+{
+	PREAMBLE(4)
+	b |= first;
+	POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u32);
+
+u64 siphash_3u32(const u32 first, const u32 second, const u32 third,
+		 const siphash_key_t *key)
+{
+	u64 combined = (u64)second << 32 | first;
+	PREAMBLE(12)
+	v3 ^= combined;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= combined;
+	b |= third;
+	POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u32);
+
+#if BITS_PER_LONG == 64
+/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for
+ * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3.
+ */
+
+#define HSIPROUND SIPROUND
+#define HPREAMBLE(len) PREAMBLE(len)
+#define HPOSTAMBLE \
+	v3 ^= b; \
+	HSIPROUND; \
+	v0 ^= b; \
+	v2 ^= 0xff; \
+	HSIPROUND; \
+	HSIPROUND; \
+	HSIPROUND; \
+	return (v0 ^ v1) ^ (v2 ^ v3);
+
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+	const u8 *end = data + len - (len % sizeof(u64));
+	const u8 left = len & (sizeof(u64) - 1);
+	u64 m;
+	HPREAMBLE(len)
+	for (; data != end; data += sizeof(u64)) {
+		m = le64_to_cpup(data);
+		v3 ^= m;
+		HSIPROUND;
+		v0 ^= m;
+	}
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+	if (left)
+		b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+						  bytemask_from_count(left)));
+#else
+	switch (left) {
+	case 7: b |= ((u64)end[6]) << 48;
+	case 6: b |= ((u64)end[5]) << 40;
+	case 5: b |= ((u64)end[4]) << 32;
+	case 4: b |= le32_to_cpup(data); break;
+	case 3: b |= ((u64)end[2]) << 16;
+	case 2: b |= le16_to_cpup(data); break;
+	case 1: b |= end[0];
+	}
+#endif
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+			 const hsiphash_key_t *key)
+{
+	const u8 *end = data + len - (len % sizeof(u64));
+	const u8 left = len & (sizeof(u64) - 1);
+	u64 m;
+	HPREAMBLE(len)
+	for (; data != end; data += sizeof(u64)) {
+		m = get_unaligned_le64(data);
+		v3 ^= m;
+		HSIPROUND;
+		v0 ^= m;
+	}
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+	if (left)
+		b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+						  bytemask_from_count(left)));
+#else
+	switch (left) {
+	case 7: b |= ((u64)end[6]) << 48;
+	case 6: b |= ((u64)end[5]) << 40;
+	case 5: b |= ((u64)end[4]) << 32;
+	case 4: b |= get_unaligned_le32(end); break;
+	case 3: b |= ((u64)end[2]) << 16;
+	case 2: b |= get_unaligned_le16(end); break;
+	case 1: b |= end[0];
+	}
+#endif
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+#endif
+
+/**
+ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+	HPREAMBLE(4)
+	b |= first;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+	u64 combined = (u64)second << 32 | first;
+	HPREAMBLE(8)
+	v3 ^= combined;
+	HSIPROUND;
+	v0 ^= combined;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+		  const hsiphash_key_t *key)
+{
+	u64 combined = (u64)second << 32 | first;
+	HPREAMBLE(12)
+	v3 ^= combined;
+	HSIPROUND;
+	v0 ^= combined;
+	b |= third;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: fourth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+		  const u32 forth, const hsiphash_key_t *key)
+{
+	u64 combined = (u64)second << 32 | first;
+	HPREAMBLE(16)
+	v3 ^= combined;
+	HSIPROUND;
+	v0 ^= combined;
+	combined = (u64)forth << 32 | third;
+	v3 ^= combined;
+	HSIPROUND;
+	v0 ^= combined;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#else
+#define HSIPROUND \
+	do { \
+	v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
+	v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
+	v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
+	v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
+	} while (0)
+
+#define HPREAMBLE(len) \
+	u32 v0 = 0; \
+	u32 v1 = 0; \
+	u32 v2 = 0x6c796765U; \
+	u32 v3 = 0x74656462U; \
+	u32 b = ((u32)(len)) << 24; \
+	v3 ^= key->key[1]; \
+	v2 ^= key->key[0]; \
+	v1 ^= key->key[1]; \
+	v0 ^= key->key[0];
+
+#define HPOSTAMBLE \
+	v3 ^= b; \
+	HSIPROUND; \
+	v0 ^= b; \
+	v2 ^= 0xff; \
+	HSIPROUND; \
+	HSIPROUND; \
+	HSIPROUND; \
+	return v1 ^ v3;
+
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+	const u8 *end = data + len - (len % sizeof(u32));
+	const u8 left = len & (sizeof(u32) - 1);
+	u32 m;
+	HPREAMBLE(len)
+	for (; data != end; data += sizeof(u32)) {
+		m = le32_to_cpup(data);
+		v3 ^= m;
+		HSIPROUND;
+		v0 ^= m;
+	}
+	switch (left) {
+	case 3: b |= ((u32)end[2]) << 16;
+	case 2: b |= le16_to_cpup(data); break;
+	case 1: b |= end[0];
+	}
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+			 const hsiphash_key_t *key)
+{
+	const u8 *end = data + len - (len % sizeof(u32));
+	const u8 left = len & (sizeof(u32) - 1);
+	u32 m;
+	HPREAMBLE(len)
+	for (; data != end; data += sizeof(u32)) {
+		m = get_unaligned_le32(data);
+		v3 ^= m;
+		HSIPROUND;
+		v0 ^= m;
+	}
+	switch (left) {
+	case 3: b |= ((u32)end[2]) << 16;
+	case 2: b |= get_unaligned_le16(end); break;
+	case 1: b |= end[0];
+	}
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+#endif
+
+/**
+ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+	HPREAMBLE(4)
+	v3 ^= first;
+	HSIPROUND;
+	v0 ^= first;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+	HPREAMBLE(8)
+	v3 ^= first;
+	HSIPROUND;
+	v0 ^= first;
+	v3 ^= second;
+	HSIPROUND;
+	v0 ^= second;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+		  const hsiphash_key_t *key)
+{
+	HPREAMBLE(12)
+	v3 ^= first;
+	HSIPROUND;
+	v0 ^= first;
+	v3 ^= second;
+	HSIPROUND;
+	v0 ^= second;
+	v3 ^= third;
+	HSIPROUND;
+	v0 ^= third;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: fourth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+		  const u32 forth, const hsiphash_key_t *key)
+{
+	HPREAMBLE(16)
+	v3 ^= first;
+	HSIPROUND;
+	v0 ^= first;
+	v3 ^= second;
+	HSIPROUND;
+	v0 ^= second;
+	v3 ^= third;
+	HSIPROUND;
+	v0 ^= third;
+	v3 ^= forth;
+	HSIPROUND;
+	v0 ^= forth;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#endif
diff --git a/src/crypto/siphash.c b/src/crypto/siphash.c
deleted file mode 100644
index 86ff070..0000000
--- a/src/crypto/siphash.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/* Copyright (C) 2015-2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * SipHash: a fast short-input PRF
- * https://131002.net/siphash/
- *
- * This implementation is specifically for SipHash2-4.
- */
-
-#include "siphash.h"
-#include <asm/unaligned.h>
-
-#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
-#include <linux/dcache.h>
-#include <asm/word-at-a-time.h>
-#endif
-
-#define SIPROUND \
-	do { \
-	v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
-	v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
-	v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
-	v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
-	} while(0)
-
-#define PREAMBLE(len) \
-	u64 v0 = 0x736f6d6570736575ULL; \
-	u64 v1 = 0x646f72616e646f6dULL; \
-	u64 v2 = 0x6c7967656e657261ULL; \
-	u64 v3 = 0x7465646279746573ULL; \
-	u64 b = ((u64)len) << 56; \
-	v3 ^= key[1]; \
-	v2 ^= key[0]; \
-	v1 ^= key[1]; \
-	v0 ^= key[0];
-
-#define POSTAMBLE \
-	v3 ^= b; \
-	SIPROUND; \
-	SIPROUND; \
-	v0 ^= b; \
-	v2 ^= 0xff; \
-	SIPROUND; \
-	SIPROUND; \
-	SIPROUND; \
-	SIPROUND; \
-	return (v0 ^ v1) ^ (v2 ^ v3);
-
-u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t key)
-{
-	const u8 *end = data + len - (len % sizeof(u64));
-	const u8 left = len & (sizeof(u64) - 1);
-	u64 m;
-	PREAMBLE(len)
-	for (; data != end; data += sizeof(u64)) {
-		m = le64_to_cpup(data);
-		v3 ^= m;
-		SIPROUND;
-		SIPROUND;
-		v0 ^= m;
-	}
-#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
-	if (left)
-		b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
-						  bytemask_from_count(left)));
-#else
-	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
-	case 4: b |= le32_to_cpup(data); break;
-	case 3: b |= ((u64)end[2]) << 16;
-	case 2: b |= le16_to_cpup(data); break;
-	case 1: b |= end[0];
-	}
-#endif
-	POSTAMBLE
-}
-
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t key)
-{
-	const u8 *end = data + len - (len % sizeof(u64));
-	const u8 left = len & (sizeof(u64) - 1);
-	u64 m;
-	PREAMBLE(len)
-	for (; data != end; data += sizeof(u64)) {
-		m = get_unaligned_le64(data);
-		v3 ^= m;
-		SIPROUND;
-		SIPROUND;
-		v0 ^= m;
-	}
-#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
-	if (left)
-		b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
-						  bytemask_from_count(left)));
-#else
-	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
-	case 4: b |= get_unaligned_le32(end); break;
-	case 3: b |= ((u64)end[2]) << 16;
-	case 2: b |= get_unaligned_le16(end); break;
-	case 1: b |= end[0];
-	}
-#endif
-	POSTAMBLE
-}
-#endif
-
-/**
- * siphash_1u64 - compute 64-bit siphash PRF value of a u64
- * @first: first u64
- * @key: the siphash key
- */
-u64 siphash_1u64(const u64 first, const siphash_key_t key)
-{
-	PREAMBLE(8)
-	v3 ^= first;
-	SIPROUND;
-	SIPROUND;
-	v0 ^= first;
-	POSTAMBLE
-}
-
-/**
- * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64
- * @first: first u64
- * @second: second u64
- * @key: the siphash key
- */
-u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t key)
-{
-	PREAMBLE(16)
-	v3 ^= first;
-	SIPROUND;
-	SIPROUND;
-	v0 ^= first;
-	v3 ^= second;
-	SIPROUND;
-	SIPROUND;
-	v0 ^= second;
-	POSTAMBLE
-}
-
-/**
- * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64
- * @first: first u64
- * @second: second u64
- * @third: third u64
- * @key: the siphash key
- */
-u64 siphash_3u64(const u64 first, const u64 second, const u64 third,
-		 const siphash_key_t key)
-{
-	PREAMBLE(24)
-	v3 ^= first;
-	SIPROUND;
-	SIPROUND;
-	v0 ^= first;
-	v3 ^= second;
-	SIPROUND;
-	SIPROUND;
-	v0 ^= second;
-	v3 ^= third;
-	SIPROUND;
-	SIPROUND;
-	v0 ^= third;
-	POSTAMBLE
-}
-
-/**
- * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64
- * @first: first u64
- * @second: second u64
- * @third: third u64
- * @forth: forth u64
- * @key: the siphash key
- */
-u64 siphash_4u64(const u64 first, const u64 second, const u64 third,
-		 const u64 forth, const siphash_key_t key)
-{
-	PREAMBLE(32)
-	v3 ^= first;
-	SIPROUND;
-	SIPROUND;
-	v0 ^= first;
-	v3 ^= second;
-	SIPROUND;
-	SIPROUND;
-	v0 ^= second;
-	v3 ^= third;
-	SIPROUND;
-	SIPROUND;
-	v0 ^= third;
-	v3 ^= forth;
-	SIPROUND;
-	SIPROUND;
-	v0 ^= forth;
-	POSTAMBLE
-}
-
-#include "../selftest/siphash.h"
diff --git a/src/crypto/siphash.h b/src/crypto/siphash.h
deleted file mode 100644
index 454c7d5..0000000
--- a/src/crypto/siphash.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* Copyright (C) 2015-2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * SipHash: a fast short-input PRF
- * https://131002.net/siphash/
- *
- * This implementation is specifically for SipHash2-4.
- */
-
-#ifndef SIPHASH_H
-#define SIPHASH_H
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#define SIPHASH_ALIGNMENT 8
-typedef u64 siphash_key_t[2];
-
-u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t key);
-#endif
-
-u64 siphash_1u64(const u64 a, const siphash_key_t key);
-u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t key);
-u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
-		 const siphash_key_t key);
-u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
-		 const siphash_key_t key);
-
-static inline u64 ___siphash_aligned(const __le64 *data, size_t len, const siphash_key_t key)
-{
-	if (__builtin_constant_p(len) && len == 8)
-		return siphash_1u64(le64_to_cpu(data[0]), key);
-	if (__builtin_constant_p(len) && len == 16)
-		return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
-				    key);
-	if (__builtin_constant_p(len) && len == 24)
-		return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
-				    le64_to_cpu(data[2]), key);
-	if (__builtin_constant_p(len) && len == 32)
-		return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
-				    le64_to_cpu(data[2]), le64_to_cpu(data[3]),
-				    key);
-	return __siphash_aligned(data, len, key);
-}
-
-/**
- * siphash - compute 64-bit siphash PRF value
- * @data: buffer to hash
- * @size: size of @data
- * @key: the siphash key
- */
-static inline u64 siphash(const void *data, size_t len, const siphash_key_t key)
-{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-	if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
-		return __siphash_unaligned(data, len, key);
-#endif
-	return ___siphash_aligned(data, len, key);
-}
-
-static inline u64 siphash_2u32(const u32 a, const u32 b, const siphash_key_t key)
-{
-	return siphash_1u64((u64)b << 32 | a, key);
-}
-
-static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
-			       const siphash_key_t key)
-{
-	return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
-}
-
-static inline u64 siphash_6u32(const u32 a, const u32 b, const u32 c, const u32 d,
-			       const u32 e, const u32 f, const siphash_key_t key)
-{
-	return siphash_3u64((u64)b << 32 | a, (u64)d << 32 | c, (u64)f << 32 | e,
-			    key);
-}
-
-static inline u64 siphash_8u32(const u32 a, const u32 b, const u32 c, const u32 d,
-			       const u32 e, const u32 f, const u32 g, const u32 h,
-			       const siphash_key_t key)
-{
-	return siphash_4u64((u64)b << 32 | a, (u64)d << 32 | c, (u64)f << 32 | e,
-			    (u64)h << 32 | g, key);
-}
-
-#ifdef DEBUG
-bool siphash_selftest(void);
-#endif
-
-#endif /* SIPHASH_H */
diff --git a/src/hashtables.c b/src/hashtables.c
index 507e84a..a412265 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -3,20 +3,17 @@
 #include "hashtables.h"
 #include "peer.h"
 #include "noise.h"
-#include "crypto/siphash.h"
-
-#include <linux/hashtable.h>
 
 static inline struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table, const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
 {
 	/* siphash gives us a secure 64bit number based on a random key. Since the bits are
 	 * uniformly distributed, we can then mask off to get the bits we need. */
-	return &table->hashtable[siphash(pubkey, NOISE_PUBLIC_KEY_LEN, table->key) & (HASH_SIZE(table->hashtable) - 1)];
+	return &table->hashtable[siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key) & (HASH_SIZE(table->hashtable) - 1)];
 }
 
 void pubkey_hashtable_init(struct pubkey_hashtable *table)
 {
-	get_random_bytes(table->key, sizeof(table->key));
+	get_random_bytes(&table->key, sizeof(table->key));
 	hash_init(table->hashtable);
 	mutex_init(&table->lock);
 }
@@ -60,7 +57,7 @@ static inline struct hlist_head *index_bucket(struct index_hashtable *table, con
 
 void index_hashtable_init(struct index_hashtable *table)
 {
-	get_random_bytes(table->key, sizeof(table->key));
+	get_random_bytes(&table->key, sizeof(table->key));
 	hash_init(table->hashtable);
 	spin_lock_init(&table->lock);
 }
@@ -78,7 +75,7 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
 
 search_unused_slot:
 	/* First we try to find an unused slot, randomly, while unlocked. */
-	entry->index = (__force __le32)siphash_2u32(get_random_int(), counter++, table->key);
+	entry->index = (__force __le32)siphash_2u32(get_random_int(), counter++, &table->key);
 	hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index)
 			goto search_unused_slot; /* If it's already in use, we continue searching. */
diff --git a/src/hashtables.h b/src/hashtables.h
index 48a260e..c66780a 100644
--- a/src/hashtables.h
+++ b/src/hashtables.h
@@ -4,10 +4,10 @@
 #define HASHTABLES_H
 
 #include "messages.h"
-#include "crypto/siphash.h"
 
 #include <linux/hashtable.h>
 #include <linux/mutex.h>
+#include <linux/siphash.h>
 
 struct wireguard_peer;
 
diff --git a/src/main.c b/src/main.c
--- a/src/main.c
+++ b/src/main.c
@@ -6,7 +6,6 @@
 #include "packets.h"
 #include "crypto/chacha20poly1305.h"
 #include "crypto/blake2s.h"
-#include "crypto/siphash.h"
 #include "crypto/curve25519.h"
 
 #include <linux/version.h>
@@ -19,7 +18,7 @@ static int __init mod_init(void)
 	int ret;
 
 #ifdef DEBUG
-	if (!routing_table_selftest() || !packet_counter_selftest() || !curve25519_selftest() || !chacha20poly1305_selftest() || !blake2s_selftest() || !siphash_selftest())
+	if (!routing_table_selftest() || !packet_counter_selftest() || !curve25519_selftest() || !chacha20poly1305_selftest() || !blake2s_selftest())
 		return -ENOTRECOVERABLE;
 #endif
 	chacha20poly1305_init();
diff --git a/src/peer.h b/src/peer.h
--- a/src/peer.h
+++ b/src/peer.h
@@ -10,9 +10,7 @@
 #include <linux/netfilter.h>
 #include <linux/spinlock.h>
 #include <linux/kref.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
 #include <net/dst_cache.h>
-#endif
 
 struct wireguard_device;
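With peer.h now including <net/dst_cache.h> unconditionally (real header or compat copy), the embedding object owns the cache lifecycle. A sketch of that contract under hypothetical example_* names, assuming the embedding struct stands in for wireguard_peer:

#include <net/dst_cache.h>

struct example_endpoint {	/* hypothetical analogue of wireguard_peer */
	struct dst_cache route_cache;
};

static int example_endpoint_init(struct example_endpoint *ep)
{
	/* Allocates the zeroed per-CPU slots; may fail with -ENOMEM. */
	return dst_cache_init(&ep->route_cache, GFP_KERNEL);
}

static void example_endpoint_free(struct example_endpoint *ep)
{
	/* Releases any cached dsts; no locking is done, so this must run
	 * only once nothing else can touch the cache. */
	dst_cache_destroy(&ep->route_cache);
}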