author	Jason A. Donenfeld <Jason@zx2c4.com>	2017-03-16 15:28:16 +0100
committer	Jason A. Donenfeld <Jason@zx2c4.com>	2017-03-19 15:34:46 +0100
commit	b42320fdd8354b7fee47b52b68d730911e57e534 (patch)
tree	693f32855fe14320353b934b15c1b757bb0f62f4
parent	0cd737b5c78f6ec118e6aaea249ba22ced6cfd53 (diff)
hashtables: get_random_int is now more secure, so expose directly
On 4.11, get_random_u32 now uses either chacha or rdrand, rather than the
horrible former MD5 construction, so we feel more comfortable exposing RNG
output directly. On older kernels, we fall back to something a bit
disgusting.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
-rw-r--r--	src/compat/compat.h	19
-rw-r--r--	src/hashtables.c	4
-rw-r--r--	src/hashtables.h	1
3 files changed, 20 insertions, 4 deletions
diff --git a/src/compat/compat.h b/src/compat/compat.h
index 141cad7..4e6010f 100644
--- a/src/compat/compat.h
+++ b/src/compat/compat.h
@@ -171,6 +171,25 @@ static inline void skb_reset_tc(struct sk_buff *skb)
}
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+#include <linux/siphash.h>
+static inline u32 get_random_u32(void)
+{
+ static siphash_key_t key;
+ static u32 counter = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+ static bool has_seeded = false;
+ if (unlikely(!has_seeded)) {
+ get_random_bytes(&key, sizeof(key));
+ has_seeded = true;
+ }
+#else
+ get_random_once(&key, sizeof(key));
+#endif
+ return siphash_2u32(counter++, get_random_int(), &key);
+}
+#endif
+
/* https://lkml.org/lkml/2015/6/12/415 */
#include <linux/netdevice.h>
static inline struct net_device *netdev_pub(void *dev)
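For readers outside the kernel tree, here is a minimal userspace sketch of the
shape of the fallback above: a secret key is seeded once, and each output mixes
a monotonic counter with a weak random nonce under that key, so the weak RNG's
raw output is never exposed directly. toy_keyed_mix() and fallback_random_u32()
are hypothetical names invented for illustration; toy_keyed_mix() is NOT
SipHash, and rand()/time() stand in for get_random_int() and real seeding.

	/*
	 * Userspace sketch of the compat fallback's construction: seed a
	 * secret key once, then run (counter, weak nonce) through a keyed
	 * mix so the weak RNG's raw output never appears directly.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	static uint64_t toy_keyed_mix(uint32_t a, uint32_t b, uint64_t key)
	{
		/* splitmix64-style finalizer over (key, a, b); illustrative
		 * stand-in for siphash_2u32(), not SipHash itself. */
		uint64_t x = key ^ (((uint64_t)a << 32) | b);
		x ^= x >> 30; x *= 0xbf58476d1ce4e5b9ULL;
		x ^= x >> 27; x *= 0x94d049bb133111ebULL;
		x ^= x >> 31;
		return x;
	}

	static uint32_t fallback_random_u32(void)
	{
		static uint64_t key;
		static uint32_t counter = 0;
		static bool has_seeded = false;

		if (!has_seeded) { /* stands in for get_random_once() */
			key = ((uint64_t)rand() << 32) ^ (uint64_t)rand()
			      ^ (uint64_t)time(NULL);
			has_seeded = true;
		}
		/* counter guarantees distinct inputs even if rand() repeats;
		 * rand() stands in for get_random_int() */
		return (uint32_t)toy_keyed_mix(counter++, (uint32_t)rand(), key);
	}

	int main(void)
	{
		for (int i = 0; i < 4; i++)
			printf("0x%08x\n", fallback_random_u32());
		return 0;
	}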
diff --git a/src/hashtables.c b/src/hashtables.c
index a412265..4cb8441 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -57,7 +57,6 @@ static inline struct hlist_head *index_bucket(struct index_hashtable *table, con
void index_hashtable_init(struct index_hashtable *table)
{
- get_random_bytes(&table->key, sizeof(table->key));
hash_init(table->hashtable);
spin_lock_init(&table->lock);
}
@@ -65,7 +64,6 @@ void index_hashtable_init(struct index_hashtable *table)
__le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
{
struct index_hashtable_entry *existing_entry;
- u32 counter = get_random_int();
spin_lock(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
@@ -75,7 +73,7 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
- entry->index = (__force __le32)siphash_2u32(get_random_int(), counter++, &table->key);
+ entry->index = (__force __le32)get_random_u32();
hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
if (existing_entry->index == entry->index)
goto search_unused_slot; /* If it's already in use, we continue searching. */
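The insert path above is rejection sampling: draw a candidate index straight
from the RNG and redraw whenever that exact index is already in use. A minimal
userspace sketch of the idea follows; rand32(), index_in_use(), and
allocate_index() are hypothetical stand-ins for get_random_u32() and the
kernel hashtable walk.

	/*
	 * Toy model of the search_unused_slot loop: keep drawing random
	 * indices until one is not already taken.  The used[] array stands
	 * in for the kernel hashtable.
	 */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	static uint32_t used[64]; /* toy capacity; enough for this demo */
	static size_t n_used;

	static uint32_t rand32(void) /* stands in for get_random_u32() */
	{
		return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
	}

	static bool index_in_use(uint32_t idx)
	{
		for (size_t i = 0; i < n_used; i++)
			if (used[i] == idx)
				return true;
		return false;
	}

	static uint32_t allocate_index(void)
	{
		uint32_t candidate;

		/* With 2^32 possible indices and few entries, collisions are
		 * rare, so this loop almost always ends on the first draw. */
		do {
			candidate = rand32();
		} while (index_in_use(candidate));

		used[n_used++] = candidate;
		return candidate;
	}

	int main(void)
	{
		for (int i = 0; i < 4; i++)
			printf("index: 0x%08x\n", allocate_index());
		return 0;
	}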
diff --git a/src/hashtables.h b/src/hashtables.h
index c66780a..9fa47d5 100644
--- a/src/hashtables.h
+++ b/src/hashtables.h
@@ -24,7 +24,6 @@ struct wireguard_peer *pubkey_hashtable_lookup(struct pubkey_hashtable *table, c
struct index_hashtable {
DECLARE_HASHTABLE(hashtable, 10);
- siphash_key_t key;
spinlock_t lock;
};