author    Jason A. Donenfeld <Jason@zx2c4.com>  2016-07-05 17:14:59 +0200
committer Jason A. Donenfeld <Jason@zx2c4.com>  2016-07-22 16:48:51 +0200
commit    d23fe2901d9877a60cd219d0d49df01aa07f5940 (patch)
tree      e68aaa32ed988ac13b504878ef55898aafcab519 /src
parent    82831962b88b03ac4ced09e62e2822dfe482bb5b (diff)
index hashtable: run random indices through siphash
If /dev/urandom is a NOBUS RNG backdoor, like the infamous Dual_EC_DRBG, then sending 4 bytes of raw RNG output over the wire directly might not be such a great idea. This mitigates that vulnerability by creating a random secret at some point before the indices are generated. Then, for each session index, we simply run SipHash24 on an incrementing counter.

This is probably overkill, since /dev/urandom is unlikely to be a backdoored RNG and already uses several rounds of SHA-1 for mixing. If the kernel RNG is backdoored, there may very well be bigger problems at play. Four bytes is also not so many bytes.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
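To illustrate the scheme, here is a minimal userspace C sketch, not part of this commit: siphash24_u64, next_index, table_key, and table_counter are illustrative names, getrandom(2) stands in for the kernel's get_random_bytes(), and the helper specializes SipHash-2-4 to the single 8-byte block (the counter) hashed in this patch. It assumes a little-endian host.

/* Minimal sketch of the index-generation scheme: a random key is
 * drawn once, then each index is SipHash-2-4 of an incrementing
 * 64-bit counter, truncated to 32 bits. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/random.h>

#define ROTL64(x, b) (((x) << (b)) | ((x) >> (64 - (b))))
#define SIPROUND do { \
	v0 += v1; v1 = ROTL64(v1, 13); v1 ^= v0; v0 = ROTL64(v0, 32); \
	v2 += v3; v3 = ROTL64(v3, 16); v3 ^= v2; \
	v0 += v3; v3 = ROTL64(v3, 21); v3 ^= v0; \
	v2 += v1; v1 = ROTL64(v1, 17); v1 ^= v2; v2 = ROTL64(v2, 32); \
} while (0)

static uint64_t siphash24_u64(uint64_t m, const uint8_t key[16])
{
	uint64_t k0, k1, v0, v1, v2, v3;
	uint64_t b = 8ULL << 56; /* final block encodes the length, 8 bytes */

	memcpy(&k0, key, 8);
	memcpy(&k1, key + 8, 8);
	v0 = 0x736f6d6570736575ULL ^ k0;
	v1 = 0x646f72616e646f6dULL ^ k1;
	v2 = 0x6c7967656e657261ULL ^ k0;
	v3 = 0x7465646279746573ULL ^ k1;
	v3 ^= m; SIPROUND; SIPROUND; v0 ^= m; /* compress the counter */
	v3 ^= b; SIPROUND; SIPROUND; v0 ^= b; /* compress the length block */
	v2 ^= 0xff;                           /* finalization: 4 rounds */
	SIPROUND; SIPROUND; SIPROUND; SIPROUND;
	return v0 ^ v1 ^ v2 ^ v3;
}

static uint8_t table_key[16];  /* random secret, generated once */
static uint64_t table_counter; /* incremented per generated index */

static uint32_t next_index(void)
{
	/* The wire sees a PRF of the counter, never raw RNG output. */
	return (uint32_t)siphash24_u64(table_counter++, table_key);
}

int main(void)
{
	if (getrandom(table_key, sizeof(table_key), 0) != sizeof(table_key))
		return 1;
	printf("%08x\n", next_index());
	return 0;
}

Truncating the 64-bit SipHash output to 32 bits is what makes collisions possible, which is why the insertion path in the diff below retries until it finds an unused slot.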
Diffstat (limited to 'src')
-rw-r--r--  src/hashtables.c  |  6 +++++-
-rw-r--r--  src/hashtables.h  |  2 ++
2 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index 8911625..db7c23b 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -60,6 +60,8 @@ static inline struct hlist_head *index_bucket(struct index_hashtable *table, const __le32 index)
void index_hashtable_init(struct index_hashtable *table)
{
+ get_random_bytes(table->key, SIPHASH24_KEY_LEN);
+ atomic64_set(&table->counter, 0);
hash_init(table->hashtable);
spin_lock_init(&table->lock);
}
@@ -67,6 +69,7 @@ void index_hashtable_init(struct index_hashtable *table)
__le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
{
struct index_hashtable_entry *existing_entry;
+ uint64_t counter;
spin_lock(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
@@ -76,7 +79,8 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
- get_random_bytes(&entry->index, sizeof(entry->index));
+ counter = atomic64_inc_return(&table->counter);
+ entry->index = (__force __le32)siphash24((uint8_t *)&counter, sizeof(counter), table->key);
hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
if (existing_entry->index == entry->index)
goto search_unused_slot; /* If it's already in use, we continue searching. */
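The goto here amounts to rejection sampling: the 64-bit SipHash output is truncated to a 32-bit index, so distinct counter values can collide, and an index already in use simply triggers another draw with the next counter value. A hypothetical userspace rendering, reusing siphash24_u64 from the sketch above (struct table, lookup(), and pick_unused_index() are illustrative names, not from this patch):

struct table {
	uint8_t key[16];
	uint64_t counter;
};

/* Illustrative lookup: returns non-NULL if the index is taken. */
extern void *lookup(struct table *t, uint32_t index);

uint32_t pick_unused_index(struct table *t)
{
	uint32_t index;

	do
		index = (uint32_t)siphash24_u64(t->counter++, t->key);
	while (lookup(t, index)); /* collision in the low 32 bits: redraw */
	return index;
}

In the kernel code the counter is an atomic64_t advanced with atomic64_inc_return, presumably so that concurrent inserters, which per the comment above search for a slot while unlocked, each hash a distinct counter value.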
diff --git a/src/hashtables.h b/src/hashtables.h
index 495a6f0..ed9506b 100644
--- a/src/hashtables.h
+++ b/src/hashtables.h
@@ -20,6 +20,8 @@ struct wireguard_peer *pubkey_hashtable_lookup(struct pubkey_hashtable *table, c
struct index_hashtable {
DECLARE_HASHTABLE(hashtable, 10);
+ uint8_t key[SIPHASH24_KEY_LEN];
+ atomic64_t counter;
spinlock_t lock;
};
struct index_hashtable_entry;