author | Jason A. Donenfeld <Jason@zx2c4.com> | 2016-11-29 23:28:14 +0100 |
---|---|---|
committer | Jason A. Donenfeld <Jason@zx2c4.com> | 2016-11-29 23:29:22 +0100 |
commit | 73a09df16fa6e7f303777dd5a55f3b0decc59c78 (patch) | |
tree | 789bf4070dadd0d63bca4af233b2d2242c8aa58c | |
parent | a76963728a08b28a9fb6ff03c88416d8da00419c (diff) | |
hashtable: use random number each time
Otherwise, timing information might leak details about prior index
entries. We also switch back to an explicit uint64_t, because siphash
needs an input of at least that size.
(This partially reverts 1550e9ba597946c88e3e7e3e8dcf33c13dd76e5b.
Willy's suggestion was wrong.)
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
-rw-r--r-- | src/hashtables.c | 4 |
1 file changed, 2 insertions, 2 deletions
diff --git a/src/hashtables.c b/src/hashtables.c
index f0d8769..f30f6cf 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -68,7 +68,7 @@ void index_hashtable_init(struct index_hashtable *table)
 __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
 {
 	struct index_hashtable_entry *existing_entry;
-	unsigned long rand = get_random_long();
+	uint64_t rand;
 
 	spin_lock(&table->lock);
 	hlist_del_init_rcu(&entry->index_hash);
@@ -78,7 +78,7 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
 
 search_unused_slot:
 	/* First we try to find an unused slot, randomly, while unlocked. */
-	++rand;
+	rand = get_random_long();
 	entry->index = (__force __le32)siphash24((uint8_t *)&rand, sizeof(rand), table->key);
 	hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index)
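For illustration only, below is a minimal userspace sketch of the pattern the patch moves to: draw a fresh 64-bit random value on every allocation attempt and hash it to an index, rather than incrementing the previous value. All names here (fresh_random_u64, hash_to_index, allocate_index, the used[] toy table) and the placeholder mixer are assumptions made for the sketch; the real code uses the kernel's get_random_long() and siphash24() keyed with table->key, as shown in the hunks above.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/random.h>

#define TOY_BUCKETS (1U << 13)

/* Toy occupancy map standing in for the real index hashtable. */
static bool used[TOY_BUCKETS];

/* Fresh 64-bit randomness on every call, a userspace analogue of the
 * kernel's get_random_long(). Assumes glibc >= 2.25 for getrandom(). */
static uint64_t fresh_random_u64(void)
{
	uint64_t r;

	if (getrandom(&r, sizeof(r), 0) != (ssize_t)sizeof(r))
		abort();
	return r;
}

/* Placeholder keyed mixer: the real code feeds the 64-bit value to
 * siphash24() under table->key. This is NOT SipHash, illustration only. */
static uint32_t hash_to_index(uint64_t rand, uint64_t key)
{
	uint64_t x = rand ^ key;

	x ^= x >> 33;
	x *= 0xff51afd7ed558ccdULL;
	x ^= x >> 33;
	return (uint32_t)(x & (TOY_BUCKETS - 1));
}

/* The pattern the patch adopts: draw a *fresh* random value for every
 * attempt, so a retry after a collision is independent of earlier inputs.
 * With the reverted ++rand approach, each retry was a deterministic
 * function of the previous input, which is the correlation the commit
 * message flags as a potential leak about prior index entries. */
static uint32_t allocate_index(uint64_t key)
{
	uint32_t index;

	do {
		uint64_t rand = fresh_random_u64();

		index = hash_to_index(rand, key);
	} while (used[index]);

	used[index] = true;
	return index;
}

int main(void)
{
	uint64_t key = fresh_random_u64();

	for (int i = 0; i < 4; i++)
		printf("allocated index %u\n", (unsigned)allocate_index(key));
	return 0;
}
```

The design point, as the commit message describes it, is that a collide-and-retry path observable through timing reveals nothing about previously issued indices once each attempt's hash input is drawn independently.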