path: root/src/hashtables.c
author     Jason A. Donenfeld <Jason@zx2c4.com>   2016-12-12 06:01:52 +0100
committer  Jason A. Donenfeld <Jason@zx2c4.com>   2016-12-12 06:01:52 +0100
commit     13e1a47b6c309885f547459a223603ca91055365 (patch)
tree       35b934b318629892d9f8347aabb3ba081d5ef200 /src/hashtables.c
parent     26e3b9b54fd2cb2dd5b7ca96c762e90d0bec65aa (diff)
hashtables: ensure we get 64-bits of randomness
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
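
On 32-bit kernels BITS_PER_LONG is 32, so get_random_long() yields only 32 bits of entropy; the patch below therefore adds a get_random_u64() fallback that concatenates two get_random_int() draws. A minimal userspace sketch (not the kernel code) of that composition, with a hypothetical rand32() standing in for the kernel's get_random_int():

/*
 * Userspace sketch of the 32-bit fallback added in this patch: build a
 * 64-bit random value from two 32-bit draws by shifting the first draw
 * into the high word and OR-ing in the second. rand32() is a hypothetical
 * stand-in for the kernel's get_random_int().
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t rand32(void)
{
	/* Placeholder entropy source; the kernel would use get_random_int(). */
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

static uint64_t rand64(void)
{
	/* Same shape as the patch's macro:
	 * (((u64)get_random_int() << 32) | get_random_int()) */
	return ((uint64_t)rand32() << 32) | rand32();
}

int main(void)
{
	srand(1);
	printf("0x%016llx\n", (unsigned long long)rand64());
	return 0;
}

On 64-bit builds the macro simply aliases get_random_long(), which already returns a full 64-bit value there.
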
Diffstat (limited to 'src/hashtables.c')
-rw-r--r--   src/hashtables.c   |   8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index 7b9fb7b..b6161bb 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -65,6 +65,12 @@ void index_hashtable_init(struct index_hashtable *table)
 	spin_lock_init(&table->lock);
 }
 
+#if BITS_PER_LONG == 64
+#define get_random_u64() get_random_long()
+#else
+#define get_random_u64() (((u64)get_random_int() << 32) | get_random_int())
+#endif
+
 __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
 {
 	struct index_hashtable_entry *existing_entry;
@@ -78,7 +84,7 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
 
 search_unused_slot:
 	/* First we try to find an unused slot, randomly, while unlocked. */
-	rand = get_random_long();
+	rand = get_random_u64();
 	entry->index = (__force __le32)siphash24((u8 *)&rand, sizeof(rand), table->key);
 	hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index)
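
For the second hunk, a rough sketch of how a fresh 64-bit random value is reduced to a 32-bit index and then to a bucket. hash64() and NBUCKETS are hypothetical stand-ins for the keyed siphash24() call and for the table geometry behind index_bucket(), neither of which is shown in this diff:

#include <stdint.h>

/* Hypothetical stand-in for siphash24((u8 *)&rand, sizeof(rand), key):
 * any 64-bit -> 64-bit mixer serves for illustration. */
static uint64_t hash64(uint64_t x)
{
	x ^= x >> 33; x *= 0xff51afd7ed558ccdULL;
	x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53ULL;
	x ^= x >> 33;
	return x;
}

#define NBUCKETS 4096	/* hypothetical power-of-two table size */

static unsigned int pick_bucket(uint64_t rand)
{
	uint32_t index = (uint32_t)hash64(rand);	/* truncate to a 32-bit index, as in the patch */
	return index & (NBUCKETS - 1);			/* select a bucket from the index */
}
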