summaryrefslogtreecommitdiffhomepage
path: root/src/hashtables.c
diff options
context:
space:
mode:
authorJason A. Donenfeld <Jason@zx2c4.com>2016-12-16 06:56:39 +0100
committerJason A. Donenfeld <Jason@zx2c4.com>2016-12-16 06:56:39 +0100
commit6033cf88a7971e27ba95233cd82f36be55dfc769 (patch)
treec30a75a88076b403a8cfc44ec299ef2a884d728d /src/hashtables.c
parent13aa2b4d464c6c606c31b378ea76c23e73f23b2d (diff)
hashtables: use counter and int to ensure forward progress
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Diffstat (limited to 'src/hashtables.c')
-rw-r--r--src/hashtables.c11
1 file changed, 2 insertions, 9 deletions
diff --git a/src/hashtables.c b/src/hashtables.c
index 45c9737..534ad55 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -65,16 +65,10 @@ void index_hashtable_init(struct index_hashtable *table)
spin_lock_init(&table->lock);
}
-#if BITS_PER_LONG == 64
-#define get_random_u64() get_random_long()
-#else
-#define get_random_u64() (((u64)get_random_int() << 32) | get_random_int())
-#endif
-
__le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
{
struct index_hashtable_entry *existing_entry;
- u64 rand;
+ u32 counter = get_random_int();
spin_lock(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
@@ -84,8 +78,7 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
- rand = get_random_u64();
- entry->index = (__force __le32)siphash_1u64(rand, table->key);
+ entry->index = (__force __le32)siphash_2u32(get_random_int(), counter++, table->key);
hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
if (existing_entry->index == entry->index)
goto search_unused_slot; /* If it's already in use, we continue searching. */