Diffstat (limited to 'src/hashtables.c')
-rw-r--r--  src/hashtables.c  7
1 file changed, 3 insertions, 4 deletions
diff --git a/src/hashtables.c b/src/hashtables.c
index 965605b..2fb4322 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -61,7 +61,6 @@ static inline struct hlist_head *index_bucket(struct index_hashtable *table, con
 void index_hashtable_init(struct index_hashtable *table)
 {
 	get_random_bytes(table->key, SIPHASH24_KEY_LEN);
-	atomic64_set(&table->counter, 0);
 	hash_init(table->hashtable);
 	spin_lock_init(&table->lock);
 }
@@ -69,7 +68,7 @@ void index_hashtable_init(struct index_hashtable *table)
 __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
 {
 	struct index_hashtable_entry *existing_entry;
-	uint64_t counter;
+	uint64_t rand;
 
 	spin_lock(&table->lock);
 	hlist_del_init_rcu(&entry->index_hash);
@@ -79,8 +78,8 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
 
 search_unused_slot:
 	/* First we try to find an unused slot, randomly, while unlocked. */
-	counter = atomic64_inc_return(&table->counter);
-	entry->index = (__force __le32)siphash24((uint8_t *)&counter, sizeof(counter), table->key);
+	rand = get_random_long();
+	entry->index = (__force __le32)siphash24((uint8_t *)&rand, sizeof(rand), table->key);
 	hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index)
 			goto search_unused_slot; /* If it's already in use, we continue searching. */
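
For context, the pattern the new code implements is: draw a random value, hash it under the table's siphash24 key to get a candidate 32-bit index, walk that index's bucket, and retry with a fresh random value on a full-index collision. Below is a minimal userspace sketch of that search loop, under stated assumptions: ht_entry, random_index(), bucket_for(), HT_BITS, and insert_entry() are hypothetical stand-ins, not WireGuard code, and the sketch omits the kernel version's RCU bucket traversal and the locked re-check under table->lock.

	/*
	 * Hypothetical userspace sketch of the random-index search loop
	 * above; all names here are stand-ins for the kernel primitives.
	 */
	#include <stdint.h>
	#include <stdlib.h>

	#define HT_BITS 8                      /* toy table size for illustration */
	#define HT_SIZE (1u << HT_BITS)

	struct ht_entry {
		uint32_t index;                /* session index handed out */
		struct ht_entry *next;         /* bucket chain, like the kernel hlist */
	};

	static struct ht_entry *buckets[HT_SIZE];

	/* Stand-in for get_random_long() hashed with siphash24() under
	 * table->key: any uniform 32-bit source serves the illustration. */
	static uint32_t random_index(void)
	{
		return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
	}

	static struct ht_entry **bucket_for(uint32_t index)
	{
		return &buckets[index & (HT_SIZE - 1)];
	}

	static uint32_t insert_entry(struct ht_entry *entry)
	{
	search_unused_slot:
		/* Draw a fresh random index and probe its bucket; on a full
		 * 32-bit collision, start over with a new random value. */
		entry->index = random_index();
		for (struct ht_entry *e = *bucket_for(entry->index); e; e = e->next) {
			if (e->index == entry->index)
				goto search_unused_slot;
		}
		entry->next = *bucket_for(entry->index);
		*bucket_for(entry->index) = entry;
		return entry->index;
	}

The visible effect of the diff itself: the siphash input is now an unpredictable random value rather than a shared monotonic counter, so the unlocked search no longer touches shared mutable state, and the atomic64 counter (and its initialization in index_hashtable_init()) can be dropped entirely.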