author    Jason A. Donenfeld <Jason@zx2c4.com>    2016-08-21 20:13:17 +0200
committer Jason A. Donenfeld <Jason@zx2c4.com>    2016-08-22 01:47:28 +0200
commit    023946ff3bcf0817f23467a1d900e0f7bcecd1a5 (patch)
tree      fc2942552ac760ae6db2e3507e5ecc7bca3f8585 /src/hashtables.c
parent    c5c984b37db39ecbb5e7547f040b6d2b9e84dee1 (diff)
hashtables: use rdrand() instead of counter
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Diffstat (limited to 'src/hashtables.c')
-rw-r--r--  src/hashtables.c  |  7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index 965605b..2fb4322 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -61,7 +61,6 @@ static inline struct hlist_head *index_bucket(struct index_hashtable *table, con
 void index_hashtable_init(struct index_hashtable *table)
 {
 	get_random_bytes(table->key, SIPHASH24_KEY_LEN);
-	atomic64_set(&table->counter, 0);
 	hash_init(table->hashtable);
 	spin_lock_init(&table->lock);
 }
@@ -69,7 +68,7 @@ void index_hashtable_init(struct index_hashtable *table)
 __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
 {
 	struct index_hashtable_entry *existing_entry;
-	uint64_t counter;
+	uint64_t rand;
 
 	spin_lock(&table->lock);
 	hlist_del_init_rcu(&entry->index_hash);
@@ -79,8 +78,8 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
 
 search_unused_slot:
 	/* First we try to find an unused slot, randomly, while unlocked. */
-	counter = atomic64_inc_return(&table->counter);
-	entry->index = (__force __le32)siphash24((uint8_t *)&counter, sizeof(counter), table->key);
+	rand = get_random_long();
+	entry->index = (__force __le32)siphash24((uint8_t *)&rand, sizeof(rand), table->key);
 	hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index)
 			goto search_unused_slot; /* If it's already in use, we continue searching. */
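
The change replaces the per-table incrementing counter with a fresh random value as the siphash24() input for each index assignment, so consecutive insertions no longer hash predictable inputs; the retry loop handles the rare collision. (The subject line says rdrand(), while the code calls the kernel's get_random_long(), which may be backed by the CPU's hardware RNG on x86.) Below is a minimal userspace sketch of the same retry-on-collision pattern, assuming a toy single-threaded table; mix64to32() and toy_random_long() are hypothetical stand-ins for the kernel's keyed siphash24() and get_random_long(), and neither is cryptographic.

/*
 * Sketch only: a single-slot-per-bucket table where each insertion
 * hashes a fresh random value and retries until an unused slot is found.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TABLE_BITS 6
#define TABLE_SIZE (1u << TABLE_BITS)

/* 0 means unused; otherwise the slot holds the assigned index. */
static uint32_t slots[TABLE_SIZE];

/* Stand-in for siphash24(): any decent 64->32-bit mixer suffices here. */
static uint32_t mix64to32(uint64_t x)
{
	x ^= x >> 33;
	x *= 0xff51afd7ed558ccdULL;
	x ^= x >> 33;
	return (uint32_t)x;
}

/* Stand-in for get_random_long(); rand() is NOT a CSPRNG. */
static uint64_t toy_random_long(void)
{
	return ((uint64_t)rand() << 32) | (uint64_t)rand();
}

static uint32_t insert_random_index(void)
{
	uint64_t rand_val;
	uint32_t index, bucket;

	for (;;) {
		/* Hash a fresh random value, not an incrementing counter. */
		rand_val = toy_random_long();
		index = mix64to32(rand_val);
		bucket = index & (TABLE_SIZE - 1);
		/* Reserve 0 as the "unused" sentinel; retry on collision,
		 * mirroring the goto search_unused_slot loop above. */
		if (index && !slots[bucket]) {
			slots[bucket] = index;
			return index;
		}
	}
}

int main(void)
{
	srand((unsigned)time(NULL));
	for (int i = 0; i < 8; i++)
		printf("assigned index: 0x%08x\n", insert_random_index());
	return 0;
}

In the real code the collision walk runs under rcu_read_lock() over a per-bucket hlist chain and the slot is only claimed under table->lock; the sketch collapses both into a single-threaded array lookup to show just the random-index retry logic.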