summary refs log tree commitdiff homepage
diff options
context:
space:
mode:
authorJason A. Donenfeld <Jason@zx2c4.com>2016-12-16 06:56:39 +0100
committerJason A. Donenfeld <Jason@zx2c4.com>2016-12-16 06:56:39 +0100
commit6033cf88a7971e27ba95233cd82f36be55dfc769 (patch)
treec30a75a88076b403a8cfc44ec299ef2a884d728d
parent13aa2b4d464c6c606c31b378ea76c23e73f23b2d (diff)
hashtables: use counter and int to ensure forward progress
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
-rw-r--r--src/compat.h8
-rw-r--r--src/hashtables.c11
2 files changed, 2 insertions, 17 deletions
diff --git a/src/compat.h b/src/compat.h
index 5efb4e4..eb4f31b 100644
--- a/src/compat.h
+++ b/src/compat.h
@@ -25,14 +25,6 @@
#define net_dbg_ratelimited(fmt, ...) do { if (0) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
-#include <linux/security.h>
-#ifdef GRSECURITY_VERSION
-#include <linux/random.h>
-#endif
-#define get_random_long() (((u64)get_random_int() << 32) | get_random_int())
-#endif
-
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
#define RCU_LOCKDEP_WARN(cond, message) rcu_lockdep_assert(!(cond), message)
#endif
diff --git a/src/hashtables.c b/src/hashtables.c
index 45c9737..534ad55 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -65,16 +65,10 @@ void index_hashtable_init(struct index_hashtable *table)
spin_lock_init(&table->lock);
}
-#if BITS_PER_LONG == 64
-#define get_random_u64() get_random_long()
-#else
-#define get_random_u64() (((u64)get_random_int() << 32) | get_random_int())
-#endif
-
__le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashtable_entry *entry)
{
struct index_hashtable_entry *existing_entry;
- u64 rand;
+ u32 counter = get_random_int();
spin_lock(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
@@ -84,8 +78,7 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
- rand = get_random_u64();
- entry->index = (__force __le32)siphash_1u64(rand, table->key);
+ entry->index = (__force __le32)siphash_2u32(get_random_int(), counter++, table->key);
hlist_for_each_entry_rcu(existing_entry, index_bucket(table, entry->index), index_hash) {
if (existing_entry->index == entry->index)
goto search_unused_slot; /* If it's already in use, we continue searching. */