author	Jason A. Donenfeld <Jason@zx2c4.com>	2017-05-24 19:55:52 +0200
committer	Jason A. Donenfeld <Jason@zx2c4.com>	2017-05-30 18:07:28 +0200
commit	f1aaa8a3a5fe783dcb9008fa722bac755751a719 (patch)
tree	135bbdf36401524058bdb2e397a24378c954ab9d /src/hashtables.c
parent	672b76a098b99102a46712b1d982f550f97806f2 (diff)
style: spaces after for loops
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
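The change is purely mechanical: iterator macros such as hlist_for_each_entry_rcu_bh expand to loops, so this tree writes them with a space before the opening parenthesis, like an ordinary for statement, rather than like a function call. A minimal userspace sketch of the convention, using a hypothetical array_for_each macro as a stand-in for the kernel list iterators:

#include <stdio.h>

/* Hypothetical iterator macro, standing in for hlist_for_each_entry_rcu_bh. */
#define array_for_each(pos, arr, len) \
	for ((pos) = (arr); (pos) < (arr) + (len); ++(pos))

int main(void)
{
	int nums[] = { 1, 2, 3, 4 };
	int *it;

	/* The convention this commit applies: a space before '(',
	 * because the macro expands to a loop, not a function call. */
	array_for_each (it, nums, 4)
		printf("%d\n", *it);
	return 0;
}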
Diffstat (limited to 'src/hashtables.c')
-rw-r--r--	src/hashtables.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index efd7111..db97f7e 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -37,7 +37,7 @@ struct wireguard_peer *pubkey_hashtable_lookup(struct pubkey_hashtable *table, c
 {
 	struct wireguard_peer *iter_peer, *peer = NULL;
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey), pubkey_hash) {
+	hlist_for_each_entry_rcu_bh (iter_peer, pubkey_bucket(table, pubkey), pubkey_hash) {
 		if (!memcmp(pubkey, iter_peer->handshake.remote_static, NOISE_PUBLIC_KEY_LEN)) {
 			peer = iter_peer;
 			break;
@@ -74,7 +74,7 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
 search_unused_slot:
 	/* First we try to find an unused slot, randomly, while unlocked. */
 	entry->index = (__force __le32)get_random_u32();
-	hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
+	hlist_for_each_entry_rcu_bh (existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index)
 			goto search_unused_slot; /* If it's already in use, we continue searching. */
 	}
@@ -82,7 +82,7 @@ search_unused_slot:
 	/* Once we've found an unused slot, we lock it, and then double-check
 	 * that nobody else stole it from us. */
 	spin_lock_bh(&table->lock);
-	hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
+	hlist_for_each_entry_rcu_bh (existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index) {
 			spin_unlock_bh(&table->lock);
 			goto search_unused_slot; /* If it was stolen, we start over. */
@@ -118,7 +118,7 @@ struct index_hashtable_entry *index_hashtable_lookup(struct index_hashtable *tab
 {
 	struct index_hashtable_entry *iter_entry, *entry = NULL;
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index), index_hash) {
+	hlist_for_each_entry_rcu_bh (iter_entry, index_bucket(table, index), index_hash) {
 		if (iter_entry->index == index && (iter_entry->type & type_mask)) {
 			entry = iter_entry;
 			break;