author    Jason A. Donenfeld <Jason@zx2c4.com>    2018-09-28 03:05:22 +0200
committer Jason A. Donenfeld <Jason@zx2c4.com>    2018-10-02 03:41:49 +0200
commit    a8af31524ad8fb036b03a67823d8372e509b41d9 (patch)
tree      c89c0b1bf07deda487ac37717969743777236771 /src/hashtables.c
parent    09e85633a7c689f8e605f5ae544b83b7940a1437 (diff)
global: prefix all functions with wg_
I understand why this must be done, though I'm not so happy about having
to do it. In some places, it puts us over 80 chars and we have to break
lines up in further ugly ways. And in general, I think this makes things
harder to read. Yet another thing we must do to please upstream.

Maybe this can be replaced in the future by some kind of automatic module
namespacing logic in the linker, or even combined with LTO and aggressive
symbol stripping.

Suggested-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
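As an illustration of the convention, the renamed hash table API of this file
ends up looking roughly like the following. This is a sketch of the matching
declarations one would expect in hashtables.h; that header is not part of this
diff, so treat the exact prototypes there as an assumption. Static helpers such
as pubkey_bucket() and index_bucket() keep their short names, since they are
not visible outside the file.

/* Sketch of the wg_-prefixed public API of hashtables.c; assumed to
 * mirror hashtables.h, which is not shown in this diff. Static helpers
 * stay unprefixed.
 */
void wg_pubkey_hashtable_init(struct pubkey_hashtable *table);
void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
			     struct wireguard_peer *peer);
void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
				struct wireguard_peer *peer);
struct wireguard_peer *
wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
			   const u8 pubkey[NOISE_PUBLIC_KEY_LEN]);

void wg_index_hashtable_init(struct index_hashtable *table);
__le32 wg_index_hashtable_insert(struct index_hashtable *table,
				 struct index_hashtable_entry *entry);
bool wg_index_hashtable_replace(struct index_hashtable *table,
				struct index_hashtable_entry *old,
				struct index_hashtable_entry *new);
void wg_index_hashtable_remove(struct index_hashtable *table,
			       struct index_hashtable_entry *entry);
struct index_hashtable_entry *
wg_index_hashtable_lookup(struct index_hashtable *table,
			  const enum index_hashtable_type type_mask,
			  const __le32 index, struct wireguard_peer **peer);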
Diffstat (limited to 'src/hashtables.c')
-rw-r--r--    src/hashtables.c    40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index afe17e9..6e5518b 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -19,15 +19,15 @@ static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
(HASH_SIZE(table->hashtable) - 1)];
}
-void pubkey_hashtable_init(struct pubkey_hashtable *table)
+void wg_pubkey_hashtable_init(struct pubkey_hashtable *table)
{
get_random_bytes(&table->key, sizeof(table->key));
hash_init(table->hashtable);
mutex_init(&table->lock);
}
-void pubkey_hashtable_add(struct pubkey_hashtable *table,
- struct wireguard_peer *peer)
+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
+ struct wireguard_peer *peer)
{
mutex_lock(&table->lock);
hlist_add_head_rcu(&peer->pubkey_hash,
@@ -35,8 +35,8 @@ void pubkey_hashtable_add(struct pubkey_hashtable *table,
mutex_unlock(&table->lock);
}
-void pubkey_hashtable_remove(struct pubkey_hashtable *table,
- struct wireguard_peer *peer)
+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
+ struct wireguard_peer *peer)
{
mutex_lock(&table->lock);
hlist_del_init_rcu(&peer->pubkey_hash);
@@ -45,8 +45,8 @@ void pubkey_hashtable_remove(struct pubkey_hashtable *table,
/* Returns a strong reference to a peer */
struct wireguard_peer *
-pubkey_hashtable_lookup(struct pubkey_hashtable *table,
- const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
struct wireguard_peer *iter_peer, *peer = NULL;
@@ -59,7 +59,7 @@ pubkey_hashtable_lookup(struct pubkey_hashtable *table,
break;
}
}
- peer = peer_get_maybe_zero(peer);
+ peer = wg_peer_get_maybe_zero(peer);
rcu_read_unlock_bh();
return peer;
}
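Since the lookup above hands back a strong reference, the caller is
responsible for dropping it once it is done with the peer. A hypothetical
caller might look like the sketch below; wg_peer_put() is assumed here (the
peer refcounting helpers are renamed elsewhere in this commit and are not
part of this file).

/* Hypothetical caller: look up a peer by public key, use it briefly,
 * then release the strong reference taken by the lookup.
 * wg_peer_put() is an assumption, not shown in this diff.
 */
static void example_lookup_and_release(struct pubkey_hashtable *table,
				       const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	struct wireguard_peer *peer;

	peer = wg_pubkey_hashtable_lookup(table, pubkey);
	if (!peer)
		return;
	/* ... use peer while the reference is held ... */
	wg_peer_put(peer);
}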
@@ -74,7 +74,7 @@ static struct hlist_head *index_bucket(struct index_hashtable *table,
(HASH_SIZE(table->hashtable) - 1)];
}
-void index_hashtable_init(struct index_hashtable *table)
+void wg_index_hashtable_init(struct index_hashtable *table)
{
hash_init(table->hashtable);
spin_lock_init(&table->lock);
@@ -104,8 +104,8 @@ void index_hashtable_init(struct index_hashtable *table)
* is another thing to consider moving forward.
*/
-__le32 index_hashtable_insert(struct index_hashtable *table,
- struct index_hashtable_entry *entry)
+__le32 wg_index_hashtable_insert(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
{
struct index_hashtable_entry *existing_entry;
@@ -151,9 +151,9 @@ search_unused_slot:
return entry->index;
}
-bool index_hashtable_replace(struct index_hashtable *table,
- struct index_hashtable_entry *old,
- struct index_hashtable_entry *new)
+bool wg_index_hashtable_replace(struct index_hashtable *table,
+ struct index_hashtable_entry *old,
+ struct index_hashtable_entry *new)
{
if (unlikely(hlist_unhashed(&old->index_hash)))
return false;
@@ -172,8 +172,8 @@ bool index_hashtable_replace(struct index_hashtable *table,
return true;
}
-void index_hashtable_remove(struct index_hashtable *table,
- struct index_hashtable_entry *entry)
+void wg_index_hashtable_remove(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
{
spin_lock_bh(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
@@ -182,9 +182,9 @@ void index_hashtable_remove(struct index_hashtable *table,
/* Returns a strong reference to a entry->peer */
struct index_hashtable_entry *
-index_hashtable_lookup(struct index_hashtable *table,
- const enum index_hashtable_type type_mask,
- const __le32 index, struct wireguard_peer **peer)
+wg_index_hashtable_lookup(struct index_hashtable *table,
+ const enum index_hashtable_type type_mask,
+ const __le32 index, struct wireguard_peer **peer)
{
struct index_hashtable_entry *iter_entry, *entry = NULL;
@@ -198,7 +198,7 @@ index_hashtable_lookup(struct index_hashtable *table,
}
}
if (likely(entry)) {
- entry->peer = peer_get_maybe_zero(entry->peer);
+ entry->peer = wg_peer_get_maybe_zero(entry->peer);
if (likely(entry->peer))
*peer = entry->peer;
else