-rw-r--r--  src/allowedips.c            |  41
-rw-r--r--  src/allowedips.h            |  41
-rw-r--r--  src/cookie.c                |  41
-rw-r--r--  src/cookie.h                |  30
-rw-r--r--  src/device.c                |  73
-rw-r--r--  src/device.h                |   4
-rw-r--r--  src/hashtables.c            |  40
-rw-r--r--  src/hashtables.h            |  35
-rw-r--r--  src/main.c                  |  16
-rw-r--r--  src/netlink.c               |  68
-rw-r--r--  src/netlink.h               |   4
-rw-r--r--  src/noise.c                 |  97
-rw-r--r--  src/noise.h                 |  51
-rw-r--r--  src/peer.c                  |  63
-rw-r--r--  src/peer.h                  |  20
-rw-r--r--  src/queueing.c              |  10
-rw-r--r--  src/queueing.h              |  76
-rw-r--r--  src/ratelimiter.c           |   6
-rw-r--r--  src/ratelimiter.h           |   8
-rw-r--r--  src/receive.c               | 125
-rw-r--r--  src/selftest/allowedips.h   |  60
-rw-r--r--  src/selftest/counter.h      |   2
-rw-r--r--  src/selftest/ratelimiter.h  |  33
-rw-r--r--  src/send.c                  | 133
-rw-r--r--  src/socket.c                |  46
-rw-r--r--  src/socket.h                |  36
-rw-r--r--  src/timers.c                |  50
-rw-r--r--  src/timers.h                |  23
28 files changed, 627 insertions, 605 deletions
diff --git a/src/allowedips.c b/src/allowedips.c
index f8f026c..b7dc387 100644
--- a/src/allowedips.c
+++ b/src/allowedips.c
@@ -212,7 +212,7 @@ lookup(struct allowedips_node __rcu *root, u8 bits, const void *be_ip)
retry:
node = find_node(rcu_dereference_bh(root), bits, ip);
if (node) {
- peer = peer_get_maybe_zero(rcu_dereference_bh(node->peer));
+ peer = wg_peer_get_maybe_zero(rcu_dereference_bh(node->peer));
if (!peer)
goto retry;
}
@@ -312,13 +312,13 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *be_key,
return 0;
}
-void allowedips_init(struct allowedips *table)
+void wg_allowedips_init(struct allowedips *table)
{
table->root4 = table->root6 = NULL;
table->seq = 1;
}
-void allowedips_free(struct allowedips *table, struct mutex *lock)
+void wg_allowedips_free(struct allowedips *table, struct mutex *lock)
{
struct allowedips_node __rcu *old4 = table->root4, *old6 = table->root6;
++table->seq;
@@ -332,35 +332,36 @@ void allowedips_free(struct allowedips *table, struct mutex *lock)
lockdep_is_held(lock))->rcu, root_free_rcu);
}
-int allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
- u8 cidr, struct wireguard_peer *peer,
- struct mutex *lock)
+int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wireguard_peer *peer,
+ struct mutex *lock)
{
++table->seq;
return add(&table->root4, 32, (const u8 *)ip, cidr, peer, lock);
}
-int allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
- u8 cidr, struct wireguard_peer *peer,
- struct mutex *lock)
+int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wireguard_peer *peer,
+ struct mutex *lock)
{
++table->seq;
return add(&table->root6, 128, (const u8 *)ip, cidr, peer, lock);
}
-void allowedips_remove_by_peer(struct allowedips *table,
- struct wireguard_peer *peer, struct mutex *lock)
+void wg_allowedips_remove_by_peer(struct allowedips *table,
+ struct wireguard_peer *peer,
+ struct mutex *lock)
{
++table->seq;
walk_remove_by_peer(&table->root4, peer, lock);
walk_remove_by_peer(&table->root6, peer, lock);
}
-int allowedips_walk_by_peer(struct allowedips *table,
- struct allowedips_cursor *cursor,
- struct wireguard_peer *peer,
- int (*func)(void *ctx, const u8 *ip, u8 cidr, int family),
- void *ctx, struct mutex *lock)
+int wg_allowedips_walk_by_peer(struct allowedips *table,
+ struct allowedips_cursor *cursor,
+ struct wireguard_peer *peer,
+ int (*func)(void *ctx, const u8 *ip, u8 cidr, int family),
+ void *ctx, struct mutex *lock)
{
int ret;
@@ -380,8 +381,8 @@ int allowedips_walk_by_peer(struct allowedips *table,
}
/* Returns a strong reference to a peer */
-struct wireguard_peer *allowedips_lookup_dst(struct allowedips *table,
- struct sk_buff *skb)
+struct wireguard_peer *wg_allowedips_lookup_dst(struct allowedips *table,
+ struct sk_buff *skb)
{
if (skb->protocol == htons(ETH_P_IP))
return lookup(table->root4, 32, &ip_hdr(skb)->daddr);
@@ -391,8 +392,8 @@ struct wireguard_peer *allowedips_lookup_dst(struct allowedips *table,
}
/* Returns a strong reference to a peer */
-struct wireguard_peer *allowedips_lookup_src(struct allowedips *table,
- struct sk_buff *skb)
+struct wireguard_peer *wg_allowedips_lookup_src(struct allowedips *table,
+ struct sk_buff *skb)
{
if (skb->protocol == htons(ETH_P_IP))
return lookup(table->root4, 32, &ip_hdr(skb)->saddr);
diff --git a/src/allowedips.h b/src/allowedips.h
index ace1542..c34e216 100644
--- a/src/allowedips.h
+++ b/src/allowedips.h
@@ -26,30 +26,31 @@ struct allowedips_cursor {
bool second_half;
};
-void allowedips_init(struct allowedips *table);
-void allowedips_free(struct allowedips *table, struct mutex *mutex);
-int allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
- u8 cidr, struct wireguard_peer *peer,
- struct mutex *lock);
-int allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
- u8 cidr, struct wireguard_peer *peer,
- struct mutex *lock);
-void allowedips_remove_by_peer(struct allowedips *table,
- struct wireguard_peer *peer, struct mutex *lock);
-int allowedips_walk_by_peer(struct allowedips *table,
- struct allowedips_cursor *cursor,
- struct wireguard_peer *peer,
- int (*func)(void *ctx, const u8 *ip, u8 cidr, int family),
- void *ctx, struct mutex *lock);
+void wg_allowedips_init(struct allowedips *table);
+void wg_allowedips_free(struct allowedips *table, struct mutex *mutex);
+int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wireguard_peer *peer,
+ struct mutex *lock);
+int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wireguard_peer *peer,
+ struct mutex *lock);
+void wg_allowedips_remove_by_peer(struct allowedips *table,
+ struct wireguard_peer *peer,
+ struct mutex *lock);
+int wg_allowedips_walk_by_peer(struct allowedips *table,
+ struct allowedips_cursor *cursor,
+ struct wireguard_peer *peer,
+ int (*func)(void *ctx, const u8 *ip, u8 cidr, int family),
+ void *ctx, struct mutex *lock);
/* These return a strong reference to a peer: */
-struct wireguard_peer *allowedips_lookup_dst(struct allowedips *table,
- struct sk_buff *skb);
-struct wireguard_peer *allowedips_lookup_src(struct allowedips *table,
- struct sk_buff *skb);
+struct wireguard_peer *wg_allowedips_lookup_dst(struct allowedips *table,
+ struct sk_buff *skb);
+struct wireguard_peer *wg_allowedips_lookup_src(struct allowedips *table,
+ struct sk_buff *skb);
#ifdef DEBUG
-bool allowedips_selftest(void);
+bool wg_allowedips_selftest(void);
#endif
#endif /* _WG_ALLOWEDIPS_H */
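The renamed allowedips API keeps its contract: insertions take the device mutex for lockdep, and lookups return a strong peer reference that the caller must drop. A minimal caller sketch, not part of the patch, assuming the wg->peer_allowedips and wg->device_update_lock fields used by the callers in this patch, with device_update_lock held as in netlink.c:

#include "allowedips.h"
#include "device.h"
#include "peer.h"

/* Illustrative sketch: route 10.0.0.2/32 to a peer and resolve an outgoing
 * skb back to it. The caller is assumed to hold wg->device_update_lock,
 * as set_allowedip() in netlink.c does. */
static int example_route_and_lookup(struct wireguard_device *wg,
				    struct wireguard_peer *peer,
				    struct sk_buff *skb)
{
	struct in_addr ip = { .s_addr = htonl(0x0a000002) }; /* 10.0.0.2 */
	struct wireguard_peer *dst;
	int ret;

	ret = wg_allowedips_insert_v4(&wg->peer_allowedips, &ip, 32, peer,
				      &wg->device_update_lock);
	if (ret < 0)
		return ret;

	dst = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
	if (dst)
		wg_peer_put(dst); /* the lookup returned a strong reference */
	return 0;
}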
diff --git a/src/cookie.c b/src/cookie.c
index afd82a0..3ac05e6 100644
--- a/src/cookie.c
+++ b/src/cookie.c
@@ -16,8 +16,8 @@
#include <net/ipv6.h>
#include <crypto/algapi.h>
-void cookie_checker_init(struct cookie_checker *checker,
- struct wireguard_device *wg)
+void wg_cookie_checker_init(struct cookie_checker *checker,
+ struct wireguard_device *wg)
{
init_rwsem(&checker->secret_lock);
checker->secret_birthdate = ktime_get_boot_fast_ns();
@@ -42,7 +42,7 @@ static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN],
}
/* Must hold peer->handshake.static_identity->lock */
-void cookie_checker_precompute_device_keys(struct cookie_checker *checker)
+void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker)
{
if (likely(checker->device->static_identity.has_identity)) {
precompute_key(checker->cookie_encryption_key,
@@ -58,7 +58,7 @@ void cookie_checker_precompute_device_keys(struct cookie_checker *checker)
}
}
-void cookie_checker_precompute_peer_keys(struct wireguard_peer *peer)
+void wg_cookie_checker_precompute_peer_keys(struct wireguard_peer *peer)
{
precompute_key(peer->latest_cookie.cookie_decryption_key,
peer->handshake.remote_static, cookie_key_label);
@@ -66,7 +66,7 @@ void cookie_checker_precompute_peer_keys(struct wireguard_peer *peer)
peer->handshake.remote_static, mac1_key_label);
}
-void cookie_init(struct cookie *cookie)
+void wg_cookie_init(struct cookie *cookie)
{
memset(cookie, 0, sizeof(*cookie));
init_rwsem(&cookie->lock);
@@ -93,7 +93,8 @@ static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb,
{
struct blake2s_state state;
- if (has_expired(checker->secret_birthdate, COOKIE_SECRET_MAX_AGE)) {
+ if (wg_birthdate_has_expired(checker->secret_birthdate,
+ COOKIE_SECRET_MAX_AGE)) {
down_write(&checker->secret_lock);
checker->secret_birthdate = ktime_get_boot_fast_ns();
get_random_bytes(checker->secret, NOISE_HASH_LEN);
@@ -115,9 +116,9 @@ static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb,
up_read(&checker->secret_lock);
}
-enum cookie_mac_state cookie_validate_packet(struct cookie_checker *checker,
- struct sk_buff *skb,
- bool check_cookie)
+enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker,
+ struct sk_buff *skb,
+ bool check_cookie)
{
struct message_macs *macs = (struct message_macs *)
(skb->data + skb->len - sizeof(*macs));
@@ -143,7 +144,7 @@ enum cookie_mac_state cookie_validate_packet(struct cookie_checker *checker,
goto out;
ret = VALID_MAC_WITH_COOKIE_BUT_RATELIMITED;
- if (!ratelimiter_allow(skb, dev_net(checker->device->dev)))
+ if (!wg_ratelimiter_allow(skb, dev_net(checker->device->dev)))
goto out;
ret = VALID_MAC_WITH_COOKIE;
@@ -152,8 +153,8 @@ out:
return ret;
}
-void cookie_add_mac_to_packet(void *message, size_t len,
- struct wireguard_peer *peer)
+void wg_cookie_add_mac_to_packet(void *message, size_t len,
+ struct wireguard_peer *peer)
{
struct message_macs *macs = (struct message_macs *)
((u8 *)message + len - sizeof(*macs));
@@ -167,7 +168,7 @@ void cookie_add_mac_to_packet(void *message, size_t len,
down_read(&peer->latest_cookie.lock);
if (peer->latest_cookie.is_valid &&
- !has_expired(peer->latest_cookie.birthdate,
+ !wg_birthdate_has_expired(peer->latest_cookie.birthdate,
COOKIE_SECRET_MAX_AGE - COOKIE_SECRET_LATENCY))
compute_mac2(macs->mac2, message, len,
peer->latest_cookie.cookie);
@@ -176,9 +177,9 @@ void cookie_add_mac_to_packet(void *message, size_t len,
up_read(&peer->latest_cookie.lock);
}
-void cookie_message_create(struct message_handshake_cookie *dst,
- struct sk_buff *skb, __le32 index,
- struct cookie_checker *checker)
+void wg_cookie_message_create(struct message_handshake_cookie *dst,
+ struct sk_buff *skb, __le32 index,
+ struct cookie_checker *checker)
{
struct message_macs *macs = (struct message_macs *)
((u8 *)skb->data + skb->len - sizeof(*macs));
@@ -194,14 +195,14 @@ void cookie_message_create(struct message_handshake_cookie *dst,
checker->cookie_encryption_key);
}
-void cookie_message_consume(struct message_handshake_cookie *src,
- struct wireguard_device *wg)
+void wg_cookie_message_consume(struct message_handshake_cookie *src,
+ struct wireguard_device *wg)
{
struct wireguard_peer *peer = NULL;
u8 cookie[COOKIE_LEN];
bool ret;
- if (unlikely(!index_hashtable_lookup(&wg->index_hashtable,
+ if (unlikely(!wg_index_hashtable_lookup(&wg->index_hashtable,
INDEX_HASHTABLE_HANDSHAKE |
INDEX_HASHTABLE_KEYPAIR,
src->receiver_index, &peer)))
@@ -230,5 +231,5 @@ void cookie_message_consume(struct message_handshake_cookie *src,
wg->dev->name);
out:
- peer_put(peer);
+ wg_peer_put(peer);
}
diff --git a/src/cookie.h b/src/cookie.h
index 41122ff..409093f 100644
--- a/src/cookie.h
+++ b/src/cookie.h
@@ -38,22 +38,22 @@ enum cookie_mac_state {
VALID_MAC_WITH_COOKIE
};
-void cookie_checker_init(struct cookie_checker *checker,
- struct wireguard_device *wg);
-void cookie_checker_precompute_device_keys(struct cookie_checker *checker);
-void cookie_checker_precompute_peer_keys(struct wireguard_peer *peer);
-void cookie_init(struct cookie *cookie);
-
-enum cookie_mac_state cookie_validate_packet(struct cookie_checker *checker,
- struct sk_buff *skb,
- bool check_cookie);
-void cookie_add_mac_to_packet(void *message, size_t len,
- struct wireguard_peer *peer);
-
-void cookie_message_create(struct message_handshake_cookie *src,
+void wg_cookie_checker_init(struct cookie_checker *checker,
+ struct wireguard_device *wg);
+void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker);
+void wg_cookie_checker_precompute_peer_keys(struct wireguard_peer *peer);
+void wg_cookie_init(struct cookie *cookie);
+
+enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker,
+ struct sk_buff *skb,
+ bool check_cookie);
+void wg_cookie_add_mac_to_packet(void *message, size_t len,
+ struct wireguard_peer *peer);
+
+void wg_cookie_message_create(struct message_handshake_cookie *src,
struct sk_buff *skb, __le32 index,
struct cookie_checker *checker);
-void cookie_message_consume(struct message_handshake_cookie *src,
- struct wireguard_device *wg);
+void wg_cookie_message_consume(struct message_handshake_cookie *src,
+ struct wireguard_device *wg);
#endif /* _WG_COOKIE_H */
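Taken together, the renamed cookie helpers are consumed from the handshake receive path roughly as follows. This is a hedged sketch: VALID_MAC_BUT_NO_COOKIE comes from the part of enum cookie_mac_state not shown in this hunk, and wg_packet_send_handshake_cookie() is the reply helper declared in queueing.h further down.

#include "cookie.h"
#include "device.h"
#include "queueing.h"

/* Illustrative sketch: decide whether an incoming handshake message may be
 * processed, and ask for a cookie retry when the device is under load and
 * mac2 is missing. */
static bool example_handshake_allowed(struct wireguard_device *wg,
				      struct sk_buff *skb,
				      __le32 sender_index, bool under_load)
{
	enum cookie_mac_state state =
		wg_cookie_validate_packet(&wg->cookie_checker, skb,
					  under_load);

	if ((!under_load && state == VALID_MAC_BUT_NO_COOKIE) ||
	    (under_load && state == VALID_MAC_WITH_COOKIE))
		return true; /* MACs are acceptable; process the handshake */

	if (under_load && state == VALID_MAC_BUT_NO_COOKIE) {
		wg_packet_send_handshake_cookie(wg, skb, sender_index);
		return false; /* initiator should retry with mac2 set */
	}

	return false; /* invalid MAC or rate limited: drop */
}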
diff --git a/src/device.c b/src/device.c
index 75b817e..72b93c7 100644
--- a/src/device.c
+++ b/src/device.c
@@ -53,14 +53,14 @@ static int open(struct net_device *dev)
#endif
#endif
- ret = socket_init(wg, wg->incoming_port);
+ ret = wg_socket_init(wg, wg->incoming_port);
if (ret < 0)
return ret;
mutex_lock(&wg->device_update_lock);
list_for_each_entry (peer, &wg->peer_list, peer_list) {
- packet_send_staged_packets(peer);
+ wg_packet_send_staged_packets(peer);
if (peer->persistent_keepalive_interval)
- packet_send_keepalive(peer);
+ wg_packet_send_keepalive(peer);
}
mutex_unlock(&wg->device_update_lock);
return 0;
@@ -80,8 +80,8 @@ static int pm_notification(struct notifier_block *nb, unsigned long action,
list_for_each_entry (wg, &device_list, device_list) {
mutex_lock(&wg->device_update_lock);
list_for_each_entry (peer, &wg->peer_list, peer_list) {
- noise_handshake_clear(&peer->handshake);
- noise_keypairs_clear(&peer->keypairs);
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_keypairs_clear(&peer->keypairs);
if (peer->timers_enabled)
del_timer(&peer->timer_zero_key_material);
}
@@ -102,16 +102,16 @@ static int stop(struct net_device *dev)
mutex_lock(&wg->device_update_lock);
list_for_each_entry (peer, &wg->peer_list, peer_list) {
skb_queue_purge(&peer->staged_packet_queue);
- timers_stop(peer);
- noise_handshake_clear(&peer->handshake);
- noise_keypairs_clear(&peer->keypairs);
+ wg_timers_stop(peer);
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_keypairs_clear(&peer->keypairs);
atomic64_set(&peer->last_sent_handshake,
ktime_get_boot_fast_ns() -
(u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC);
}
mutex_unlock(&wg->device_update_lock);
skb_queue_purge(&wg->incoming_handshakes);
- socket_reinit(wg, NULL, NULL);
+ wg_socket_reinit(wg, NULL, NULL);
return 0;
}
@@ -125,13 +125,13 @@ static netdev_tx_t xmit(struct sk_buff *skb, struct net_device *dev)
u32 mtu;
int ret;
- if (unlikely(skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) {
+ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) {
ret = -EPROTONOSUPPORT;
net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
goto err;
}
- peer = allowedips_lookup_dst(&wg->peer_allowedips, skb);
+ peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
if (unlikely(!peer)) {
ret = -ENOKEY;
if (skb->protocol == htons(ETH_P_IP))
@@ -194,13 +194,13 @@ static netdev_tx_t xmit(struct sk_buff *skb, struct net_device *dev)
skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
spin_unlock_bh(&peer->staged_packet_queue.lock);
- packet_send_staged_packets(peer);
+ wg_packet_send_staged_packets(peer);
- peer_put(peer);
+ wg_peer_put(peer);
return NETDEV_TX_OK;
err_peer:
- peer_put(peer);
+ wg_peer_put(peer);
err:
++dev->stats.tx_errors;
if (skb->protocol == htons(ETH_P_IP))
@@ -227,17 +227,17 @@ static void destruct(struct net_device *dev)
rtnl_unlock();
mutex_lock(&wg->device_update_lock);
wg->incoming_port = 0;
- socket_reinit(wg, NULL, NULL);
- allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);
+ wg_socket_reinit(wg, NULL, NULL);
+ wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);
/* The final references are cleared in the below calls to destroy_workqueue. */
- peer_remove_all(wg);
+ wg_peer_remove_all(wg);
destroy_workqueue(wg->handshake_receive_wq);
destroy_workqueue(wg->handshake_send_wq);
destroy_workqueue(wg->packet_crypt_wq);
- packet_queue_free(&wg->decrypt_queue, true);
- packet_queue_free(&wg->encrypt_queue, true);
+ wg_packet_queue_free(&wg->decrypt_queue, true);
+ wg_packet_queue_free(&wg->encrypt_queue, true);
rcu_barrier_bh(); /* Wait for all the peers to be actually freed. */
- ratelimiter_uninit();
+ wg_ratelimiter_uninit();
memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
skb_queue_purge(&wg->incoming_handshakes);
free_percpu(dev->tstats);
@@ -300,10 +300,10 @@ static int newlink(struct net *src_net, struct net_device *dev,
mutex_init(&wg->socket_update_lock);
mutex_init(&wg->device_update_lock);
skb_queue_head_init(&wg->incoming_handshakes);
- pubkey_hashtable_init(&wg->peer_hashtable);
- index_hashtable_init(&wg->index_hashtable);
- allowedips_init(&wg->peer_allowedips);
- cookie_checker_init(&wg->cookie_checker, wg);
+ wg_pubkey_hashtable_init(&wg->peer_hashtable);
+ wg_index_hashtable_init(&wg->index_hashtable);
+ wg_allowedips_init(&wg->peer_allowedips);
+ wg_cookie_checker_init(&wg->cookie_checker, wg);
INIT_LIST_HEAD(&wg->peer_list);
wg->device_update_gen = 1;
@@ -311,8 +311,9 @@ static int newlink(struct net *src_net, struct net_device *dev,
if (!dev->tstats)
goto error_1;
- wg->incoming_handshakes_worker = packet_alloc_percpu_multicore_worker(
- packet_handshake_receive_worker, wg);
+ wg->incoming_handshakes_worker =
+ wg_packet_alloc_percpu_multicore_worker(
+ wg_packet_handshake_receive_worker, wg);
if (!wg->incoming_handshakes_worker)
goto error_2;
@@ -331,15 +332,15 @@ static int newlink(struct net *src_net, struct net_device *dev,
if (!wg->packet_crypt_wq)
goto error_5;
- if (packet_queue_init(&wg->encrypt_queue, packet_encrypt_worker, true,
- MAX_QUEUED_PACKETS) < 0)
+ if (wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
+ true, MAX_QUEUED_PACKETS) < 0)
goto error_6;
- if (packet_queue_init(&wg->decrypt_queue, packet_decrypt_worker, true,
- MAX_QUEUED_PACKETS) < 0)
+ if (wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
+ true, MAX_QUEUED_PACKETS) < 0)
goto error_7;
- ret = ratelimiter_init();
+ ret = wg_ratelimiter_init();
if (ret < 0)
goto error_8;
@@ -358,11 +359,11 @@ static int newlink(struct net *src_net, struct net_device *dev,
return ret;
error_9:
- ratelimiter_uninit();
+ wg_ratelimiter_uninit();
error_8:
- packet_queue_free(&wg->decrypt_queue, true);
+ wg_packet_queue_free(&wg->decrypt_queue, true);
error_7:
- packet_queue_free(&wg->encrypt_queue, true);
+ wg_packet_queue_free(&wg->encrypt_queue, true);
error_6:
destroy_workqueue(wg->packet_crypt_wq);
error_5:
@@ -410,7 +411,7 @@ static struct notifier_block netdevice_notifier = {
.notifier_call = netdevice_notification
};
-int __init device_init(void)
+int __init wg_device_init(void)
{
int ret;
@@ -439,7 +440,7 @@ error_pm:
return ret;
}
-void device_uninit(void)
+void wg_device_uninit(void)
{
rtnl_link_unregister(&link_ops);
unregister_netdevice_notifier(&netdevice_notifier);
diff --git a/src/device.h b/src/device.h
index 6589c9d..2bd1429 100644
--- a/src/device.h
+++ b/src/device.h
@@ -59,7 +59,7 @@ struct wireguard_device {
bool have_creating_net_ref;
};
-int device_init(void);
-void device_uninit(void);
+int wg_device_init(void);
+void wg_device_uninit(void);
#endif /* _WG_DEVICE_H */
diff --git a/src/hashtables.c b/src/hashtables.c
index afe17e9..6e5518b 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -19,15 +19,15 @@ static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
(HASH_SIZE(table->hashtable) - 1)];
}
-void pubkey_hashtable_init(struct pubkey_hashtable *table)
+void wg_pubkey_hashtable_init(struct pubkey_hashtable *table)
{
get_random_bytes(&table->key, sizeof(table->key));
hash_init(table->hashtable);
mutex_init(&table->lock);
}
-void pubkey_hashtable_add(struct pubkey_hashtable *table,
- struct wireguard_peer *peer)
+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
+ struct wireguard_peer *peer)
{
mutex_lock(&table->lock);
hlist_add_head_rcu(&peer->pubkey_hash,
@@ -35,8 +35,8 @@ void pubkey_hashtable_add(struct pubkey_hashtable *table,
mutex_unlock(&table->lock);
}
-void pubkey_hashtable_remove(struct pubkey_hashtable *table,
- struct wireguard_peer *peer)
+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
+ struct wireguard_peer *peer)
{
mutex_lock(&table->lock);
hlist_del_init_rcu(&peer->pubkey_hash);
@@ -45,8 +45,8 @@ void pubkey_hashtable_remove(struct pubkey_hashtable *table,
/* Returns a strong reference to a peer */
struct wireguard_peer *
-pubkey_hashtable_lookup(struct pubkey_hashtable *table,
- const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
struct wireguard_peer *iter_peer, *peer = NULL;
@@ -59,7 +59,7 @@ pubkey_hashtable_lookup(struct pubkey_hashtable *table,
break;
}
}
- peer = peer_get_maybe_zero(peer);
+ peer = wg_peer_get_maybe_zero(peer);
rcu_read_unlock_bh();
return peer;
}
@@ -74,7 +74,7 @@ static struct hlist_head *index_bucket(struct index_hashtable *table,
(HASH_SIZE(table->hashtable) - 1)];
}
-void index_hashtable_init(struct index_hashtable *table)
+void wg_index_hashtable_init(struct index_hashtable *table)
{
hash_init(table->hashtable);
spin_lock_init(&table->lock);
@@ -104,8 +104,8 @@ void index_hashtable_init(struct index_hashtable *table)
* is another thing to consider moving forward.
*/
-__le32 index_hashtable_insert(struct index_hashtable *table,
- struct index_hashtable_entry *entry)
+__le32 wg_index_hashtable_insert(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
{
struct index_hashtable_entry *existing_entry;
@@ -151,9 +151,9 @@ search_unused_slot:
return entry->index;
}
-bool index_hashtable_replace(struct index_hashtable *table,
- struct index_hashtable_entry *old,
- struct index_hashtable_entry *new)
+bool wg_index_hashtable_replace(struct index_hashtable *table,
+ struct index_hashtable_entry *old,
+ struct index_hashtable_entry *new)
{
if (unlikely(hlist_unhashed(&old->index_hash)))
return false;
@@ -172,8 +172,8 @@ bool index_hashtable_replace(struct index_hashtable *table,
return true;
}
-void index_hashtable_remove(struct index_hashtable *table,
- struct index_hashtable_entry *entry)
+void wg_index_hashtable_remove(struct index_hashtable *table,
+ struct index_hashtable_entry *entry)
{
spin_lock_bh(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
@@ -182,9 +182,9 @@ void index_hashtable_remove(struct index_hashtable *table,
/* Returns a strong reference to a entry->peer */
struct index_hashtable_entry *
-index_hashtable_lookup(struct index_hashtable *table,
- const enum index_hashtable_type type_mask,
- const __le32 index, struct wireguard_peer **peer)
+wg_index_hashtable_lookup(struct index_hashtable *table,
+ const enum index_hashtable_type type_mask,
+ const __le32 index, struct wireguard_peer **peer)
{
struct index_hashtable_entry *iter_entry, *entry = NULL;
@@ -198,7 +198,7 @@ index_hashtable_lookup(struct index_hashtable *table,
}
}
if (likely(entry)) {
- entry->peer = peer_get_maybe_zero(entry->peer);
+ entry->peer = wg_peer_get_maybe_zero(entry->peer);
if (likely(entry->peer))
*peer = entry->peer;
else
diff --git a/src/hashtables.h b/src/hashtables.h
index 263a2a5..8b855d7 100644
--- a/src/hashtables.h
+++ b/src/hashtables.h
@@ -21,14 +21,14 @@ struct pubkey_hashtable {
struct mutex lock;
};
-void pubkey_hashtable_init(struct pubkey_hashtable *table);
-void pubkey_hashtable_add(struct pubkey_hashtable *table,
- struct wireguard_peer *peer);
-void pubkey_hashtable_remove(struct pubkey_hashtable *table,
+void wg_pubkey_hashtable_init(struct pubkey_hashtable *table);
+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
struct wireguard_peer *peer);
+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
+ struct wireguard_peer *peer);
struct wireguard_peer *
-pubkey_hashtable_lookup(struct pubkey_hashtable *table,
- const u8 pubkey[NOISE_PUBLIC_KEY_LEN]);
+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN]);
struct index_hashtable {
/* TODO: move to rhashtable */
@@ -47,17 +47,18 @@ struct index_hashtable_entry {
enum index_hashtable_type type;
__le32 index;
};
-void index_hashtable_init(struct index_hashtable *table);
-__le32 index_hashtable_insert(struct index_hashtable *table,
- struct index_hashtable_entry *entry);
-bool index_hashtable_replace(struct index_hashtable *table,
- struct index_hashtable_entry *old,
- struct index_hashtable_entry *new);
-void index_hashtable_remove(struct index_hashtable *table,
- struct index_hashtable_entry *entry);
+
+void wg_index_hashtable_init(struct index_hashtable *table);
+__le32 wg_index_hashtable_insert(struct index_hashtable *table,
+ struct index_hashtable_entry *entry);
+bool wg_index_hashtable_replace(struct index_hashtable *table,
+ struct index_hashtable_entry *old,
+ struct index_hashtable_entry *new);
+void wg_index_hashtable_remove(struct index_hashtable *table,
+ struct index_hashtable_entry *entry);
struct index_hashtable_entry *
-index_hashtable_lookup(struct index_hashtable *table,
- const enum index_hashtable_type type_mask,
- const __le32 index, struct wireguard_peer **peer);
+wg_index_hashtable_lookup(struct index_hashtable *table,
+ const enum index_hashtable_type type_mask,
+ const __le32 index, struct wireguard_peer **peer);
#endif /* _WG_HASHTABLES_H */
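Both hashtable lookup helpers return strong references, so every successful wg_pubkey_hashtable_lookup() or wg_index_hashtable_lookup() must eventually be balanced by wg_peer_put(). A small sketch under that assumption, not part of the patch:

#include "device.h"
#include "hashtables.h"
#include "peer.h"

/* Illustrative sketch: check whether a public key belongs to a configured
 * peer without keeping the reference. */
static bool example_peer_known(struct wireguard_device *wg,
			       const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	struct wireguard_peer *peer =
		wg_pubkey_hashtable_lookup(&wg->peer_hashtable, pubkey);

	if (!peer)
		return false;
	wg_peer_put(peer); /* drop the strong reference the lookup took */
	return true;
}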
diff --git a/src/main.c b/src/main.c
index 2b2e5b6..fc0cc31 100644
--- a/src/main.c
+++ b/src/main.c
@@ -28,17 +28,17 @@ static int __init mod_init(void)
return ret;
#ifdef DEBUG
- if (!allowedips_selftest() || !packet_counter_selftest() ||
- !ratelimiter_selftest())
+ if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
+ !wg_ratelimiter_selftest())
return -ENOTRECOVERABLE;
#endif
- noise_init();
+ wg_noise_init();
- ret = device_init();
+ ret = wg_device_init();
if (ret < 0)
goto err_device;
- ret = genetlink_init();
+ ret = wg_genetlink_init();
if (ret < 0)
goto err_netlink;
@@ -48,15 +48,15 @@ static int __init mod_init(void)
return 0;
err_netlink:
- device_uninit();
+ wg_device_uninit();
err_device:
return ret;
}
static void __exit mod_exit(void)
{
- genetlink_uninit();
- device_uninit();
+ wg_genetlink_uninit();
+ wg_device_uninit();
pr_debug("WireGuard unloaded\n");
}
diff --git a/src/netlink.c b/src/netlink.c
index 3d9099e..56749ba 100644
--- a/src/netlink.c
+++ b/src/netlink.c
@@ -150,9 +150,9 @@ static int get_peer(struct wireguard_peer *peer, unsigned int index,
allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS);
if (!allowedips_nest)
goto err;
- if (allowedips_walk_by_peer(&peer->device->peer_allowedips, rt_cursor,
- peer, get_allowedips, &ctx,
- &peer->device->device_update_lock)) {
+ if (wg_allowedips_walk_by_peer(&peer->device->peer_allowedips,
+ rt_cursor, peer, get_allowedips, &ctx,
+ &peer->device->device_update_lock)) {
nla_nest_end(skb, allowedips_nest);
nla_nest_end(skb, peer_nest);
return -EMSGSIZE;
@@ -266,8 +266,8 @@ static int get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
out:
if (!ret && !done && next_peer_cursor)
- peer_get(next_peer_cursor);
- peer_put(last_peer_cursor);
+ wg_peer_get(next_peer_cursor);
+ wg_peer_put(last_peer_cursor);
mutex_unlock(&wg->device_update_lock);
rtnl_unlock();
@@ -299,7 +299,7 @@ static int get_device_done(struct netlink_callback *cb)
if (wg)
dev_put(wg->dev);
kfree(rt_cursor);
- peer_put(peer);
+ wg_peer_put(peer);
return 0;
}
@@ -310,12 +310,12 @@ static int set_port(struct wireguard_device *wg, u16 port)
if (wg->incoming_port == port)
return 0;
list_for_each_entry (peer, &wg->peer_list, peer_list)
- socket_clear_peer_endpoint_src(peer);
+ wg_socket_clear_peer_endpoint_src(peer);
if (!netif_running(wg->dev)) {
wg->incoming_port = port;
return 0;
}
- return socket_init(wg, port);
+ return wg_socket_init(wg, port);
}
static int set_allowedip(struct wireguard_peer *peer, struct nlattr **attrs)
@@ -332,13 +332,13 @@ static int set_allowedip(struct wireguard_peer *peer, struct nlattr **attrs)
if (family == AF_INET && cidr <= 32 &&
nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr))
- ret = allowedips_insert_v4(
+ ret = wg_allowedips_insert_v4(
&peer->device->peer_allowedips,
nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
&peer->device->device_update_lock);
else if (family == AF_INET6 && cidr <= 128 &&
nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr))
- ret = allowedips_insert_v6(
+ ret = wg_allowedips_insert_v6(
&peer->device->peer_allowedips,
nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
&peer->device->device_update_lock);
@@ -371,8 +371,8 @@ static int set_peer(struct wireguard_device *wg, struct nlattr **attrs)
goto out;
}
- peer = pubkey_hashtable_lookup(&wg->peer_hashtable,
- nla_data(attrs[WGPEER_A_PUBLIC_KEY]));
+ peer = wg_pubkey_hashtable_lookup(&wg->peer_hashtable,
+ nla_data(attrs[WGPEER_A_PUBLIC_KEY]));
if (!peer) { /* Peer doesn't exist yet. Add a new one. */
ret = -ENODEV;
if (flags & WGPEER_F_REMOVE_ME)
@@ -395,18 +395,18 @@ static int set_peer(struct wireguard_device *wg, struct nlattr **attrs)
up_read(&wg->static_identity.lock);
ret = -ENOMEM;
- peer = peer_create(wg, public_key, preshared_key);
+ peer = wg_peer_create(wg, public_key, preshared_key);
if (!peer)
goto out;
/* Take additional reference, as though we've just been
* looked up.
*/
- peer_get(peer);
+ wg_peer_get(peer);
}
ret = 0;
if (flags & WGPEER_F_REMOVE_ME) {
- peer_remove(peer);
+ wg_peer_remove(peer);
goto out;
}
@@ -428,13 +428,13 @@ static int set_peer(struct wireguard_device *wg, struct nlattr **attrs)
struct endpoint endpoint = { { { 0 } } };
memcpy(&endpoint.addr, addr, len);
- socket_set_peer_endpoint(peer, &endpoint);
+ wg_socket_set_peer_endpoint(peer, &endpoint);
}
}
if (flags & WGPEER_F_REPLACE_ALLOWEDIPS)
- allowedips_remove_by_peer(&wg->peer_allowedips, peer,
- &wg->device_update_lock);
+ wg_allowedips_remove_by_peer(&wg->peer_allowedips, peer,
+ &wg->device_update_lock);
if (attrs[WGPEER_A_ALLOWEDIPS]) {
struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1];
@@ -461,14 +461,14 @@ static int set_peer(struct wireguard_device *wg, struct nlattr **attrs)
peer->persistent_keepalive_interval = persistent_keepalive_interval;
if (send_keepalive)
- packet_send_keepalive(peer);
+ wg_packet_send_keepalive(peer);
}
if (netif_running(wg->dev))
- packet_send_staged_packets(peer);
+ wg_packet_send_staged_packets(peer);
out:
- peer_put(peer);
+ wg_peer_put(peer);
if (attrs[WGPEER_A_PRESHARED_KEY])
memzero_explicit(nla_data(attrs[WGPEER_A_PRESHARED_KEY]),
nla_len(attrs[WGPEER_A_PRESHARED_KEY]));
@@ -494,7 +494,7 @@ static int set_device(struct sk_buff *skb, struct genl_info *info)
wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]);
list_for_each_entry (peer, &wg->peer_list, peer_list)
- socket_clear_peer_endpoint_src(peer);
+ wg_socket_clear_peer_endpoint_src(peer);
}
if (info->attrs[WGDEVICE_A_LISTEN_PORT]) {
@@ -507,7 +507,7 @@ static int set_device(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[WGDEVICE_A_FLAGS] &&
nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]) &
WGDEVICE_F_REPLACE_PEERS)
- peer_remove_all(wg);
+ wg_peer_remove_all(wg);
if (info->attrs[WGDEVICE_A_PRIVATE_KEY] &&
nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]) ==
@@ -520,23 +520,23 @@ static int set_device(struct sk_buff *skb, struct genl_info *info)
* two 25519-genpub ops.
*/
if (curve25519_generate_public(public_key, private_key)) {
- peer = pubkey_hashtable_lookup(&wg->peer_hashtable,
- public_key);
+ peer = wg_pubkey_hashtable_lookup(&wg->peer_hashtable,
+ public_key);
if (peer) {
- peer_put(peer);
- peer_remove(peer);
+ wg_peer_put(peer);
+ wg_peer_remove(peer);
}
}
down_write(&wg->static_identity.lock);
- noise_set_static_identity_private_key(&wg->static_identity,
- private_key);
+ wg_noise_set_static_identity_private_key(&wg->static_identity,
+ private_key);
list_for_each_entry_safe (peer, temp, &wg->peer_list,
peer_list) {
- if (!noise_precompute_static_static(peer))
- peer_remove(peer);
+ if (!wg_noise_precompute_static_static(peer))
+ wg_peer_remove(peer);
}
- cookie_checker_precompute_device_keys(&wg->cookie_checker);
+ wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
up_write(&wg->static_identity.lock);
}
@@ -605,12 +605,12 @@ __ro_after_init = {
.netnsok = true
};
-int __init genetlink_init(void)
+int __init wg_genetlink_init(void)
{
return genl_register_family(&genl_family);
}
-void __exit genetlink_uninit(void)
+void __exit wg_genetlink_uninit(void)
{
genl_unregister_family(&genl_family);
}
diff --git a/src/netlink.h b/src/netlink.h
index 657fe1a..1dc6a67 100644
--- a/src/netlink.h
+++ b/src/netlink.h
@@ -6,7 +6,7 @@
#ifndef _WG_NETLINK_H
#define _WG_NETLINK_H
-int genetlink_init(void);
-void genetlink_uninit(void);
+int wg_genetlink_init(void);
+void wg_genetlink_uninit(void);
#endif /* _WG_NETLINK_H */
diff --git a/src/noise.c b/src/noise.c
index 7ab3890..814dc11 100644
--- a/src/noise.c
+++ b/src/noise.c
@@ -31,7 +31,7 @@ static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init;
static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init;
static atomic64_t keypair_counter = ATOMIC64_INIT(0);
-void __init noise_init(void)
+void __init wg_noise_init(void)
{
struct blake2s_state blake;
@@ -44,7 +44,7 @@ void __init noise_init(void)
}
/* Must hold peer->handshake.static_identity->lock */
-bool noise_precompute_static_static(struct wireguard_peer *peer)
+bool wg_noise_precompute_static_static(struct wireguard_peer *peer)
{
bool ret = true;
@@ -61,11 +61,11 @@ bool noise_precompute_static_static(struct wireguard_peer *peer)
return ret;
}
-bool noise_handshake_init(struct noise_handshake *handshake,
- struct noise_static_identity *static_identity,
- const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
- const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
- struct wireguard_peer *peer)
+bool wg_noise_handshake_init(struct noise_handshake *handshake,
+ struct noise_static_identity *static_identity,
+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
+ struct wireguard_peer *peer)
{
memset(handshake, 0, sizeof(*handshake));
init_rwsem(&handshake->lock);
@@ -77,7 +77,7 @@ bool noise_handshake_init(struct noise_handshake *handshake,
NOISE_SYMMETRIC_KEY_LEN);
handshake->static_identity = static_identity;
handshake->state = HANDSHAKE_ZEROED;
- return noise_precompute_static_static(peer);
+ return wg_noise_precompute_static_static(peer);
}
static void handshake_zero(struct noise_handshake *handshake)
@@ -90,15 +90,17 @@ static void handshake_zero(struct noise_handshake *handshake)
handshake->state = HANDSHAKE_ZEROED;
}
-void noise_handshake_clear(struct noise_handshake *handshake)
+void wg_noise_handshake_clear(struct noise_handshake *handshake)
{
- index_hashtable_remove(&handshake->entry.peer->device->index_hashtable,
- &handshake->entry);
+ wg_index_hashtable_remove(
+ &handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
down_write(&handshake->lock);
handshake_zero(handshake);
up_write(&handshake->lock);
- index_hashtable_remove(&handshake->entry.peer->device->index_hashtable,
- &handshake->entry);
+ wg_index_hashtable_remove(
+ &handshake->entry.peer->device->index_hashtable,
+ &handshake->entry);
}
static struct noise_keypair *keypair_create(struct wireguard_peer *peer)
@@ -127,23 +129,23 @@ static void keypair_free_kref(struct kref *kref)
keypair->entry.peer->device->dev->name,
keypair->internal_id,
keypair->entry.peer->internal_id);
- index_hashtable_remove(&keypair->entry.peer->device->index_hashtable,
- &keypair->entry);
+ wg_index_hashtable_remove(&keypair->entry.peer->device->index_hashtable,
+ &keypair->entry);
call_rcu_bh(&keypair->rcu, keypair_free_rcu);
}
-void noise_keypair_put(struct noise_keypair *keypair, bool unreference_now)
+void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now)
{
if (unlikely(!keypair))
return;
if (unlikely(unreference_now))
- index_hashtable_remove(
+ wg_index_hashtable_remove(
&keypair->entry.peer->device->index_hashtable,
&keypair->entry);
kref_put(&keypair->refcount, keypair_free_kref);
}
-struct noise_keypair *noise_keypair_get(struct noise_keypair *keypair)
+struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair)
{
RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
"Taking noise keypair reference without holding the RCU BH read lock");
@@ -152,7 +154,7 @@ struct noise_keypair *noise_keypair_get(struct noise_keypair *keypair)
return keypair;
}
-void noise_keypairs_clear(struct noise_keypairs *keypairs)
+void wg_noise_keypairs_clear(struct noise_keypairs *keypairs)
{
struct noise_keypair *old;
@@ -160,15 +162,15 @@ void noise_keypairs_clear(struct noise_keypairs *keypairs)
old = rcu_dereference_protected(keypairs->previous_keypair,
lockdep_is_held(&keypairs->keypair_update_lock));
RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
- noise_keypair_put(old, true);
+ wg_noise_keypair_put(old, true);
old = rcu_dereference_protected(keypairs->next_keypair,
lockdep_is_held(&keypairs->keypair_update_lock));
RCU_INIT_POINTER(keypairs->next_keypair, NULL);
- noise_keypair_put(old, true);
+ wg_noise_keypair_put(old, true);
old = rcu_dereference_protected(keypairs->current_keypair,
lockdep_is_held(&keypairs->keypair_update_lock));
RCU_INIT_POINTER(keypairs->current_keypair, NULL);
- noise_keypair_put(old, true);
+ wg_noise_keypair_put(old, true);
spin_unlock_bh(&keypairs->keypair_update_lock);
}
@@ -202,7 +204,7 @@ static void add_new_keypair(struct noise_keypairs *keypairs,
RCU_INIT_POINTER(keypairs->next_keypair, NULL);
rcu_assign_pointer(keypairs->previous_keypair,
next_keypair);
- noise_keypair_put(current_keypair, true);
+ wg_noise_keypair_put(current_keypair, true);
} else /* If there wasn't an existing next keypair, we replace
* the previous with the current one.
*/
@@ -211,7 +213,7 @@ static void add_new_keypair(struct noise_keypairs *keypairs,
/* At this point we can get rid of the old previous keypair, and
* set up the new keypair.
*/
- noise_keypair_put(previous_keypair, true);
+ wg_noise_keypair_put(previous_keypair, true);
rcu_assign_pointer(keypairs->current_keypair, new_keypair);
} else {
/* If we're the responder, it means we can't use the new keypair
@@ -220,15 +222,15 @@ static void add_new_keypair(struct noise_keypairs *keypairs,
* existing next one, and slide in the new next one.
*/
rcu_assign_pointer(keypairs->next_keypair, new_keypair);
- noise_keypair_put(next_keypair, true);
+ wg_noise_keypair_put(next_keypair, true);
RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
- noise_keypair_put(previous_keypair, true);
+ wg_noise_keypair_put(previous_keypair, true);
}
spin_unlock_bh(&keypairs->keypair_update_lock);
}
-bool noise_received_with_keypair(struct noise_keypairs *keypairs,
- struct noise_keypair *received_keypair)
+bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs,
+ struct noise_keypair *received_keypair)
{
struct noise_keypair *old_keypair;
bool key_is_new;
@@ -259,7 +261,7 @@ bool noise_received_with_keypair(struct noise_keypairs *keypairs,
rcu_assign_pointer(keypairs->previous_keypair,
rcu_dereference_protected(keypairs->current_keypair,
lockdep_is_held(&keypairs->keypair_update_lock)));
- noise_keypair_put(old_keypair, true);
+ wg_noise_keypair_put(old_keypair, true);
rcu_assign_pointer(keypairs->current_keypair, received_keypair);
RCU_INIT_POINTER(keypairs->next_keypair, NULL);
@@ -268,7 +270,7 @@ bool noise_received_with_keypair(struct noise_keypairs *keypairs,
}
/* Must hold static_identity->lock */
-void noise_set_static_identity_private_key(
+void wg_noise_set_static_identity_private_key(
struct noise_static_identity *static_identity,
const u8 private_key[NOISE_PUBLIC_KEY_LEN])
{
@@ -445,8 +447,9 @@ static void tai64n_now(u8 output[NOISE_TIMESTAMP_LEN])
*(__be32 *)(output + sizeof(__be64)) = cpu_to_be32(now.tv_nsec);
}
-bool noise_handshake_create_initiation(struct message_handshake_initiation *dst,
- struct noise_handshake *handshake)
+bool
+wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
+ struct noise_handshake *handshake)
{
u8 timestamp[NOISE_TIMESTAMP_LEN];
u8 key[NOISE_SYMMETRIC_KEY_LEN];
@@ -498,7 +501,7 @@ bool noise_handshake_create_initiation(struct message_handshake_initiation *dst,
message_encrypt(dst->encrypted_timestamp, timestamp,
NOISE_TIMESTAMP_LEN, key, handshake->hash);
- dst->sender_index = index_hashtable_insert(
+ dst->sender_index = wg_index_hashtable_insert(
&handshake->entry.peer->device->index_hashtable,
&handshake->entry);
@@ -513,8 +516,8 @@ out:
}
struct wireguard_peer *
-noise_handshake_consume_initiation(struct message_handshake_initiation *src,
- struct wireguard_device *wg)
+wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
+ struct wireguard_device *wg)
{
struct wireguard_peer *peer = NULL, *ret_peer = NULL;
struct noise_handshake *handshake;
@@ -545,7 +548,7 @@ noise_handshake_consume_initiation(struct message_handshake_initiation *src,
goto out;
/* Lookup which peer we're actually talking to */
- peer = pubkey_hashtable_lookup(&wg->peer_hashtable, s);
+ peer = wg_pubkey_hashtable_lookup(&wg->peer_hashtable, s);
if (!peer)
goto out;
handshake = &peer->handshake;
@@ -588,12 +591,12 @@ out:
memzero_explicit(chaining_key, NOISE_HASH_LEN);
up_read(&wg->static_identity.lock);
if (!ret_peer)
- peer_put(peer);
+ wg_peer_put(peer);
return ret_peer;
}
-bool noise_handshake_create_response(struct message_handshake_response *dst,
- struct noise_handshake *handshake)
+bool wg_noise_handshake_create_response(struct message_handshake_response *dst,
+ struct noise_handshake *handshake)
{
bool ret = false;
u8 key[NOISE_SYMMETRIC_KEY_LEN];
@@ -638,7 +641,7 @@ bool noise_handshake_create_response(struct message_handshake_response *dst,
/* {} */
message_encrypt(dst->encrypted_nothing, NULL, 0, key, handshake->hash);
- dst->sender_index = index_hashtable_insert(
+ dst->sender_index = wg_index_hashtable_insert(
&handshake->entry.peer->device->index_hashtable,
&handshake->entry);
@@ -653,8 +656,8 @@ out:
}
struct wireguard_peer *
-noise_handshake_consume_response(struct message_handshake_response *src,
- struct wireguard_device *wg)
+wg_noise_handshake_consume_response(struct message_handshake_response *src,
+ struct wireguard_device *wg)
{
struct noise_handshake *handshake;
struct wireguard_peer *peer = NULL, *ret_peer = NULL;
@@ -671,7 +674,7 @@ noise_handshake_consume_response(struct message_handshake_response *src,
if (unlikely(!wg->static_identity.has_identity))
goto out;
- handshake = (struct noise_handshake *)index_hashtable_lookup(
+ handshake = (struct noise_handshake *)wg_index_hashtable_lookup(
&wg->index_hashtable, INDEX_HASHTABLE_HANDSHAKE,
src->receiver_index, &peer);
if (unlikely(!handshake))
@@ -726,7 +729,7 @@ noise_handshake_consume_response(struct message_handshake_response *src,
goto out;
fail:
- peer_put(peer);
+ wg_peer_put(peer);
out:
memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
memzero_explicit(hash, NOISE_HASH_LEN);
@@ -737,8 +740,8 @@ out:
return ret_peer;
}
-bool noise_handshake_begin_session(struct noise_handshake *handshake,
- struct noise_keypairs *keypairs)
+bool wg_noise_handshake_begin_session(struct noise_handshake *handshake,
+ struct noise_keypairs *keypairs)
{
struct noise_keypair *new_keypair;
bool ret = false;
@@ -771,7 +774,7 @@ bool noise_handshake_begin_session(struct noise_handshake *handshake,
handshake->entry.peer->device->dev->name,
new_keypair->internal_id,
handshake->entry.peer->internal_id);
- ret = index_hashtable_replace(
+ ret = wg_index_hashtable_replace(
&handshake->entry.peer->device->index_hashtable,
&handshake->entry, &new_keypair->entry);
} else
diff --git a/src/noise.h b/src/noise.h
index 1fc25ef..7fe2c62 100644
--- a/src/noise.h
+++ b/src/noise.h
@@ -93,37 +93,38 @@ struct noise_handshake {
struct wireguard_device;
-void noise_init(void);
-bool noise_handshake_init(struct noise_handshake *handshake,
- struct noise_static_identity *static_identity,
- const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
- const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
- struct wireguard_peer *peer);
-void noise_handshake_clear(struct noise_handshake *handshake);
-void noise_keypair_put(struct noise_keypair *keypair, bool unreference_now);
-struct noise_keypair *noise_keypair_get(struct noise_keypair *keypair);
-void noise_keypairs_clear(struct noise_keypairs *keypairs);
-bool noise_received_with_keypair(struct noise_keypairs *keypairs,
- struct noise_keypair *received_keypair);
-
-void noise_set_static_identity_private_key(
+void wg_noise_init(void);
+bool wg_noise_handshake_init(struct noise_handshake *handshake,
+ struct noise_static_identity *static_identity,
+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
+ struct wireguard_peer *peer);
+void wg_noise_handshake_clear(struct noise_handshake *handshake);
+void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now);
+struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair);
+void wg_noise_keypairs_clear(struct noise_keypairs *keypairs);
+bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs,
+ struct noise_keypair *received_keypair);
+
+void wg_noise_set_static_identity_private_key(
struct noise_static_identity *static_identity,
const u8 private_key[NOISE_PUBLIC_KEY_LEN]);
-bool noise_precompute_static_static(struct wireguard_peer *peer);
+bool wg_noise_precompute_static_static(struct wireguard_peer *peer);
-bool noise_handshake_create_initiation(struct message_handshake_initiation *dst,
- struct noise_handshake *handshake);
+bool
+wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
+ struct noise_handshake *handshake);
struct wireguard_peer *
-noise_handshake_consume_initiation(struct message_handshake_initiation *src,
- struct wireguard_device *wg);
+wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
+ struct wireguard_device *wg);
-bool noise_handshake_create_response(struct message_handshake_response *dst,
- struct noise_handshake *handshake);
+bool wg_noise_handshake_create_response(struct message_handshake_response *dst,
+ struct noise_handshake *handshake);
struct wireguard_peer *
-noise_handshake_consume_response(struct message_handshake_response *src,
- struct wireguard_device *wg);
+wg_noise_handshake_consume_response(struct message_handshake_response *src,
+ struct wireguard_device *wg);
-bool noise_handshake_begin_session(struct noise_handshake *handshake,
- struct noise_keypairs *keypairs);
+bool wg_noise_handshake_begin_session(struct noise_handshake *handshake,
+ struct noise_keypairs *keypairs);
#endif /* _WG_NOISE_H */
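As the lockdep warning in wg_noise_keypair_get() notes, keypair references are taken under the RCU BH read lock and released with wg_noise_keypair_put(). A sketch of that pattern, not part of the patch, assuming the current_keypair field shown in noise.c:

#include "noise.h"

/* Illustrative sketch: grab a strong reference to the current keypair from
 * softirq context; the caller must later drop it with
 * wg_noise_keypair_put(keypair, false). */
static struct noise_keypair *
example_current_keypair(struct noise_keypairs *keypairs)
{
	struct noise_keypair *keypair;

	rcu_read_lock_bh();
	keypair = wg_noise_keypair_get(
			rcu_dereference_bh(keypairs->current_keypair));
	rcu_read_unlock_bh();
	return keypair; /* may be NULL if no session is established */
}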
diff --git a/src/peer.c b/src/peer.c
index d9ac366..c4737ae 100644
--- a/src/peer.c
+++ b/src/peer.c
@@ -18,9 +18,9 @@
static atomic64_t peer_counter = ATOMIC64_INIT(0);
struct wireguard_peer *
-peer_create(struct wireguard_device *wg,
- const u8 public_key[NOISE_PUBLIC_KEY_LEN],
- const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
+wg_peer_create(struct wireguard_device *wg,
+ const u8 public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
{
struct wireguard_peer *peer;
@@ -34,24 +34,26 @@ peer_create(struct wireguard_device *wg,
return NULL;
peer->device = wg;
- if (!noise_handshake_init(&peer->handshake, &wg->static_identity,
- public_key, preshared_key, peer))
+ if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
+ public_key, preshared_key, peer))
goto err_1;
if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
goto err_1;
- if (packet_queue_init(&peer->tx_queue, packet_tx_worker, false,
- MAX_QUEUED_PACKETS))
+ if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
+ MAX_QUEUED_PACKETS))
goto err_2;
- if (packet_queue_init(&peer->rx_queue, NULL, false, MAX_QUEUED_PACKETS))
+ if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
+ MAX_QUEUED_PACKETS))
goto err_3;
peer->internal_id = atomic64_inc_return(&peer_counter);
peer->serial_work_cpu = nr_cpumask_bits;
- cookie_init(&peer->latest_cookie);
- timers_init(peer);
- cookie_checker_precompute_peer_keys(peer);
+ wg_cookie_init(&peer->latest_cookie);
+ wg_timers_init(peer);
+ wg_cookie_checker_precompute_peer_keys(peer);
spin_lock_init(&peer->keypairs.keypair_update_lock);
- INIT_WORK(&peer->transmit_handshake_work, packet_handshake_send_worker);
+ INIT_WORK(&peer->transmit_handshake_work,
+ wg_packet_handshake_send_worker);
rwlock_init(&peer->endpoint_lock);
kref_init(&peer->refcount);
skb_queue_head_init(&peer->staged_packet_queue);
@@ -59,16 +61,17 @@ peer_create(struct wireguard_device *wg,
ktime_get_boot_fast_ns() -
(u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC);
set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
- netif_napi_add(wg->dev, &peer->napi, packet_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll,
+ NAPI_POLL_WEIGHT);
napi_enable(&peer->napi);
list_add_tail(&peer->peer_list, &wg->peer_list);
- pubkey_hashtable_add(&wg->peer_hashtable, peer);
+ wg_pubkey_hashtable_add(&wg->peer_hashtable, peer);
++wg->num_peers;
pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
return peer;
err_3:
- packet_queue_free(&peer->tx_queue, false);
+ wg_packet_queue_free(&peer->tx_queue, false);
err_2:
dst_cache_destroy(&peer->endpoint_cache);
err_1:
@@ -76,7 +79,7 @@ err_1:
return NULL;
}
-struct wireguard_peer *peer_get_maybe_zero(struct wireguard_peer *peer)
+struct wireguard_peer *wg_peer_get_maybe_zero(struct wireguard_peer *peer)
{
RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
"Taking peer reference without holding the RCU read lock");
@@ -89,7 +92,7 @@ struct wireguard_peer *peer_get_maybe_zero(struct wireguard_peer *peer)
* because peer_list, clearing handshakes, and flushing all require mutexes
* which requires sleeping, which must only be done from certain contexts.
*/
-void peer_remove(struct wireguard_peer *peer)
+void wg_peer_remove(struct wireguard_peer *peer)
{
if (unlikely(!peer))
return;
@@ -99,9 +102,9 @@ void peer_remove(struct wireguard_peer *peer)
* can't enter.
*/
list_del_init(&peer->peer_list);
- allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
- &peer->device->device_update_lock);
- pubkey_hashtable_remove(&peer->device->peer_hashtable, peer);
+ wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
+ &peer->device->device_update_lock);
+ wg_pubkey_hashtable_remove(&peer->device->peer_hashtable, peer);
/* Mark as dead, so that we don't allow jumping contexts after. */
WRITE_ONCE(peer->is_dead, true);
@@ -110,12 +113,12 @@ void peer_remove(struct wireguard_peer *peer)
/* Now that no more keypairs can be created for this peer, we destroy
* existing ones.
*/
- noise_keypairs_clear(&peer->keypairs);
+ wg_noise_keypairs_clear(&peer->keypairs);
/* Destroy all ongoing timers that were in-flight at the beginning of
* this function.
*/
- timers_stop(peer);
+ wg_timers_stop(peer);
/* The transition between packet encryption/decryption queues isn't
* guarded by is_dead, but each reference's life is strictly bounded by
@@ -141,7 +144,7 @@ void peer_remove(struct wireguard_peer *peer)
flush_workqueue(peer->device->handshake_send_wq);
--peer->device->num_peers;
- peer_put(peer);
+ wg_peer_put(peer);
}
static void rcu_release(struct rcu_head *rcu)
@@ -149,8 +152,8 @@ static void rcu_release(struct rcu_head *rcu)
struct wireguard_peer *peer =
container_of(rcu, struct wireguard_peer, rcu);
dst_cache_destroy(&peer->endpoint_cache);
- packet_queue_free(&peer->rx_queue, false);
- packet_queue_free(&peer->tx_queue, false);
+ wg_packet_queue_free(&peer->rx_queue, false);
+ wg_packet_queue_free(&peer->tx_queue, false);
kzfree(peer);
}
@@ -164,8 +167,8 @@ static void kref_release(struct kref *refcount)
/* Remove ourself from dynamic runtime lookup structures, now that the
* last reference is gone.
*/
- index_hashtable_remove(&peer->device->index_hashtable,
- &peer->handshake.entry);
+ wg_index_hashtable_remove(&peer->device->index_hashtable,
+ &peer->handshake.entry);
/* Remove any lingering packets that didn't have a chance to be
* transmitted.
*/
@@ -174,18 +177,18 @@ static void kref_release(struct kref *refcount)
call_rcu_bh(&peer->rcu, rcu_release);
}
-void peer_put(struct wireguard_peer *peer)
+void wg_peer_put(struct wireguard_peer *peer)
{
if (unlikely(!peer))
return;
kref_put(&peer->refcount, kref_release);
}
-void peer_remove_all(struct wireguard_device *wg)
+void wg_peer_remove_all(struct wireguard_device *wg)
{
struct wireguard_peer *peer, *temp;
lockdep_assert_held(&wg->device_update_lock);
list_for_each_entry_safe (peer, temp, &wg->peer_list, peer_list)
- peer_remove(peer);
+ wg_peer_remove(peer);
}
diff --git a/src/peer.h b/src/peer.h
index b95c3ed..2811b61 100644
--- a/src/peer.h
+++ b/src/peer.h
@@ -66,22 +66,22 @@ struct wireguard_peer {
};
struct wireguard_peer *
-peer_create(struct wireguard_device *wg,
- const u8 public_key[NOISE_PUBLIC_KEY_LEN],
- const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]);
+wg_peer_create(struct wireguard_device *wg,
+ const u8 public_key[NOISE_PUBLIC_KEY_LEN],
+ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]);
struct wireguard_peer *__must_check
-peer_get_maybe_zero(struct wireguard_peer *peer);
-static inline struct wireguard_peer *peer_get(struct wireguard_peer *peer)
+wg_peer_get_maybe_zero(struct wireguard_peer *peer);
+static inline struct wireguard_peer *wg_peer_get(struct wireguard_peer *peer)
{
kref_get(&peer->refcount);
return peer;
}
-void peer_put(struct wireguard_peer *peer);
-void peer_remove(struct wireguard_peer *peer);
-void peer_remove_all(struct wireguard_device *wg);
+void wg_peer_put(struct wireguard_peer *peer);
+void wg_peer_remove(struct wireguard_peer *peer);
+void wg_peer_remove_all(struct wireguard_device *wg);
-struct wireguard_peer *peer_lookup_by_index(struct wireguard_device *wg,
- u32 index);
+struct wireguard_peer *wg_peer_lookup_by_index(struct wireguard_device *wg,
+ u32 index);
#endif /* _WG_PEER_H */
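The peer reference helpers split into two cases: wg_peer_get() for a peer already validly held, and wg_peer_get_maybe_zero() for RCU-protected pointers whose refcount may have already dropped to zero, as in the allowedips lookup above. A sketch of the latter, not part of the patch:

#include "peer.h"

/* Illustrative sketch: upgrade an RCU-protected peer pointer to a strong
 * reference, or observe that the peer is being torn down. */
static struct wireguard_peer *
example_hold_rcu_peer(struct wireguard_peer __rcu *peer_ptr)
{
	struct wireguard_peer *peer;

	rcu_read_lock_bh();
	peer = wg_peer_get_maybe_zero(rcu_dereference_bh(peer_ptr));
	rcu_read_unlock_bh();
	return peer; /* NULL if the refcount already hit zero; otherwise the
		      * caller releases it with wg_peer_put() */
}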
diff --git a/src/queueing.c b/src/queueing.c
index 09eb93e..939aac9 100644
--- a/src/queueing.c
+++ b/src/queueing.c
@@ -6,7 +6,7 @@
#include "queueing.h"
struct multicore_worker __percpu *
-packet_alloc_percpu_multicore_worker(work_func_t function, void *ptr)
+wg_packet_alloc_percpu_multicore_worker(work_func_t function, void *ptr)
{
int cpu;
struct multicore_worker __percpu *worker =
@@ -22,8 +22,8 @@ packet_alloc_percpu_multicore_worker(work_func_t function, void *ptr)
return worker;
}
-int packet_queue_init(struct crypt_queue *queue, work_func_t function,
- bool multicore, unsigned int len)
+int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+ bool multicore, unsigned int len)
{
int ret;
@@ -33,7 +33,7 @@ int packet_queue_init(struct crypt_queue *queue, work_func_t function,
return ret;
if (function) {
if (multicore) {
- queue->worker = packet_alloc_percpu_multicore_worker(
+ queue->worker = wg_packet_alloc_percpu_multicore_worker(
function, queue);
if (!queue->worker)
return -ENOMEM;
@@ -43,7 +43,7 @@ int packet_queue_init(struct crypt_queue *queue, work_func_t function,
return 0;
}
-void packet_queue_free(struct crypt_queue *queue, bool multicore)
+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
{
if (multicore)
free_percpu(queue->worker);
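The queue lifecycle mirrors the device.c call sites: a multicore queue gets a per-CPU worker bound to the supplied function and must be freed with the same multicore flag it was created with. A sketch using the encrypt-queue names from this patch, not part of the patch itself:

#include "device.h"
#include "queueing.h"

/* Illustrative sketch: bring up and tear down the per-device encryption
 * queue the way newlink()/destruct() do. */
static int example_encrypt_queue_lifecycle(struct wireguard_device *wg)
{
	int ret = wg_packet_queue_init(&wg->encrypt_queue,
				       wg_packet_encrypt_worker, true,
				       MAX_QUEUED_PACKETS);
	if (ret < 0)
		return ret;

	/* ... traffic flows; later, on teardown: */
	wg_packet_queue_free(&wg->encrypt_queue, true);
	return 0;
}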
diff --git a/src/queueing.h b/src/queueing.h
index 758a57d..9a089ca 100644
--- a/src/queueing.h
+++ b/src/queueing.h
@@ -19,33 +19,33 @@ struct crypt_queue;
struct sk_buff;
/* queueing.c APIs: */
-int packet_queue_init(struct crypt_queue *queue, work_func_t function,
- bool multicore, unsigned int len);
-void packet_queue_free(struct crypt_queue *queue, bool multicore);
+int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+ bool multicore, unsigned int len);
+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
struct multicore_worker __percpu *
-packet_alloc_percpu_multicore_worker(work_func_t function, void *ptr);
+wg_packet_alloc_percpu_multicore_worker(work_func_t function, void *ptr);
/* receive.c APIs: */
-void packet_receive(struct wireguard_device *wg, struct sk_buff *skb);
-void packet_handshake_receive_worker(struct work_struct *work);
+void wg_packet_receive(struct wireguard_device *wg, struct sk_buff *skb);
+void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
-int packet_rx_poll(struct napi_struct *napi, int budget);
+int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
-void packet_decrypt_worker(struct work_struct *work);
+void wg_packet_decrypt_worker(struct work_struct *work);
/* send.c APIs: */
-void packet_send_queued_handshake_initiation(struct wireguard_peer *peer,
- bool is_retry);
-void packet_send_handshake_response(struct wireguard_peer *peer);
-void packet_send_handshake_cookie(struct wireguard_device *wg,
- struct sk_buff *initiating_skb,
- __le32 sender_index);
-void packet_send_keepalive(struct wireguard_peer *peer);
-void packet_send_staged_packets(struct wireguard_peer *peer);
+void wg_packet_send_queued_handshake_initiation(struct wireguard_peer *peer,
+ bool is_retry);
+void wg_packet_send_handshake_response(struct wireguard_peer *peer);
+void wg_packet_send_handshake_cookie(struct wireguard_device *wg,
+ struct sk_buff *initiating_skb,
+ __le32 sender_index);
+void wg_packet_send_keepalive(struct wireguard_peer *peer);
+void wg_packet_send_staged_packets(struct wireguard_peer *peer);
/* Workqueue workers: */
-void packet_handshake_send_worker(struct work_struct *work);
-void packet_tx_worker(struct work_struct *work);
-void packet_encrypt_worker(struct work_struct *work);
+void wg_packet_handshake_send_worker(struct work_struct *work);
+void wg_packet_tx_worker(struct work_struct *work);
+void wg_packet_encrypt_worker(struct work_struct *work);
enum packet_state {
PACKET_STATE_UNCRYPTED,
@@ -65,7 +65,7 @@ struct packet_cb {
#define PACKET_CB(skb) ((struct packet_cb *)skb->cb)
/* Returns either the correct skb->protocol value, or 0 if invalid. */
-static inline __be16 skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
+static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
{
if (skb_network_header(skb) >= skb->head &&
(skb_network_header(skb) + sizeof(struct iphdr)) <=
@@ -80,7 +80,7 @@ static inline __be16 skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
return 0;
}
-static inline void skb_reset(struct sk_buff *skb)
+static inline void wg_reset_packet(struct sk_buff *skb)
{
const int pfmemalloc = skb->pfmemalloc;
skb_scrub_packet(skb, true);
@@ -104,7 +104,7 @@ static inline void skb_reset(struct sk_buff *skb)
skb_reset_inner_headers(skb);
}
-static inline int cpumask_choose_online(int *stored_cpu, unsigned int id)
+static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
unsigned int cpu = *stored_cpu, cpu_index, i;
@@ -126,7 +126,7 @@ static inline int cpumask_choose_online(int *stored_cpu, unsigned int id)
* a bit slower, and it doesn't seem like this potential race actually
* introduces any performance loss, so we live with it.
*/
-static inline int cpumask_next_online(int *next)
+static inline int wg_cpumask_next_online(int *next)
{
int cpu = *next;
@@ -136,7 +136,7 @@ static inline int cpumask_next_online(int *next)
return cpu;
}
-static inline int queue_enqueue_per_device_and_peer(
+static inline int wg_queue_enqueue_per_device_and_peer(
struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
@@ -151,43 +151,43 @@ static inline int queue_enqueue_per_device_and_peer(
/* Then we queue it up in the device queue, which consumes the
* packet as soon as it can.
*/
- cpu = cpumask_next_online(next_cpu);
+ cpu = wg_cpumask_next_online(next_cpu);
if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
return -EPIPE;
queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
return 0;
}
-static inline void queue_enqueue_per_peer(struct crypt_queue *queue,
- struct sk_buff *skb,
- enum packet_state state)
+static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
+ struct sk_buff *skb,
+ enum packet_state state)
{
/* We take a reference, because as soon as we call atomic_set, the
* peer can be freed from below us.
*/
- struct wireguard_peer *peer = peer_get(PACKET_PEER(skb));
+ struct wireguard_peer *peer = wg_peer_get(PACKET_PEER(skb));
atomic_set_release(&PACKET_CB(skb)->state, state);
- queue_work_on(cpumask_choose_online(&peer->serial_work_cpu,
- peer->internal_id),
+ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
+ peer->internal_id),
peer->device->packet_crypt_wq, &queue->work);
- peer_put(peer);
+ wg_peer_put(peer);
}
-static inline void queue_enqueue_per_peer_napi(struct crypt_queue *queue,
- struct sk_buff *skb,
- enum packet_state state)
+static inline void wg_queue_enqueue_per_peer_napi(struct crypt_queue *queue,
+ struct sk_buff *skb,
+ enum packet_state state)
{
/* We take a reference, because as soon as we call atomic_set, the
* peer can be freed from below us.
*/
- struct wireguard_peer *peer = peer_get(PACKET_PEER(skb));
+ struct wireguard_peer *peer = wg_peer_get(PACKET_PEER(skb));
atomic_set_release(&PACKET_CB(skb)->state, state);
napi_schedule(&peer->napi);
- peer_put(peer);
+ wg_peer_put(peer);
}
#ifdef DEBUG
-bool packet_counter_selftest(void);
+bool wg_packet_counter_selftest(void);
#endif
#endif /* _WG_QUEUEING_H */
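For reference, a sketch (not part of the patch itself) of the enqueue pattern these renamed helpers implement, as used by wg_packet_consume_data() in the receive.c hunks below: try the shared per-device ring first and, if it is full (-EPIPE), complete the packet on the per-peer queue in the DEAD state. The wrapper name example_enqueue_rx() is hypothetical.

static void example_enqueue_rx(struct wireguard_device *wg,
			       struct wireguard_peer *peer,
			       struct sk_buff *skb)
{
	int ret = wg_queue_enqueue_per_device_and_peer(
			&wg->decrypt_queue, &peer->rx_queue, skb,
			wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);

	/* A full device ring means the packet cannot be processed now;
	 * mark it dead so the per-peer worker drops it while preserving
	 * ordering.
	 */
	if (unlikely(ret == -EPIPE))
		wg_queue_enqueue_per_peer(&peer->rx_queue, skb,
					  PACKET_STATE_DEAD);
}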
diff --git a/src/ratelimiter.c b/src/ratelimiter.c
index 6e43d6e..a5fa3a4 100644
--- a/src/ratelimiter.c
+++ b/src/ratelimiter.c
@@ -82,7 +82,7 @@ static void gc_entries(struct work_struct *work)
queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
}
-bool ratelimiter_allow(struct sk_buff *skb, struct net *net)
+bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net)
{
struct { __be64 ip; u32 net; } data = {
.net = (unsigned long)net & 0xffffffff };
@@ -152,7 +152,7 @@ err_oom:
return false;
}
-int ratelimiter_init(void)
+int wg_ratelimiter_init(void)
{
mutex_lock(&init_lock);
if (atomic64_inc_return(&refcnt) != 1)
@@ -199,7 +199,7 @@ err:
return -ENOMEM;
}
-void ratelimiter_uninit(void)
+void wg_ratelimiter_uninit(void)
{
mutex_lock(&init_lock);
if (atomic64_dec_if_positive(&refcnt))
diff --git a/src/ratelimiter.h b/src/ratelimiter.h
index 83e4203..0325d10 100644
--- a/src/ratelimiter.h
+++ b/src/ratelimiter.h
@@ -8,12 +8,12 @@
#include <linux/skbuff.h>
-int ratelimiter_init(void);
-void ratelimiter_uninit(void);
-bool ratelimiter_allow(struct sk_buff *skb, struct net *net);
+int wg_ratelimiter_init(void);
+void wg_ratelimiter_uninit(void);
+bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net);
#ifdef DEBUG
-bool ratelimiter_selftest(void);
+bool wg_ratelimiter_selftest(void);
#endif
#endif /* _WG_RATELIMITER_H */
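For reference, a sketch (not part of the patch itself) of the renamed ratelimiter lifecycle, as exercised by the selftest further down: init/uninit are reference counted and may be nested, and wg_ratelimiter_allow() is the per-packet check. The call sites example_start(), example_accept() and example_stop() are hypothetical.

static int example_start(void)
{
	/* Reference counted: each successful init must be paired with
	 * exactly one uninit.
	 */
	return wg_ratelimiter_init();
}

static bool example_accept(struct sk_buff *skb, struct net *net)
{
	/* True if this source is still within its allowed packet rate. */
	return wg_ratelimiter_allow(skb, net);
}

static void example_stop(void)
{
	wg_ratelimiter_uninit();
}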
diff --git a/src/receive.c b/src/receive.c
index 6a27bdd..8872a6b 100644
--- a/src/receive.c
+++ b/src/receive.c
@@ -57,7 +57,7 @@ static int skb_prepare_header(struct sk_buff *skb, struct wireguard_device *wg)
size_t data_offset, data_len, header_len;
struct udphdr *udp;
- if (unlikely(skb_examine_untrusted_ip_hdr(skb) != skb->protocol ||
+ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol ||
skb_transport_header(skb) < skb->head ||
(skb_transport_header(skb) + sizeof(struct udphdr)) >
skb_tail_pointer(skb)))
@@ -112,7 +112,7 @@ static void receive_handshake_packet(struct wireguard_device *wg,
if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) {
net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n",
wg->dev->name, skb);
- cookie_message_consume(
+ wg_cookie_message_consume(
(struct message_handshake_cookie *)skb->data, wg);
return;
}
@@ -122,9 +122,9 @@ static void receive_handshake_packet(struct wireguard_device *wg,
if (under_load)
last_under_load = ktime_get_boot_fast_ns();
else if (last_under_load)
- under_load = !has_expired(last_under_load, 1);
- mac_state = cookie_validate_packet(&wg->cookie_checker, skb,
- under_load);
+ under_load = !wg_birthdate_has_expired(last_under_load, 1);
+ mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
+ under_load);
if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) ||
(!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE))
packet_needs_cookie = false;
@@ -142,21 +142,21 @@ static void receive_handshake_packet(struct wireguard_device *wg,
(struct message_handshake_initiation *)skb->data;
if (packet_needs_cookie) {
- packet_send_handshake_cookie(wg, skb,
- message->sender_index);
+ wg_packet_send_handshake_cookie(wg, skb,
+ message->sender_index);
return;
}
- peer = noise_handshake_consume_initiation(message, wg);
+ peer = wg_noise_handshake_consume_initiation(message, wg);
if (unlikely(!peer)) {
net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n",
wg->dev->name, skb);
return;
}
- socket_set_peer_endpoint_from_skb(peer, skb);
+ wg_socket_set_peer_endpoint_from_skb(peer, skb);
net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n",
wg->dev->name, peer->internal_id,
&peer->endpoint.addr);
- packet_send_handshake_response(peer);
+ wg_packet_send_handshake_response(peer);
break;
}
case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): {
@@ -164,31 +164,31 @@ static void receive_handshake_packet(struct wireguard_device *wg,
(struct message_handshake_response *)skb->data;
if (packet_needs_cookie) {
- packet_send_handshake_cookie(wg, skb,
- message->sender_index);
+ wg_packet_send_handshake_cookie(wg, skb,
+ message->sender_index);
return;
}
- peer = noise_handshake_consume_response(message, wg);
+ peer = wg_noise_handshake_consume_response(message, wg);
if (unlikely(!peer)) {
net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n",
wg->dev->name, skb);
return;
}
- socket_set_peer_endpoint_from_skb(peer, skb);
+ wg_socket_set_peer_endpoint_from_skb(peer, skb);
net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n",
wg->dev->name, peer->internal_id,
&peer->endpoint.addr);
- if (noise_handshake_begin_session(&peer->handshake,
+ if (wg_noise_handshake_begin_session(&peer->handshake,
&peer->keypairs)) {
- timers_session_derived(peer);
- timers_handshake_complete(peer);
+ wg_timers_session_derived(peer);
+ wg_timers_handshake_complete(peer);
/* Calling this function will either send any existing
* packets in the queue and not send a keepalive, which
* is the best case, or, if there's nothing in the
* queue, it will send a keepalive, in order to give
* immediate confirmation of the session.
*/
- packet_send_keepalive(peer);
+ wg_packet_send_keepalive(peer);
}
break;
}
@@ -203,12 +203,12 @@ static void receive_handshake_packet(struct wireguard_device *wg,
rx_stats(peer, skb->len);
local_bh_enable();
- timers_any_authenticated_packet_received(peer);
- timers_any_authenticated_packet_traversal(peer);
- peer_put(peer);
+ wg_timers_any_authenticated_packet_received(peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_peer_put(peer);
}
-void packet_handshake_receive_worker(struct work_struct *work)
+void wg_packet_handshake_receive_worker(struct work_struct *work)
{
struct wireguard_device *wg =
container_of(work, struct multicore_worker, work)->ptr;
@@ -233,19 +233,19 @@ static void keep_key_fresh(struct wireguard_peer *peer)
keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
if (likely(keypair && keypair->sending.is_valid) &&
keypair->i_am_the_initiator &&
- unlikely(has_expired(keypair->sending.birthdate,
+ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT)))
send = true;
rcu_read_unlock_bh();
if (send) {
peer->sent_lastminute_handshake = true;
- packet_send_queued_handshake_initiation(peer, false);
+ wg_packet_send_queued_handshake_initiation(peer, false);
}
}
-static bool skb_decrypt(struct sk_buff *skb, struct noise_symmetric_key *key,
- simd_context_t *simd_context)
+static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key,
+ simd_context_t *simd_context)
{
struct scatterlist sg[MAX_SKB_FRAGS + 8];
struct sk_buff *trailer;
@@ -256,8 +256,8 @@ static bool skb_decrypt(struct sk_buff *skb, struct noise_symmetric_key *key,
return false;
if (unlikely(!key->is_valid ||
- has_expired(key->birthdate, REJECT_AFTER_TIME) ||
- key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) {
+ wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) ||
+ key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) {
key->is_valid = false;
return false;
}
@@ -345,18 +345,18 @@ static void packet_consume_data_done(struct wireguard_peer *peer,
struct wireguard_peer *routed_peer;
unsigned int len, len_before_trim;
- socket_set_peer_endpoint(peer, endpoint);
+ wg_socket_set_peer_endpoint(peer, endpoint);
- if (unlikely(noise_received_with_keypair(&peer->keypairs,
- PACKET_CB(skb)->keypair))) {
- timers_handshake_complete(peer);
- packet_send_staged_packets(peer);
+ if (unlikely(wg_noise_received_with_keypair(&peer->keypairs,
+ PACKET_CB(skb)->keypair))) {
+ wg_timers_handshake_complete(peer);
+ wg_packet_send_staged_packets(peer);
}
keep_key_fresh(peer);
- timers_any_authenticated_packet_received(peer);
- timers_any_authenticated_packet_traversal(peer);
+ wg_timers_any_authenticated_packet_received(peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
/* A packet with length 0 is a keepalive packet */
if (unlikely(!skb->len)) {
@@ -367,7 +367,7 @@ static void packet_consume_data_done(struct wireguard_peer *peer,
goto packet_processed;
}
- timers_data_received(peer);
+ wg_timers_data_received(peer);
if (unlikely(skb_network_header(skb) < skb->head))
goto dishonest_packet_size;
@@ -379,7 +379,7 @@ static void packet_consume_data_done(struct wireguard_peer *peer,
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->protocol = skb_examine_untrusted_ip_hdr(skb);
+ skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb);
if (skb->protocol == htons(ETH_P_IP)) {
len = ntohs(ip_hdr(skb)->tot_len);
if (unlikely(len < sizeof(struct iphdr)))
@@ -400,8 +400,9 @@ static void packet_consume_data_done(struct wireguard_peer *peer,
if (unlikely(pskb_trim(skb, len)))
goto packet_processed;
- routed_peer = allowedips_lookup_src(&peer->device->peer_allowedips, skb);
- peer_put(routed_peer); /* We don't need the extra reference. */
+ routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips,
+ skb);
+ wg_peer_put(routed_peer); /* We don't need the extra reference. */
if (unlikely(routed_peer != peer))
goto dishonest_packet_peer;
@@ -438,7 +439,7 @@ packet_processed:
dev_kfree_skb(skb);
}
-int packet_rx_poll(struct napi_struct *napi, int budget)
+int wg_packet_rx_poll(struct napi_struct *napi, int budget)
{
struct wireguard_peer *peer =
container_of(napi, struct wireguard_peer, napi);
@@ -473,16 +474,16 @@ int packet_rx_poll(struct napi_struct *napi, int budget)
goto next;
}
- if (unlikely(socket_endpoint_from_skb(&endpoint, skb)))
+ if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb)))
goto next;
- skb_reset(skb);
+ wg_reset_packet(skb);
packet_consume_data_done(peer, skb, &endpoint);
free = false;
next:
- noise_keypair_put(keypair, false);
- peer_put(peer);
+ wg_noise_keypair_put(keypair, false);
+ wg_peer_put(peer);
if (unlikely(free))
dev_kfree_skb(skb);
@@ -496,7 +497,7 @@ int packet_rx_poll(struct napi_struct *napi, int budget)
return work_done;
}
-void packet_decrypt_worker(struct work_struct *work)
+void wg_packet_decrypt_worker(struct work_struct *work)
{
struct crypt_queue *queue =
container_of(work, struct multicore_worker, work)->ptr;
@@ -505,20 +506,20 @@ void packet_decrypt_worker(struct work_struct *work)
simd_get(&simd_context);
while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
- enum packet_state state = likely(skb_decrypt(skb,
+ enum packet_state state = likely(decrypt_packet(skb,
&PACKET_CB(skb)->keypair->receiving,
&simd_context)) ?
PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
- queue_enqueue_per_peer_napi(&PACKET_PEER(skb)->rx_queue, skb,
- state);
+ wg_queue_enqueue_per_peer_napi(&PACKET_PEER(skb)->rx_queue, skb,
+ state);
simd_relax(&simd_context);
}
simd_put(&simd_context);
}
-static void packet_consume_data(struct wireguard_device *wg,
- struct sk_buff *skb)
+static void wg_packet_consume_data(struct wireguard_device *wg,
+ struct sk_buff *skb)
{
__le32 idx = ((struct message_data *)skb->data)->key_idx;
struct wireguard_peer *peer = NULL;
@@ -526,34 +527,34 @@ static void packet_consume_data(struct wireguard_device *wg,
rcu_read_lock_bh();
PACKET_CB(skb)->keypair =
- (struct noise_keypair *)index_hashtable_lookup(
+ (struct noise_keypair *)wg_index_hashtable_lookup(
&wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx,
&peer);
- if (unlikely(!noise_keypair_get(PACKET_CB(skb)->keypair)))
+ if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair)))
goto err_keypair;
if (unlikely(peer->is_dead))
goto err;
- ret = queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
- &peer->rx_queue, skb,
- wg->packet_crypt_wq,
- &wg->decrypt_queue.last_cpu);
+ ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
+ &peer->rx_queue, skb,
+ wg->packet_crypt_wq,
+ &wg->decrypt_queue.last_cpu);
if (unlikely(ret == -EPIPE))
- queue_enqueue_per_peer(&peer->rx_queue, skb, PACKET_STATE_DEAD);
+ wg_queue_enqueue_per_peer(&peer->rx_queue, skb, PACKET_STATE_DEAD);
if (likely(!ret || ret == -EPIPE)) {
rcu_read_unlock_bh();
return;
}
err:
- noise_keypair_put(PACKET_CB(skb)->keypair, false);
+ wg_noise_keypair_put(PACKET_CB(skb)->keypair, false);
err_keypair:
rcu_read_unlock_bh();
- peer_put(peer);
+ wg_peer_put(peer);
dev_kfree_skb(skb);
}
-void packet_receive(struct wireguard_device *wg, struct sk_buff *skb)
+void wg_packet_receive(struct wireguard_device *wg, struct sk_buff *skb)
{
if (unlikely(skb_prepare_header(skb, wg) < 0))
goto err;
@@ -574,14 +575,14 @@ void packet_receive(struct wireguard_device *wg, struct sk_buff *skb)
/* Queues up a call to packet_process_queued_handshake_
* packets(skb):
*/
- cpu = cpumask_next_online(&wg->incoming_handshake_cpu);
+ cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu);
queue_work_on(cpu, wg->handshake_receive_wq,
&per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work);
break;
}
case cpu_to_le32(MESSAGE_DATA):
PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
- packet_consume_data(wg, skb);
+ wg_packet_consume_data(wg, skb);
break;
default:
net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n",
diff --git a/src/selftest/allowedips.h b/src/selftest/allowedips.h
index ca5f256..95f247e 100644
--- a/src/selftest/allowedips.h
+++ b/src/selftest/allowedips.h
@@ -176,7 +176,8 @@ static __init int
horrible_allowedips_insert_v4(struct horrible_allowedips *table,
struct in_addr *ip, uint8_t cidr, void *value)
{
- struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
+ GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
@@ -192,7 +193,8 @@ static __init int
horrible_allowedips_insert_v6(struct horrible_allowedips *table,
struct in6_addr *ip, uint8_t cidr, void *value)
{
- struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
+ GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
@@ -251,7 +253,7 @@ static __init bool randomized_test(void)
mutex_init(&mutex);
- allowedips_init(&t);
+ wg_allowedips_init(&t);
horrible_allowedips_init(&h);
peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL);
@@ -274,8 +276,8 @@ static __init bool randomized_test(void)
prandom_bytes(ip, 4);
cidr = prandom_u32_max(32) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
- if (allowedips_insert_v4(&t, (struct in_addr *)ip, cidr, peer,
- &mutex) < 0) {
+ if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
+ peer, &mutex) < 0) {
pr_info("allowedips random self-test: out of memory\n");
goto free;
}
@@ -300,8 +302,9 @@ static __init bool randomized_test(void)
prandom_u32_max(256));
cidr = prandom_u32_max(32) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
- if (allowedips_insert_v4(&t, (struct in_addr *)mutated,
- cidr, peer, &mutex) < 0) {
+ if (wg_allowedips_insert_v4(&t,
+ (struct in_addr *)mutated,
+ cidr, peer, &mutex) < 0) {
pr_info("allowedips random self-test: out of memory\n");
goto free;
}
@@ -317,8 +320,8 @@ static __init bool randomized_test(void)
prandom_bytes(ip, 16);
cidr = prandom_u32_max(128) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
- if (allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr, peer,
- &mutex) < 0) {
+ if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
+ peer, &mutex) < 0) {
pr_info("allowedips random self-test: out of memory\n");
goto free;
}
@@ -343,8 +346,9 @@ static __init bool randomized_test(void)
prandom_u32_max(256));
cidr = prandom_u32_max(128) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
- if (allowedips_insert_v6(&t, (struct in6_addr *)mutated,
- cidr, peer, &mutex) < 0) {
+ if (wg_allowedips_insert_v6(&t,
+ (struct in6_addr *)mutated,
+ cidr, peer, &mutex) < 0) {
pr_info("allowedips random self-test: out of memory\n");
goto free;
}
@@ -385,7 +389,7 @@ static __init bool randomized_test(void)
free:
mutex_lock(&mutex);
- allowedips_free(&t, &mutex);
+ wg_allowedips_free(&t, &mutex);
mutex_unlock(&mutex);
horrible_allowedips_free(&h);
if (peers) {
@@ -462,9 +466,9 @@ static __init int walk_callback(void *ctx, const u8 *ip, u8 cidr, int family)
kref_init(&name->refcount); \
} while (0)
-#define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \
- allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
- cidr, mem, &mutex)
+#define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \
+ wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
+ cidr, mem, &mutex)
#define maybe_fail() do { \
++i; \
@@ -491,7 +495,7 @@ static __init int walk_callback(void *ctx, const u8 *ip, u8 cidr, int family)
maybe_fail(); \
} while (0)
-bool __init allowedips_selftest(void)
+bool __init wg_allowedips_selftest(void)
{
struct wireguard_peer *a = NULL, *b = NULL, *c = NULL, *d = NULL,
*e = NULL, *f = NULL, *g = NULL, *h = NULL;
@@ -513,7 +517,7 @@ bool __init allowedips_selftest(void)
mutex_init(&mutex);
mutex_lock(&mutex);
- allowedips_init(&t);
+ wg_allowedips_init(&t);
init_peer(a);
init_peer(b);
init_peer(c);
@@ -592,37 +596,39 @@ bool __init allowedips_selftest(void)
insert(4, a, 128, 0, 0, 0, 32);
insert(4, a, 192, 0, 0, 0, 32);
insert(4, a, 255, 0, 0, 0, 32);
- allowedips_remove_by_peer(&t, a, &mutex);
+ wg_allowedips_remove_by_peer(&t, a, &mutex);
test_negative(4, a, 1, 0, 0, 0);
test_negative(4, a, 64, 0, 0, 0);
test_negative(4, a, 128, 0, 0, 0);
test_negative(4, a, 192, 0, 0, 0);
test_negative(4, a, 255, 0, 0, 0);
- allowedips_free(&t, &mutex);
- allowedips_init(&t);
+ wg_allowedips_free(&t, &mutex);
+ wg_allowedips_init(&t);
insert(4, a, 192, 168, 0, 0, 16);
insert(4, a, 192, 168, 0, 0, 24);
- allowedips_remove_by_peer(&t, a, &mutex);
+ wg_allowedips_remove_by_peer(&t, a, &mutex);
test_negative(4, a, 192, 168, 0, 1);
- /* These will hit the WARN_ON(len >= 128) in free_node if something goes wrong. */
+ /* These will hit the WARN_ON(len >= 128) in free_node if something
+ * goes wrong.
+ */
for (i = 0; i < 128; ++i) {
part = cpu_to_be64(~(1LLU << (i % 64)));
memset(&ip, 0xff, 16);
memcpy((u8 *)&ip + (i < 64) * 8, &part, 8);
- allowedips_insert_v6(&t, &ip, 128, a, &mutex);
+ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
}
- allowedips_free(&t, &mutex);
+ wg_allowedips_free(&t, &mutex);
- allowedips_init(&t);
+ wg_allowedips_init(&t);
insert(4, a, 192, 95, 5, 93, 27);
insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
insert(4, a, 10, 1, 0, 20, 29);
insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83);
insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21);
- allowedips_walk_by_peer(&t, cursor, a, walk_callback, &wctx, &mutex);
+ wg_allowedips_walk_by_peer(&t, cursor, a, walk_callback, &wctx, &mutex);
test_boolean(wctx.count == 5);
test_boolean(wctx.found_a);
test_boolean(wctx.found_b);
@@ -640,7 +646,7 @@ bool __init allowedips_selftest(void)
pr_info("allowedips self-tests: pass\n");
free:
- allowedips_free(&t, &mutex);
+ wg_allowedips_free(&t, &mutex);
kfree(a);
kfree(b);
kfree(c);
diff --git a/src/selftest/counter.h b/src/selftest/counter.h
index 2a0e5a5..1b78eb6 100644
--- a/src/selftest/counter.h
+++ b/src/selftest/counter.h
@@ -4,7 +4,7 @@
*/
#ifdef DEBUG
-bool __init packet_counter_selftest(void)
+bool __init wg_packet_counter_selftest(void)
{
unsigned int test_num = 0, i;
union noise_counter counter;
diff --git a/src/selftest/ratelimiter.h b/src/selftest/ratelimiter.h
index e0c65ea..cf94407 100644
--- a/src/selftest/ratelimiter.h
+++ b/src/selftest/ratelimiter.h
@@ -30,7 +30,7 @@ static __init unsigned int maximum_jiffies_at_index(int index)
return msecs_to_jiffies(total_msecs);
}
-bool __init ratelimiter_selftest(void)
+bool __init wg_ratelimiter_selftest(void)
{
int i, test = 0, tries = 0, ret = false;
unsigned long loop_start_time;
@@ -47,17 +47,17 @@ bool __init ratelimiter_selftest(void)
BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
- if (ratelimiter_init())
+ if (wg_ratelimiter_init())
goto out;
++test;
- if (ratelimiter_init()) {
- ratelimiter_uninit();
+ if (wg_ratelimiter_init()) {
+ wg_ratelimiter_uninit();
goto out;
}
++test;
- if (ratelimiter_init()) {
- ratelimiter_uninit();
- ratelimiter_uninit();
+ if (wg_ratelimiter_init()) {
+ wg_ratelimiter_uninit();
+ wg_ratelimiter_uninit();
goto out;
}
++test;
@@ -104,13 +104,13 @@ restart:
msleep(expected_results[i].msec_to_sleep_before);
ensure_time;
- if (ratelimiter_allow(skb4, &init_net) !=
+ if (wg_ratelimiter_allow(skb4, &init_net) !=
expected_results[i].result)
goto err;
++test;
hdr4->saddr = htonl(ntohl(hdr4->saddr) + i + 1);
ensure_time;
- if (!ratelimiter_allow(skb4, &init_net))
+ if (!wg_ratelimiter_allow(skb4, &init_net))
goto err;
++test;
hdr4->saddr = htonl(ntohl(hdr4->saddr) - i - 1);
@@ -119,14 +119,14 @@ restart:
hdr6->saddr.in6_u.u6_addr32[2] =
hdr6->saddr.in6_u.u6_addr32[3] = htonl(i);
ensure_time;
- if (ratelimiter_allow(skb6, &init_net) !=
+ if (wg_ratelimiter_allow(skb6, &init_net) !=
expected_results[i].result)
goto err;
++test;
hdr6->saddr.in6_u.u6_addr32[0] =
htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) + i + 1);
ensure_time;
- if (!ratelimiter_allow(skb6, &init_net))
+ if (!wg_ratelimiter_allow(skb6, &init_net))
goto err;
++test;
hdr6->saddr.in6_u.u6_addr32[0] =
@@ -146,7 +146,8 @@ restart2:
for (i = 0; i <= max_entries; ++i) {
hdr4->saddr = htonl(i);
- if (ratelimiter_allow(skb4, &init_net) != (i != max_entries)) {
+ if (wg_ratelimiter_allow(skb4, &init_net) !=
+ (i != max_entries)) {
if (++tries < 5000)
goto restart2;
goto err;
@@ -162,11 +163,11 @@ err:
kfree_skb(skb6);
#endif
err_nofree:
- ratelimiter_uninit();
- ratelimiter_uninit();
- ratelimiter_uninit();
+ wg_ratelimiter_uninit();
+ wg_ratelimiter_uninit();
+ wg_ratelimiter_uninit();
/* Uninit one extra time to check underflow detection. */
- ratelimiter_uninit();
+ wg_ratelimiter_uninit();
out:
if (ret)
pr_info("ratelimiter self-tests: pass\n");
diff --git a/src/send.c b/src/send.c
index 5dde5a3..d4bf1a0 100644
--- a/src/send.c
+++ b/src/send.c
@@ -23,7 +23,7 @@ static void packet_send_handshake_initiation(struct wireguard_peer *peer)
{
struct message_handshake_initiation packet;
- if (!has_expired(atomic64_read(&peer->last_sent_handshake),
+ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
REKEY_TIMEOUT))
return; /* This function is rate limited. */
@@ -32,29 +32,29 @@ static void packet_send_handshake_initiation(struct wireguard_peer *peer)
peer->device->dev->name, peer->internal_id,
&peer->endpoint.addr);
- if (noise_handshake_create_initiation(&packet, &peer->handshake)) {
- cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
- timers_any_authenticated_packet_traversal(peer);
- timers_any_authenticated_packet_sent(peer);
+ if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) {
+ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_timers_any_authenticated_packet_sent(peer);
atomic64_set(&peer->last_sent_handshake,
ktime_get_boot_fast_ns());
- socket_send_buffer_to_peer(peer, &packet, sizeof(packet),
- HANDSHAKE_DSCP);
- timers_handshake_initiated(peer);
+ wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet),
+ HANDSHAKE_DSCP);
+ wg_timers_handshake_initiated(peer);
}
}
-void packet_handshake_send_worker(struct work_struct *work)
+void wg_packet_handshake_send_worker(struct work_struct *work)
{
struct wireguard_peer *peer = container_of(work, struct wireguard_peer,
transmit_handshake_work);
packet_send_handshake_initiation(peer);
- peer_put(peer);
+ wg_peer_put(peer);
}
-void packet_send_queued_handshake_initiation(struct wireguard_peer *peer,
- bool is_retry)
+void wg_packet_send_queued_handshake_initiation(struct wireguard_peer *peer,
+ bool is_retry)
{
if (!is_retry)
peer->timer_handshake_attempts = 0;
@@ -64,11 +64,11 @@ void packet_send_queued_handshake_initiation(struct wireguard_peer *peer,
* we're queueing up, so that we don't queue things if not strictly
* necessary:
*/
- if (!has_expired(atomic64_read(&peer->last_sent_handshake),
- REKEY_TIMEOUT) || unlikely(peer->is_dead))
+ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
+ REKEY_TIMEOUT) || unlikely(peer->is_dead))
goto out;
- peer_get(peer);
+ wg_peer_get(peer);
/* Queues up calling packet_send_queued_handshakes(peer), where we do a
* peer_put(peer) after:
*/
@@ -77,12 +77,12 @@ void packet_send_queued_handshake_initiation(struct wireguard_peer *peer,
/* If the work was already queued, we want to drop the
* extra reference:
*/
- peer_put(peer);
+ wg_peer_put(peer);
out:
rcu_read_unlock_bh();
}
-void packet_send_handshake_response(struct wireguard_peer *peer)
+void wg_packet_send_handshake_response(struct wireguard_peer *peer)
{
struct message_handshake_response packet;
@@ -91,34 +91,34 @@ void packet_send_handshake_response(struct wireguard_peer *peer)
peer->device->dev->name, peer->internal_id,
&peer->endpoint.addr);
- if (noise_handshake_create_response(&packet, &peer->handshake)) {
- cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
- if (noise_handshake_begin_session(&peer->handshake,
- &peer->keypairs)) {
- timers_session_derived(peer);
- timers_any_authenticated_packet_traversal(peer);
- timers_any_authenticated_packet_sent(peer);
+ if (wg_noise_handshake_create_response(&packet, &peer->handshake)) {
+ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
+ if (wg_noise_handshake_begin_session(&peer->handshake,
+ &peer->keypairs)) {
+ wg_timers_session_derived(peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_timers_any_authenticated_packet_sent(peer);
atomic64_set(&peer->last_sent_handshake,
ktime_get_boot_fast_ns());
- socket_send_buffer_to_peer(peer, &packet,
- sizeof(packet),
- HANDSHAKE_DSCP);
+ wg_socket_send_buffer_to_peer(peer, &packet,
+ sizeof(packet),
+ HANDSHAKE_DSCP);
}
}
}
-void packet_send_handshake_cookie(struct wireguard_device *wg,
- struct sk_buff *initiating_skb,
- __le32 sender_index)
+void wg_packet_send_handshake_cookie(struct wireguard_device *wg,
+ struct sk_buff *initiating_skb,
+ __le32 sender_index)
{
struct message_handshake_cookie packet;
net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n",
wg->dev->name, initiating_skb);
- cookie_message_create(&packet, initiating_skb, sender_index,
- &wg->cookie_checker);
- socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet,
- sizeof(packet));
+ wg_cookie_message_create(&packet, initiating_skb, sender_index,
+ &wg->cookie_checker);
+ wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet,
+ sizeof(packet));
}
static void keep_key_fresh(struct wireguard_peer *peer)
@@ -132,13 +132,13 @@ static void keep_key_fresh(struct wireguard_peer *peer)
(unlikely(atomic64_read(&keypair->sending.counter.counter) >
REKEY_AFTER_MESSAGES) ||
(keypair->i_am_the_initiator &&
- unlikely(has_expired(keypair->sending.birthdate,
- REKEY_AFTER_TIME)))))
+ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
+ REKEY_AFTER_TIME)))))
send = true;
rcu_read_unlock_bh();
if (send)
- packet_send_queued_handshake_initiation(peer, false);
+ wg_packet_send_queued_handshake_initiation(peer, false);
}
static unsigned int skb_padding(struct sk_buff *skb)
@@ -156,7 +156,7 @@ static unsigned int skb_padding(struct sk_buff *skb)
return padded_size - last_unit;
}
-static bool skb_encrypt(struct sk_buff *skb, struct noise_keypair *keypair,
+static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair,
simd_context_t *simd_context)
{
unsigned int padding_len, plaintext_len, trailer_len;
@@ -212,7 +212,7 @@ static bool skb_encrypt(struct sk_buff *skb, struct noise_keypair *keypair,
keypair->sending.key, simd_context);
}
-void packet_send_keepalive(struct wireguard_peer *peer)
+void wg_packet_send_keepalive(struct wireguard_peer *peer)
{
struct sk_buff *skb;
@@ -230,7 +230,7 @@ void packet_send_keepalive(struct wireguard_peer *peer)
&peer->endpoint.addr);
}
- packet_send_staged_packets(peer);
+ wg_packet_send_staged_packets(peer);
}
#define skb_walk_null_queue_safe(first, skb, next) \
@@ -250,22 +250,22 @@ static void packet_create_data_done(struct sk_buff *first,
struct sk_buff *skb, *next;
bool is_keepalive, data_sent = false;
- timers_any_authenticated_packet_traversal(peer);
- timers_any_authenticated_packet_sent(peer);
+ wg_timers_any_authenticated_packet_traversal(peer);
+ wg_timers_any_authenticated_packet_sent(peer);
skb_walk_null_queue_safe (first, skb, next) {
is_keepalive = skb->len == message_data_len(0);
- if (likely(!socket_send_skb_to_peer(peer, skb,
+ if (likely(!wg_socket_send_skb_to_peer(peer, skb,
PACKET_CB(skb)->ds) && !is_keepalive))
data_sent = true;
}
if (likely(data_sent))
- timers_data_sent(peer);
+ wg_timers_data_sent(peer);
keep_key_fresh(peer);
}
-void packet_tx_worker(struct work_struct *work)
+void wg_packet_tx_worker(struct work_struct *work)
{
struct crypt_queue *queue =
container_of(work, struct crypt_queue, work);
@@ -286,12 +286,12 @@ void packet_tx_worker(struct work_struct *work)
else
skb_free_null_queue(first);
- noise_keypair_put(keypair, false);
- peer_put(peer);
+ wg_noise_keypair_put(keypair, false);
+ wg_peer_put(peer);
}
}
-void packet_encrypt_worker(struct work_struct *work)
+void wg_packet_encrypt_worker(struct work_struct *work)
{
struct crypt_queue *queue =
container_of(work, struct multicore_worker, work)->ptr;
@@ -303,16 +303,16 @@ void packet_encrypt_worker(struct work_struct *work)
enum packet_state state = PACKET_STATE_CRYPTED;
skb_walk_null_queue_safe (first, skb, next) {
- if (likely(skb_encrypt(skb, PACKET_CB(first)->keypair,
+ if (likely(encrypt_packet(skb, PACKET_CB(first)->keypair,
&simd_context)))
- skb_reset(skb);
+ wg_reset_packet(skb);
else {
state = PACKET_STATE_DEAD;
break;
}
}
- queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
- state);
+ wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
+ state);
simd_relax(&simd_context);
}
@@ -329,23 +329,23 @@ static void packet_create_data(struct sk_buff *first)
if (unlikely(peer->is_dead))
goto err;
- ret = queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
- &peer->tx_queue, first,
- wg->packet_crypt_wq,
- &wg->encrypt_queue.last_cpu);
+ ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
+ &peer->tx_queue, first,
+ wg->packet_crypt_wq,
+ &wg->encrypt_queue.last_cpu);
if (unlikely(ret == -EPIPE))
- queue_enqueue_per_peer(&peer->tx_queue, first,
- PACKET_STATE_DEAD);
+ wg_queue_enqueue_per_peer(&peer->tx_queue, first,
+ PACKET_STATE_DEAD);
err:
rcu_read_unlock_bh();
if (likely(!ret || ret == -EPIPE))
return;
- noise_keypair_put(PACKET_CB(first)->keypair, false);
- peer_put(peer);
+ wg_noise_keypair_put(PACKET_CB(first)->keypair, false);
+ wg_peer_put(peer);
skb_free_null_queue(first);
}
-void packet_send_staged_packets(struct wireguard_peer *peer)
+void wg_packet_send_staged_packets(struct wireguard_peer *peer)
{
struct noise_symmetric_key *key;
struct noise_keypair *keypair;
@@ -362,7 +362,7 @@ void packet_send_staged_packets(struct wireguard_peer *peer)
/* First we make sure we have a valid reference to a valid key. */
rcu_read_lock_bh();
- keypair = noise_keypair_get(
+ keypair = wg_noise_keypair_get(
rcu_dereference_bh(peer->keypairs.current_keypair));
rcu_read_unlock_bh();
if (unlikely(!keypair))
@@ -370,7 +370,8 @@ void packet_send_staged_packets(struct wireguard_peer *peer)
key = &keypair->sending;
if (unlikely(!key->is_valid))
goto out_nokey;
- if (unlikely(has_expired(key->birthdate, REJECT_AFTER_TIME)))
+ if (unlikely(wg_birthdate_has_expired(
+ key->birthdate, REJECT_AFTER_TIME)))
goto out_invalid;
/* After we know we have a somewhat valid key, we now try to assign
@@ -389,7 +390,7 @@ void packet_send_staged_packets(struct wireguard_peer *peer)
}
packets.prev->next = NULL;
- peer_get(keypair->entry.peer);
+ wg_peer_get(keypair->entry.peer);
PACKET_CB(packets.next)->keypair = keypair;
packet_create_data(packets.next);
return;
@@ -397,7 +398,7 @@ void packet_send_staged_packets(struct wireguard_peer *peer)
out_invalid:
key->is_valid = false;
out_nokey:
- noise_keypair_put(keypair, false);
+ wg_noise_keypair_put(keypair, false);
/* We orphan the packets if we're waiting on a handshake, so that they
* don't block a socket's pool.
@@ -416,5 +417,5 @@ out_nokey:
/* If we're exiting because there's something wrong with the key, it
* means we should initiate a new handshake.
*/
- packet_send_queued_handshake_initiation(peer, false);
+ wg_packet_send_queued_handshake_initiation(peer, false);
}
diff --git a/src/socket.c b/src/socket.c
index e87fc4c..8e9adfd 100644
--- a/src/socket.c
+++ b/src/socket.c
@@ -172,8 +172,8 @@ out:
#endif
}
-int socket_send_skb_to_peer(struct wireguard_peer *peer, struct sk_buff *skb,
- u8 ds)
+int wg_socket_send_skb_to_peer(struct wireguard_peer *peer, struct sk_buff *skb,
+ u8 ds)
{
size_t skb_len = skb->len;
int ret = -EAFNOSUPPORT;
@@ -194,8 +194,8 @@ int socket_send_skb_to_peer(struct wireguard_peer *peer, struct sk_buff *skb,
return ret;
}
-int socket_send_buffer_to_peer(struct wireguard_peer *peer, void *buffer,
- size_t len, u8 ds)
+int wg_socket_send_buffer_to_peer(struct wireguard_peer *peer, void *buffer,
+ size_t len, u8 ds)
{
struct sk_buff *skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC);
@@ -205,12 +205,12 @@ int socket_send_buffer_to_peer(struct wireguard_peer *peer, void *buffer,
skb_reserve(skb, SKB_HEADER_LEN);
skb_set_inner_network_header(skb, 0);
skb_put_data(skb, buffer, len);
- return socket_send_skb_to_peer(peer, skb, ds);
+ return wg_socket_send_skb_to_peer(peer, skb, ds);
}
-int socket_send_buffer_as_reply_to_skb(struct wireguard_device *wg,
- struct sk_buff *in_skb, void *buffer,
- size_t len)
+int wg_socket_send_buffer_as_reply_to_skb(struct wireguard_device *wg,
+ struct sk_buff *in_skb, void *buffer,
+ size_t len)
{
int ret = 0;
struct sk_buff *skb;
@@ -218,7 +218,7 @@ int socket_send_buffer_as_reply_to_skb(struct wireguard_device *wg,
if (unlikely(!in_skb))
return -EINVAL;
- ret = socket_endpoint_from_skb(&endpoint, in_skb);
+ ret = wg_socket_endpoint_from_skb(&endpoint, in_skb);
if (unlikely(ret < 0))
return ret;
@@ -240,8 +240,8 @@ int socket_send_buffer_as_reply_to_skb(struct wireguard_device *wg,
return ret;
}
-int socket_endpoint_from_skb(struct endpoint *endpoint,
- const struct sk_buff *skb)
+int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
+ const struct sk_buff *skb)
{
memset(endpoint, 0, sizeof(*endpoint));
if (skb->protocol == htons(ETH_P_IP)) {
@@ -277,8 +277,8 @@ static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b)
unlikely(!a->addr.sa_family && !b->addr.sa_family);
}
-void socket_set_peer_endpoint(struct wireguard_peer *peer,
- const struct endpoint *endpoint)
+void wg_socket_set_peer_endpoint(struct wireguard_peer *peer,
+ const struct endpoint *endpoint)
{
/* First we check unlocked, in order to optimize, since it's pretty rare
* that an endpoint will change. If we happen to be mid-write, and two
@@ -302,16 +302,16 @@ out:
write_unlock_bh(&peer->endpoint_lock);
}
-void socket_set_peer_endpoint_from_skb(struct wireguard_peer *peer,
- const struct sk_buff *skb)
+void wg_socket_set_peer_endpoint_from_skb(struct wireguard_peer *peer,
+ const struct sk_buff *skb)
{
struct endpoint endpoint;
- if (!socket_endpoint_from_skb(&endpoint, skb))
- socket_set_peer_endpoint(peer, &endpoint);
+ if (!wg_socket_endpoint_from_skb(&endpoint, skb))
+ wg_socket_set_peer_endpoint(peer, &endpoint);
}
-void socket_clear_peer_endpoint_src(struct wireguard_peer *peer)
+void wg_socket_clear_peer_endpoint_src(struct wireguard_peer *peer)
{
write_lock_bh(&peer->endpoint_lock);
memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
@@ -328,7 +328,7 @@ static int receive(struct sock *sk, struct sk_buff *skb)
wg = sk->sk_user_data;
if (unlikely(!wg))
goto err;
- packet_receive(wg, skb);
+ wg_packet_receive(wg, skb);
return 0;
err:
@@ -351,7 +351,7 @@ static void set_sock_opts(struct socket *sock)
sk_set_memalloc(sock->sk);
}
-int socket_init(struct wireguard_device *wg, u16 port)
+int wg_socket_init(struct wireguard_device *wg, u16 port)
{
int ret;
struct udp_tunnel_sock_cfg cfg = {
@@ -406,12 +406,12 @@ retry:
}
#endif
- socket_reinit(wg, new4 ? new4->sk : NULL, new6 ? new6->sk : NULL);
+ wg_socket_reinit(wg, new4 ? new4->sk : NULL, new6 ? new6->sk : NULL);
return 0;
}
-void socket_reinit(struct wireguard_device *wg, struct sock *new4,
- struct sock *new6)
+void wg_socket_reinit(struct wireguard_device *wg, struct sock *new4,
+ struct sock *new6)
{
struct sock *old4, *old6;
diff --git a/src/socket.h b/src/socket.h
index 4978499..ee5eb15 100644
--- a/src/socket.h
+++ b/src/socket.h
@@ -11,29 +11,29 @@
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
-int socket_init(struct wireguard_device *wg, u16 port);
-void socket_reinit(struct wireguard_device *wg, struct sock *new4,
- struct sock *new6);
-int socket_send_buffer_to_peer(struct wireguard_peer *peer, void *data,
- size_t len, u8 ds);
-int socket_send_skb_to_peer(struct wireguard_peer *peer, struct sk_buff *skb,
- u8 ds);
-int socket_send_buffer_as_reply_to_skb(struct wireguard_device *wg,
- struct sk_buff *in_skb, void *out_buffer,
- size_t len);
+int wg_socket_init(struct wireguard_device *wg, u16 port);
+void wg_socket_reinit(struct wireguard_device *wg, struct sock *new4,
+ struct sock *new6);
+int wg_socket_send_buffer_to_peer(struct wireguard_peer *peer, void *data,
+ size_t len, u8 ds);
+int wg_socket_send_skb_to_peer(struct wireguard_peer *peer, struct sk_buff *skb,
+ u8 ds);
+int wg_socket_send_buffer_as_reply_to_skb(struct wireguard_device *wg,
+ struct sk_buff *in_skb,
+ void *out_buffer, size_t len);
-int socket_endpoint_from_skb(struct endpoint *endpoint,
- const struct sk_buff *skb);
-void socket_set_peer_endpoint(struct wireguard_peer *peer,
- const struct endpoint *endpoint);
-void socket_set_peer_endpoint_from_skb(struct wireguard_peer *peer,
- const struct sk_buff *skb);
-void socket_clear_peer_endpoint_src(struct wireguard_peer *peer);
+int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
+ const struct sk_buff *skb);
+void wg_socket_set_peer_endpoint(struct wireguard_peer *peer,
+ const struct endpoint *endpoint);
+void wg_socket_set_peer_endpoint_from_skb(struct wireguard_peer *peer,
+ const struct sk_buff *skb);
+void wg_socket_clear_peer_endpoint_src(struct wireguard_peer *peer);
#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
#define net_dbg_skb_ratelimited(fmt, dev, skb, ...) do { \
struct endpoint __endpoint; \
- socket_endpoint_from_skb(&__endpoint, skb); \
+ wg_socket_endpoint_from_skb(&__endpoint, skb); \
net_dbg_ratelimited(fmt, dev, &__endpoint.addr, \
##__VA_ARGS__); \
} while (0)
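For reference, a sketch (not part of the patch itself) of how the renamed socket send helpers above are used in send.c: handshake and cookie messages go out as plain buffers with HANDSHAKE_DSCP, while data packets go out as skbs carrying their own DS value in PACKET_CB(skb)->ds (from queueing.h). The wrapper names are hypothetical.

static int example_send_control(struct wireguard_peer *peer,
				void *msg, size_t len)
{
	/* Mirrors packet_send_handshake_initiation() in send.c. */
	return wg_socket_send_buffer_to_peer(peer, msg, len, HANDSHAKE_DSCP);
}

static int example_send_data(struct wireguard_peer *peer,
			     struct sk_buff *skb)
{
	/* Mirrors packet_create_data_done() in send.c. */
	return wg_socket_send_skb_to_peer(peer, skb, PACKET_CB(skb)->ds);
}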
diff --git a/src/timers.c b/src/timers.c
index fdc8c38..f8cd5c5 100644
--- a/src/timers.c
+++ b/src/timers.c
@@ -29,7 +29,7 @@
#define peer_get_from_timer(timer_name) \
struct wireguard_peer *peer; \
rcu_read_lock_bh(); \
- peer = peer_get_maybe_zero(from_timer(peer, timer, timer_name)); \
+ peer = wg_peer_get_maybe_zero(from_timer(peer, timer, timer_name)); \
rcu_read_unlock_bh(); \
if (unlikely(!peer)) \
return;
@@ -84,24 +84,24 @@ static void expired_retransmit_handshake(struct timer_list *timer)
/* We clear the endpoint src address, in case this is
* the cause of trouble.
*/
- socket_clear_peer_endpoint_src(peer);
+ wg_socket_clear_peer_endpoint_src(peer);
- packet_send_queued_handshake_initiation(peer, true);
+ wg_packet_send_queued_handshake_initiation(peer, true);
}
- peer_put(peer);
+ wg_peer_put(peer);
}
static void expired_send_keepalive(struct timer_list *timer)
{
peer_get_from_timer(timer_send_keepalive);
- packet_send_keepalive(peer);
+ wg_packet_send_keepalive(peer);
if (peer->timer_need_another_keepalive) {
peer->timer_need_another_keepalive = false;
mod_peer_timer(peer, &peer->timer_send_keepalive,
jiffies + KEEPALIVE_TIMEOUT * HZ);
}
- peer_put(peer);
+ wg_peer_put(peer);
}
static void expired_new_handshake(struct timer_list *timer)
@@ -114,9 +114,9 @@ static void expired_new_handshake(struct timer_list *timer)
/* We clear the endpoint src address, in case this is the cause
* of trouble.
*/
- socket_clear_peer_endpoint_src(peer);
- packet_send_queued_handshake_initiation(peer, false);
- peer_put(peer);
+ wg_socket_clear_peer_endpoint_src(peer);
+ wg_packet_send_queued_handshake_initiation(peer, false);
+ wg_peer_put(peer);
}
static void expired_zero_key_material(struct timer_list *timer)
@@ -129,7 +129,7 @@ static void expired_zero_key_material(struct timer_list *timer)
if (!queue_work(peer->device->handshake_send_wq,
&peer->clear_peer_work))
/* If the work was already on the queue, we want to drop the extra reference */
- peer_put(peer);
+ wg_peer_put(peer);
}
rcu_read_unlock_bh();
}
@@ -141,9 +141,9 @@ static void queued_expired_zero_key_material(struct work_struct *work)
pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n",
peer->device->dev->name, peer->internal_id,
&peer->endpoint.addr, REJECT_AFTER_TIME * 3);
- noise_handshake_clear(&peer->handshake);
- noise_keypairs_clear(&peer->keypairs);
- peer_put(peer);
+ wg_noise_handshake_clear(&peer->handshake);
+ wg_noise_keypairs_clear(&peer->keypairs);
+ wg_peer_put(peer);
}
static void expired_send_persistent_keepalive(struct timer_list *timer)
@@ -151,12 +151,12 @@ static void expired_send_persistent_keepalive(struct timer_list *timer)
peer_get_from_timer(timer_persistent_keepalive);
if (likely(peer->persistent_keepalive_interval))
- packet_send_keepalive(peer);
- peer_put(peer);
+ wg_packet_send_keepalive(peer);
+ wg_peer_put(peer);
}
/* Should be called after an authenticated data packet is sent. */
-void timers_data_sent(struct wireguard_peer *peer)
+void wg_timers_data_sent(struct wireguard_peer *peer)
{
if (!timer_pending(&peer->timer_new_handshake))
mod_peer_timer(peer, &peer->timer_new_handshake,
@@ -164,7 +164,7 @@ void timers_data_sent(struct wireguard_peer *peer)
}
/* Should be called after an authenticated data packet is received. */
-void timers_data_received(struct wireguard_peer *peer)
+void wg_timers_data_received(struct wireguard_peer *peer)
{
if (likely(netif_running(peer->device->dev))) {
if (!timer_pending(&peer->timer_send_keepalive))
@@ -178,7 +178,7 @@ void timers_data_received(struct wireguard_peer *peer)
/* Should be called after any type of authenticated packet is sent, whether
* keepalive, data, or handshake.
*/
-void timers_any_authenticated_packet_sent(struct wireguard_peer *peer)
+void wg_timers_any_authenticated_packet_sent(struct wireguard_peer *peer)
{
del_peer_timer(peer, &peer->timer_send_keepalive);
}
@@ -186,13 +186,13 @@ void timers_any_authenticated_packet_sent(struct wireguard_peer *peer)
/* Should be called after any type of authenticated packet is received, whether
* keepalive, data, or handshake.
*/
-void timers_any_authenticated_packet_received(struct wireguard_peer *peer)
+void wg_timers_any_authenticated_packet_received(struct wireguard_peer *peer)
{
del_peer_timer(peer, &peer->timer_new_handshake);
}
/* Should be called after a handshake initiation message is sent. */
-void timers_handshake_initiated(struct wireguard_peer *peer)
+void wg_timers_handshake_initiated(struct wireguard_peer *peer)
{
mod_peer_timer(
peer, &peer->timer_retransmit_handshake,
@@ -203,7 +203,7 @@ void timers_handshake_initiated(struct wireguard_peer *peer)
/* Should be called after a handshake response message is received and processed
* or when getting key confirmation via the first data message.
*/
-void timers_handshake_complete(struct wireguard_peer *peer)
+void wg_timers_handshake_complete(struct wireguard_peer *peer)
{
del_peer_timer(peer, &peer->timer_retransmit_handshake);
peer->timer_handshake_attempts = 0;
@@ -214,7 +214,7 @@ void timers_handshake_complete(struct wireguard_peer *peer)
/* Should be called after an ephemeral key is created, which is before sending a
* handshake response or after receiving a handshake response.
*/
-void timers_session_derived(struct wireguard_peer *peer)
+void wg_timers_session_derived(struct wireguard_peer *peer)
{
mod_peer_timer(peer, &peer->timer_zero_key_material,
jiffies + REJECT_AFTER_TIME * 3 * HZ);
@@ -223,14 +223,14 @@ void timers_session_derived(struct wireguard_peer *peer)
/* Should be called before a packet with authentication, whether
* keepalive, data, or handshake, is sent, or after one is received.
*/
-void timers_any_authenticated_packet_traversal(struct wireguard_peer *peer)
+void wg_timers_any_authenticated_packet_traversal(struct wireguard_peer *peer)
{
if (peer->persistent_keepalive_interval)
mod_peer_timer(peer, &peer->timer_persistent_keepalive,
jiffies + peer->persistent_keepalive_interval * HZ);
}
-void timers_init(struct wireguard_peer *peer)
+void wg_timers_init(struct wireguard_peer *peer)
{
timer_setup(&peer->timer_retransmit_handshake,
expired_retransmit_handshake, 0);
@@ -245,7 +245,7 @@ void timers_init(struct wireguard_peer *peer)
peer->timer_need_another_keepalive = false;
}
-void timers_stop(struct wireguard_peer *peer)
+void wg_timers_stop(struct wireguard_peer *peer)
{
del_timer_sync(&peer->timer_retransmit_handshake);
del_timer_sync(&peer->timer_send_keepalive);
diff --git a/src/timers.h b/src/timers.h
index 50014d2..eef4248 100644
--- a/src/timers.h
+++ b/src/timers.h
@@ -10,18 +10,19 @@
struct wireguard_peer;
-void timers_init(struct wireguard_peer *peer);
-void timers_stop(struct wireguard_peer *peer);
-void timers_data_sent(struct wireguard_peer *peer);
-void timers_data_received(struct wireguard_peer *peer);
-void timers_any_authenticated_packet_sent(struct wireguard_peer *peer);
-void timers_any_authenticated_packet_received(struct wireguard_peer *peer);
-void timers_handshake_initiated(struct wireguard_peer *peer);
-void timers_handshake_complete(struct wireguard_peer *peer);
-void timers_session_derived(struct wireguard_peer *peer);
-void timers_any_authenticated_packet_traversal(struct wireguard_peer *peer);
+void wg_timers_init(struct wireguard_peer *peer);
+void wg_timers_stop(struct wireguard_peer *peer);
+void wg_timers_data_sent(struct wireguard_peer *peer);
+void wg_timers_data_received(struct wireguard_peer *peer);
+void wg_timers_any_authenticated_packet_sent(struct wireguard_peer *peer);
+void wg_timers_any_authenticated_packet_received(struct wireguard_peer *peer);
+void wg_timers_handshake_initiated(struct wireguard_peer *peer);
+void wg_timers_handshake_complete(struct wireguard_peer *peer);
+void wg_timers_session_derived(struct wireguard_peer *peer);
+void wg_timers_any_authenticated_packet_traversal(struct wireguard_peer *peer);
-static inline bool has_expired(u64 birthday_nanoseconds, u64 expiration_seconds)
+static inline bool wg_birthdate_has_expired(u64 birthday_nanoseconds,
+ u64 expiration_seconds)
{
return (s64)(birthday_nanoseconds + expiration_seconds * NSEC_PER_SEC)
<= (s64)ktime_get_boot_fast_ns();