-rw-r--r--	src/cookie.c	2
-rw-r--r--	src/device.c	8
-rw-r--r--	src/hashtables.c	20
-rw-r--r--	src/messages.h	2
-rw-r--r--	src/netlink.c	14
-rw-r--r--	src/noise.c	7
-rw-r--r--	src/noise.h	3
-rw-r--r--	src/peer.c	4
-rw-r--r--	src/queueing.c	2
-rw-r--r--	src/queueing.h	7
-rw-r--r--	src/ratelimiter.c	6
-rw-r--r--	src/receive.c	1
-rw-r--r--	src/selftest/allowedips.c	69
-rw-r--r--	src/selftest/counter.c	3
-rw-r--r--	src/send.c	21
-rw-r--r--	src/socket.c	4
-rw-r--r--	src/timers.c	1
17 files changed, 91 insertions(+), 83 deletions(-)
diff --git a/src/cookie.c b/src/cookie.c
index 2aa4cae..d4a6cf2 100644
--- a/src/cookie.c
+++ b/src/cookie.c
@@ -169,7 +169,7 @@ void wg_cookie_add_mac_to_packet(void *message, size_t len,
down_read(&peer->latest_cookie.lock);
if (peer->latest_cookie.is_valid &&
!wg_birthdate_has_expired(peer->latest_cookie.birthdate,
- COOKIE_SECRET_MAX_AGE - COOKIE_SECRET_LATENCY))
+ COOKIE_SECRET_MAX_AGE - COOKIE_SECRET_LATENCY))
compute_mac2(macs->mac2, message, len,
peer->latest_cookie.cookie);
else
diff --git a/src/device.c b/src/device.c
index 3a98c86..2641c18 100644
--- a/src/device.c
+++ b/src/device.c
@@ -57,7 +57,7 @@ static int wg_open(struct net_device *dev)
if (ret < 0)
return ret;
mutex_lock(&wg->device_update_lock);
- list_for_each_entry (peer, &wg->peer_list, peer_list) {
+ list_for_each_entry(peer, &wg->peer_list, peer_list) {
wg_packet_send_staged_packets(peer);
if (peer->persistent_keepalive_interval)
wg_packet_send_keepalive(peer);
@@ -77,9 +77,9 @@ static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
return 0;
rtnl_lock();
- list_for_each_entry (wg, &device_list, device_list) {
+ list_for_each_entry(wg, &device_list, device_list) {
mutex_lock(&wg->device_update_lock);
- list_for_each_entry (peer, &wg->peer_list, peer_list) {
+ list_for_each_entry(peer, &wg->peer_list, peer_list) {
wg_noise_handshake_clear(&peer->handshake);
wg_noise_keypairs_clear(&peer->keypairs);
if (peer->timers_enabled)
@@ -100,7 +100,7 @@ static int wg_stop(struct net_device *dev)
struct wg_peer *peer;
mutex_lock(&wg->device_update_lock);
- list_for_each_entry (peer, &wg->peer_list, peer_list) {
+ list_for_each_entry(peer, &wg->peer_list, peer_list) {
skb_queue_purge(&peer->staged_packet_queue);
wg_timers_stop(peer);
wg_noise_handshake_clear(&peer->handshake);
diff --git a/src/hashtables.c b/src/hashtables.c
index 86b15a0..ee55b8e 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -51,8 +51,8 @@ wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
struct wg_peer *iter_peer, *peer = NULL;
rcu_read_lock_bh();
- hlist_for_each_entry_rcu_bh (iter_peer, pubkey_bucket(table, pubkey),
- pubkey_hash) {
+ hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
+ pubkey_hash) {
if (!memcmp(pubkey, iter_peer->handshake.remote_static,
NOISE_PUBLIC_KEY_LEN)) {
peer = iter_peer;
@@ -118,9 +118,9 @@ __le32 wg_index_hashtable_insert(struct index_hashtable *table,
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
entry->index = (__force __le32)get_random_u32();
- hlist_for_each_entry_rcu_bh (existing_entry,
- index_bucket(table, entry->index),
- index_hash) {
+ hlist_for_each_entry_rcu_bh(existing_entry,
+ index_bucket(table, entry->index),
+ index_hash) {
if (existing_entry->index == entry->index)
/* If it's already in use, we continue searching. */
goto search_unused_slot;
@@ -130,9 +130,9 @@ search_unused_slot:
* that nobody else stole it from us.
*/
spin_lock_bh(&table->lock);
- hlist_for_each_entry_rcu_bh (existing_entry,
- index_bucket(table, entry->index),
- index_hash) {
+ hlist_for_each_entry_rcu_bh(existing_entry,
+ index_bucket(table, entry->index),
+ index_hash) {
if (existing_entry->index == entry->index) {
spin_unlock_bh(&table->lock);
/* If it was stolen, we start over. */
@@ -189,8 +189,8 @@ wg_index_hashtable_lookup(struct index_hashtable *table,
struct index_hashtable_entry *iter_entry, *entry = NULL;
rcu_read_lock_bh();
- hlist_for_each_entry_rcu_bh (iter_entry, index_bucket(table, index),
- index_hash) {
+ hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
+ index_hash) {
if (iter_entry->index == index) {
if (likely(iter_entry->type & type_mask))
entry = iter_entry;
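
[Note: the two index-hashtable hunks above are the two halves of one optimistic pattern — probe for a free random index without the lock, then re-check under table->lock before linking in. A condensed sketch, simplified from the hunk context; the final hlist_add_head_rcu step is assumed from surrounding code rather than shown in the hunks:]

search_unused_slot:
	/* Probe lock-free under RCU; on collision, retry with a new index. */
	entry->index = (__force __le32)get_random_u32();
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index), index_hash) {
		if (existing_entry->index == entry->index)
			goto search_unused_slot;
	}

	spin_lock_bh(&table->lock);
	/* Re-check under the lock: another CPU may have claimed this index
	 * between the unlocked probe and here.
	 */
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index), index_hash) {
		if (existing_entry->index == entry->index) {
			spin_unlock_bh(&table->lock);
			goto search_unused_slot;
		}
	}
	hlist_add_head_rcu(&entry->index_hash, index_bucket(table, entry->index));
	spin_unlock_bh(&table->lock);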
diff --git a/src/messages.h b/src/messages.h
index 090e6f0..c4061cb 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -22,7 +22,7 @@ enum noise_lengths {
NOISE_HASH_LEN = BLAKE2S_HASH_SIZE
};
-#define noise_encrypted_len(plain_len) (plain_len + NOISE_AUTHTAG_LEN)
+#define noise_encrypted_len(plain_len) ((plain_len) + NOISE_AUTHTAG_LEN)
enum cookie_values {
COOKIE_SECRET_MAX_AGE = 2 * 60,
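
[Note: the added parentheses around plain_len guard the macro against operator precedence when its argument is itself an expression. A hypothetical caller, not from this tree, shows the failure mode of the old definition:]

/* Old: #define noise_encrypted_len(plain_len) (plain_len + NOISE_AUTHTAG_LEN) */
len = noise_encrypted_len(small ? short_len : long_len);
/* expands to (small ? short_len : long_len + NOISE_AUTHTAG_LEN), so the
 * authtag length is added only on the long_len branch, because + binds
 * tighter than ?:. The parenthesized form expands to
 * ((small ? short_len : long_len) + NOISE_AUTHTAG_LEN), as intended.
 */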
diff --git a/src/netlink.c b/src/netlink.c
index 63526ab..18bebb8 100644
--- a/src/netlink.c
+++ b/src/netlink.c
@@ -248,7 +248,7 @@ static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
}
lockdep_assert_held(&wg->device_update_lock);
peer = list_prepare_entry(last_peer_cursor, &wg->peer_list, peer_list);
- list_for_each_entry_continue (peer, &wg->peer_list, peer_list) {
+ list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
if (get_peer(peer, rt_cursor, skb)) {
done = false;
break;
@@ -302,7 +302,7 @@ static int set_port(struct wg_device *wg, u16 port)
if (wg->incoming_port == port)
return 0;
- list_for_each_entry (peer, &wg->peer_list, peer_list)
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
wg_socket_clear_peer_endpoint_src(peer);
if (!netif_running(wg->dev)) {
wg->incoming_port = port;
@@ -433,7 +433,7 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1];
int rem;
- nla_for_each_nested (attr, attrs[WGPEER_A_ALLOWEDIPS], rem) {
+ nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) {
ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX,
attr, allowedip_policy, NULL);
if (ret < 0)
@@ -486,7 +486,7 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
struct wg_peer *peer;
wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]);
- list_for_each_entry (peer, &wg->peer_list, peer_list)
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
wg_socket_clear_peer_endpoint_src(peer);
}
@@ -524,8 +524,8 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
down_write(&wg->static_identity.lock);
wg_noise_set_static_identity_private_key(&wg->static_identity,
private_key);
- list_for_each_entry_safe (peer, temp, &wg->peer_list,
- peer_list) {
+ list_for_each_entry_safe(peer, temp, &wg->peer_list,
+ peer_list) {
if (!wg_noise_precompute_static_static(peer))
wg_peer_remove(peer);
}
@@ -537,7 +537,7 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
struct nlattr *attr, *peer[WGPEER_A_MAX + 1];
int rem;
- nla_for_each_nested (attr, info->attrs[WGDEVICE_A_PEERS], rem) {
+ nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) {
ret = nla_parse_nested(peer, WGPEER_A_MAX, attr,
peer_policy, NULL);
if (ret < 0)
diff --git a/src/noise.c b/src/noise.c
index 00310c2..0de09fb 100644
--- a/src/noise.c
+++ b/src/noise.c
@@ -206,8 +206,8 @@ static void add_new_keypair(struct noise_keypairs *keypairs,
next_keypair);
wg_noise_keypair_put(current_keypair, true);
} else /* If there wasn't an existing next keypair, we replace
- * the previous with the current one.
- */
+ * the previous with the current one.
+ */
rcu_assign_pointer(keypairs->previous_keypair,
current_keypair);
/* At this point we can get rid of the old previous keypair, and
@@ -292,7 +292,8 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
u8 secret[BLAKE2S_HASH_SIZE];
WARN_ON(IS_ENABLED(DEBUG) &&
- (first_len > BLAKE2S_HASH_SIZE || second_len > BLAKE2S_HASH_SIZE ||
+ (first_len > BLAKE2S_HASH_SIZE ||
+ second_len > BLAKE2S_HASH_SIZE ||
third_len > BLAKE2S_HASH_SIZE ||
((second_len || second_dst || third_len || third_dst) &&
(!first_len || !first_dst)) ||
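
[Note: the re-indented comment belongs to one branch of the keypair rotation in add_new_keypair. Roughly, and simplified from the surrounding code rather than quoted from it, the two branches are:]

if (next_keypair) {
	/* A confirmed next keypair supersedes everything: it becomes the
	 * previous keypair and the old current keypair is released.
	 */
	RCU_INIT_POINTER(keypairs->next_keypair, NULL);
	rcu_assign_pointer(keypairs->previous_keypair, next_keypair);
	wg_noise_keypair_put(current_keypair, true);
} else {
	/* No pending next keypair: current simply rotates to previous. */
	rcu_assign_pointer(keypairs->previous_keypair, current_keypair);
}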
diff --git a/src/noise.h b/src/noise.h
index a67fd3f..48fb3fc 100644
--- a/src/noise.h
+++ b/src/noise.h
@@ -87,7 +87,8 @@ struct noise_handshake {
__le32 remote_index;
/* Protects all members except the immutable (after noise_handshake_
- * init): remote_static, precomputed_static_static, static_identity. */
+ * init): remote_static, precomputed_static_static, static_identity.
+ */
struct rw_semaphore lock;
};
diff --git a/src/peer.c b/src/peer.c
index 9fb88b8..58ad831 100644
--- a/src/peer.c
+++ b/src/peer.c
@@ -149,6 +149,7 @@ void wg_peer_remove(struct wg_peer *peer)
static void rcu_release(struct rcu_head *rcu)
{
struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);
+
dst_cache_destroy(&peer->endpoint_cache);
wg_packet_queue_free(&peer->rx_queue, false);
wg_packet_queue_free(&peer->tx_queue, false);
@@ -158,6 +159,7 @@ static void rcu_release(struct rcu_head *rcu)
static void kref_release(struct kref *refcount)
{
struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount);
+
pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n",
peer->device->dev->name, peer->internal_id,
&peer->endpoint.addr);
@@ -186,6 +188,6 @@ void wg_peer_remove_all(struct wg_device *wg)
struct wg_peer *peer, *temp;
lockdep_assert_held(&wg->device_update_lock);
- list_for_each_entry_safe (peer, temp, &wg->peer_list, peer_list)
+ list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list)
wg_peer_remove(peer);
}
diff --git a/src/queueing.c b/src/queueing.c
index 939aac9..70e9394 100644
--- a/src/queueing.c
+++ b/src/queueing.c
@@ -15,7 +15,7 @@ wg_packet_alloc_percpu_multicore_worker(work_func_t function, void *ptr)
if (!worker)
return NULL;
- for_each_possible_cpu (cpu) {
+ for_each_possible_cpu(cpu) {
per_cpu_ptr(worker, cpu)->ptr = ptr;
INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
}
diff --git a/src/queueing.h b/src/queueing.h
index 281d325..ce522d8 100644
--- a/src/queueing.h
+++ b/src/queueing.h
@@ -61,8 +61,8 @@ struct packet_cb {
u8 ds;
};
-#define PACKET_PEER(skb) (((struct packet_cb *)skb->cb)->keypair->entry.peer)
-#define PACKET_CB(skb) ((struct packet_cb *)skb->cb)
+#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
+#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
/* Returns either the correct skb->protocol value, or 0 if invalid. */
static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
@@ -83,6 +83,7 @@ static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
static inline void wg_reset_packet(struct sk_buff *skb)
{
const int pfmemalloc = skb->pfmemalloc;
+
skb_scrub_packet(skb, true);
memset(&skb->headers_start, 0,
offsetof(struct sk_buff, headers_end) -
@@ -166,6 +167,7 @@ static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
* peer can be freed from below us.
*/
struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
+
atomic_set_release(&PACKET_CB(skb)->state, state);
queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
peer->internal_id),
@@ -181,6 +183,7 @@ static inline void wg_queue_enqueue_per_peer_napi(struct crypt_queue *queue,
* peer can be freed from below us.
*/
struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
+
atomic_set_release(&PACKET_CB(skb)->state, state);
napi_schedule(&peer->napi);
wg_peer_put(peer);
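
[Note: the macro hunk above reorders the definitions so PACKET_PEER can be built on PACKET_CB, and parenthesizes skb. As with noise_encrypted_len, a hypothetical caller shows why the parentheses matter:]

/* Old: #define PACKET_CB(skb) ((struct packet_cb *)skb->cb) */
cb = PACKET_CB(cond ? first : second);
/* Old expansion: ((struct packet_cb *)cond ? first : second->cb) — the
 * cast binds to cond alone, turning the whole expression into a ternary
 * over mismatched types. The new ((struct packet_cb *)((skb)->cb)) form
 * expands correctly for any argument expression.
 */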
diff --git a/src/ratelimiter.c b/src/ratelimiter.c
index 4e79032..a3d334b 100644
--- a/src/ratelimiter.c
+++ b/src/ratelimiter.c
@@ -62,13 +62,13 @@ static void wg_ratelimiter_gc_entries(struct work_struct *work)
for (i = 0; i < table_size; ++i) {
spin_lock(&table_lock);
- hlist_for_each_entry_safe (entry, temp, &table_v4[i], hash) {
+ hlist_for_each_entry_safe(entry, temp, &table_v4[i], hash) {
if (unlikely(!work) ||
now - entry->last_time_ns > NSEC_PER_SEC)
entry_uninit(entry);
}
#if IS_ENABLED(CONFIG_IPV6)
- hlist_for_each_entry_safe (entry, temp, &table_v6[i], hash) {
+ hlist_for_each_entry_safe(entry, temp, &table_v6[i], hash) {
if (unlikely(!work) ||
now - entry->last_time_ns > NSEC_PER_SEC)
entry_uninit(entry);
@@ -105,7 +105,7 @@ bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net)
else
return false;
rcu_read_lock();
- hlist_for_each_entry_rcu (entry, bucket, hash) {
+ hlist_for_each_entry_rcu(entry, bucket, hash) {
if (entry->net == net && entry->ip == data.ip) {
u64 now, tokens;
bool ret;
diff --git a/src/receive.c b/src/receive.c
index f3d0c40..0a48933 100644
--- a/src/receive.c
+++ b/src/receive.c
@@ -335,6 +335,7 @@ out:
spin_unlock_bh(&counter->receive.lock);
return ret;
}
+
#include "selftest/counter.c"
static void wg_packet_consume_data_done(struct wg_peer *peer,
diff --git a/src/selftest/allowedips.c b/src/selftest/allowedips.c
index 6c98486..fdedfef 100644
--- a/src/selftest/allowedips.c
+++ b/src/selftest/allowedips.c
@@ -48,6 +48,7 @@ static __init void print_node(struct allowedips_node *node, u8 bits)
}
if (node->peer) {
hsiphash_key_t key = { 0 };
+
memcpy(&key, &node->peer, sizeof(node->peer));
color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 |
hsiphash_1u32(0xbabecafe, &key) % 200 << 8 |
@@ -93,7 +94,7 @@ struct horrible_allowedips_node {
struct hlist_node table;
union nf_inet_addr ip;
union nf_inet_addr mask;
- uint8_t ip_version;
+ u8 ip_version;
void *value;
};
@@ -107,13 +108,13 @@ static __init void horrible_allowedips_free(struct horrible_allowedips *table)
struct horrible_allowedips_node *node;
struct hlist_node *h;
- hlist_for_each_entry_safe (node, h, &table->head, table) {
+ hlist_for_each_entry_safe(node, h, &table->head, table) {
hlist_del(&node->table);
kfree(node);
}
}
-static __init inline union nf_inet_addr horrible_cidr_to_mask(uint8_t cidr)
+static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
{
union nf_inet_addr mask;
@@ -125,7 +126,7 @@ static __init inline union nf_inet_addr horrible_cidr_to_mask(uint8_t cidr)
return mask;
}
-static __init inline uint8_t horrible_mask_to_cidr(union nf_inet_addr subnet)
+static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet)
{
return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) +
hweight32(subnet.all[2]) + hweight32(subnet.all[3]);
@@ -169,9 +170,9 @@ horrible_insert_ordered(struct horrible_allowedips *table,
struct horrible_allowedips_node *node)
{
struct horrible_allowedips_node *other = NULL, *where = NULL;
- uint8_t my_cidr = horrible_mask_to_cidr(node->mask);
+ u8 my_cidr = horrible_mask_to_cidr(node->mask);
- hlist_for_each_entry (other, &table->head, table) {
+ hlist_for_each_entry(other, &table->head, table) {
if (!memcmp(&other->mask, &node->mask,
sizeof(union nf_inet_addr)) &&
!memcmp(&other->ip, &node->ip,
@@ -195,7 +196,7 @@ horrible_insert_ordered(struct horrible_allowedips *table,
static __init int
horrible_allowedips_insert_v4(struct horrible_allowedips *table,
- struct in_addr *ip, uint8_t cidr, void *value)
+ struct in_addr *ip, u8 cidr, void *value)
{
struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
GFP_KERNEL);
@@ -213,7 +214,7 @@ horrible_allowedips_insert_v4(struct horrible_allowedips *table,
static __init int
horrible_allowedips_insert_v6(struct horrible_allowedips *table,
- struct in6_addr *ip, uint8_t cidr, void *value)
+ struct in6_addr *ip, u8 cidr, void *value)
{
struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
GFP_KERNEL);
@@ -236,7 +237,7 @@ horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
struct horrible_allowedips_node *node;
void *ret = NULL;
- hlist_for_each_entry (node, &table->head, table) {
+ hlist_for_each_entry(node, &table->head, table) {
if (node->ip_version != 4)
continue;
if (horrible_match_v4(node, ip)) {
@@ -254,7 +255,7 @@ horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
struct horrible_allowedips_node *node;
void *ret = NULL;
- hlist_for_each_entry (node, &table->head, table) {
+ hlist_for_each_entry(node, &table->head, table) {
if (node->ip_version != 6)
continue;
if (horrible_match_v6(node, ip)) {
@@ -428,6 +429,7 @@ static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d)
{
static struct in_addr ip;
u8 *split = (u8 *)&ip;
+
split[0] = a;
split[1] = b;
split[2] = c;
@@ -439,6 +441,7 @@ static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d)
{
static struct in6_addr ip;
__be32 *split = (__be32 *)&ip;
+
split[0] = cpu_to_be32(a);
split[1] = cpu_to_be32(b);
split[2] = cpu_to_be32(c);
@@ -481,11 +484,13 @@ static __init int walk_callback(void *ctx, const u8 *ip, u8 cidr, int family)
return 0;
}
-#define init_peer(name) do { \
- name = kzalloc(sizeof(*name), GFP_KERNEL); \
- if (name) \
- kref_init(&name->refcount); \
- } while (0)
+static __init struct wg_peer *init_peer(void)
+{
+ struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ if (peer)
+ kref_init(&peer->refcount);
+ return peer;
+}
#define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \
wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
@@ -499,16 +504,16 @@ static __init int walk_callback(void *ctx, const u8 *ip, u8 cidr, int family)
} \
} while (0)
-#define test(version, mem, ipa, ipb, ipc, ipd) do { \
- bool _s = lookup(t.root##version, version == 4 ? 32 : 128, \
- ip##version(ipa, ipb, ipc, ipd)) == mem; \
- maybe_fail(); \
+#define test(version, mem, ipa, ipb, ipc, ipd) do { \
+ bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
+ ip##version(ipa, ipb, ipc, ipd)) == (mem); \
+ maybe_fail(); \
} while (0)
-#define test_negative(version, mem, ipa, ipb, ipc, ipd) do { \
- bool _s = lookup(t.root##version, version == 4 ? 32 : 128, \
- ip##version(ipa, ipb, ipc, ipd)) != mem; \
- maybe_fail(); \
+#define test_negative(version, mem, ipa, ipb, ipc, ipd) do { \
+ bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
+ ip##version(ipa, ipb, ipc, ipd)) != (mem); \
+ maybe_fail(); \
} while (0)
#define test_boolean(cond) do { \
@@ -518,9 +523,10 @@ static __init int walk_callback(void *ctx, const u8 *ip, u8 cidr, int family)
bool __init wg_allowedips_selftest(void)
{
- struct wg_peer *a = NULL, *b = NULL, *c = NULL, *d = NULL, *e = NULL,
- *f = NULL, *g = NULL, *h = NULL;
- struct allowedips_cursor *cursor = NULL;
+ struct allowedips_cursor *cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
+ struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(),
+ *d = init_peer(), *e = init_peer(), *f = init_peer(),
+ *g = init_peer(), *h = init_peer();
struct walk_ctx wctx = { 0 };
bool success = false;
struct allowedips t;
@@ -531,17 +537,7 @@ bool __init wg_allowedips_selftest(void)
mutex_init(&mutex);
mutex_lock(&mutex);
-
wg_allowedips_init(&t);
- init_peer(a);
- init_peer(b);
- init_peer(c);
- init_peer(d);
- init_peer(e);
- init_peer(f);
- init_peer(g);
- init_peer(h);
- cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
if (!cursor || !a || !b || !c || !d || !e || !f || !g || !h) {
pr_err("allowedips self-test malloc: FAIL\n");
@@ -679,6 +675,7 @@ free:
return success;
}
+
#undef test_negative
#undef test
#undef remove
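
[Note: the init_peer change in this file converts a statement macro that assigned to its own argument into a static function, gaining type checking and making the assignment explicit at each call site, as the selftest hunk above shows:]

/* Before: the macro hides a write to its argument. */
init_peer(a);
/* After: an ordinary, type-checked expression. */
a = init_peer();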
diff --git a/src/selftest/counter.c b/src/selftest/counter.c
index 7e14eeb..0d8b192 100644
--- a/src/selftest/counter.c
+++ b/src/selftest/counter.c
@@ -17,7 +17,7 @@ bool __init wg_packet_counter_selftest(void)
#define T_LIM (COUNTER_WINDOW_SIZE + 1)
#define T(n, v) do { \
++test_num; \
- if (counter_validate(&counter, n) != v) { \
+ if (counter_validate(&counter, n) != (v)) { \
pr_err("nonce counter self-test %u: FAIL\n", \
test_num); \
success = false; \
@@ -92,6 +92,7 @@ bool __init wg_packet_counter_selftest(void)
T(i, true);
T(0, true);
T(COUNTER_WINDOW_SIZE + 1, true);
+
#undef T
#undef T_LIM
#undef T_INIT
diff --git a/src/send.c b/src/send.c
index 81a77e0..ca6fa3e 100644
--- a/src/send.c
+++ b/src/send.c
@@ -24,7 +24,7 @@ static void wg_packet_send_handshake_initiation(struct wg_peer *peer)
struct message_handshake_initiation packet;
if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
- REKEY_TIMEOUT))
+ REKEY_TIMEOUT))
return; /* This function is rate limited. */
atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
@@ -240,7 +240,7 @@ static void skb_free_null_queue(struct sk_buff *first)
{
struct sk_buff *skb, *next;
- skb_walk_null_queue_safe (first, skb, next)
+ skb_walk_null_queue_safe(first, skb, next)
dev_kfree_skb(skb);
}
@@ -252,7 +252,7 @@ static void wg_packet_create_data_done(struct sk_buff *first,
wg_timers_any_authenticated_packet_traversal(peer);
wg_timers_any_authenticated_packet_sent(peer);
- skb_walk_null_queue_safe (first, skb, next) {
+ skb_walk_null_queue_safe(first, skb, next) {
is_keepalive = skb->len == message_data_len(0);
if (likely(!wg_socket_send_skb_to_peer(peer, skb,
PACKET_CB(skb)->ds) && !is_keepalive))
@@ -302,9 +302,10 @@ void wg_packet_encrypt_worker(struct work_struct *work)
while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
enum packet_state state = PACKET_STATE_CRYPTED;
- skb_walk_null_queue_safe (first, skb, next) {
- if (likely(encrypt_packet(skb, PACKET_CB(first)->keypair,
- &simd_context)))
+ skb_walk_null_queue_safe(first, skb, next) {
+ if (likely(encrypt_packet(skb,
+ PACKET_CB(first)->keypair,
+ &simd_context)))
wg_reset_packet(skb);
else {
state = PACKET_STATE_DEAD;
@@ -370,8 +371,8 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
key = &keypair->sending;
if (unlikely(!key->is_valid))
goto out_nokey;
- if (unlikely(wg_birthdate_has_expired(
- key->birthdate, REJECT_AFTER_TIME)))
+ if (unlikely(wg_birthdate_has_expired(key->birthdate,
+ REJECT_AFTER_TIME)))
goto out_invalid;
/* After we know we have a somewhat valid key, we now try to assign
@@ -379,7 +380,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
* for all of them, we just consider it a failure and wait for the next
* handshake.
*/
- skb_queue_walk (&packets, skb) {
+ skb_queue_walk(&packets, skb) {
/* 0 for no outer TOS: no leak. TODO: at some later point, we
* might consider using flowi->tos as outer instead.
*/
@@ -404,7 +405,7 @@ out_nokey:
/* We orphan the packets if we're waiting on a handshake, so that they
* don't block a socket's pool.
*/
- skb_queue_walk (&packets, skb)
+ skb_queue_walk(&packets, skb)
skb_orphan(skb);
/* Then we put them back on the top of the queue. We're not too
* concerned about accidentally getting things a little out of order if
diff --git a/src/socket.c b/src/socket.c
index 03dcb65..7fa9fa6 100644
--- a/src/socket.c
+++ b/src/socket.c
@@ -60,8 +60,8 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
}
rt = ip_route_output_flow(sock_net(sock), &fl, sock);
if (unlikely(endpoint->src_if4 && ((IS_ERR(rt) &&
- PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) &&
- rt->dst.dev->ifindex != endpoint->src_if4)))) {
+ PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) &&
+ rt->dst.dev->ifindex != endpoint->src_if4)))) {
endpoint->src4.s_addr = 0;
*(__force __be32 *)&endpoint->src_if4 = 0;
fl.saddr = 0;
diff --git a/src/timers.c b/src/timers.c
index 563f03b..e6aacc5 100644
--- a/src/timers.c
+++ b/src/timers.c
@@ -133,6 +133,7 @@ static void wg_expired_zero_key_material(struct timer_list *timer)
}
rcu_read_unlock_bh();
}
+
static void wg_queued_expired_zero_key_material(struct work_struct *work)
{
struct wg_peer *peer = container_of(work, struct wg_peer,