Diffstat (limited to 'src')
-rw-r--r--	src/compat/compat.h         |  2 +-
-rw-r--r--	src/device.c                |  8 ++++----
-rw-r--r--	src/hashtables.c            |  8 ++++----
-rw-r--r--	src/netlink.c               | 12 ++++++------
-rw-r--r--	src/peer.c                  |  2 +-
-rw-r--r--	src/queueing.c              |  2 +-
-rw-r--r--	src/ratelimiter.c           |  6 +++---
-rw-r--r--	src/selftest/routingtable.h |  8 ++++----
-rw-r--r--	src/send.c                  | 10 +++++-----
9 files changed, 29 insertions(+), 29 deletions(-)
diff --git a/src/compat/compat.h b/src/compat/compat.h
index e8076db..34353ea 100644
--- a/src/compat/compat.h
+++ b/src/compat/compat.h
@@ -162,7 +162,7 @@ static inline void netif_keep_dst(struct net_device *dev)
 	typeof(type) __percpu *pcpu_stats = alloc_percpu(type);	\
 	if (pcpu_stats) {					\
 		int __cpu;					\
-		for_each_possible_cpu (__cpu) {			\
+		for_each_possible_cpu(__cpu) {			\
 			typeof(type) *stat;			\
 			stat = per_cpu_ptr(pcpu_stats, __cpu);	\
 			u64_stats_init(&stat->syncp);		\
diff --git a/src/device.c b/src/device.c
index cf6e2b2..8a2eb0a 100644
--- a/src/device.c
+++ b/src/device.c
@@ -54,7 +54,7 @@ static int open(struct net_device *dev)
 	if (ret < 0)
 		return ret;
 	mutex_lock(&wg->device_update_lock);
-	list_for_each_entry (peer, &wg->peer_list, peer_list) {
+	list_for_each_entry(peer, &wg->peer_list, peer_list) {
 		packet_send_staged_packets(peer);
 		if (peer->persistent_keepalive_interval)
 			packet_send_keepalive(peer);
@@ -73,9 +73,9 @@ static int suspending_clear_noise_peers(struct notifier_block *nb, unsigned long
 		return 0;
 	rtnl_lock();
-	list_for_each_entry (wg, &device_list, device_list) {
+	list_for_each_entry(wg, &device_list, device_list) {
 		mutex_lock(&wg->device_update_lock);
-		list_for_each_entry (peer, &wg->peer_list, peer_list) {
+		list_for_each_entry(peer, &wg->peer_list, peer_list) {
 			noise_handshake_clear(&peer->handshake);
 			noise_keypairs_clear(&peer->keypairs);
 			if (peer->timers_enabled)
@@ -96,7 +96,7 @@ static int stop(struct net_device *dev)
 	struct wireguard_peer *peer;

 	mutex_lock(&wg->device_update_lock);
-	list_for_each_entry (peer, &wg->peer_list, peer_list) {
+	list_for_each_entry(peer, &wg->peer_list, peer_list) {
 		skb_queue_purge(&peer->staged_packet_queue);
 		timers_stop(peer);
 		noise_handshake_clear(&peer->handshake);
diff --git a/src/hashtables.c b/src/hashtables.c
index 4a3798c..a0c0c64 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -38,7 +38,7 @@ struct wireguard_peer *pubkey_hashtable_lookup(struct pubkey_hashtable *table, c
 	struct wireguard_peer *iter_peer, *peer = NULL;

 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu_bh (iter_peer, pubkey_bucket(table, pubkey), pubkey_hash) {
+	hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey), pubkey_hash) {
 		if (!memcmp(pubkey, iter_peer->handshake.remote_static, NOISE_PUBLIC_KEY_LEN)) {
 			peer = iter_peer;
 			break;
@@ -97,7 +97,7 @@ __le32 index_hashtable_insert(struct index_hashtable *table, struct index_hashta
 search_unused_slot:
 	/* First we try to find an unused slot, randomly, while unlocked. */
 	entry->index = (__force __le32)get_random_u32();
-	hlist_for_each_entry_rcu_bh (existing_entry, index_bucket(table, entry->index), index_hash) {
+	hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index)
 			goto search_unused_slot; /* If it's already in use, we continue searching. */
 	}
@@ -105,7 +105,7 @@ search_unused_slot:
 	/* Once we've found an unused slot, we lock it, and then double-check
 	 * that nobody else stole it from us. */
 	spin_lock_bh(&table->lock);
-	hlist_for_each_entry_rcu_bh (existing_entry, index_bucket(table, entry->index), index_hash) {
+	hlist_for_each_entry_rcu_bh(existing_entry, index_bucket(table, entry->index), index_hash) {
 		if (existing_entry->index == entry->index) {
 			spin_unlock_bh(&table->lock);
 			goto search_unused_slot; /* If it was stolen, we start over. */
@@ -145,7 +145,7 @@ struct index_hashtable_entry *index_hashtable_lookup(struct index_hashtable *tab
 	struct index_hashtable_entry *iter_entry, *entry = NULL;

 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu_bh (iter_entry, index_bucket(table, index), index_hash) {
+	hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index), index_hash) {
 		if (iter_entry->index == index) {
 			if (likely(iter_entry->type & type_mask))
 				entry = iter_entry;
diff --git a/src/netlink.c b/src/netlink.c
index fed16dc..60c58e0 100644
--- a/src/netlink.c
+++ b/src/netlink.c
@@ -206,7 +206,7 @@ static int get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	}
 	lockdep_assert_held(&wg->device_update_lock);
 	peer = list_prepare_entry(last_peer_cursor, &wg->peer_list, peer_list);
-	list_for_each_entry_continue (peer, &wg->peer_list, peer_list) {
+	list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
 		if (get_peer(peer, peer_idx++, rt_cursor, skb)) {
 			done = false;
 			break;
@@ -260,7 +260,7 @@ static int set_port(struct wireguard_device *wg, u16 port)
 		return 0;
 	socket_uninit(wg);
 	wg->incoming_port = port;
-	list_for_each_entry (peer, &wg->peer_list, peer_list)
+	list_for_each_entry(peer, &wg->peer_list, peer_list)
 		socket_clear_peer_endpoint_src(peer);
 	if (!netif_running(wg->dev))
 		return 0;
@@ -356,7 +356,7 @@ static int set_peer(struct wireguard_device *wg, struct nlattr **attrs)
 		int rem;
 		struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1];

-		nla_for_each_nested (attr, attrs[WGPEER_A_ALLOWEDIPS], rem) {
+		nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) {
 			ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX, attr, allowedip_policy, NULL);
 			if (ret < 0)
 				goto out;
@@ -403,7 +403,7 @@ static int set_device(struct sk_buff *skb, struct genl_info *info)
 		struct wireguard_peer *peer;

 		wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]);
-		list_for_each_entry (peer, &wg->peer_list, peer_list)
+		list_for_each_entry(peer, &wg->peer_list, peer_list)
 			socket_clear_peer_endpoint_src(peer);
 	}
@@ -428,7 +428,7 @@ static int set_device(struct sk_buff *skb, struct genl_info *info)
 			peer_remove(peer);
 		}
 		noise_set_static_identity_private_key(&wg->static_identity, private_key);
-		list_for_each_entry_safe (peer, temp, &wg->peer_list, peer_list) {
+		list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
 			if (!noise_precompute_static_static(peer))
 				peer_remove(peer);
 		}
@@ -439,7 +439,7 @@ static int set_device(struct sk_buff *skb, struct genl_info *info)
 		int rem;
 		struct nlattr *attr, *peer[WGPEER_A_MAX + 1];

-		nla_for_each_nested (attr, info->attrs[WGDEVICE_A_PEERS], rem) {
+		nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) {
 			ret = nla_parse_nested(peer, WGPEER_A_MAX, attr, peer_policy, NULL);
 			if (ret < 0)
 				goto out;
diff --git a/src/peer.c b/src/peer.c
--- a/src/peer.c
+++ b/src/peer.c
@@ -126,6 +126,6 @@ void peer_remove_all(struct wireguard_device *wg)
 	struct wireguard_peer *peer, *temp;

 	lockdep_assert_held(&wg->device_update_lock);
-	list_for_each_entry_safe (peer, temp, &wg->peer_list, peer_list)
+	list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list)
 		peer_remove(peer);
 }
diff --git a/src/queueing.c b/src/queueing.c
index fa50511..bce406a 100644
--- a/src/queueing.c
+++ b/src/queueing.c
@@ -10,7 +10,7 @@ struct multicore_worker __percpu *packet_alloc_percpu_multicore_worker(work_func
 	if (!worker)
 		return NULL;

-	for_each_possible_cpu (cpu) {
+	for_each_possible_cpu(cpu) {
 		per_cpu_ptr(worker, cpu)->ptr = ptr;
 		INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
 	}
diff --git a/src/ratelimiter.c b/src/ratelimiter.c
index 0afcdac..a9caf32 100644
--- a/src/ratelimiter.c
+++ b/src/ratelimiter.c
@@ -57,12 +57,12 @@ static void gc_entries(struct work_struct *work)

 	for (i = 0; i < table_size; ++i) {
 		spin_lock(&table_lock);
-		hlist_for_each_entry_safe (entry, temp, &table_v4[i], hash) {
+		hlist_for_each_entry_safe(entry, temp, &table_v4[i], hash) {
 			if (unlikely(!work) || now - entry->last_time_ns > NSEC_PER_SEC)
 				entry_uninit(entry);
 		}
 #if IS_ENABLED(CONFIG_IPV6)
-		hlist_for_each_entry_safe (entry, temp, &table_v6[i], hash) {
+		hlist_for_each_entry_safe(entry, temp, &table_v6[i], hash) {
 			if (unlikely(!work) || now - entry->last_time_ns > NSEC_PER_SEC)
 				entry_uninit(entry);
 		}
@@ -94,7 +94,7 @@ bool ratelimiter_allow(struct sk_buff *skb, struct net *net)
 	else
 		return false;
 	rcu_read_lock();
-	hlist_for_each_entry_rcu (entry, bucket, hash) {
+	hlist_for_each_entry_rcu(entry, bucket, hash) {
 		if (entry->net == net && entry->ip == data.ip) {
 			u64 now, tokens;
 			bool ret;
diff --git a/src/selftest/routingtable.h b/src/selftest/routingtable.h
index 473f0f9..434c6fc 100644
--- a/src/selftest/routingtable.h
+++ b/src/selftest/routingtable.h
@@ -65,7 +65,7 @@ static __init void horrible_routing_table_free(struct horrible_routing_table *ta
 {
 	struct hlist_node *h;
 	struct horrible_routing_table_node *node;
-	hlist_for_each_entry_safe (node, h, &table->head, table) {
+	hlist_for_each_entry_safe(node, h, &table->head, table) {
 		hlist_del(&node->table);
 		kfree(node);
 	}
@@ -112,7 +112,7 @@ static __init void horrible_insert_ordered(struct horrible_routing_table *table,
 {
 	struct horrible_routing_table_node *other = NULL, *where = NULL;
 	uint8_t my_cidr = horrible_mask_to_cidr(node->mask);
-	hlist_for_each_entry (other, &table->head, table) {
+	hlist_for_each_entry(other, &table->head, table) {
 		if (!memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
 		    !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr)) &&
 		    other->ip_version == node->ip_version) {
@@ -161,7 +161,7 @@ static __init void *horrible_routing_table_lookup_v4(struct horrible_routing_tab
 {
 	struct horrible_routing_table_node *node;
 	void *ret = NULL;
-	hlist_for_each_entry (node, &table->head, table) {
+	hlist_for_each_entry(node, &table->head, table) {
 		if (node->ip_version != 4)
 			continue;
 		if (horrible_match_v4(node, ip)) {
@@ -175,7 +175,7 @@ static __init void *horrible_routing_table_lookup_v6(struct horrible_routing_tab
 {
 	struct horrible_routing_table_node *node;
 	void *ret = NULL;
-	hlist_for_each_entry (node, &table->head, table) {
+	hlist_for_each_entry(node, &table->head, table) {
 		if (node->ip_version != 6)
 			continue;
 		if (horrible_match_v6(node, ip)) {
diff --git a/src/send.c b/src/send.c
--- a/src/send.c
+++ b/src/send.c
@@ -185,7 +185,7 @@ static inline void skb_free_null_queue(struct sk_buff *first)
 {
 	struct sk_buff *skb, *next;

-	skb_walk_null_queue_safe (first, skb, next)
+	skb_walk_null_queue_safe(first, skb, next)
 		dev_kfree_skb(skb);
 }
@@ -195,7 +195,7 @@ static void packet_create_data_done(struct sk_buff *first, struct wireguard_peer
 	bool is_keepalive, data_sent = false;

 	timers_any_authenticated_packet_traversal(peer);
-	skb_walk_null_queue_safe (first, skb, next) {
+	skb_walk_null_queue_safe(first, skb, next) {
 		is_keepalive = skb->len == message_data_len(0);
 		if (likely(!socket_send_skb_to_peer(peer, skb, PACKET_CB(skb)->ds) && !is_keepalive))
 			data_sent = true;
@@ -241,7 +241,7 @@ void packet_encrypt_worker(struct work_struct *work)
 	while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
 		enum packet_state state = PACKET_STATE_CRYPTED;

-		skb_walk_null_queue_safe (first, skb, next) {
+		skb_walk_null_queue_safe(first, skb, next) {
 			if (likely(skb_encrypt(skb, PACKET_CB(first)->keypair, have_simd)))
 				skb_reset(skb);
 			else {
@@ -303,7 +303,7 @@ void packet_send_staged_packets(struct wireguard_peer *peer)
 	/* After we know we have a somewhat valid key, we now try to assign nonces to
 	 * all of the packets in the queue. If we can't assign nonces for all of them,
 	 * we just consider it a failure and wait for the next handshake. */
-	skb_queue_walk (&packets, skb) {
+	skb_queue_walk(&packets, skb) {
 		PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0 /* No outer TOS: no leak. TODO: should we use flowi->tos as outer? */, ip_hdr(skb), skb);
 		PACKET_CB(skb)->nonce = atomic64_inc_return(&key->counter.counter) - 1;
 		if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
@@ -323,7 +323,7 @@ out_nokey:
 	/* We orphan the packets if we're waiting on a handshake, so that they
 	 * don't block a socket's pool. */
-	skb_queue_walk (&packets, skb)
+	skb_queue_walk(&packets, skb)
 		skb_orphan(skb);
 	/* Then we put them back on the top of the queue. We're not too concerned about
 	 * accidently getting things a little out of order if packets are being added
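
Note on the hashtables.c hunks above: the comments in index_hashtable_insert() describe its allocation pattern — pick a random index, scan the bucket without the lock (safe in the kernel because the walk runs under rcu_read_lock_bh()), then take the lock and re-scan before claiming the slot. The following userspace sketch illustrates just that search/lock/double-check shape; struct entry, bucket(), NBUCKETS, and the use of rand() are hypothetical stand-ins, not the module's real types, and without RCU the unlocked probe here is only a best-effort guess whose correctness rests on the locked re-check.

/* Sketch of the randomized-index allocation pattern from
 * index_hashtable_insert(): probe unlocked, then lock and double-check.
 * All names here are illustrative stand-ins. */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#define NBUCKETS 64

struct entry {
	uint32_t index;
	struct entry *next;
};

static struct entry *buckets[NBUCKETS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry **bucket(uint32_t index)
{
	return &buckets[index % NBUCKETS];
}

static int index_in_use(uint32_t index)
{
	const struct entry *e;

	for (e = *bucket(index); e; e = e->next)
		if (e->index == index)
			return 1;
	return 0;
}

static uint32_t insert_entry(struct entry *new_entry)
{
search_unused_slot:
	/* First, probe for an unused index without holding the lock. The
	 * kernel makes this walk safe with RCU; here it is only a cheap
	 * optimistic check. */
	new_entry->index = (uint32_t)rand();
	if (index_in_use(new_entry->index))
		goto search_unused_slot;

	/* Lock and double-check that nobody claimed the index meanwhile;
	 * if it was stolen, start over, exactly as the kernel code does. */
	pthread_mutex_lock(&table_lock);
	if (index_in_use(new_entry->index)) {
		pthread_mutex_unlock(&table_lock);
		goto search_unused_slot;
	}
	new_entry->next = *bucket(new_entry->index);
	*bucket(new_entry->index) = new_entry;
	pthread_mutex_unlock(&table_lock);
	return new_entry->index;
}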
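Similarly, the send.c hunks touch the nonce discipline that packet_send_staged_packets()'s comments describe: each staged packet atomically reserves the next value of the keypair's send counter, and any reservation at or beyond REJECT_AFTER_MESSAGES fails the batch until the next handshake. A minimal sketch of that reservation step, assuming the simplified struct below and the WireGuard paper's limit of 2^64 - 2^13 - 1:

/* Sketch of per-keypair nonce reservation, loosely after
 * packet_send_staged_packets(); atomic_fetch_add mirrors the kernel's
 * atomic64_inc_return(&counter) - 1. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define REJECT_AFTER_MESSAGES (UINT64_MAX - (1ULL << 13)) /* 2^64 - 2^13 - 1 */

struct keypair {
	_Atomic uint64_t counter; /* next nonce to hand out */
};

/* Returns false once the keypair is exhausted, meaning the caller should
 * treat the batch as a failure and wait for a fresh handshake. */
static bool reserve_nonce(struct keypair *key, uint64_t *nonce)
{
	*nonce = atomic_fetch_add(&key->counter, 1);
	return *nonce < REJECT_AFTER_MESSAGES;
}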