-rw-r--r--   src/allowedips.c    2
-rw-r--r--   src/hashtables.c    6
-rw-r--r--   src/receive.c      17
-rw-r--r--   src/send.c         11
-rw-r--r--   src/socket.c       17
5 files changed, 23 insertions, 30 deletions
diff --git a/src/allowedips.c b/src/allowedips.c
index fab15ad..5681c2a 100644
--- a/src/allowedips.c
+++ b/src/allowedips.c
@@ -220,7 +220,7 @@ retry:
 	return peer;
 }
 
-__attribute__((nonnull(1))) static inline bool
+__attribute__((nonnull(1))) static bool
 node_placement(struct allowedips_node __rcu *trie, const u8 *key, u8 cidr,
 	       u8 bits, struct allowedips_node **rnode, struct mutex *lock)
 {
diff --git a/src/hashtables.c b/src/hashtables.c
index 4ba2288..5aaebb6 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -7,7 +7,7 @@
 #include "peer.h"
 #include "noise.h"
 
-static inline struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
+static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
 					       const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
 {
 	/* siphash gives us a secure 64bit number based on a random key. Since
@@ -64,8 +64,8 @@ pubkey_hashtable_lookup(struct pubkey_hashtable *table,
 	return peer;
 }
 
-static inline struct hlist_head *index_bucket(struct index_hashtable *table,
-					      const __le32 index)
+static struct hlist_head *index_bucket(struct index_hashtable *table,
+				       const __le32 index)
 {
 	/* Since the indices are random and thus all bits are uniformly
 	 * distributed, we can find its bucket simply by masking.
diff --git a/src/receive.c b/src/receive.c
index 5f5e276..4500a85 100644
--- a/src/receive.c
+++ b/src/receive.c
@@ -18,7 +18,7 @@
 #include <net/ip_tunnels.h>
 
 /* Must be called with bh disabled. */
-static inline void rx_stats(struct wireguard_peer *peer, size_t len)
+static void rx_stats(struct wireguard_peer *peer, size_t len)
 {
 	struct pcpu_sw_netstats *tstats =
 		get_cpu_ptr(peer->device->dev->tstats);
@@ -33,7 +33,7 @@ static inline void rx_stats(struct wireguard_peer *peer, size_t len)
 
 #define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type)
 
-static inline size_t validate_header_len(struct sk_buff *skb)
+static size_t validate_header_len(struct sk_buff *skb)
 {
 	if (unlikely(skb->len < sizeof(struct message_header)))
 		return 0;
@@ -52,8 +52,7 @@ static inline size_t validate_header_len(struct sk_buff *skb)
 	return 0;
 }
 
-static inline int skb_prepare_header(struct sk_buff *skb,
-				     struct wireguard_device *wg)
+static int skb_prepare_header(struct sk_buff *skb, struct wireguard_device *wg)
 {
 	size_t data_offset, data_len, header_len;
 	struct udphdr *udp;
@@ -222,7 +221,7 @@ void packet_handshake_receive_worker(struct work_struct *work)
 	}
 }
 
-static inline void keep_key_fresh(struct wireguard_peer *peer)
+static void keep_key_fresh(struct wireguard_peer *peer)
 {
 	struct noise_keypair *keypair;
 	bool send = false;
@@ -245,9 +244,8 @@ static inline void keep_key_fresh(struct wireguard_peer *peer)
 	}
 }
 
-static inline bool skb_decrypt(struct sk_buff *skb,
-			       struct noise_symmetric_key *key,
-			       simd_context_t simd_context)
+static bool skb_decrypt(struct sk_buff *skb, struct noise_symmetric_key *key,
+			simd_context_t simd_context)
 {
 	struct scatterlist sg[MAX_SKB_FRAGS + 8];
 	struct sk_buff *trailer;
@@ -300,8 +298,7 @@ static inline bool skb_decrypt(struct sk_buff *skb,
 }
 
 /* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */
-static inline bool counter_validate(union noise_counter *counter,
-				    u64 their_counter)
+static bool counter_validate(union noise_counter *counter, u64 their_counter)
 {
 	unsigned long index, index_current, top, i;
 	bool ret = false;
diff --git a/src/send.c b/src/send.c
--- a/src/send.c
+++ b/src/send.c
@@ -121,7 +121,7 @@ void packet_send_handshake_cookie(struct wireguard_device *wg,
 			      sizeof(packet));
 }
 
-static inline void keep_key_fresh(struct wireguard_peer *peer)
+static void keep_key_fresh(struct wireguard_peer *peer)
 {
 	struct noise_keypair *keypair;
 	bool send = false;
@@ -141,7 +141,7 @@ static inline void keep_key_fresh(struct wireguard_peer *peer)
 		packet_send_queued_handshake_initiation(peer, false);
 }
 
-static inline unsigned int skb_padding(struct sk_buff *skb)
+static unsigned int skb_padding(struct sk_buff *skb)
 {
 	/* We do this modulo business with the MTU, just in case the networking
 	 * layer gives us a packet that's bigger than the MTU. In that case, we
@@ -156,9 +156,8 @@ static inline unsigned int skb_padding(struct sk_buff *skb)
 	return padded_size - last_unit;
 }
 
-static inline bool skb_encrypt(struct sk_buff *skb,
-			       struct noise_keypair *keypair,
-			       simd_context_t simd_context)
+static bool skb_encrypt(struct sk_buff *skb, struct noise_keypair *keypair,
+			simd_context_t simd_context)
 {
 	unsigned int padding_len, plaintext_len, trailer_len;
 	struct scatterlist sg[MAX_SKB_FRAGS + 8];
@@ -237,7 +236,7 @@ void packet_send_keepalive(struct wireguard_peer *peer)
 #define skb_walk_null_queue_safe(first, skb, next) \
 	for (skb = first, next = skb->next; skb; \
 	     skb = next, next = skb ? skb->next : NULL)
-static inline void skb_free_null_queue(struct sk_buff *first)
+static void skb_free_null_queue(struct sk_buff *first)
 {
 	struct sk_buff *skb, *next;
 
diff --git a/src/socket.c b/src/socket.c
index 2e9e44f..da6aaa7 100644
--- a/src/socket.c
+++ b/src/socket.c
@@ -17,9 +17,8 @@
 #include <net/udp_tunnel.h>
 #include <net/ipv6.h>
 
-static inline int send4(struct wireguard_device *wg, struct sk_buff *skb,
-			struct endpoint *endpoint, u8 ds,
-			struct dst_cache *cache)
+static int send4(struct wireguard_device *wg, struct sk_buff *skb,
+		 struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
 {
 	struct flowi4 fl = {
 		.saddr = endpoint->src4.s_addr,
@@ -99,9 +98,8 @@ out:
 	return ret;
 }
 
-static inline int send6(struct wireguard_device *wg, struct sk_buff *skb,
-			struct endpoint *endpoint, u8 ds,
-			struct dst_cache *cache)
+static int send6(struct wireguard_device *wg, struct sk_buff *skb,
+		 struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
 {
 #if IS_ENABLED(CONFIG_IPV6)
 	struct flowi6 fl = {
@@ -264,8 +262,7 @@ int socket_endpoint_from_skb(struct endpoint *endpoint,
 	return 0;
 }
 
-static inline bool endpoint_eq(const struct endpoint *a,
-			       const struct endpoint *b)
+static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b)
 {
 	return (a->addr.sa_family == AF_INET && b->addr.sa_family == AF_INET &&
 		a->addr4.sin_port == b->addr4.sin_port &&
@@ -339,7 +336,7 @@ err:
 	return 0;
 }
 
-static inline void sock_free(struct sock *sock)
+static void sock_free(struct sock *sock)
 {
 	if (unlikely(!sock))
 		return;
@@ -347,7 +344,7 @@ static inline void sock_free(struct sock *sock)
 	udp_tunnel_sock_release(sock->sk_socket);
 }
 
-static inline void set_sock_opts(struct socket *sock)
+static void set_sock_opts(struct socket *sock)
 {
 	sock->sk->sk_allocation = GFP_ATOMIC;
 	sock->sk->sk_sndbuf = INT_MAX;
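Every hunk above makes the same mechanical change: a helper that is private to a single .c file is declared plain static, and the inline keyword is dropped, leaving the decision to inline up to the compiler. Below is a minimal sketch of that convention; the file and function names are hypothetical illustrations, not part of this patch.

/* example.c -- a file-local helper: plain static, no inline.
 * The compiler may still inline it wherever that looks profitable. */
#include <stdbool.h>
#include <stdint.h>

static uint32_t checksum_step(uint32_t acc, uint8_t byte)
{
	return acc * 31u + byte;
}

/* example.h -- a small function defined in a header is kept static inline,
 * so every translation unit that includes the header gets its own copy
 * without multiple-definition link errors. */
static inline bool is_power_of_two(uint32_t x)
{
	return x != 0 && (x & (x - 1)) == 0;
}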