summaryrefslogtreecommitdiffhomepage
path: root/src
diff options
context:
space:
mode:
authorJason A. Donenfeld <Jason@zx2c4.com>2017-10-05 23:39:07 +0200
committerJason A. Donenfeld <Jason@zx2c4.com>2017-10-05 23:40:19 +0200
commite47f6657edadcb38db80d62dfd1e2e2ddf0b3d78 (patch)
treee229d6ff6821d8aaff15187ec8155e1ea48772db /src
parent1dd83eb7d4546c17eecb14a5f52b34d908ee1581 (diff)
queueing: cleanup skb_padding
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Diffstat (limited to 'src')
-rw-r--r--src/queueing.h14
-rw-r--r--src/send.c14
2 files changed, 14 insertions, 14 deletions
diff --git a/src/queueing.h b/src/queueing.h
index 62dee51..2b79639 100644
--- a/src/queueing.h
+++ b/src/queueing.h
@@ -58,19 +58,6 @@ static inline __be16 skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
return 0;
}
-static inline unsigned int skb_padding(struct sk_buff *skb)
-{
- /* We do this modulo business with the MTU, just in case the networking layer
- * gives us a packet that's bigger than the MTU. Now that we support GSO, this
- * shouldn't be a real problem, and this can likely be removed. But, caution! */
- unsigned int last_unit = skb->len % skb->dev->mtu;
- unsigned int padded_size = (last_unit + MESSAGE_PADDING_MULTIPLE - 1) & ~(MESSAGE_PADDING_MULTIPLE - 1);
-
- if (padded_size > skb->dev->mtu)
- padded_size = skb->dev->mtu;
- return padded_size - last_unit;
-}
-
static inline void skb_reset(struct sk_buff *skb)
{
skb_scrub_packet(skb, false);
@@ -142,7 +129,6 @@ static inline void queue_enqueue_per_peer(struct crypt_queue *queue, struct sk_b
atomic_set(&PACKET_CB(skb)->state, state);
queue_work_on(cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), peer->device->packet_crypt_wq, &queue->work);
peer_put(peer);
-
}
#ifdef DEBUG
diff --git a/src/send.c b/src/send.c
index 0a952b8..ed19e58 100644
--- a/src/send.c
+++ b/src/send.c
@@ -105,6 +105,20 @@ static inline void keep_key_fresh(struct wireguard_peer *peer)
packet_send_queued_handshake_initiation(peer, false);
}
+static inline unsigned int skb_padding(struct sk_buff *skb)
+{
+ /* We do this modulo business with the MTU, just in case the networking layer
+ * gives us a packet that's bigger than the MTU. Since we support GSO, this
+ * isn't strictly necessary, but it's better to be cautious here, especially
+ * if that code ever changes. */
+ unsigned int last_unit = skb->len % skb->dev->mtu;
+ unsigned int padded_size = (last_unit + MESSAGE_PADDING_MULTIPLE - 1) & ~(MESSAGE_PADDING_MULTIPLE - 1);
+
+ if (padded_size > skb->dev->mtu)
+ padded_size = skb->dev->mtu;
+ return padded_size - last_unit;
+}
+
static inline bool skb_encrypt(struct sk_buff *skb, struct noise_keypair *keypair, bool have_simd)
{
struct scatterlist sg[MAX_SKB_FRAGS * 2 + 1];