path: root/src/send.c
author     Jason A. Donenfeld <Jason@zx2c4.com>   2020-02-14 18:36:19 +0100
committer  Jason A. Donenfeld <Jason@zx2c4.com>   2020-02-14 18:36:19 +0100
commit     ccf83958942334da31a6688a994796b049594c76 (patch)
tree       767e2857b320bf79c348f45da5c751dcc3723d62 /src/send.c
parent     994376d9d0c798c864a209ae3068502fbac8d77c (diff)
send: cleanup skb padding calculation
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Diffstat (limited to 'src/send.c')
-rw-r--r--  src/send.c  17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/src/send.c b/src/send.c
index db5efd2..3c3f04e 100644
--- a/src/send.c
+++ b/src/send.c
@@ -144,17 +144,22 @@ static void keep_key_fresh(struct wg_peer *peer)
 static unsigned int calculate_skb_padding(struct sk_buff *skb)
 {
+	unsigned int padded_size, last_unit = skb->len;
+
+	if (unlikely(!PACKET_CB(skb)->mtu))
+		return -last_unit % MESSAGE_PADDING_MULTIPLE;
+
 	/* We do this modulo business with the MTU, just in case the networking
 	 * layer gives us a packet that's bigger than the MTU. In that case, we
 	 * wouldn't want the final subtraction to overflow in the case of the
-	 * padded_size being clamped.
+	 * padded_size being clamped. Fortunately, that's very rarely the case,
+	 * so we optimize for that not happening.
 	 */
-	unsigned int last_unit = PACKET_CB(skb)->mtu ?
-		skb->len % PACKET_CB(skb)->mtu : skb->len;
-	unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE);
+	if (unlikely(last_unit > PACKET_CB(skb)->mtu))
+		last_unit %= PACKET_CB(skb)->mtu;
-	if (padded_size > PACKET_CB(skb)->mtu)
-		padded_size = PACKET_CB(skb)->mtu;
+	padded_size = min(PACKET_CB(skb)->mtu,
+			  ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE));
 	return padded_size - last_unit;
 }
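
For readers following the arithmetic, here is a minimal user-space sketch of the padding logic introduced above. It is an illustration, not the kernel code: calculate_padding, MIN, and the local ALIGN are hypothetical stand-ins for the in-tree helpers, len and mtu stand in for skb->len and PACKET_CB(skb)->mtu, and MESSAGE_PADDING_MULTIPLE is assumed to be 16, matching messages.h.

#include <stdio.h>

#define MESSAGE_PADDING_MULTIPLE 16                  /* assumed, per messages.h */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))   /* round up to a power-of-two multiple */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static unsigned int calculate_padding(unsigned int len, unsigned int mtu)
{
	unsigned int padded_size, last_unit = len;

	/* No MTU recorded for this skb: just pad up to the next multiple. */
	if (!mtu)
		return -last_unit % MESSAGE_PADDING_MULTIPLE;

	/* Oversized packet: only the final partial unit needs padding. */
	if (last_unit > mtu)
		last_unit %= mtu;

	/* Round up to a multiple of 16, but never pad past the MTU. */
	padded_size = MIN(mtu, ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE));
	return padded_size - last_unit;
}

int main(void)
{
	printf("%u\n", calculate_padding(100, 1420));  /* 12: 100 is padded to 112 */
	printf("%u\n", calculate_padding(1419, 1420)); /* 1: clamped at the 1420-byte MTU */
	printf("%u\n", calculate_padding(33, 0));      /* 15: -33 mod 16, no MTU known */
	return 0;
}

Both branches compute the same quantity, the bytes needed to reach the next MESSAGE_PADDING_MULTIPLE boundary; the min() clamp only matters for a nearly full packet, which must never be padded beyond the MTU.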