summaryrefslogtreecommitdiffhomepage
path: root/src/send.c
diff options
context:
space:
mode:
authorJason A. Donenfeld <Jason@zx2c4.com>2016-10-05 02:40:34 +0200
committerJason A. Donenfeld <Jason@zx2c4.com>2016-10-05 02:40:34 +0200
commitb228ae7d5911d0bd39ec61164594ebece2f4e510 (patch)
tree9f7e9ac832c84fdc665985d969d14367e6a438d5 /src/send.c
parente86a1c29783ad598175a28959b21757199774591 (diff)
send: requeue jobs for later if padata is full
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Diffstat (limited to 'src/send.c')
-rw-r--r--src/send.c15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/src/send.c b/src/send.c
index a7e1d72..c8df288 100644
--- a/src/send.c
+++ b/src/send.c
@@ -159,8 +159,12 @@ static void message_create_data_done(struct sk_buff *skb, struct wireguard_peer
{
/* A packet completed successfully, so we decrement the counter of packets
* remaining, and if we hit zero we can send it off. */
- if (atomic_dec_and_test(&PACKET_CB(skb)->bundle->count))
+ if (atomic_dec_and_test(&PACKET_CB(skb)->bundle->count)) {
send_off_bundle(PACKET_CB(skb)->bundle, peer);
+ /* We queue the remaining ones only after sending, to retain packet order. */
+ if (unlikely(peer->need_resend_queue))
+ packet_send_queue(peer);
+ }
keep_key_fresh(peer);
}
@@ -172,6 +176,8 @@ int packet_send_queue(struct wireguard_peer *peer)
unsigned long flags;
bool parallel = true;
+ peer->need_resend_queue = false;
+
/* Steal the current queue into our local one. */
skb_queue_head_init(&local_queue);
spin_lock_irqsave(&peer->tx_packet_queue.lock, flags);
@@ -222,10 +228,15 @@ int packet_send_queue(struct wireguard_peer *peer)
/* ENOKEY means that we don't have a valid session for the peer, which
* means we should initiate a session, and then requeue everything. */
ratelimit_packet_send_handshake_initiation(peer);
- /* Fall through */
+ goto requeue;
case -EBUSY:
/* EBUSY happens when the parallel workers are all filled up, in which
* case we should requeue everything. */
+
+ /* First, we mark that we should try to do this later, when existing
+ * jobs are done. */
+ peer->need_resend_queue = true;
+ requeue:
if (skb->prev) {
/* Since we're requeuing skb and everything after skb, we make
* sure that the previously successfully sent packets don't link