author	Jason A. Donenfeld <Jason@zx2c4.com>	2018-06-16 02:23:42 +0200
committer	Jason A. Donenfeld <Jason@zx2c4.com>	2018-06-16 04:40:16 +0200
commit	8186537ff6272a5533e4dcf4b1b428d9328bed44 (patch)
tree	92471a58662137a7122f38fd8ea0c7148dabda42	/src/receive.c
parent	a63830c6bbbf251765a5caee4111835048665f23 (diff)
queueing: re-enable preemption periodically to lower latency
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Diffstat (limited to 'src/receive.c')
-rw-r--r--	src/receive.c	12
1 file changed, 12 insertions(+), 0 deletions(-)
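Both hunks below apply the same idea: the receive path runs long per-packet loops inside a local_bh_disable() section (or inside a SIMD context), which also keeps the task from being preempted, so a flood of packets can starve the scheduler. Checking need_resched() once per iteration and briefly closing the critical section opens a preemption window without giving up the section for good. As a minimal standalone sketch of the first pattern, assuming a generic ptr_ring consumer; drain_ring() and handle_item() are hypothetical names, not part of this tree:

#include <linux/bottom_half.h>
#include <linux/ptr_ring.h>
#include <linux/sched.h>

static void handle_item(void *item); /* hypothetical per-item work */

/* Drain a ptr_ring with bottom halves disabled, but periodically
 * re-enable them so pending softirqs run and the scheduler can
 * preempt us when need_resched() is set. */
static void drain_ring(struct ptr_ring *ring)
{
	void *item;

	local_bh_disable();
	while ((item = ptr_ring_consume(ring)) != NULL) {
		handle_item(item);

		if (need_resched()) {
			local_bh_enable();  /* preemption point; pending softirqs run */
			local_bh_disable(); /* re-enter the protected section */
		}
	}
	local_bh_enable();
}

The enable/disable pair looks like a no-op, but local_bh_enable() drops the softirq count to zero, runs any pending softirqs, and includes a preemption check, which is exactly the scheduling point the commit wants.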
diff --git a/src/receive.c b/src/receive.c
index 99d6b19..f33941b 100644
--- a/src/receive.c
+++ b/src/receive.c
@@ -404,6 +404,12 @@ next:
 		peer_put(peer);
 		if (unlikely(free))
 			dev_kfree_skb(skb);
+
+		/* Don't totally kill scheduling latency by keeping preemption disabled forever. */
+		if (need_resched()) {
+			local_bh_enable();
+			local_bh_disable();
+		}
 	}
 	local_bh_enable();
 }
@@ -418,6 +424,12 @@ void packet_decrypt_worker(struct work_struct *work)
 		enum packet_state state = likely(skb_decrypt(skb, &PACKET_CB(skb)->keypair->receiving, have_simd)) ? PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
 		queue_enqueue_per_peer(&PACKET_PEER(skb)->rx_queue, skb, state);
+
+		/* Don't totally kill scheduling latency by keeping preemption disabled forever. */
+		if (have_simd && need_resched()) {
+			chacha20poly1305_deinit_simd(have_simd);
+			have_simd = chacha20poly1305_init_simd();
+		}
 	}
 	chacha20poly1305_deinit_simd(have_simd);
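In the decrypt worker, the resource being cycled is the SIMD context rather than bottom halves: on x86, holding the FPU for kernel use keeps preemption disabled, so the context has to be dropped and reacquired before the task can be scheduled out. A rough sketch of the underlying pattern, assuming (as on x86) that the init/deinit helpers reduce to kernel_fpu_begin()/kernel_fpu_end(); crypt_one() and crypt_all() are hypothetical stand-ins, not this tree's skb_decrypt() path:

#include <linux/sched.h>
#include <linux/skbuff.h>
#include <asm/fpu/api.h>

static void crypt_one(struct sk_buff *skb); /* hypothetical SIMD work */

/* Process a queue of packets inside one FPU section, splitting the
 * section whenever the scheduler wants the CPU back, since
 * kernel_fpu_begin() disables preemption. */
static void crypt_all(struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	kernel_fpu_begin();
	while ((skb = skb_dequeue(queue)) != NULL) {
		crypt_one(skb);

		if (need_resched()) {
			kernel_fpu_end();   /* re-enables preemption; resched can happen here */
			kernel_fpu_begin(); /* reacquire the FPU for the next packet */
		}
	}
	kernel_fpu_end();
}

Keeping one FPU section across many packets amortizes the cost of saving and restoring the FPU state; the need_resched() check bounds how long that amortization is allowed to hold off the scheduler.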