diff options
-rw-r--r-- | src/queueing.h | 12 |
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/src/queueing.h b/src/queueing.h
index 4967eef..52cac40 100644
--- a/src/queueing.h
+++ b/src/queueing.h
@@ -121,8 +121,14 @@ static inline int queue_enqueue_per_device_and_peer(struct crypt_queue *device_q
 	int cpu;
 
 	atomic_set(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
+	/* We first queue this up for the peer ingestion, but the consumer
+	 * will wait for the state to change to CRYPTED or DEAD before.
+	 */
 	if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
 		return -ENOSPC;
+	/* Then we queue it up in the device queue, which consumes the
+	 * packet as soon as it can.
+	 */
 	cpu = cpumask_next_online(next_cpu);
 	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
 		return -EPIPE;
@@ -132,6 +138,9 @@ static inline int queue_enqueue_per_device_and_peer(struct crypt_queue *device_q
 
 static inline void queue_enqueue_per_peer(struct crypt_queue *queue, struct sk_buff *skb, enum packet_state state)
 {
+	/* We take a reference, because as soon as we call atomic_set, the
+	 * peer can be freed from below us.
+	 */
 	struct wireguard_peer *peer = peer_get(PACKET_PEER(skb));
 	atomic_set(&PACKET_CB(skb)->state, state);
 	queue_work_on(cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), peer->device->packet_crypt_wq, &queue->work);
@@ -140,6 +149,9 @@ static inline void queue_enqueue_per_peer(struct crypt_queue *queue, struct sk_b
 
 static inline void queue_enqueue_per_peer_napi(struct crypt_queue *queue, struct sk_buff *skb, enum packet_state state)
 {
+	/* We take a reference, because as soon as we call atomic_set, the
+	 * peer can be freed from below us.
+	 */
 	struct wireguard_peer *peer = peer_get(PACKET_PEER(skb));
 	atomic_set(&PACKET_CB(skb)->state, state);
 	napi_schedule(&peer->napi);