author	Jason A. Donenfeld <Jason@zx2c4.com>	2017-04-03 05:20:25 +0200
committer	Jason A. Donenfeld <Jason@zx2c4.com>	2017-04-04 03:44:35 +0200
commit	f7d65b3268ec8f11d0fbf455ce8f7b00b0f79c47 (patch)
tree	e7ddc04349076e5432dd196a4472e04b8bd4811b /src/send.c
parent	bf71248a46a64955c53b59fe6bcbc9c35094d1c6 (diff)
locking: always use _bh
All locks are potentially between user context and softirq,
which means we need to take the _bh variant.
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Diffstat (limited to 'src/send.c')
-rw-r--r--	src/send.c	19
1 file changed, 9 insertions, 10 deletions
@@ -91,13 +91,13 @@ static inline void keep_key_fresh(struct wireguard_peer *peer)
 	struct noise_keypair *keypair;
 	bool send = false;
 
-	rcu_read_lock();
-	keypair = rcu_dereference(peer->keypairs.current_keypair);
+	rcu_read_lock_bh();
+	keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
 	if (likely(keypair && keypair->sending.is_valid) &&
 	    (unlikely(atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES) ||
 	    (keypair->i_am_the_initiator && unlikely(time_is_before_eq_jiffies64(keypair->sending.birthdate + REKEY_AFTER_TIME)))))
 		send = true;
-	rcu_read_unlock();
+	rcu_read_unlock_bh();
 
 	if (send)
 		packet_queue_handshake_initiation(peer);
@@ -144,15 +144,14 @@ static void message_create_data_done(struct sk_buff_head *queue, struct wireguard_peer *peer)
 void packet_send_queue(struct wireguard_peer *peer)
 {
 	struct sk_buff_head queue;
-	unsigned long flags;
 
 	peer->need_resend_queue = false;
 
 	/* Steal the current queue into our local one. */
 	skb_queue_head_init(&queue);
-	spin_lock_irqsave(&peer->tx_packet_queue.lock, flags);
+	spin_lock_bh(&peer->tx_packet_queue.lock);
 	skb_queue_splice_init(&peer->tx_packet_queue, &queue);
-	spin_unlock_irqrestore(&peer->tx_packet_queue.lock, flags);
+	spin_unlock_bh(&peer->tx_packet_queue.lock);
 
 	if (unlikely(!skb_queue_len(&queue)))
 		return;
@@ -172,17 +171,17 @@ void packet_send_queue(struct wireguard_peer *peer)
 		/* We stick the remaining skbs from local_queue at the top of the peer's
 		 * queue again, setting the top of local_queue to be the skb that begins
 		 * the requeueing. */
-		spin_lock_irqsave(&peer->tx_packet_queue.lock, flags);
+		spin_lock_bh(&peer->tx_packet_queue.lock);
 		skb_queue_splice(&queue, &peer->tx_packet_queue);
-		spin_unlock_irqrestore(&peer->tx_packet_queue.lock, flags);
+		spin_unlock_bh(&peer->tx_packet_queue.lock);
 		break;
 	case -ENOKEY:
 		/* ENOKEY means that we don't have a valid session for the peer, which
 		 * means we should initiate a session, but after requeuing like above. */
-		spin_lock_irqsave(&peer->tx_packet_queue.lock, flags);
+		spin_lock_bh(&peer->tx_packet_queue.lock);
 		skb_queue_splice(&queue, &peer->tx_packet_queue);
-		spin_unlock_irqrestore(&peer->tx_packet_queue.lock, flags);
+		spin_unlock_bh(&peer->tx_packet_queue.lock);
 		packet_queue_handshake_initiation(peer);
 		break;
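For context, the reason the _bh variants matter here is the classic deadlock between process context and softirq on the same CPU: a plain spin_lock() taken in process context can be interrupted by a softirq that tries to take the same lock and then spins forever on a lock its own CPU already holds. The following is a minimal sketch of that pattern using hypothetical structure and function names, not code from the WireGuard tree:

/* Minimal sketch, hypothetical names only -- not code from the WireGuard tree. */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>

struct example_keypair {
	bool is_valid;
};

struct example_peer {
	struct sk_buff_head tx_queue;          /* ->lock taken from both contexts */
	struct example_keypair __rcu *keypair;
};

/* Softirq side, e.g. a timer or receive handler: a plain spin_lock() is
 * enough here, because softirqs do not nest on the same CPU. */
static void example_softirq_enqueue(struct example_peer *peer, struct sk_buff *skb)
{
	spin_lock(&peer->tx_queue.lock);
	__skb_queue_tail(&peer->tx_queue, skb);
	spin_unlock(&peer->tx_queue.lock);
}

/* Process-context side, e.g. a configuration or transmit syscall path. */
static bool example_process_enqueue(struct example_peer *peer, struct sk_buff *skb)
{
	struct example_keypair *keypair;
	bool valid;

	/* A plain spin_lock() here would leave a deadlock window: the softirq
	 * above could run on this CPU while the lock is held. spin_lock_bh()
	 * disables local softirqs for the critical section, closing that window. */
	spin_lock_bh(&peer->tx_queue.lock);
	__skb_queue_tail(&peer->tx_queue, skb);
	spin_unlock_bh(&peer->tx_queue.lock);

	/* The RCU read side follows the same rule: pair rcu_read_lock_bh()
	 * with rcu_dereference_bh() when the other side runs in softirq. */
	rcu_read_lock_bh();
	keypair = rcu_dereference_bh(peer->keypair);
	valid = keypair && keypair->is_valid;
	rcu_read_unlock_bh();

	return valid;
}

Because none of these paths run in hardirq context, disabling bottom halves is sufficient, and the heavier spin_lock_irqsave()/spin_unlock_irqrestore() pair used by the old code is not needed.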