author    Jason A. Donenfeld <Jason@zx2c4.com>    2018-08-04 02:50:51 +0200
committer Jason A. Donenfeld <Jason@zx2c4.com>    2018-08-04 05:14:15 +0200
commit    f1ccce3fccbf21422c18fdc45fc89cc5af46b290 (patch)
tree      9d64d51f821c712aaf21795aa2652fa054b18de9 /src
parent    875c926fc72a1aa847c215534ee49c36106d2b56 (diff)
send: switch handshake stamp to an atomic
Rather than abusing the handshake lock, we're much better off just using
a boring atomic64 for this. It's simpler and performs better.

Also, while we're at it, we set the handshake stamp both before and after
the calculations, in case the calculations block for a really long time
waiting for the RNG to initialize. Otherwise it's possible that when the
RNG finally initializes, two handshakes are sent back to back, which
isn't sensible.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
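An illustrative sketch of the pattern this commit adopts, not the actual send.c code: the peer struct and function below are simplified stand-ins, while has_expired() and REKEY_TIMEOUT are the real WireGuard helpers/constants used in the diff, and atomic64_*() and ktime_get_boot_fast_ns() are standard kernel APIs.

#include <linux/atomic.h>
#include <linux/timekeeping.h>

struct sketch_peer {
	atomic64_t last_sent_handshake; /* boot-clock ns of last initiation */
};

static void sketch_send_initiation(struct sketch_peer *peer)
{
	/* Lock-free rate-limit check: a plain atomic read replaces taking
	 * down_write(&peer->handshake.lock) just to inspect a timestamp. */
	if (!has_expired(atomic64_read(&peer->last_sent_handshake),
			 REKEY_TIMEOUT))
		return;

	/* Stamp *before* the expensive calculations, so concurrent callers
	 * racing in while we block (e.g. waiting for the RNG to initialize)
	 * are already rate limited. */
	atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());

	/* ... create and send the handshake initiation ... */

	/* Stamp again *after*, so the REKEY_TIMEOUT window is measured from
	 * when the packet actually went out; otherwise a long RNG wait could
	 * end with two handshakes sent back to back. */
	atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
}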
Diffstat (limited to 'src')
-rw-r--r--  src/device.c   2
-rw-r--r--  src/peer.c     2
-rw-r--r--  src/peer.h     2
-rw-r--r--  src/send.c    20
4 files changed, 12 insertions, 14 deletions
diff --git a/src/device.c b/src/device.c
index fd1aa60..297125a 100644
--- a/src/device.c
+++ b/src/device.c
@@ -105,7 +105,7 @@ static int stop(struct net_device *dev)
timers_stop(peer);
noise_handshake_clear(&peer->handshake);
noise_keypairs_clear(&peer->keypairs);
- peer->last_sent_handshake = ktime_get_boot_fast_ns() - (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC;
+ atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns() - (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC);
}
mutex_unlock(&wg->device_update_lock);
skb_queue_purge(&wg->incoming_handshakes);
diff --git a/src/peer.c b/src/peer.c
index 97a472a..b9703d5 100644
--- a/src/peer.c
+++ b/src/peer.c
@@ -50,7 +50,7 @@ struct wireguard_peer *peer_create(struct wireguard_device *wg, const u8 public_
rwlock_init(&peer->endpoint_lock);
kref_init(&peer->refcount);
skb_queue_head_init(&peer->staged_packet_queue);
- peer->last_sent_handshake = ktime_get_boot_fast_ns() - (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC;
+ atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns() - (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC);
set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
netif_napi_add(wg->dev, &peer->napi, packet_rx_poll, NAPI_POLL_WEIGHT);
napi_enable(&peer->napi);
diff --git a/src/peer.h b/src/peer.h
index 8daa053..29d2e00 100644
--- a/src/peer.h
+++ b/src/peer.h
@@ -43,7 +43,7 @@ struct wireguard_peer {
struct dst_cache endpoint_cache;
rwlock_t endpoint_lock;
struct noise_handshake handshake;
- u64 last_sent_handshake;
+ atomic64_t last_sent_handshake;
struct work_struct transmit_handshake_work, clear_peer_work;
struct cookie latest_cookie;
struct hlist_node pubkey_hash;
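An aside on the peer.h type change above: with the rwsem no longer guarding the field, the accesses need their own atomicity. A plain u64 load or store can be split into two 32-bit accesses on 32-bit architectures; the atomic64 accessors cannot. A sketch with illustrative names:

/* Illustrative only. */
static u64 read_stamp_plain(const u64 *stamp)
{
	return *stamp;               /* may tear into two loads on 32-bit CPUs */
}

static u64 read_stamp_atomic(const atomic64_t *stamp)
{
	return atomic64_read(stamp); /* a single atomic 64-bit load everywhere */
}

On 64-bit machines an aligned access is naturally atomic anyway, but atomic64_read()/atomic64_set() also document the concurrent access and, like READ_ONCE()/WRITE_ONCE(), keep the compiler from tearing or reloading the value.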
diff --git a/src/send.c b/src/send.c
index 481d153..3fc2a17 100644
--- a/src/send.c
+++ b/src/send.c
@@ -23,20 +23,17 @@ static void packet_send_handshake_initiation(struct wireguard_peer *peer)
{
struct message_handshake_initiation packet;
- down_write(&peer->handshake.lock);
- if (!has_expired(peer->last_sent_handshake, REKEY_TIMEOUT)) {
- up_write(&peer->handshake.lock);
+ if (!has_expired(atomic64_read(&peer->last_sent_handshake), REKEY_TIMEOUT))
return; /* This function is rate limited. */
- }
- peer->last_sent_handshake = ktime_get_boot_fast_ns();
- up_write(&peer->handshake.lock);
+ atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n", peer->device->dev->name, peer->internal_id, &peer->endpoint.addr);
if (noise_handshake_create_initiation(&packet, &peer->handshake)) {
cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
timers_any_authenticated_packet_traversal(peer);
timers_any_authenticated_packet_sent(peer);
+ atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
socket_send_buffer_to_peer(peer, &packet, sizeof(struct message_handshake_initiation), HANDSHAKE_DSCP);
timers_handshake_initiated(peer);
}
@@ -55,11 +52,11 @@ void packet_send_queued_handshake_initiation(struct wireguard_peer *peer, bool is_retry)
if (!is_retry)
peer->timer_handshake_attempts = 0;
- /* First checking the timestamp here is just an optimization; it will
- * be caught while properly locked inside the actual work queue.
- */
rcu_read_lock_bh();
- if (!has_expired(peer->last_sent_handshake, REKEY_TIMEOUT) || unlikely(peer->is_dead))
+ /* We check last_sent_handshake here in addition to the actual function we're queueing
+ * up, so that we don't queue things if not strictly necessary.
+ */
+ if (!has_expired(atomic64_read(&peer->last_sent_handshake), REKEY_TIMEOUT) || unlikely(peer->is_dead))
goto out;
peer_get(peer);
@@ -74,8 +71,8 @@ void packet_send_handshake_response(struct wireguard_peer *peer)
{
struct message_handshake_response packet;
+ atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n", peer->device->dev->name, peer->internal_id, &peer->endpoint.addr);
- peer->last_sent_handshake = ktime_get_boot_fast_ns();
if (noise_handshake_create_response(&packet, &peer->handshake)) {
cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
@@ -83,6 +80,7 @@ void packet_send_handshake_response(struct wireguard_peer *peer)
timers_session_derived(peer);
timers_any_authenticated_packet_traversal(peer);
timers_any_authenticated_packet_sent(peer);
+ atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
socket_send_buffer_to_peer(peer, &packet, sizeof(struct message_handshake_response), HANDSHAKE_DSCP);
}
}
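All of the rate-limit checks above funnel through has_expired(), which lives elsewhere in the tree. A hypothetical reconstruction consistent with its use in this diff, not the verbatim source:

/* Hypothetical reconstruction: true once expiration_seconds have elapsed
 * on the boot-time clock since birthday_nanoseconds. */
static inline bool has_expired(u64 birthday_nanoseconds, u64 expiration_seconds)
{
	return (s64)(birthday_nanoseconds + expiration_seconds * NSEC_PER_SEC)
	       <= (s64)ktime_get_boot_fast_ns();
}

Read this together with the initialization in device.c and peer.c, which backdates the stamp by (REKEY_TIMEOUT + 1) seconds: the first call to has_expired() then reports true, so a fresh or just-stopped peer is never rate limited on its next handshake.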