Diffstat (limited to 'src/send.c')
-rw-r--r--  src/send.c  203
1 file changed, 136 insertions, 67 deletions
@@ -23,46 +23,63 @@ static void packet_send_handshake_initiation(struct wireguard_peer *peer)
 {
 	struct message_handshake_initiation packet;
 
-	if (!has_expired(atomic64_read(&peer->last_sent_handshake), REKEY_TIMEOUT))
+	if (!has_expired(atomic64_read(&peer->last_sent_handshake),
+			 REKEY_TIMEOUT))
 		return; /* This function is rate limited. */
 	atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
-	net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n", peer->device->dev->name, peer->internal_id, &peer->endpoint.addr);
+	net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n",
+			    peer->device->dev->name, peer->internal_id,
+			    &peer->endpoint.addr);
 
 	if (noise_handshake_create_initiation(&packet, &peer->handshake)) {
 		cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
 		timers_any_authenticated_packet_traversal(peer);
 		timers_any_authenticated_packet_sent(peer);
-		atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
-		socket_send_buffer_to_peer(peer, &packet, sizeof(struct message_handshake_initiation), HANDSHAKE_DSCP);
+		atomic64_set(&peer->last_sent_handshake,
+			     ktime_get_boot_fast_ns());
+		socket_send_buffer_to_peer(
+			peer, &packet,
+			sizeof(struct message_handshake_initiation),
+			HANDSHAKE_DSCP);
 		timers_handshake_initiated(peer);
 	}
 }
 
 void packet_handshake_send_worker(struct work_struct *work)
 {
-	struct wireguard_peer *peer = container_of(work, struct wireguard_peer, transmit_handshake_work);
+	struct wireguard_peer *peer = container_of(work, struct wireguard_peer,
+						   transmit_handshake_work);
 
 	packet_send_handshake_initiation(peer);
 	peer_put(peer);
 }
 
-void packet_send_queued_handshake_initiation(struct wireguard_peer *peer, bool is_retry)
+void packet_send_queued_handshake_initiation(struct wireguard_peer *peer,
+					     bool is_retry)
 {
 	if (!is_retry)
 		peer->timer_handshake_attempts = 0;
 
 	rcu_read_lock_bh();
-	/* We check last_sent_handshake here in addition to the actual function we're queueing
-	 * up, so that we don't queue things if not strictly necessary.
+	/* We check last_sent_handshake here in addition to the actual function
+	 * we're queueing up, so that we don't queue things if not strictly
+	 * necessary:
 	 */
-	if (!has_expired(atomic64_read(&peer->last_sent_handshake), REKEY_TIMEOUT) || unlikely(peer->is_dead))
+	if (!has_expired(atomic64_read(&peer->last_sent_handshake),
+			 REKEY_TIMEOUT) || unlikely(peer->is_dead))
 		goto out;
 
 	peer_get(peer);
-	/* Queues up calling packet_send_queued_handshakes(peer), where we do a peer_put(peer) after: */
-	if (!queue_work(peer->device->handshake_send_wq, &peer->transmit_handshake_work))
-		peer_put(peer); /* If the work was already queued, we want to drop the extra reference */
+	/* Queues up calling packet_send_queued_handshakes(peer), where we do a
+	 * peer_put(peer) after:
+	 */
+	if (!queue_work(peer->device->handshake_send_wq,
+			&peer->transmit_handshake_work))
+		/* If the work was already queued, we want to drop the
+		 * extra reference:
+		 */
+		peer_put(peer);
 out:
 	rcu_read_unlock_bh();
 }
@@ -72,27 +89,39 @@ void packet_send_handshake_response(struct wireguard_peer *peer)
 	struct message_handshake_response packet;
 
 	atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
-	net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n", peer->device->dev->name, peer->internal_id, &peer->endpoint.addr);
+	net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n",
+			    peer->device->dev->name, peer->internal_id,
+			    &peer->endpoint.addr);
 
 	if (noise_handshake_create_response(&packet, &peer->handshake)) {
 		cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
-		if (noise_handshake_begin_session(&peer->handshake, &peer->keypairs)) {
+		if (noise_handshake_begin_session(&peer->handshake,
+						  &peer->keypairs)) {
 			timers_session_derived(peer);
 			timers_any_authenticated_packet_traversal(peer);
 			timers_any_authenticated_packet_sent(peer);
-			atomic64_set(&peer->last_sent_handshake, ktime_get_boot_fast_ns());
-			socket_send_buffer_to_peer(peer, &packet, sizeof(struct message_handshake_response), HANDSHAKE_DSCP);
+			atomic64_set(&peer->last_sent_handshake,
+				     ktime_get_boot_fast_ns());
+			socket_send_buffer_to_peer(
+				peer, &packet,
+				sizeof(struct message_handshake_response),
+				HANDSHAKE_DSCP);
 		}
 	}
 }
 
-void packet_send_handshake_cookie(struct wireguard_device *wg, struct sk_buff *initiating_skb, __le32 sender_index)
+void packet_send_handshake_cookie(struct wireguard_device *wg,
+				  struct sk_buff *initiating_skb,
+				  __le32 sender_index)
 {
 	struct message_handshake_cookie packet;
 
-	net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n", wg->dev->name, initiating_skb);
-	cookie_message_create(&packet, initiating_skb, sender_index, &wg->cookie_checker);
-	socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet, sizeof(packet));
+	net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n",
+				wg->dev->name, initiating_skb);
+	cookie_message_create(&packet, initiating_skb, sender_index,
+			      &wg->cookie_checker);
+	socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet,
+					   sizeof(packet));
 }
 
 static inline void keep_key_fresh(struct wireguard_peer *peer)
@@ -103,8 +132,11 @@ static inline void keep_key_fresh(struct wireguard_peer *peer)
 	rcu_read_lock_bh();
 	keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
 	if (likely(keypair && keypair->sending.is_valid) &&
-	    (unlikely(atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES) ||
-	    (keypair->i_am_the_initiator && unlikely(has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME)))))
+	    (unlikely(atomic64_read(&keypair->sending.counter.counter) >
+			REKEY_AFTER_MESSAGES) ||
+	     (keypair->i_am_the_initiator &&
+	      unlikely(has_expired(keypair->sending.birthdate,
+				   REKEY_AFTER_TIME)))))
 		send = true;
 	rcu_read_unlock_bh();
 
@@ -114,9 +146,10 @@ static inline unsigned int skb_padding(struct sk_buff *skb)
 {
-	/* We do this modulo business with the MTU, just in case the networking layer
-	 * gives us a packet that's bigger than the MTU. In that case, we wouldn't want
-	 * the final subtraction to overflow in the case of the padded_size being clamped.
+	/* We do this modulo business with the MTU, just in case the networking
+	 * layer gives us a packet that's bigger than the MTU. In that case, we
+	 * wouldn't want the final subtraction to overflow in the case of the
+	 * padded_size being clamped.
 	 */
 	unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu;
 	unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE);
@@ -126,38 +159,49 @@ static inline unsigned int skb_padding(struct sk_buff *skb)
 	return padded_size - last_unit;
 }
 
-static inline bool skb_encrypt(struct sk_buff *skb, struct noise_keypair *keypair, simd_context_t simd_context)
+static inline bool skb_encrypt(struct sk_buff *skb,
+			       struct noise_keypair *keypair,
+			       simd_context_t simd_context)
 {
+	unsigned int padding_len, plaintext_len, trailer_len;
 	struct scatterlist sg[MAX_SKB_FRAGS * 2 + 1];
 	struct message_data *header;
-	unsigned int padding_len, plaintext_len, trailer_len;
-	int num_frags;
 	struct sk_buff *trailer;
+	int num_frags;
 
-	/* Calculate lengths */
+	/* Calculate lengths. */
 	padding_len = skb_padding(skb);
 	trailer_len = padding_len + noise_encrypted_len(0);
 	plaintext_len = skb->len + padding_len;
 
-	/* Expand data section to have room for padding and auth tag */
+	/* Expand data section to have room for padding and auth tag. */
 	num_frags = skb_cow_data(skb, trailer_len, &trailer);
 	if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
 		return false;
 
-	/* Set the padding to zeros, and make sure it and the auth tag are part of the skb */
+	/* Set the padding to zeros, and make sure it and the auth tag are part
+	 * of the skb.
+	 */
 	memset(skb_tail_pointer(trailer), 0, padding_len);
 
-	/* Expand head section to have room for our header and the network stack's headers. */
+	/* Expand head section to have room for our header and the network
+	 * stack's headers.
+	 */
 	if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0))
 		return false;
 
-	/* We have to remember to add the checksum to the innerpacket, in case the receiver forwards it. */
+	/* We have to remember to add the checksum to the innerpacket, in case
+	 * the receiver forwards it.
+	 */
	if (likely(!skb_checksum_setup(skb, true)))
 		skb_checksum_help(skb);
 
-	/* Only after checksumming can we safely add on the padding at the end and the header. */
+	/* Only after checksumming can we safely add on the padding at the end
+	 * and the header.
+	 */
 	skb_set_inner_network_header(skb, 0);
-	header = (struct message_data *)skb_push(skb, sizeof(struct message_data));
+	header = (struct message_data *)skb_push(skb,
+						 sizeof(struct message_data));
 	header->header.type = cpu_to_le32(MESSAGE_DATA);
 	header->key_idx = keypair->remote_index;
 	header->counter = cpu_to_le64(PACKET_CB(skb)->nonce);
@@ -165,9 +209,12 @@ static inline bool skb_encrypt(struct sk_buff *skb, struct noise_keypair *keypai
 
 	/* Now we can encrypt the scattergather segments */
 	sg_init_table(sg, num_frags);
-	if (skb_to_sgvec(skb, sg, sizeof(struct message_data), noise_encrypted_len(plaintext_len)) <= 0)
+	if (skb_to_sgvec(skb, sg, sizeof(struct message_data),
+			 noise_encrypted_len(plaintext_len)) <= 0)
 		return false;
-	return chacha20poly1305_encrypt_sg(sg, sg, plaintext_len, NULL, 0, PACKET_CB(skb)->nonce, keypair->sending.key, simd_context);
+	return chacha20poly1305_encrypt_sg(sg, sg, plaintext_len, NULL, 0,
+					   PACKET_CB(skb)->nonce,
+					   keypair->sending.key, simd_context);
 }
 
 void packet_send_keepalive(struct wireguard_peer *peer)
@@ -175,38 +222,45 @@ void packet_send_keepalive(struct wireguard_peer *peer)
 	struct sk_buff *skb;
 
 	if (skb_queue_empty(&peer->staged_packet_queue)) {
-		skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH, GFP_ATOMIC);
+		skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
+				GFP_ATOMIC);
 		if (unlikely(!skb))
 			return;
 		skb_reserve(skb, DATA_PACKET_HEAD_ROOM);
 		skb->dev = peer->device->dev;
 		PACKET_CB(skb)->mtu = skb->dev->mtu;
 		skb_queue_tail(&peer->staged_packet_queue, skb);
-		net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n", peer->device->dev->name, peer->internal_id, &peer->endpoint.addr);
+		net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n",
+				    peer->device->dev->name, peer->internal_id,
+				    &peer->endpoint.addr);
 	}
 
 	packet_send_staged_packets(peer);
 }
 
-#define skb_walk_null_queue_safe(first, skb, next) for (skb = first, next = skb->next; skb; skb = next, next = skb ? skb->next : NULL)
+#define skb_walk_null_queue_safe(first, skb, next)                            \
+	for (skb = first, next = skb->next; skb;                              \
+	     skb = next, next = skb ? skb->next : NULL)
 
 static inline void skb_free_null_queue(struct sk_buff *first)
 {
 	struct sk_buff *skb, *next;
 
-	skb_walk_null_queue_safe(first, skb, next)
+	skb_walk_null_queue_safe (first, skb, next)
 		dev_kfree_skb(skb);
 }
 
-static void packet_create_data_done(struct sk_buff *first, struct wireguard_peer *peer)
+static void packet_create_data_done(struct sk_buff *first,
+				    struct wireguard_peer *peer)
 {
 	struct sk_buff *skb, *next;
 	bool is_keepalive, data_sent = false;
 
 	timers_any_authenticated_packet_traversal(peer);
 	timers_any_authenticated_packet_sent(peer);
-	skb_walk_null_queue_safe(first, skb, next) {
+	skb_walk_null_queue_safe (first, skb, next) {
 		is_keepalive = skb->len == message_data_len(0);
-		if (likely(!socket_send_skb_to_peer(peer, skb, PACKET_CB(skb)->ds) && !is_keepalive))
+		if (likely(!socket_send_skb_to_peer(peer, skb,
+				PACKET_CB(skb)->ds) && !is_keepalive))
 			data_sent = true;
 	}
 
@@ -218,13 +272,16 @@ static void packet_create_data_done(struct sk_buff *first, struct wireguard_peer
 
 void packet_tx_worker(struct work_struct *work)
 {
-	struct crypt_queue *queue = container_of(work, struct crypt_queue, work);
+	struct crypt_queue *queue =
+		container_of(work, struct crypt_queue, work);
 	struct wireguard_peer *peer;
 	struct noise_keypair *keypair;
 	struct sk_buff *first;
 	enum packet_state state;
 
-	while ((first = __ptr_ring_peek(&queue->ring)) != NULL && (state = atomic_read_acquire(&PACKET_CB(first)->state)) != PACKET_STATE_UNCRYPTED) {
+	while ((first = __ptr_ring_peek(&queue->ring)) != NULL &&
+	       (state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
+		       PACKET_STATE_UNCRYPTED) {
 		__ptr_ring_discard_one(&queue->ring);
 		peer = PACKET_PEER(first);
 		keypair = PACKET_CB(first)->keypair;
@@ -241,22 +298,25 @@ void packet_tx_worker(struct work_struct *work)
 
 void packet_encrypt_worker(struct work_struct *work)
 {
-	struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
+	struct crypt_queue *queue =
+		container_of(work, struct multicore_worker, work)->ptr;
 	struct sk_buff *first, *skb, *next;
 	simd_context_t simd_context = simd_get();
 
 	while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
 		enum packet_state state = PACKET_STATE_CRYPTED;
 
-		skb_walk_null_queue_safe(first, skb, next) {
-			if (likely(skb_encrypt(skb, PACKET_CB(first)->keypair, simd_context)))
+		skb_walk_null_queue_safe (first, skb, next) {
+			if (likely(skb_encrypt(skb, PACKET_CB(first)->keypair,
+					       simd_context)))
 				skb_reset(skb);
 			else {
 				state = PACKET_STATE_DEAD;
 				break;
 			}
 		}
-		queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, state);
+		queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
+				       state);
 
 		simd_context = simd_relax(simd_context);
 	}
@@ -273,9 +333,13 @@ static void packet_create_data(struct sk_buff *first)
 	if (unlikely(peer->is_dead))
 		goto err;
 
-	ret = queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first, wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
+	ret = queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
+						&peer->tx_queue, first,
+						wg->packet_crypt_wq,
+						&wg->encrypt_queue.last_cpu);
 	if (unlikely(ret == -EPIPE))
-		queue_enqueue_per_peer(&peer->tx_queue, first, PACKET_STATE_DEAD);
+		queue_enqueue_per_peer(&peer->tx_queue, first,
+				       PACKET_STATE_DEAD);
 err:
 	rcu_read_unlock_bh();
 	if (likely(!ret || ret == -EPIPE))
@@ -287,8 +351,8 @@ err:
 
 void packet_send_staged_packets(struct wireguard_peer *peer)
 {
-	struct noise_keypair *keypair;
 	struct noise_symmetric_key *key;
+	struct noise_keypair *keypair;
 	struct sk_buff_head packets;
 	struct sk_buff *skb;
@@ -302,7 +366,8 @@ void packet_send_staged_packets(struct wireguard_peer *peer)
 
 	/* First we make sure we have a valid reference to a valid key. */
 	rcu_read_lock_bh();
-	keypair = noise_keypair_get(rcu_dereference_bh(peer->keypairs.current_keypair));
+	keypair = noise_keypair_get(
+		rcu_dereference_bh(peer->keypairs.current_keypair));
 	rcu_read_unlock_bh();
 	if (unlikely(!keypair))
 		goto out_nokey;
@@ -312,13 +377,17 @@ void packet_send_staged_packets(struct wireguard_peer *peer)
 	if (unlikely(has_expired(key->birthdate, REJECT_AFTER_TIME)))
 		goto out_invalid;
 
-	/* After we know we have a somewhat valid key, we now try to assign nonces to
-	 * all of the packets in the queue. If we can't assign nonces for all of them,
-	 * we just consider it a failure and wait for the next handshake.
+	/* After we know we have a somewhat valid key, we now try to assign
+	 * nonces to all of the packets in the queue. If we can't assign nonces
+	 * for all of them, we just consider it a failure and wait for the next
+	 * handshake.
 	 */
-	skb_queue_walk(&packets, skb) {
-		PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0 /* No outer TOS: no leak. TODO: should we use flowi->tos as outer? */, ip_hdr(skb), skb);
-		PACKET_CB(skb)->nonce = atomic64_inc_return(&key->counter.counter) - 1;
+	skb_queue_walk (&packets, skb) {
+		/* 0 for no outer TOS: no leak. TODO: should we use flowi->tos
+		 * as outer? */
+		PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
+		PACKET_CB(skb)->nonce =
+			atomic64_inc_return(&key->counter.counter) - 1;
 		if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
 			goto out_invalid;
 	}
@@ -337,19 +406,19 @@ out_nokey:
 	/* We orphan the packets if we're waiting on a handshake, so that they
 	 * don't block a socket's pool.
 	 */
-	skb_queue_walk(&packets, skb)
+	skb_queue_walk (&packets, skb)
 		skb_orphan(skb);
-	/* Then we put them back on the top of the queue. We're not too concerned about
-	 * accidentally getting things a little out of order if packets are being added
-	 * really fast, because this queue is for before packets can even be sent and
-	 * it's small anyway.
+	/* Then we put them back on the top of the queue. We're not too
+	 * concerned about accidentally getting things a little out of order if
+	 * packets are being added really fast, because this queue is for before
+	 * packets can even be sent and it's small anyway.
 	 */
 	spin_lock_bh(&peer->staged_packet_queue.lock);
 	skb_queue_splice(&packets, &peer->staged_packet_queue);
 	spin_unlock_bh(&peer->staged_packet_queue.lock);
-	/* If we're exiting because there's something wrong with the key, it means
-	 * we should initiate a new handshake.
+	/* If we're exiting because there's something wrong with the key, it
+	 * means we should initiate a new handshake.
	 */
 	packet_send_queued_handshake_initiation(peer, false);
 }