author    Jason A. Donenfeld <Jason@zx2c4.com>  2018-02-20 02:23:08 +0100
committer Jason A. Donenfeld <Jason@zx2c4.com>  2018-02-20 02:24:29 +0100
commit    8f6e4572112e17e0484e0be581d03e39b1c2e305 (patch)
tree      a072f22dd70a9a3c6c63d74b5a786d6f7b7dda9c /src/queueing.h
parent    d7ba9ec699aefaf6b5eec107a342b6548c46757a (diff)
queueing: skb_reset: mark as xnet
This was avoided for a long time, because I wanted the packet to be
charged to the original socket for as long as possible. However, this
broke net_cls, which looks at skb->sk for additional late-stage routing
decisions. So we had no choice but to ensure that skb->sk is NULL by
the time of xmit, and this means calling the skb destructor.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
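For context, here is a rough sketch of the mechanism the message describes, under the assumption that skb_scrub_packet() with xnet == true ends up orphaning the skb (as in kernels of this era); the sketch_ helper name is illustrative and not a real kernel function:

    #include <linux/skbuff.h>

    /*
     * Illustrative sketch only, not the verbatim kernel implementation.
     * With xnet == true, skb_scrub_packet() is assumed to reach an
     * skb_orphan()-style step: the skb destructor runs and skb->sk is
     * cleared, so the packet stops being charged to the original socket
     * and net_cls sees skb->sk == NULL by xmit time.
     */
    static inline void sketch_orphan_on_scrub(struct sk_buff *skb, bool xnet)
    {
    	if (!xnet)
    		return;			/* xnet == false: skb->sk is left intact */

    	if (skb->destructor) {
    		skb->destructor(skb);	/* uncharge the original socket */
    		skb->destructor = NULL;
    	}
    	skb->sk = NULL;			/* nothing stale for net_cls to consult */
    }

With the previous xnet == false call, skb->sk stayed attached across the tunnel boundary, which is exactly what net_cls was tripping over.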
Diffstat (limited to 'src/queueing.h')
-rw-r--r--  src/queueing.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/queueing.h b/src/queueing.h
index de8b7b2..d5948f3 100644
--- a/src/queueing.h
+++ b/src/queueing.h
@@ -63,7 +63,7 @@ static inline __be16 skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
static inline void skb_reset(struct sk_buff *skb)
{
- skb_scrub_packet(skb, false);
+ skb_scrub_packet(skb, true);
memset(&skb->headers_start, 0, offsetof(struct sk_buff, headers_end) - offsetof(struct sk_buff, headers_start));
skb->queue_mapping = 0;
skb->nohdr = 0;