author    Ayush Ranjan <ayushranjan@google.com>  2021-02-11 22:03:45 -0800
committer gVisor bot <gvisor-bot@google.com>     2021-02-11 22:06:09 -0800
commit    845d0a65f449ade1952568b5fc120e7cb8cd709f (patch)
tree      dac960f709630d6e20f4202c05c952df514b52c0 /pkg/tcpip
parent    34614c39860a7316132a2bf572366618e7a78be9 (diff)
[rack] TLP: ACK Processing and PTO scheduling.
This change implements the TLP details enumerated in
https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.3.

Fixes #5085

PiperOrigin-RevId: 357125037
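For orientation, the probe timeout (PTO) this change arms is computed as described in section 7.5.1 of the draft: roughly twice the smoothed RTT, padded by the worst-case delayed ACK timer when only one segment is in flight, and capped so the probe never fires later than the pending RTO would have. Below is a minimal, self-contained sketch of that arithmetic in Go; it is not the gVisor implementation, and the 200ms constant and function names are illustrative.

package main

import (
	"fmt"
	"time"
)

// wcDelAckTimeout is the worst-case delayed ACK timer (WCDelAckT in the
// draft); 200ms is a common value, used here purely for illustration.
const wcDelAckTimeout = 200 * time.Millisecond

// computePTO sketches the PTO from section 7.5.1: 2*SRTT, plus the
// worst-case delayed ACK timer when only one segment is outstanding,
// capped at the pending RTO.
func computePTO(srtt, rto time.Duration, outstanding int) time.Duration {
	pto := 2 * srtt
	if outstanding == 1 {
		pto += wcDelAckTimeout
	}
	if pto > rto {
		pto = rto
	}
	return pto
}

func main() {
	fmt.Println(computePTO(50*time.Millisecond, time.Second, 1)) // 300ms
	fmt.Println(computePTO(50*time.Millisecond, time.Second, 4)) // 100ms
}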
Diffstat (limited to 'pkg/tcpip')
-rw-r--r--  pkg/tcpip/tcpip.go                        |   4
-rw-r--r--  pkg/tcpip/transport/tcp/rack.go           |   9
-rw-r--r--  pkg/tcpip/transport/tcp/sack_recovery.go  |   2
-rw-r--r--  pkg/tcpip/transport/tcp/snd.go            |  38
-rw-r--r--  pkg/tcpip/transport/tcp/tcp_rack_test.go  | 347
-rw-r--r--  pkg/tcpip/transport/tcp/tcp_sack_test.go  |  15
6 files changed, 366 insertions(+), 49 deletions(-)
diff --git a/pkg/tcpip/tcpip.go b/pkg/tcpip/tcpip.go
index d5c2b0487..ba063dc26 100644
--- a/pkg/tcpip/tcpip.go
+++ b/pkg/tcpip/tcpip.go
@@ -1731,6 +1731,10 @@ type TCPStats struct {
// recover from packet loss.
SACKRecovery *StatCounter
+ // TLPRecovery is the number of times recovery was accomplished by the tail
+ // loss probe.
+ TLPRecovery *StatCounter
+
// SlowStartRetransmits is the number of segments retransmitted in slow
// start.
SlowStartRetransmits *StatCounter
diff --git a/pkg/tcpip/transport/tcp/rack.go b/pkg/tcpip/transport/tcp/rack.go
index 9959b60b8..ecabeceb4 100644
--- a/pkg/tcpip/transport/tcp/rack.go
+++ b/pkg/tcpip/transport/tcp/rack.go
@@ -263,7 +263,10 @@ func (s *sender) probeTimerExpired() tcpip.Error {
}
}
- s.postXmit(dataSent)
+ // Whether or not the probe was sent, the sender must arm the resend timer,
+ // not the probe timer. This ensures that the sender does not send repeated,
+ // back-to-back tail loss probes.
+ s.postXmit(dataSent, false /* shouldScheduleProbe */)
return nil
}
@@ -477,7 +480,5 @@ func (rc *rackControl) DoRecovery(_ *segment, fastRetransmit bool) {
snd.sendSegment(seg)
}
- // Rearm the RTO.
- snd.resendTimer.enable(snd.rto)
- snd.postXmit(dataSent)
+ snd.postXmit(dataSent, true /* shouldScheduleProbe */)
}
diff --git a/pkg/tcpip/transport/tcp/sack_recovery.go b/pkg/tcpip/transport/tcp/sack_recovery.go
index 7e813fa96..9d406b0bc 100644
--- a/pkg/tcpip/transport/tcp/sack_recovery.go
+++ b/pkg/tcpip/transport/tcp/sack_recovery.go
@@ -116,5 +116,5 @@ func (sr *sackRecovery) DoRecovery(rcvdSeg *segment, fastRetransmit bool) {
// RFC 6675 recovery algorithm step C 1-5.
end := snd.sndUna.Add(snd.sndWnd)
dataSent := sr.handleSACKRecovery(snd.maxPayloadSize, end)
- snd.postXmit(dataSent)
+ snd.postXmit(dataSent, true /* shouldScheduleProbe */)
}
diff --git a/pkg/tcpip/transport/tcp/snd.go b/pkg/tcpip/transport/tcp/snd.go
index 7911e6b85..83c8deb0e 100644
--- a/pkg/tcpip/transport/tcp/snd.go
+++ b/pkg/tcpip/transport/tcp/snd.go
@@ -966,7 +966,7 @@ func (s *sender) disableZeroWindowProbing() {
s.resendTimer.disable()
}
-func (s *sender) postXmit(dataSent bool) {
+func (s *sender) postXmit(dataSent bool, shouldScheduleProbe bool) {
if dataSent {
// We sent data, so we should stop the keepalive timer to ensure
// that no keepalives are sent while there is pending data.
@@ -980,13 +980,22 @@ func (s *sender) postXmit(dataSent bool) {
s.enableZeroWindowProbing()
}
- // Enable the timer if we have pending data and it's not enabled yet.
- if !s.resendTimer.enabled() && s.sndUna != s.sndNxt {
- s.resendTimer.enable(s.rto)
- }
// If we have no more pending data, start the keepalive timer.
if s.sndUna == s.sndNxt {
s.ep.resetKeepaliveTimer(false)
+ } else {
+ // Enable timers if we have pending data.
+ if shouldScheduleProbe && s.shouldSchedulePTO() {
+ // Schedule PTO after transmitting new data that wasn't itself a TLP.
+ s.schedulePTO()
+ } else if !s.resendTimer.enabled() {
+ s.probeTimer.disable()
+ if s.outstanding > 0 {
+ // Enable the resend timer if it's not enabled yet and there is
+ // outstanding data.
+ s.resendTimer.enable(s.rto)
+ }
+ }
}
}
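The hunk above arms the probe timer only when the caller allows it and s.shouldSchedulePTO() agrees; otherwise it keeps the probe and resend timers mutually exclusive. Per the draft, a loss probe should be scheduled only while SACK is in use, the scoreboard holds no SACKed sequences, and the sender is not already in loss recovery. A toy sketch of those conditions follows; the connState type and its fields are illustrative stand-ins, not the gVisor sender state.

package main

import "fmt"

// connState stands in for the sender state consulted before arming a PTO.
type connState struct {
	rackEnabled    bool // RACK loss detection is enabled
	sackPermitted  bool // the connection negotiated SACK
	inLossRecovery bool // already repairing losses (RTO or SACK recovery)
	sackedBytes    int  // bytes currently marked SACKed in the scoreboard
}

// shouldSchedulePTO mirrors the draft's scheduling preconditions.
func shouldSchedulePTO(c connState) bool {
	return c.rackEnabled && c.sackPermitted && !c.inLossRecovery && c.sackedBytes == 0
}

func main() {
	open := connState{rackEnabled: true, sackPermitted: true}
	fmt.Println(shouldSchedulePTO(open)) // true: idle tail, probe allowed

	holey := open
	holey.sackedBytes = 1460
	fmt.Println(shouldSchedulePTO(holey)) // false: a hole is already SACKed
}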
@@ -1029,7 +1038,7 @@ func (s *sender) sendData() {
s.writeNext = seg.Next()
}
- s.postXmit(dataSent)
+ s.postXmit(dataSent, true /* shouldScheduleProbe */)
}
func (s *sender) enterRecovery() {
@@ -1052,6 +1061,10 @@ func (s *sender) enterRecovery() {
s.ep.stack.Stats().TCP.SACKRecovery.Increment()
// Set TLPRxtOut to false according to
// https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.6.1.
+ if s.rc.tlpRxtOut {
+ // The tail loss probe triggered recovery.
+ s.ep.stack.Stats().TCP.TLPRecovery.Increment()
+ }
s.rc.tlpRxtOut = false
return
}
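The added lines implement the episode accounting from section 7.6.1: if a probe retransmission is still unacknowledged (tlpRxtOut) when the sender falls into loss recovery, the probe is credited with surfacing the loss, and the flag is cleared either way. A toy sketch of that bookkeeping, with illustrative field names:

package main

import "fmt"

// tlpState tracks a single tail loss probe episode.
type tlpState struct {
	tlpRxtOut    bool   // a TLP retransmission is in flight
	tlpRecovered uint64 // counter analogous to stats.TCP.TLPRecovery
}

// enterRecovery credits the probe when it triggered the recovery and
// resets the episode, per section 7.6.1 of the draft.
func (t *tlpState) enterRecovery() {
	if t.tlpRxtOut {
		t.tlpRecovered++
	}
	t.tlpRxtOut = false
}

func main() {
	t := tlpState{tlpRxtOut: true}
	t.enterRecovery()
	fmt.Println(t.tlpRecovered) // 1
}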
@@ -1415,9 +1428,16 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) {
s.updateRTO(elapsed)
}
- // When an ack is received we must rearm the timer.
- // RFC 6298 5.3
- s.resendTimer.enable(s.rto)
+ if s.shouldSchedulePTO() {
+ // Schedule PTO upon receiving an ACK that cumulatively acknowledges data.
+ // See https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.1.
+ s.schedulePTO()
+ } else {
+ // When an ack is received we must rearm the timer.
+ // RFC 6298 5.3
+ s.probeTimer.disable()
+ s.resendTimer.enable(s.rto)
+ }
// Remove all acknowledged data from the write list.
acked := s.sndUna.Size(ack)
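This last snd.go hunk replaces the unconditional RFC 6298 rearm with a choice: an ACK that qualifies for a PTO arms the probe timer, anything else falls back to the retransmission timer, and the timer that loses is disabled. A self-contained toy of that arbitration; the timer type here is a stand-in, not gVisor's timer.

package main

import (
	"fmt"
	"time"
)

// timer is a toy stand-in for a retransmission or probe timer.
type timer struct {
	deadline time.Time
	armed    bool
}

func (t *timer) enable(d time.Duration) {
	t.deadline = time.Now().Add(d)
	t.armed = true
}

func (t *timer) disable() { t.armed = false }

// rearmOnAck arms exactly one of the two timers: the probe timer when a
// PTO should be scheduled, otherwise the resend timer per RFC 6298 5.3.
func rearmOnAck(probe, resend *timer, shouldPTO bool, pto, rto time.Duration) {
	if shouldPTO {
		resend.disable()
		probe.enable(pto)
		return
	}
	probe.disable()
	resend.enable(rto)
}

func main() {
	var probe, resend timer
	rearmOnAck(&probe, &resend, false, 100*time.Millisecond, time.Second)
	fmt.Println(probe.armed, resend.armed) // false true
}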
diff --git a/pkg/tcpip/transport/tcp/tcp_rack_test.go b/pkg/tcpip/transport/tcp/tcp_rack_test.go
index 92f0f6cee..3c13fc8a3 100644
--- a/pkg/tcpip/transport/tcp/tcp_rack_test.go
+++ b/pkg/tcpip/transport/tcp/tcp_rack_test.go
@@ -159,7 +159,7 @@ func TestRACKDetectReorder(t *testing.T) {
<-probeDone
}
-func sendAndReceive(t *testing.T, c *context.Context, numPackets int, enableRACK bool) []byte {
+func sendAndReceiveWithSACK(t *testing.T, c *context.Context, numPackets int, enableRACK bool) []byte {
setStackSACKPermitted(t, c, true)
if enableRACK {
setStackRACKPermitted(t, c)
@@ -213,6 +213,291 @@ func addDSACKSeenCheckerProbe(t *testing.T, c *context.Context, numACK int, prob
})
}
+// TestRACKTLPRecovery tests that RACK sends a tail loss probe (TLP) in the
+// case of a tail loss. This simulates a situation where the TLP elicits a
+// SACK that exposes the holes, and the sender is able to retransmit the rest.
+func TestRACKTLPRecovery(t *testing.T) {
+ c := context.New(t, uint32(mtu))
+ defer c.Cleanup()
+
+ // Send 8 packets.
+ numPackets := 8
+ data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
+
+ // Packets [6-8] are lost. Send cumulative ACK for [1-5].
+ seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
+ bytesRead := 5 * maxPayload
+ c.SendAck(seq, bytesRead)
+
+ // PTO should fire and send #8 packet as a TLP.
+ c.ReceiveAndCheckPacketWithOptions(data, 7*maxPayload, maxPayload, tsOptionSize)
+ var info tcpip.TCPInfoOption
+ if err := c.EP.GetSockOpt(&info); err != nil {
+ t.Fatalf("GetSockOpt failed: %v", err)
+ }
+
+ // Send the SACK after RTT because the RACK RFC states that if the ACK for a
+ // retransmission arrives before the smoothed RTT has elapsed, the sender
+ // should not update RACK state, as it could be a spurious inference.
+ time.Sleep(info.RTT)
+
+ // Okay, let the sender know we got #8 using a SACK block.
+ eighthPStart := c.IRS.Add(1 + seqnum.Size(7*maxPayload))
+ eighthPEnd := eighthPStart.Add(maxPayload)
+ c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{eighthPStart, eighthPEnd}})
+
+ // The sender should enter RACK-based loss recovery and send #6 and #7 one
+ // after another.
+ c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
+ bytesRead += maxPayload
+ c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
+ bytesRead += 2 * maxPayload
+ c.SendAck(seq, bytesRead)
+
+ metricPollFn := func() error {
+ tcpStats := c.Stack().Stats().TCP
+ stats := []struct {
+ stat *tcpip.StatCounter
+ name string
+ want uint64
+ }{
+ // One fast retransmit after the SACK.
+ {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 1},
+ // Recovery should be SACK recovery.
+ {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 1},
+ // Packets 6, 7 and 8 were retransmitted.
+ {tcpStats.Retransmits, "stats.TCP.Retransmits", 3},
+ // TLP recovery should have been detected.
+ {tcpStats.TLPRecovery, "stats.TCP.TLPRecovery", 1},
+ // No RTOs should have occurred.
+ {tcpStats.Timeouts, "stats.TCP.Timeouts", 0},
+ }
+ for _, s := range stats {
+ if got, want := s.stat.Value(), s.want; got != want {
+ return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
+ }
+ }
+ return nil
+ }
+ if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
+ t.Error(err)
+ }
+}
+
+// TestRACKTLPFallbackRTO tests that RACK sends a tail loss probe (TLP) in the
+// case of a tail loss. This simulates a situation where either the TLP or its
+// ACK is lost. The sender should retransmit when RTO fires.
+func TestRACKTLPFallbackRTO(t *testing.T) {
+ c := context.New(t, uint32(mtu))
+ defer c.Cleanup()
+
+ // Send 8 packets.
+ numPackets := 8
+ data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
+
+ // Packets [6-8] are lost. Send cumulative ACK for [1-5].
+ seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
+ bytesRead := 5 * maxPayload
+ c.SendAck(seq, bytesRead)
+
+ // PTO should fire and send #8 packet as a TLP.
+ c.ReceiveAndCheckPacketWithOptions(data, 7*maxPayload, maxPayload, tsOptionSize)
+
+ // Either the TLP or the ACK the receiver sent with SACK blocks was lost.
+
+ // Confirm that RTO fires and retransmits packet #6.
+ c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
+
+ metricPollFn := func() error {
+ tcpStats := c.Stack().Stats().TCP
+ stats := []struct {
+ stat *tcpip.StatCounter
+ name string
+ want uint64
+ }{
+ // No fast retransmits happened.
+ {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 0},
+ // No SACK recovery happened.
+ {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 0},
+ // TLP was unsuccessful.
+ {tcpStats.TLPRecovery, "stats.TCP.TLPRecovery", 0},
+ // RTO should have fired.
+ {tcpStats.Timeouts, "stats.TCP.Timeouts", 1},
+ }
+ for _, s := range stats {
+ if got, want := s.stat.Value(), s.want; got != want {
+ return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
+ }
+ }
+ return nil
+ }
+ if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
+ t.Error(err)
+ }
+}
+
+// TestNoTLPRecoveryOnDSACK tests the scenario where the sender suspects a
+// tail loss and sends a TLP. Everything is received and acked. The probe
+// segment is DSACKed. No fast recovery should be triggered in this case.
+func TestNoTLPRecoveryOnDSACK(t *testing.T) {
+ c := context.New(t, uint32(mtu))
+ defer c.Cleanup()
+
+ // Send 8 packets.
+ numPackets := 8
+ data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
+
+ // Packets [1-5] are received first. [6-8] took a detour and will take a
+ // while to arrive. Ack [1-5].
+ seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
+ bytesRead := 5 * maxPayload
+ c.SendAck(seq, bytesRead)
+
+ // The tail loss probe (#8 packet) is received.
+ c.ReceiveAndCheckPacketWithOptions(data, 7*maxPayload, maxPayload, tsOptionSize)
+
+ // Now that all 8 packets, plus the duplicate 8th packet, have been
+ // received, send the ACK.
+ bytesRead += 3 * maxPayload
+ eighthPStart := c.IRS.Add(1 + seqnum.Size(7*maxPayload))
+ eighthPEnd := eighthPStart.Add(maxPayload)
+ c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{eighthPStart, eighthPEnd}})
+
+ // Wait for RTO and make sure that nothing else is received.
+ var info tcpip.TCPInfoOption
+ if err := c.EP.GetSockOpt(&info); err != nil {
+ t.Fatalf("GetSockOpt failed: %v", err)
+ }
+ if p := c.GetPacketWithTimeout(info.RTO); p != nil {
+ t.Errorf("received an unexpected packet: %v", p)
+ }
+
+ metricPollFn := func() error {
+ tcpStats := c.Stack().Stats().TCP
+ stats := []struct {
+ stat *tcpip.StatCounter
+ name string
+ want uint64
+ }{
+ // Make sure no recovery was entered.
+ {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 0},
+ {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 0},
+ {tcpStats.TLPRecovery, "stats.TCP.TLPRecovery", 0},
+ // RTO should not have fired.
+ {tcpStats.Timeouts, "stats.TCP.Timeouts", 0},
+ // Only #8 was retransmitted.
+ {tcpStats.Retransmits, "stats.TCP.Retransmits", 1},
+ }
+ for _, s := range stats {
+ if got, want := s.stat.Value(), s.want; got != want {
+ return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
+ }
+ }
+ return nil
+ }
+ if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
+ t.Error(err)
+ }
+}
+
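TestNoTLPRecoveryOnDSACK above leans on the draft's DSACK handling: when an ACK carries a DSACK covering exactly the probe's retransmitted range, the original tail segment evidently arrived, so the sender treats the probe as a duplicate and stays out of recovery. The check below is a simplification for illustration; real SACK processing is more involved, and the types are not gVisor's.

package main

import "fmt"

// sackBlock is a simplified [start, end) sequence range.
type sackBlock struct {
	start, end int
}

// probeWasDuplicate reports whether a DSACK block covers exactly the tail
// loss probe's retransmitted range, meaning the original segment was
// delivered and no recovery is needed.
func probeWasDuplicate(dsack sackBlock, probeStart, probeEnd int) bool {
	return dsack.start == probeStart && dsack.end == probeEnd
}

func main() {
	// The probe retransmitted the 8th packet; the receiver DSACKs that range.
	fmt.Println(probeWasDuplicate(sackBlock{start: 7000, end: 8000}, 7000, 8000)) // true
}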
+// TestNoTLPOnSACK tests the scenario where the loss is not a pure tail loss,
+// because there are multiple SACK holes. In such a scenario, a TLP should
+// not be sent.
+func TestNoTLPOnSACK(t *testing.T) {
+ c := context.New(t, uint32(mtu))
+ defer c.Cleanup()
+
+ // Send 8 packets.
+ numPackets := 8
+ data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
+
+ // Packets [1-5] and #7 were received. #6 and #8 were dropped.
+ seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
+ bytesRead := 5 * maxPayload
+ seventhStart := c.IRS.Add(1 + seqnum.Size(6*maxPayload))
+ seventhEnd := seventhStart.Add(maxPayload)
+ c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{seventhStart, seventhEnd}})
+
+ // The sender should retransmit #6. If the sender were to send a TLP, then
+ // #8 would be received and fail this test.
+ c.ReceiveAndCheckPacketWithOptions(data, 5*maxPayload, maxPayload, tsOptionSize)
+
+ metricPollFn := func() error {
+ tcpStats := c.Stack().Stats().TCP
+ stats := []struct {
+ stat *tcpip.StatCounter
+ name string
+ want uint64
+ }{
+ // #6 was retransmitted due to SACK recovery.
+ {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 1},
+ {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 1},
+ {tcpStats.TLPRecovery, "stats.TCP.TLPRecovery", 0},
+ // RTO should not have fired.
+ {tcpStats.Timeouts, "stats.TCP.Timeouts", 0},
+ // Only #6 was retransmitted.
+ {tcpStats.Retransmits, "stats.TCP.Retransmits", 1},
+ }
+ for _, s := range stats {
+ if got, want := s.stat.Value(), s.want; got != want {
+ return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
+ }
+ }
+ return nil
+ }
+ if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
+ t.Error(err)
+ }
+}
+
+// TestRACKOnePacketTailLoss tests the trivial case of a tail loss of only one
+// packet. The probe itself should repair the loss without the sender having
+// to enter any recovery.
+func TestRACKOnePacketTailLoss(t *testing.T) {
+ c := context.New(t, uint32(mtu))
+ defer c.Cleanup()
+
+ // Send 3 packets.
+ numPackets := 3
+ data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
+
+ // Packets [1-2] are received. #3 is lost.
+ seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
+ bytesRead := 2 * maxPayload
+ c.SendAck(seq, bytesRead)
+
+ // PTO should fire and send #3 packet as a TLP.
+ c.ReceiveAndCheckPacketWithOptions(data, 2*maxPayload, maxPayload, tsOptionSize)
+ bytesRead += maxPayload
+ c.SendAck(seq, bytesRead)
+
+ metricPollFn := func() error {
+ tcpStats := c.Stack().Stats().TCP
+ stats := []struct {
+ stat *tcpip.StatCounter
+ name string
+ want uint64
+ }{
+ // #3 was retransmitted as a TLP.
+ {tcpStats.FastRetransmit, "stats.TCP.FastRetransmit", 0},
+ {tcpStats.SACKRecovery, "stats.TCP.SACKRecovery", 0},
+ {tcpStats.TLPRecovery, "stats.TCP.TLPRecovery", 0},
+ // RTO should not have fired.
+ {tcpStats.Timeouts, "stats.TCP.Timeouts", 0},
+ // Only #3 was retransmitted.
+ {tcpStats.Retransmits, "stats.TCP.Retransmits", 1},
+ }
+ for _, s := range stats {
+ if got, want := s.stat.Value(), s.want; got != want {
+ return fmt.Errorf("got %s.Value() = %d, want = %d", s.name, got, want)
+ }
+ }
+ return nil
+ }
+ if err := testutil.Poll(metricPollFn, 1*time.Second); err != nil {
+ t.Error(err)
+ }
+}
+
// TestRACKDetectDSACK tests that RACK detects DSACK with duplicate segments.
// See: https://tools.ietf.org/html/rfc2883#section-4.1.1.
func TestRACKDetectDSACK(t *testing.T) {
@@ -224,22 +509,24 @@ func TestRACKDetectDSACK(t *testing.T) {
addDSACKSeenCheckerProbe(t, c, ackNumToVerify, probeDone)
numPackets := 8
- data := sendAndReceive(t, c, numPackets, true /* enableRACK */)
+ data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
- // Cumulative ACK for [1-5] packets.
+ // Cumulative ACK for [1-5] packets and SACK for #8 packet (to prevent TLP).
seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
bytesRead := 5 * maxPayload
- c.SendAck(seq, bytesRead)
+ eighthPStart := c.IRS.Add(1 + seqnum.Size(7*maxPayload))
+ eighthPEnd := eighthPStart.Add(maxPayload)
+ c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{eighthPStart, eighthPEnd}})
- // Expect retransmission of #6 packet.
+ // Expect retransmission of #6 packet after RTO expires.
c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
// Send DSACK block for #6 packet indicating both
// initial and retransmitted packet are received and
- // packets [1-7] are received.
- start := c.IRS.Add(seqnum.Size(bytesRead))
+ // packets [1-8] are received.
+ start := c.IRS.Add(1 + seqnum.Size(bytesRead))
end := start.Add(maxPayload)
- bytesRead += 2 * maxPayload
+ bytesRead += 3 * maxPayload
c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start, end}})
// Wait for the probe function to finish processing the
@@ -265,12 +552,14 @@ func TestRACKDetectDSACKWithOutOfOrder(t *testing.T) {
addDSACKSeenCheckerProbe(t, c, ackNumToVerify, probeDone)
numPackets := 10
- data := sendAndReceive(t, c, numPackets, true /* enableRACK */)
+ data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
- // Cumulative ACK for [1-5] packets.
+ // Cumulative ACK for [1-5] packets and SACK for #7 packet (to prevent TLP).
seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
bytesRead := 5 * maxPayload
- c.SendAck(seq, bytesRead)
+ seventhPStart := c.IRS.Add(1 + seqnum.Size(6*maxPayload))
+ seventhPEnd := seventhPStart.Add(maxPayload)
+ c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{seventhPStart, seventhPEnd}})
// Expect retransmission of #6 packet.
c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
@@ -278,12 +567,12 @@ func TestRACKDetectDSACKWithOutOfOrder(t *testing.T) {
// Send DSACK block for #6 packet indicating both
// initial and retransmitted packet are received and
// packets [1-7] are received.
- start := c.IRS.Add(seqnum.Size(bytesRead))
+ start := c.IRS.Add(1 + seqnum.Size(bytesRead))
end := start.Add(maxPayload)
bytesRead += 2 * maxPayload
- // Send DSACK block for #6 along with out of
- // order #9 packet is received.
- start1 := c.IRS.Add(seqnum.Size(bytesRead) + maxPayload)
+ // Send DSACK block for #6 along with SACK for out of
+ // order #9 packet.
+ start1 := c.IRS.Add(1 + seqnum.Size(bytesRead) + maxPayload)
end1 := start1.Add(maxPayload)
c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start, end}, {start1, end1}})
@@ -310,7 +599,7 @@ func TestRACKDetectDSACKWithOutOfOrderDup(t *testing.T) {
addDSACKSeenCheckerProbe(t, c, ackNumToVerify, probeDone)
numPackets := 10
- sendAndReceive(t, c, numPackets, true /* enableRACK */)
+ sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
// ACK [1-5] packets.
seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
@@ -354,7 +643,7 @@ func TestRACKDetectDSACKSingleDup(t *testing.T) {
addDSACKSeenCheckerProbe(t, c, ackNumToVerify, probeDone)
numPackets := 4
- data := sendAndReceive(t, c, numPackets, true /* enableRACK */)
+ data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
// Send ACK for #1 packet.
bytesRead := maxPayload
@@ -403,7 +692,7 @@ func TestRACKDetectDSACKDupWithCumulativeACK(t *testing.T) {
addDSACKSeenCheckerProbe(t, c, ackNumToVerify, probeDone)
numPackets := 6
- data := sendAndReceive(t, c, numPackets, true /* enableRACK */)
+ data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
// Send ACK for #1 packet.
bytesRead := maxPayload
@@ -457,7 +746,7 @@ func TestRACKDetectDSACKDup(t *testing.T) {
addDSACKSeenCheckerProbe(t, c, ackNumToVerify, probeDone)
numPackets := 7
- data := sendAndReceive(t, c, numPackets, true /* enableRACK */)
+ data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
// Send ACK for #1 packet.
bytesRead := maxPayload
@@ -525,12 +814,14 @@ func TestRACKWithInvalidDSACKBlock(t *testing.T) {
})
numPackets := 10
- data := sendAndReceive(t, c, numPackets, true /* enableRACK */)
+ data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
- // Cumulative ACK for [1-5] packets.
+ // Cumulative ACK for [1-5] packets and SACK for #7 packet (to prevent TLP).
seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
bytesRead := 5 * maxPayload
- c.SendAck(seq, bytesRead)
+ seventhPStart := c.IRS.Add(1 + seqnum.Size(6*maxPayload))
+ seventhPEnd := seventhPStart.Add(maxPayload)
+ c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{seventhPStart, seventhPEnd}})
// Expect retransmission of #6 packet.
c.ReceiveAndCheckPacketWithOptions(data, bytesRead, maxPayload, tsOptionSize)
@@ -538,12 +829,12 @@ func TestRACKWithInvalidDSACKBlock(t *testing.T) {
// Send DSACK block for #6 packet indicating both
// initial and retransmitted packet are received and
// packets [1-7] are received.
- start := c.IRS.Add(seqnum.Size(bytesRead))
+ start := c.IRS.Add(1 + seqnum.Size(bytesRead))
end := start.Add(maxPayload)
bytesRead += 2 * maxPayload
- // Send DSACK block as second block.
- start1 := c.IRS.Add(seqnum.Size(bytesRead) + maxPayload)
+ // Send DSACK block as second block. The first block is a SACK for #9 packet.
+ start1 := c.IRS.Add(1 + seqnum.Size(bytesRead) + maxPayload)
end1 := start1.Add(maxPayload)
c.SendAckWithSACK(seq, bytesRead, []header.SACKBlock{{start1, end1}, {start, end}})
@@ -588,7 +879,7 @@ func TestRACKCheckReorderWindow(t *testing.T) {
addReorderWindowCheckerProbe(c, ackNumToVerify, probeDone)
const numPackets = 7
- sendAndReceive(t, c, numPackets, true /* enableRACK */)
+ sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
// Send ACK for #1 packet.
bytesRead := maxPayload
@@ -617,7 +908,7 @@ func TestRACKWithDuplicateACK(t *testing.T) {
defer c.Cleanup()
const numPackets = 4
- data := sendAndReceive(t, c, numPackets, true /* enableRACK */)
+ data := sendAndReceiveWithSACK(t, c, numPackets, true /* enableRACK */)
// Send three duplicate ACKs to trigger fast recovery. The first
// segment is considered as lost and will be retransmitted after
@@ -680,7 +971,7 @@ func TestRACKUpdateSackedOut(t *testing.T) {
ackNum++
})
- sendAndReceive(t, c, 8, true /* enableRACK */)
+ sendAndReceiveWithSACK(t, c, 8, true /* enableRACK */)
// ACK for [3-5] packets.
seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
diff --git a/pkg/tcpip/transport/tcp/tcp_sack_test.go b/pkg/tcpip/transport/tcp/tcp_sack_test.go
index f15d0a2d1..81f800cad 100644
--- a/pkg/tcpip/transport/tcp/tcp_sack_test.go
+++ b/pkg/tcpip/transport/tcp/tcp_sack_test.go
@@ -409,6 +409,7 @@ func TestSACKRecovery(t *testing.T) {
}
// Do slow start for a few iterations.
+ seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)
expected := tcp.InitialCwnd
bytesRead := 0
for i := 0; i < iterations; i++ {
@@ -416,7 +417,7 @@ func TestSACKRecovery(t *testing.T) {
if i > 0 {
// Acknowledge all the data received so far if not on
// first iteration.
- c.SendAck(790, bytesRead)
+ c.SendAck(seq, bytesRead)
}
// Read all packets expected on this iteration. Don't
@@ -438,7 +439,7 @@ func TestSACKRecovery(t *testing.T) {
start := c.IRS.Add(seqnum.Size(rtxOffset) + 30 + 1)
end := start.Add(10)
for i := 0; i < 3; i++ {
- c.SendAckWithSACK(790, rtxOffset, []header.SACKBlock{{start, end}})
+ c.SendAckWithSACK(seq, rtxOffset, []header.SACKBlock{{start, end}})
end = end.Add(10)
}
@@ -475,7 +476,7 @@ func TestSACKRecovery(t *testing.T) {
// the cwnd at this point after entering recovery should be half of the
// outstanding number of packets in flight.
for i := 0; i < 7; i++ {
- c.SendAckWithSACK(790, rtxOffset, []header.SACKBlock{{start, end}})
+ c.SendAckWithSACK(seq, rtxOffset, []header.SACKBlock{{start, end}})
end = end.Add(10)
}
@@ -497,7 +498,7 @@ func TestSACKRecovery(t *testing.T) {
// segment is retransmitted per ACK.
start = c.IRS.Add(seqnum.Size(rtxOffset) + 30 + 1)
end = start.Add(60)
- c.SendAckWithSACK(790, rtxOffset, []header.SACKBlock{{start, end}})
+ c.SendAckWithSACK(seq, rtxOffset, []header.SACKBlock{{start, end}})
// At this point, we acked expected/2 packets and we SACKED 6 packets and
// 3 segments were considered lost due to the SACK block we sent.
@@ -557,7 +558,7 @@ func TestSACKRecovery(t *testing.T) {
c.CheckNoPacketTimeout("More packets received than expected during recovery after partial ack for this cwnd.", 50*time.Millisecond)
// Acknowledge all pending data to recover point.
- c.SendAck(790, recover)
+ c.SendAck(seq, recover)
// At this point, the cwnd should reset to expected/2 and there are 9
// packets outstanding.
@@ -579,7 +580,7 @@ func TestSACKRecovery(t *testing.T) {
c.CheckNoPacketTimeout(fmt.Sprintf("More packets received(after deflation) than expected %d for this cwnd and iteration: %d.", expected, i), 50*time.Millisecond)
// Acknowledge all the data received so far.
- c.SendAck(790, bytesRead)
+ c.SendAck(seq, bytesRead)
// In congestion avoidance, the packet trains increase by 1 in
// each iteration.
@@ -603,7 +604,7 @@ func TestRecoveryEntry(t *testing.T) {
defer c.Cleanup()
numPackets := 5
- data := sendAndReceive(t, c, numPackets, false /* enableRACK */)
+ data := sendAndReceiveWithSACK(t, c, numPackets, false /* enableRACK */)
// Ack #1 packet.
seq := seqnum.Value(context.TestInitialSequenceNumber).Add(1)