summaryrefslogtreecommitdiffhomepage
path: root/pkg/tcpip/transport/tcp/snd.go
blob: cf2e8dcd8668d350423bc4ac336e3bbd3ad269c8 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tcp

import (
	"fmt"
	"math"
	"sort"
	"time"

	"gvisor.dev/gvisor/pkg/sleep"
	"gvisor.dev/gvisor/pkg/sync"
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/buffer"
	"gvisor.dev/gvisor/pkg/tcpip/header"
	"gvisor.dev/gvisor/pkg/tcpip/seqnum"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

const (
	// MinRTO is the minimum allowed value for the retransmit timeout.
	MinRTO = 200 * time.Millisecond

	// MaxRTO is the maximum allowed value for the retransmit timeout.
	MaxRTO = 120 * time.Second

	// InitialCwnd is the initial congestion window, in packets.
	InitialCwnd = 10

	// nDupAckThreshold is the number of duplicate ACKs required
	// before fast-retransmit is entered.
	nDupAckThreshold = 3

	// MaxRetries is the maximum number of probe retries sender does
	// before timing out the connection.
	// Matches the Linux default TCP_RETR2, net.ipv4.tcp_retries2.
	MaxRetries = 15
)

// congestionControl is an interface that must be implemented by any supported
// congestion control algorithm.
type congestionControl interface {
	// HandleLossDetected is invoked when the loss is detected by RACK or
	// sender.dupAckCount >= nDupAckThreshold just before entering fast
	// retransmit.
	HandleLossDetected()

	// HandleRTOExpired is invoked when the retransmit timer expires.
	HandleRTOExpired()

	// Update is invoked when processing inbound acks. It's passed the
	// number of packets that were acked by the most recent cumulative
	// acknowledgement.
	Update(packetsAcked int)

	// PostRecovery is invoked when the sender is exiting a fast retransmit/
	// recovery phase. This provides congestion control algorithms a way
	// to adjust their state when exiting recovery.
	PostRecovery()
}

// lossRecovery is an interface that must be implemented by any supported
// loss recovery algorithm.
type lossRecovery interface {
	// DoRecovery is invoked when loss is detected and segments need
	// to be retransmitted. The cumulative or selective ACK is passed along
	// with the flag which identifies whether the connection entered fast
	// retransmit with this ACK and to retransmit the first unacknowledged
	// segment.
	DoRecovery(rcvdSeg *segment, fastRetransmit bool)
}

// sender holds the state necessary to send TCP segments.
//
// +stateify savable
type sender struct {
	stack.TCPSenderState
	ep *endpoint

	// lr is the loss recovery algorithm used by the sender.
	lr lossRecovery

	// firstRetransmittedSegXmitTime is the original transmit time of
	// the first segment that was retransmitted due to RTO expiration.
	firstRetransmittedSegXmitTime time.Time `state:".(unixTime)"`

	// zeroWindowProbing is set if the sender is currently probing
	// for zero receive window.
	zeroWindowProbing bool `state:"nosave"`

	// unackZeroWindowProbes is the number of unacknowledged zero
	// window probes.
	unackZeroWindowProbes uint32 `state:"nosave"`

	// writeNext is the next segment in writeList to be sent out;
	// writeList holds the queue of segments to (re)transmit.
	writeNext   *segment
	writeList   segmentList
	resendTimer timer       `state:"nosave"`
	resendWaker sleep.Waker `state:"nosave"`

	// rtt.TCPRTTState.SRTT and rtt.TCPRTTState.RTTVar are the "smoothed
	// round-trip time", and "round-trip time variation", as defined in
	// section 2 of RFC 6298.
	rtt rtt

	// minRTO is the minimum permitted value for sender.rto.
	minRTO time.Duration

	// maxRTO is the maximum permitted value for sender.rto.
	maxRTO time.Duration

	// maxRetries is the maximum permitted retransmissions.
	maxRetries uint32

	// gso is set if generic segmentation offload is enabled.
	gso bool

	// state is the current state of congestion control for this endpoint.
	state tcpip.CongestionControlState

	// cc is the congestion control algorithm in use for this sender.
	cc congestionControl

	// rc has the fields needed for implementing RACK loss detection
	// algorithm.
	rc rackControl

	// reorderTimer is the timer used to retransmit the segments after RACK
	// detects them as lost.
	reorderTimer timer       `state:"nosave"`
	reorderWaker sleep.Waker `state:"nosave"`

	// probeTimer and probeWaker are used to schedule PTO for RACK TLP algorithm.
	probeTimer timer       `state:"nosave"`
	probeWaker sleep.Waker `state:"nosave"`
}

// rtt is a synchronization wrapper used to appease stateify. See the comment
// in sender, where it is used.
//
// The embedded mutex guards the embedded TCPRTTState fields (SRTT, RTTVar,
// SRTTInited) which are read/written from updateRTO.
//
// +stateify savable
type rtt struct {
	sync.Mutex `state:"nosave"`

	stack.TCPRTTState
}

// newSender creates a sender for the given endpoint, initialized from the
// handshake parameters: iss/irs are the initial send/receive sequence numbers,
// sndWnd is the peer's advertised window, mss the negotiated maximum segment
// size and sndWndScale the peer's window scale (negative means no scaling).
// It also initializes congestion control, loss recovery, RACK state, the
// SACK scoreboard and the stack-wide RTO/retry limits.
func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint16, sndWndScale int) *sender {
	// The sender MUST reduce the TCP data length to account for any IP or
	// TCP options that it is including in the packets that it sends.
	// See: https://tools.ietf.org/html/rfc6691#section-2
	maxPayloadSize := int(mss) - ep.maxOptionSize()

	s := &sender{
		ep: ep,
		TCPSenderState: stack.TCPSenderState{
			SndWnd:           sndWnd,
			SndUna:           iss + 1,
			SndNxt:           iss + 1,
			RTTMeasureSeqNum: iss + 1,
			LastSendTime:     time.Now(),
			MaxPayloadSize:   maxPayloadSize,
			MaxSentAck:       irs + 1,
			FastRecovery: stack.TCPFastRecoveryState{
				// See: https://tools.ietf.org/html/rfc6582#section-3.2 Step 1.
				Last:      iss,
				HighRxt:   iss,
				RescueRxt: iss,
			},
			// Initial RTO before any RTT sample, per RFC 6298.
			RTO: 1 * time.Second,
		},
		gso: ep.gso != nil,
	}

	if s.gso {
		s.ep.gso.MSS = uint16(maxPayloadSize)
	}

	s.cc = s.initCongestionControl(ep.cc)
	s.lr = s.initLossRecovery()
	s.rc.init(s, iss)

	// A negative sndWndScale means that no scaling is in use, otherwise we
	// store the scaling value.
	if sndWndScale > 0 {
		s.SndWndScale = uint8(sndWndScale)
	}

	s.resendTimer.init(&s.resendWaker)
	s.reorderTimer.init(&s.reorderWaker)
	s.probeTimer.init(&s.probeWaker)

	s.updateMaxPayloadSize(int(ep.route.MTU()), 0)

	// Initialize SACK Scoreboard after updating max payload size as we use
	// the maxPayloadSize as the smss when determining if a segment is lost
	// etc.
	s.ep.scoreboard = NewSACKScoreboard(uint16(s.MaxPayloadSize), iss)

	// Get Stack wide config. A failure to read these options indicates a
	// programming error, hence the panics.
	var minRTO tcpip.TCPMinRTOOption
	if err := ep.stack.TransportProtocolOption(ProtocolNumber, &minRTO); err != nil {
		panic(fmt.Sprintf("unable to get minRTO from stack: %s", err))
	}
	s.minRTO = time.Duration(minRTO)

	var maxRTO tcpip.TCPMaxRTOOption
	if err := ep.stack.TransportProtocolOption(ProtocolNumber, &maxRTO); err != nil {
		panic(fmt.Sprintf("unable to get maxRTO from stack: %s", err))
	}
	s.maxRTO = time.Duration(maxRTO)

	var maxRetries tcpip.TCPMaxRetriesOption
	if err := ep.stack.TransportProtocolOption(ProtocolNumber, &maxRetries); err != nil {
		panic(fmt.Sprintf("unable to get maxRetries from stack: %s", err))
	}
	s.maxRetries = uint32(maxRetries)

	return s
}

// initCongestionControl initializes the specified congestion control module
// and returns a handle to it. It also seeds SndCwnd with the initial
// congestion window and Ssthresh with an effectively-unbounded value.
func (s *sender) initCongestionControl(congestionControlName tcpip.CongestionControlOption) congestionControl {
	s.SndCwnd = InitialCwnd
	// The slow-start threshold starts at the largest value representable
	// by a platform int, i.e. effectively no threshold.
	s.Ssthresh = int(^uint(0) >> 1)

	// Reno is the fallback for any unrecognized algorithm name.
	if congestionControlName == ccCubic {
		return newCubicCC(s)
	}
	return newRenoCC(s)
}

// initLossRecovery initiates the loss recovery algorithm for the sender:
// SACK-based recovery when the peer negotiated SACK, classic Reno recovery
// otherwise.
func (s *sender) initLossRecovery() lossRecovery {
	if !s.ep.SACKPermitted {
		return newRenoRecovery(s)
	}
	return newSACKRecovery(s)
}

// updateMaxPayloadSize updates the maximum payload size based on the given
// MTU. If this is in response to "packet too big" control packets (indicated
// by the count argument), it also reduces the number of outstanding packets and
// attempts to retransmit the first packet above the MTU size.
func (s *sender) updateMaxPayloadSize(mtu, count int) {
	// Subtract the fixed TCP header and any options in use from the MTU to
	// get the usable payload size.
	m := mtu - header.TCPMinimumSize

	m -= s.ep.maxOptionSize()

	// We don't adjust up for now.
	if m >= s.MaxPayloadSize {
		return
	}

	// Make sure we can transmit at least one byte.
	if m <= 0 {
		m = 1
	}

	oldMSS := s.MaxPayloadSize
	s.MaxPayloadSize = m
	if s.gso {
		s.ep.gso.MSS = uint16(m)
	}

	if count == 0 {
		// updateMaxPayloadSize is also called when the sender is created,
		// and there is no data to send in such cases. Return immediately.
		return
	}

	// Update the scoreboard's smss to reflect the new lowered
	// maxPayloadSize.
	s.ep.scoreboard.smss = uint16(m)

	// The "packet too big" notifications mean those packets never made it;
	// drop them from the outstanding count (clamped at zero).
	s.Outstanding -= count
	if s.Outstanding < 0 {
		s.Outstanding = 0
	}

	// Rewind writeNext to the first segment exceeding the MTU. Do nothing
	// if it is already before such a packet.
	nextSeg := s.writeNext
	for seg := s.writeList.Front(); seg != nil; seg = seg.Next() {
		if seg == s.writeNext {
			// We got to writeNext before we could find a segment
			// exceeding the MTU.
			break
		}

		if nextSeg == s.writeNext && seg.data.Size() > m {
			// We found a segment exceeding the MTU. Rewind
			// writeNext and try to retransmit it.
			nextSeg = seg
		}

		if s.ep.SACKPermitted && s.ep.scoreboard.IsSACKED(seg.sackBlock()) {
			// Update sackedOut for new maximum payload size: the
			// same byte range now spans a different packet count.
			s.SackedOut -= s.pCount(seg, oldMSS)
			s.SackedOut += s.pCount(seg, s.MaxPayloadSize)
		}
	}

	// Since we likely reduced the number of outstanding packets, we may be
	// ready to send some more.
	s.writeNext = nextSeg
	s.sendData()
}

// sendAck sends an ACK segment carrying no payload, acknowledging up to the
// current SndNxt.
func (s *sender) sendAck() {
	s.sendSegmentFromView(buffer.VectorisedView{}, header.TCPFlagAck, s.SndNxt)
}

// updateRTO updates the retransmit timeout when a new round-trip time is
// available. This is done in accordance with section 2 of RFC 6298.
func (s *sender) updateRTO(rtt time.Duration) {
	s.rtt.Lock()
	if !s.rtt.TCPRTTState.SRTTInited {
		// First measurement: SRTT <- R, RTTVAR <- R/2 (RFC 6298 2.2).
		s.rtt.TCPRTTState.RTTVar = rtt / 2
		s.rtt.TCPRTTState.SRTT = rtt
		s.rtt.TCPRTTState.SRTTInited = true
	} else {
		// diff = |SRTT - R'|, used for the RTTVAR update.
		diff := s.rtt.TCPRTTState.SRTT - rtt
		if diff < 0 {
			diff = -diff
		}
		// Use RFC6298 standard algorithm to update TCPRTTState.RTTVar and TCPRTTState.SRTT when
		// no timestamps are available.
		if !s.ep.SendTSOk {
			s.rtt.TCPRTTState.RTTVar = (3*s.rtt.TCPRTTState.RTTVar + diff) / 4
			s.rtt.TCPRTTState.SRTT = (7*s.rtt.TCPRTTState.SRTT + rtt) / 8
		} else {
			// When we are taking RTT measurements of every ACK then
			// we need to use a modified method as specified in
			// https://tools.ietf.org/html/rfc7323#appendix-G
			if s.Outstanding == 0 {
				s.rtt.Unlock()
				return
			}
			// Netstack measures congestion window/inflight all in
			// terms of packets and not bytes. This is similar to
			// how linux also does cwnd and inflight. In practice
			// this approximation works as expected.
			expectedSamples := math.Ceil(float64(s.Outstanding) / 2)

			// alpha & beta values are the original values as recommended in
			// https://tools.ietf.org/html/rfc6298#section-2.3.
			const alpha = 0.125
			const beta = 0.25

			// Scale the gains down by the number of expected
			// samples per window (RFC 7323 Appendix G).
			alphaPrime := alpha / expectedSamples
			betaPrime := beta / expectedSamples
			rttVar := (1-betaPrime)*s.rtt.TCPRTTState.RTTVar.Seconds() + betaPrime*diff.Seconds()
			srtt := (1-alphaPrime)*s.rtt.TCPRTTState.SRTT.Seconds() + alphaPrime*rtt.Seconds()
			s.rtt.TCPRTTState.RTTVar = time.Duration(rttVar * float64(time.Second))
			s.rtt.TCPRTTState.SRTT = time.Duration(srtt * float64(time.Second))
		}
	}

	// RTO <- SRTT + 4*RTTVAR (RFC 6298 2.3), clamped below by minRTO.
	s.RTO = s.rtt.TCPRTTState.SRTT + 4*s.rtt.TCPRTTState.RTTVar
	s.rtt.Unlock()
	if s.RTO < s.minRTO {
		s.RTO = s.minRTO
	}
}

// resendSegment resends the first unacknowledged segment, splitting it first
// if it exceeds the maximum payload size, and updates the fast-recovery
// retransmit markers and the SACK pipe estimate.
func (s *sender) resendSegment() {
	// Don't use any segments we already sent to measure RTT as they may
	// have been affected by packets being lost.
	s.RTTMeasureSeqNum = s.SndNxt

	seg := s.writeList.Front()
	if seg == nil {
		return
	}
	if seg.data.Size() > s.MaxPayloadSize {
		s.splitSeg(seg, s.MaxPayloadSize)
	}

	// See: RFC 6675 section 5 Step 4.3
	//
	// To prevent retransmission, set both the HighRXT and RescueRXT
	// to the highest sequence number in the retransmitted segment.
	lastSeq := seg.sequenceNumber.Add(seqnum.Size(seg.data.Size())) - 1
	s.FastRecovery.HighRxt = lastSeq
	s.FastRecovery.RescueRxt = lastSeq

	s.sendSegment(seg)
	s.ep.stack.Stats().TCP.FastRetransmit.Increment()
	s.ep.stats.SendErrors.FastRetransmit.Increment()

	// Run SetPipe() as per RFC 6675 section 5 Step 4.4
	s.SetPipe()
}

// retransmitTimerExpired is called when the retransmit timer expires, and
// unacknowledged segments are assumed lost, and thus need to be resent.
// Returns true if the connection is still usable, or false if the connection
// is deemed lost.
func (s *sender) retransmitTimerExpired() bool {
	// Check if the timer actually expired or if it's a spurious wake due
	// to a previously orphaned runtime timer.
	if !s.resendTimer.checkExpiration() {
		return true
	}

	// TODO(b/147297758): Band-aid fix, retransmitTimer can fire in some edge cases
	// when writeList is empty. Remove this once we have a proper fix for this
	// issue.
	if s.writeList.Front() == nil {
		return true
	}

	s.ep.stack.Stats().TCP.Timeouts.Increment()
	s.ep.stats.SendErrors.Timeouts.Increment()

	// Set TLPRxtOut to false according to
	// https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.6.1.
	s.rc.tlpRxtOut = false

	// Give up if we've waited more than a minute since the last resend or
	// if a user time out is set and we have exceeded the user specified
	// timeout since the first retransmission.
	uto := s.ep.userTimeout

	if s.firstRetransmittedSegXmitTime.IsZero() {
		// We store the original xmitTime of the segment that we are
		// about to retransmit as the retransmission time. This is
		// required as by the time the retransmitTimer has expired the
		// segment has already been sent and unacked for the RTO at the
		// time the segment was sent.
		s.firstRetransmittedSegXmitTime = s.writeList.Front().xmitTime
	}

	// remaining is the time budget left before we declare the connection
	// dead: maxRTO by default, or what's left of the user timeout.
	elapsed := time.Since(s.firstRetransmittedSegXmitTime)
	remaining := s.maxRTO
	if uto != 0 {
		// Cap to the user specified timeout if one is specified.
		remaining = uto - elapsed
	}

	// Always honor the user-timeout irrespective of whether the zero
	// window probes were acknowledged.
	// net/ipv4/tcp_timer.c::tcp_probe_timer()
	if remaining <= 0 || s.unackZeroWindowProbes >= s.maxRetries {
		return false
	}

	// Set new timeout. The timer will be restarted by the call to sendData
	// below. Exponential backoff per RFC 6298 5.5.
	s.RTO *= 2
	// Cap the RTO as per RFC 1122 4.2.3.1, RFC 6298 5.5
	if s.RTO > s.maxRTO {
		s.RTO = s.maxRTO
	}

	// Cap RTO to remaining time.
	if s.RTO > remaining {
		s.RTO = remaining
	}

	// See: https://tools.ietf.org/html/rfc6582#section-3.2 Step 4.
	//
	// Retransmit timeouts:
	//     After a retransmit timeout, record the highest sequence number
	//     transmitted in the variable recover, and exit the fast recovery
	//     procedure if applicable.
	s.FastRecovery.Last = s.SndNxt - 1

	if s.FastRecovery.Active {
		// We were attempting fast recovery but were not successful.
		// Leave the state. We don't need to update ssthresh because it
		// has already been updated when entered fast-recovery.
		s.leaveRecovery()
	}

	s.state = tcpip.RTORecovery
	s.cc.HandleRTOExpired()

	// Mark the next segment to be sent as the first unacknowledged one and
	// start sending again. Set the number of outstanding packets to 0 so
	// that we'll be able to retransmit.
	//
	// We'll keep on transmitting (or retransmitting) as we get acks for
	// the data we transmit.
	s.Outstanding = 0

	// Expunge all SACK information as per https://tools.ietf.org/html/rfc6675#section-5.1
	//
	//  In order to avoid memory deadlocks, the TCP receiver is allowed to
	//  discard data that has already been selectively acknowledged. As a
	//  result, [RFC2018] suggests that a TCP sender SHOULD expunge the SACK
	//  information gathered from a receiver upon a retransmission timeout
	//  (RTO) "since the timeout might indicate that the data receiver has
	//  reneged." Additionally, a TCP sender MUST "ignore prior SACK
	//  information in determining which data to retransmit."
	//
	// NOTE: We take the stricter interpretation and just expunge all
	// information as we lack more rigorous checks to validate if the SACK
	// information is usable after an RTO.
	s.ep.scoreboard.Reset()
	s.writeNext = s.writeList.Front()

	// RFC 1122 4.2.2.17: Start sending zero window probes when we still see a
	// zero receive window after retransmission interval and we have data to
	// send.
	if s.zeroWindowProbing {
		s.sendZeroWindowProbe()
		// RFC 1122 4.2.2.17: A TCP MAY keep its offered receive window closed
		// indefinitely.  As long as the receiving TCP continues to send
		// acknowledgments in response to the probe segments, the sending TCP
		// MUST allow the connection to stay open.
		return true
	}

	seg := s.writeNext
	// RFC 1122 4.2.3.5: Close the connection when the number of
	// retransmissions for this segment is beyond a limit.
	if seg != nil && seg.xmitCount > s.maxRetries {
		return false
	}

	s.sendData()

	return true
}

// pCount returns the number of packets in the segment. Due to GSO, a segment
// can be composed of multiple packets.
func (s *sender) pCount(seg *segment, maxPayloadSize int) int {
	payloadBytes := seg.data.Size()
	if payloadBytes == 0 {
		// A zero-length segment (e.g. a bare control segment) still
		// occupies one packet.
		return 1
	}
	// Ceiling division: one packet per maxPayloadSize-byte chunk.
	return (payloadBytes + maxPayloadSize - 1) / maxPayloadSize
}

// splitSeg splits a given segment at the size specified and inserts the
// remainder as a new segment after the current one in the write list.
func (s *sender) splitSeg(seg *segment, size int) {
	if seg.data.Size() <= size {
		return
	}
	// Split this segment up: the clone takes the tail beyond size, with
	// its sequence number advanced accordingly.
	nSeg := seg.clone()
	nSeg.data.TrimFront(size)
	nSeg.sequenceNumber.UpdateForward(seqnum.Size(size))
	s.writeList.InsertAfter(seg, nSeg)

	// The segment being split does not carry PUSH flag because it is
	// followed by the newly split segment.
	// RFC1122 section 4.2.2.2: MUST set the PSH bit in the last buffered
	// segment (i.e., when there is no more queued data to be sent).
	// Linux removes PSH flag only when the segment is being split over MSS
	// and retains it when we are splitting the segment over lack of sender
	// window space.
	// ref: net/ipv4/tcp_output.c::tcp_write_xmit(), tcp_mss_split_point()
	// ref: net/ipv4/tcp_output.c::tcp_write_wakeup(), tcp_snd_wnd_test()
	if seg.data.Size() > s.MaxPayloadSize {
		// Use bit-clear (&^=) rather than XOR: XOR would toggle the
		// bit and incorrectly SET PSH on a segment that didn't have it.
		seg.flags &^= header.TCPFlagPsh
	}

	seg.data.CapLength(size)
}

// NextSeg implements the RFC6675 NextSeg() operation.
//
// NextSeg starts scanning the writeList starting from nextSegHint and returns
// the hint to be passed on the next call to NextSeg. This is required to avoid
// iterating the write list repeatedly when NextSeg is invoked in a loop during
// recovery. The returned hint will be nil if there are no more segments that
// can match rules defined by NextSeg operation in RFC6675.
//
// rescueRtx will be true only if nextSeg is a rescue retransmission as
// described by Step 4) of the NextSeg algorithm.
func (s *sender) NextSeg(nextSegHint *segment) (nextSeg, hint *segment, rescueRtx bool) {
	// s3 and s4 hold candidates for the Step (3) and Step (4) rules
	// respectively, collected while scanning for a Step (1) match.
	var s3 *segment
	var s4 *segment
	// Step 1.
	for seg := nextSegHint; seg != nil; seg = seg.Next() {
		// Stop iteration if we hit a segment that has never been
		// transmitted (i.e. either it has no assigned sequence number
		// or if it does have one, it's >= the next sequence number
		// to be sent [i.e. >= s.sndNxt]).
		if !s.isAssignedSequenceNumber(seg) || s.SndNxt.LessThanEq(seg.sequenceNumber) {
			hint = nil
			break
		}
		segSeq := seg.sequenceNumber
		if smss := s.ep.scoreboard.SMSS(); seg.data.Size() > int(smss) {
			s.splitSeg(seg, int(smss))
		}

		// See RFC 6675 Section 4
		//
		//     1. If there exists a smallest unSACKED sequence number
		//     'S2' that meets the following 3 criteria for determining
		//     loss, the sequence range of one segment of up to SMSS
		//     octets starting with S2 MUST be returned.
		if !s.ep.scoreboard.IsSACKED(header.SACKBlock{segSeq, segSeq.Add(1)}) {
			// NextSeg():
			//
			//    (1.a) S2 is greater than HighRxt
			//    (1.b) S2 is less than highest octet covered by
			//    any received SACK.
			if s.FastRecovery.HighRxt.LessThan(segSeq) && segSeq.LessThan(s.ep.scoreboard.maxSACKED) {
				// NextSeg():
				//     (1.c) IsLost(S2) returns true.
				if s.ep.scoreboard.IsLost(segSeq) {
					return seg, seg.Next(), false
				}

				// NextSeg():
				//
				// (3): If the conditions for rules (1) and (2)
				// fail, but there exists an unSACKed sequence
				// number S3 that meets the criteria for
				// detecting loss given in steps 1.a and 1.b
				// above (specifically excluding (1.c)) then one
				// segment of up to SMSS octets starting with S3
				// SHOULD be returned.
				if s3 == nil {
					s3 = seg
					hint = seg.Next()
				}
			}
			// NextSeg():
			//
			//     (4) If the conditions for (1), (2) and (3) fail,
			//     but there exists outstanding unSACKED data, we
			//     provide the opportunity for a single "rescue"
			//     retransmission per entry into loss recovery. If
			//     HighACK is greater than RescueRxt (or RescueRxt
			//     is undefined), then one segment of up to SMSS
			//     octets that MUST include the highest outstanding
			//     unSACKed sequence number SHOULD be returned, and
			//     RescueRxt set to RecoveryPoint. HighRxt MUST NOT
			//     be updated.
			if s.FastRecovery.RescueRxt.LessThan(s.SndUna - 1) {
				if s4 != nil {
					if s4.sequenceNumber.LessThan(segSeq) {
						s4 = seg
					}
				} else {
					s4 = seg
				}
			}
		}
	}

	// If we got here then no segment matched step (1).
	// Step (2): "If no sequence number 'S2' per rule (1)
	// exists but there exists available unsent data and the
	// receiver's advertised window allows, the sequence
	// range of one segment of up to SMSS octets of
	// previously unsent data starting with sequence number
	// HighData+1 MUST be returned."
	for seg := s.writeNext; seg != nil; seg = seg.Next() {
		if s.isAssignedSequenceNumber(seg) && seg.sequenceNumber.LessThan(s.SndNxt) {
			continue
		}
		// We do not split the segment here to <= smss as it has
		// potentially not been assigned a sequence number yet.
		return seg, nil, false
	}

	if s3 != nil {
		return s3, hint, false
	}

	// Fall back to the rescue retransmission candidate, if any.
	return s4, nil, true
}

// maybeSendSegment tries to send the specified segment and either coalesces
// other segments into this one or splits the specified segment based on the
// lower of the specified limit value or the receivers window size specified by
// end.
//
// It returns true if the segment was transmitted (and s.SndNxt advanced past
// any newly sent data), and false if the segment was held back (Nagle,
// TCP_CORK, or insufficient receive window).
func (s *sender) maybeSendSegment(seg *segment, limit int, end seqnum.Value) (sent bool) {
	// We abuse the flags field to determine if we have already
	// assigned a sequence number to this segment.
	if !s.isAssignedSequenceNumber(seg) {
		// Merge segments if allowed.
		if seg.data.Size() != 0 {
			available := int(s.SndNxt.Size(end))
			if available > limit {
				available = limit
			}

			// nextTooBig indicates that the next segment was too
			// large to entirely fit in the current segment. It
			// would be possible to split the next segment and merge
			// the portion that fits, but unexpectedly splitting
			// segments can have user visible side-effects which can
			// break applications. For example, RFC 7766 section 8
			// says that the length and data of a DNS response
			// should be sent in the same TCP segment to avoid
			// triggering bugs in poorly written DNS
			// implementations.
			var nextTooBig bool
			for seg.Next() != nil && seg.Next().data.Size() != 0 {
				if seg.data.Size()+seg.Next().data.Size() > available {
					nextTooBig = true
					break
				}
				seg.data.Append(seg.Next().data)

				// Consume the segment that we just merged in.
				s.writeList.Remove(seg.Next())
			}
			if !nextTooBig && seg.data.Size() < available {
				// Segment is not full.
				if s.Outstanding > 0 && s.ep.ops.GetDelayOption() {
					// Nagle's algorithm. From Wikipedia:
					//   Nagle's algorithm works by
					//   combining a number of small
					//   outgoing messages and sending them
					//   all at once. Specifically, as long
					//   as there is a sent packet for which
					//   the sender has received no
					//   acknowledgment, the sender should
					//   keep buffering its output until it
					//   has a full packet's worth of
					//   output, thus allowing output to be
					//   sent all at once.
					return false
				}
				// With TCP_CORK, hold back until minimum of the available
				// send space and MSS.
				// TODO(gvisor.dev/issue/2833): Drain the held segments after a
				// timeout.
				if seg.data.Size() < s.MaxPayloadSize && s.ep.ops.GetCorkOption() {
					return false
				}
			}
		}

		// Assign flags. We don't do it above so that we can merge
		// additional data if Nagle holds the segment.
		seg.sequenceNumber = s.SndNxt
		seg.flags = header.TCPFlagAck | header.TCPFlagPsh
	}

	var segEnd seqnum.Value
	if seg.data.Size() == 0 {
		// A segment with no data is a FIN; it consumes exactly one
		// sequence number and must be last in the write list.
		if s.writeList.Back() != seg {
			panic("FIN segments must be the final segment in the write list.")
		}
		seg.flags = header.TCPFlagAck | header.TCPFlagFin
		segEnd = seg.sequenceNumber.Add(1)
		// Update the state to reflect that we have now
		// queued a FIN.
		switch s.ep.EndpointState() {
		case StateCloseWait:
			s.ep.setEndpointState(StateLastAck)
		default:
			s.ep.setEndpointState(StateFinWait1)
		}
	} else {
		// We're sending a non-FIN segment.
		if seg.flags&header.TCPFlagFin != 0 {
			panic("Netstack queues FIN segments without data.")
		}

		if !seg.sequenceNumber.LessThan(end) {
			return false
		}

		available := int(seg.sequenceNumber.Size(end))
		if available == 0 {
			return false
		}

		// If the whole segment or at least 1MSS sized segment cannot
		// be accommodated in the receiver advertised window, skip
		// splitting and sending of the segment. ref:
		// net/ipv4/tcp_output.c::tcp_snd_wnd_test()
		//
		// Linux checks this for all segment transmits not triggered by
		// a probe timer. On this condition, it defers the segment split
		// and transmit to a short probe timer.
		//
		// ref: include/net/tcp.h::tcp_check_probe_timer()
		// ref: net/ipv4/tcp_output.c::tcp_write_wakeup()
		//
		// Instead of defining a new transmit timer, we attempt to split
		// the segment right here if there are no pending segments. If
		// there are pending segments, segment transmits are deferred to
		// the retransmit timer handler.
		if s.SndUna != s.SndNxt {
			switch {
			case available >= seg.data.Size():
				// OK to send, the whole segments fits in the
				// receiver's advertised window.
			case available >= s.MaxPayloadSize:
				// OK to send, at least 1 MSS sized segment fits
				// in the receiver's advertised window.
			default:
				return false
			}
		}

		// The segment size limit is computed as a function of sender
		// congestion window and MSS. When sender congestion window is >
		// 1, this limit can be larger than MSS. Ensure that the
		// currently available send space is not greater than minimum of
		// this limit and MSS.
		if available > limit {
			available = limit
		}

		// If GSO is not in use then cap available to
		// maxPayloadSize. When GSO is in use the gVisor GSO logic or
		// the host GSO logic will cap the segment to the correct size.
		if s.ep.gso == nil && available > s.MaxPayloadSize {
			available = s.MaxPayloadSize
		}

		if seg.data.Size() > available {
			s.splitSeg(seg, available)
		}

		segEnd = seg.sequenceNumber.Add(seqnum.Size(seg.data.Size()))
	}

	s.sendSegment(seg)

	// Update sndNxt if we actually sent new data (as opposed to
	// retransmitting some previously sent data).
	if s.SndNxt.LessThan(segEnd) {
		s.SndNxt = segEnd
	}

	return true
}

// sendZeroWindowProbe transmits a single zero window probe and rearms the
// resend timer so that probing continues until the peer opens its window.
func (s *sender) sendZeroWindowProbe() {
	rcvNxt, rcvWnd := s.ep.rcv.getSendParams()
	s.unackZeroWindowProbes++
	// The probe carries no payload; its sequence number points at the
	// last acknowledged byte, forcing the receiver to respond with its
	// current window.
	s.ep.sendRaw(buffer.VectorisedView{}, header.TCPFlagAck, s.SndUna-1, rcvNxt, rcvWnd)
	// Keep the probe cycle going.
	s.resendTimer.enable(s.RTO)
}

// enableZeroWindowProbing starts zero window probing. Probing piggybacks on
// the retransmit timer with the current retransmission interval, since it may
// begin while segments are still being retransmitted.
func (s *sender) enableZeroWindowProbing() {
	s.zeroWindowProbing = true
	// Record when this retransmit/probe cycle began, unless one is
	// already in progress.
	if s.firstRetransmittedSegXmitTime.IsZero() {
		s.firstRetransmittedSegXmitTime = time.Now()
	}
	s.resendTimer.enable(s.RTO)
}

// disableZeroWindowProbing stops zero window probing and resets all
// probe-related state.
func (s *sender) disableZeroWindowProbing() {
	s.resendTimer.disable()
	s.zeroWindowProbing = false
	s.unackZeroWindowProbes = 0
	// Reset the retransmit cycle start time to the zero value.
	s.firstRetransmittedSegXmitTime = time.Time{}
}

// postXmit adjusts timer state after a transmission attempt. dataSent
// indicates whether any segment was actually sent; shouldScheduleProbe
// controls whether a tail loss probe (PTO) may be scheduled for pending data.
func (s *sender) postXmit(dataSent bool, shouldScheduleProbe bool) {
	// Transmitted data makes keepalives unnecessary until the pending
	// data drains again.
	if dataSent {
		s.ep.disableKeepaliveTimer()
	}

	// A zero advertised receive window with data still queued means we
	// must probe the peer for its receive window size.
	if s.writeNext != nil && s.SndWnd == 0 {
		s.enableZeroWindowProbing()
	}

	if s.SndUna == s.SndNxt {
		// Everything has been acknowledged; only keepalives remain.
		s.ep.resetKeepaliveTimer(false)
		return
	}

	// There is pending data; arm the appropriate timer.
	if shouldScheduleProbe && s.shouldSchedulePTO() {
		// Schedule a PTO after transmitting new data that wasn't
		// itself a TLP probe.
		s.schedulePTO()
		return
	}
	if !s.resendTimer.enabled() {
		s.probeTimer.disable()
		if s.Outstanding > 0 {
			// Enable the resend timer since there is outstanding
			// data and neither timer is currently armed.
			s.resendTimer.enable(s.RTO)
		}
	}
}

// sendData sends new data segments. It is called when data becomes available or
// when the send window opens up.
func (s *sender) sendData() {
	// The per-send size limit: MSS normally, or the GSO maximum when GSO
	// is enabled.
	limit := s.MaxPayloadSize
	if s.gso {
		limit = int(s.ep.gso.MaxSize - header.TCPHeaderMaximumSize)
	}
	end := s.SndUna.Add(s.SndWnd)

	// Reduce the congestion window to min(IW, cwnd) per RFC 5681, page 10.
	// "A TCP SHOULD set cwnd to no more than RW before beginning
	// transmission if the TCP has not sent data in the interval exceeding
	// the retransmission timeout."
	if !s.FastRecovery.Active && s.state != tcpip.RTORecovery && time.Now().Sub(s.LastSendTime) > s.RTO {
		if s.SndCwnd > InitialCwnd {
			s.SndCwnd = InitialCwnd
		}
	}

	var dataSent bool
	// Send segments while there is congestion window available. Note that
	// limit only ever shrinks across iterations: cwndLimit is
	// non-increasing as Outstanding grows with each transmitted segment.
	for seg := s.writeNext; seg != nil && s.Outstanding < s.SndCwnd; seg = seg.Next() {
		cwndLimit := (s.SndCwnd - s.Outstanding) * s.MaxPayloadSize
		if cwndLimit < limit {
			limit = cwndLimit
		}
		if s.isAssignedSequenceNumber(seg) && s.ep.SACKPermitted && s.ep.scoreboard.IsSACKED(seg.sackBlock()) {
			// Move writeNext along so that we don't try and scan data that
			// has already been SACKED.
			s.writeNext = seg.Next()
			continue
		}
		if sent := s.maybeSendSegment(seg, limit, end); !sent {
			break
		}
		dataSent = true
		s.Outstanding += s.pCount(seg, s.MaxPayloadSize)
		s.writeNext = seg.Next()
	}

	// Arm/reset the appropriate timers for the post-transmit state.
	s.postXmit(dataSent, true /* shouldScheduleProbe */)
}

// enterRecovery puts the sender into fast recovery, recording the recovery
// window and choosing between SACK-based and NewReno-style recovery.
func (s *sender) enterRecovery() {
	fr := &s.FastRecovery
	fr.Active = true
	// Save state to reflect we're now in fast recovery.
	//
	// See: https://tools.ietf.org/html/rfc5681#section-3.2 Step 3.
	// The cwnd is inflated by 3 to account for the 3 packets which
	// triggered the 3 duplicate ACKs and are now not in flight.
	s.SndCwnd = s.Ssthresh + 3
	s.SackedOut = 0
	s.DupAckCount = 0
	fr.First = s.SndUna
	fr.Last = s.SndNxt - 1
	fr.MaxCwnd = s.SndCwnd + s.Outstanding
	fr.HighRxt = s.SndUna
	fr.RescueRxt = s.SndUna

	if !s.ep.SACKPermitted {
		// Plain NewReno fast recovery.
		s.state = tcpip.FastRecovery
		s.ep.stack.Stats().TCP.FastRecovery.Increment()
		return
	}

	s.state = tcpip.SACKRecovery
	s.ep.stack.Stats().TCP.SACKRecovery.Increment()
	// Clear TLPRxtOut per
	// https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.6.1,
	// counting the recovery as TLP-triggered if a tail loss probe was
	// outstanding.
	if s.rc.tlpRxtOut {
		s.ep.stack.Stats().TCP.TLPRecovery.Increment()
	}
	s.rc.tlpRxtOut = false
}

// leaveRecovery exits fast recovery: the artificially inflated cwnd is
// deflated back to ssthresh and the congestion control algorithm is notified.
func (s *sender) leaveRecovery() {
	s.DupAckCount = 0
	s.FastRecovery.Active = false
	s.FastRecovery.MaxCwnd = 0

	// Deflate cwnd; it had been inflated as duplicate ACKs arrived, then
	// give the congestion control algorithm its post-recovery hook.
	s.SndCwnd = s.Ssthresh
	s.cc.PostRecovery()
}

// isAssignedSequenceNumber reports whether seg has been assigned a sequence
// number. Flags are set exactly once, at the point a sequence number is
// assigned (right before the segment is sent), so any non-zero flags field
// implies a valid sequence number.
func (s *sender) isAssignedSequenceNumber(seg *segment) bool {
	return seg.flags != 0
}

// SetPipe implements the SetPipe() function described in RFC6675. Netstack
// maintains the congestion window in number of packets and not bytes, so
// SetPipe() here measures number of outstanding packets rather than actual
// outstanding bytes in the network.
func (s *sender) SetPipe() {
	// If SACK isn't permitted or it is permitted but recovery is not active
	// then ignore pipe calculations.
	if !s.ep.SACKPermitted || !s.FastRecovery.Active {
		return
	}
	pipe := 0
	smss := seqnum.Size(s.ep.scoreboard.SMSS())
	// Walk only sent data segments (assigned sequence number, non-empty).
	for s1 := s.writeList.Front(); s1 != nil && s1.data.Size() != 0 && s.isAssignedSequenceNumber(s1); s1 = s1.Next() {
		// With GSO each segment can be much larger than SMSS. So check the segment
		// in SMSS sized ranges.
		segEnd := s1.sequenceNumber.Add(seqnum.Size(s1.data.Size()))
		for startSeq := s1.sequenceNumber; startSeq.LessThan(segEnd); startSeq = startSeq.Add(smss) {
			endSeq := startSeq.Add(smss)
			if segEnd.LessThan(endSeq) {
				// Clamp the last range to the end of the segment.
				endSeq = segEnd
			}
			sb := header.SACKBlock{startSeq, endSeq}
			// SetPipe():
			//
			// After initializing pipe to zero, the following steps are
			// taken for each octet 'S1' in the sequence space between
			// HighACK and HighData that has not been SACKed:
			if !s1.sequenceNumber.LessThan(s.SndNxt) {
				break
			}
			if s.ep.scoreboard.IsSACKED(sb) {
				continue
			}

			// SetPipe():
			//
			//    (a) If IsLost(S1) returns false, Pipe is incremented by 1.
			//
			// NOTE: here we mark the whole segment as lost. We do not try
			// and test every byte in our write buffer as we maintain our
			// pipe in terms of outstanding packets and not bytes.
			if !s.ep.scoreboard.IsRangeLost(sb) {
				pipe++
			}
			// SetPipe():
			//    (b) If S1 <= HighRxt, Pipe is incremented by 1.
			if s1.sequenceNumber.LessThanEq(s.FastRecovery.HighRxt) {
				pipe++
			}
		}
	}
	s.Outstanding = pipe
}

// shouldEnterRecovery returns true if the sender should enter fast recovery,
// either because the duplicate ACK threshold was reached or because the SACK
// scoreboard considers the first unacknowledged byte lost.
// See RFC 6675 section 5.
func (s *sender) shouldEnterRecovery() bool {
	if s.DupAckCount >= nDupAckThreshold {
		return true
	}
	// Without RACK, SACK scoreboard loss detection may trigger recovery
	// before the duplicate ACK threshold is hit.
	return s.ep.SACKPermitted &&
		s.ep.tcpRecovery&tcpip.TCPRACKLossDetection == 0 &&
		s.ep.scoreboard.IsLost(s.SndUna)
}

// detectLoss is called when an ack is received and returns whether a loss is
// detected. It manages the state related to duplicate acks and determines if
// a retransmit is needed according to the rules in RFC 6582 (NewReno).
//
// Precondition: fast recovery is not active when this is called.
func (s *sender) detectLoss(seg *segment) (fastRetransmit bool) {
	// When RACK is enabled and reordering has been observed, do not enter
	// recovery via the duplicate ACK rule.
	// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-4
	if s.ep.SACKPermitted && s.ep.tcpRecovery&tcpip.TCPRACKLossDetection != 0 && s.rc.Reord {
		return false
	}

	if !s.isDupAck(seg) {
		// Not a duplicate; restart the dupack count.
		s.DupAckCount = 0
		return false
	}

	s.DupAckCount++

	// Hold off recovery until we reach nDupAckThreshold or the first
	// unacknowledged byte is considered lost per the SACK scoreboard.
	if !s.shouldEnterRecovery() {
		// RFC 6675 Step 3.
		s.FastRecovery.HighRxt = s.SndUna - 1
		// Run SetPipe() to recalculate the outstanding segments.
		s.SetPipe()
		s.state = tcpip.Disorder
		return false
	}

	// See: https://tools.ietf.org/html/rfc6582#section-3.2 Step 2
	//
	// Only the check happens here; bumping Last to the highest sequence
	// number transmitted so far happens inside enterRecovery.
	//
	// Recovery is only entered when at least one byte beyond
	// s.FastRecovery.Last (the highest outstanding byte when fast
	// retransmit was last entered) has been acknowledged.
	if !s.FastRecovery.Last.LessThan(seg.ackNumber - 1) {
		s.DupAckCount = 0
		return false
	}

	s.cc.HandleLossDetected()
	s.enterRecovery()
	return true
}

// isDupAck determines if seg is a duplicate ack as defined in
// https://tools.ietf.org/html/rfc5681#section-2.
func (s *sender) isDupAck(seg *segment) bool {
	// A TCP that utilizes selective acknowledgments (SACKs) [RFC2018,
	// RFC2883] can leverage the SACK information to determine when an
	// incoming ACK is a "duplicate" (e.g., if the ACK contains previously
	// unknown SACK information).
	if s.ep.SACKPermitted && !seg.hasNewSACKInfo {
		return false
	}

	switch {
	case s.SndUna == s.SndNxt:
		// (a) The receiver of the ACK must have outstanding data.
		return false
	case seg.logicalLen() != 0:
		// (b) The incoming acknowledgment must carry no data.
		return false
	case seg.flagIsSet(header.TCPFlagFin) || seg.flagIsSet(header.TCPFlagSyn):
		// (c) The SYN and FIN bits must both be off.
		return false
	case seg.ackNumber != s.SndUna:
		// (d) The ACK number must equal the greatest acknowledgment
		// received on the connection (TCP.UNA from RFC793).
		return false
	}
	// (e) The advertised window in the incoming acknowledgment must equal
	// the advertised window in the last incoming acknowledgment.
	return s.SndWnd == seg.window
}

// walkSACK iterates the writeList and updates RACK for each segment which is
// newly acked either cumulatively or selectively. It loops through the
// segments which are sacked, and updates the RACK related variables and
// checks for reordering.
//
// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.2
// steps 2 and 3.
func (s *sender) walkSACK(rcvdSeg *segment) {
	s.rc.setDSACKSeen(false)

	// Look for a DSACK block; if present it is always the first block and
	// is skipped when processing the remaining SACK blocks below.
	idx := 0
	n := len(rcvdSeg.parsedOptions.SACKBlocks)
	if checkDSACK(rcvdSeg) {
		s.rc.setDSACKSeen(true)
		idx = 1
		n--
	}

	if n == 0 {
		return
	}

	// Sort the SACK blocks. The first block is the most recent unacked
	// block. The following blocks can be in arbitrary order.
	sackBlocks := make([]header.SACKBlock, n)
	copy(sackBlocks, rcvdSeg.parsedOptions.SACKBlocks[idx:])
	// Sort in descending order of start sequence number.
	sort.Slice(sackBlocks, func(i, j int) bool {
		return sackBlocks[j].Start.LessThan(sackBlocks[i].Start)
	})

	// Mark each transmitted, not-yet-acked segment covered by a SACK
	// block as acked and update RACK state/reorder detection for it.
	seg := s.writeList.Front()
	for _, sb := range sackBlocks {
		for seg != nil && seg.sequenceNumber.LessThan(sb.End) && seg.xmitCount != 0 {
			if sb.Start.LessThanEq(seg.sequenceNumber) && !seg.acked {
				s.rc.update(seg, rcvdSeg)
				s.rc.detectReorder(seg)
				seg.acked = true
				s.SackedOut += s.pCount(seg, s.MaxPayloadSize)
			}
			seg = seg.Next()
		}
	}
}

// checkDSACK reports whether the first SACK block in rcvdSeg is a DSACK
// (duplicate SACK) block as defined by RFC 2883.
func checkDSACK(rcvdSeg *segment) bool {
	blocks := rcvdSeg.parsedOptions.SACKBlocks
	if len(blocks) == 0 {
		return false
	}

	first := blocks[0]
	// Reject an invalid (inverted) SACK block.
	if first.End.LessThan(first.Start) {
		return false
	}

	// See: https://tools.ietf.org/html/rfc2883#section-5 DSACK is sent in
	// at most one SACK block. DSACK is detected in the below two cases:
	// * If the SACK sequence space is less than this cumulative ACK, it is
	//   an indication that the segment identified by the SACK block has
	//   been received more than once by the receiver.
	// * If the sequence space in the first SACK block is greater than the
	//   cumulative ACK, then the sender next compares the sequence space
	//   in the first SACK block with the sequence space in the second SACK
	//   block, if there is one. This comparison can determine if the first
	//   SACK block is reporting duplicate data that lies above the
	//   cumulative ACK.
	if first.Start.LessThan(rcvdSeg.ackNumber) {
		return true
	}

	if len(blocks) > 1 {
		second := blocks[1]
		// Reject an invalid second block.
		if second.End.LessThan(second.Start) {
			return false
		}

		// The first block is a DSACK block if it is fully contained
		// within the second block.
		if second.Start.LessThanEq(first.Start) && first.End.LessThanEq(second.End) {
			return true
		}
	}

	return false
}

// handleRcvdSegment is called when a segment is received; it is responsible for
// updating the send-related state: RTT/RTO, the SACK scoreboard, fast/RACK
// recovery state, zero window probing, the retransmit queue, and finally
// triggering transmission of more data where appropriate.
func (s *sender) handleRcvdSegment(rcvdSeg *segment) {
	// Check if we can extract an RTT measurement from this ack.
	if !rcvdSeg.parsedOptions.TS && s.RTTMeasureSeqNum.LessThan(rcvdSeg.ackNumber) {
		s.updateRTO(time.Now().Sub(s.RTTMeasureTime))
		s.RTTMeasureSeqNum = s.SndNxt
	}

	// Update Timestamp if required. See RFC7323, section-4.3.
	if s.ep.SendTSOk && rcvdSeg.parsedOptions.TS {
		s.ep.updateRecentTimestamp(rcvdSeg.parsedOptions.TSVal, s.MaxSentAck, rcvdSeg.sequenceNumber)
	}

	// Insert SACKBlock information into our scoreboard.
	if s.ep.SACKPermitted {
		for _, sb := range rcvdSeg.parsedOptions.SACKBlocks {
			// Only insert the SACK block if the following holds
			// true:
			//  * SACK block acks data after the ack number in the
			//    current segment.
			//  * SACK block represents a sequence
			//    between sndUna and sndNxt (i.e. data that is
			//    currently unacked and in-flight).
			//  * SACK block that has not been SACKed already.
			//
			// NOTE: This check specifically excludes DSACK blocks
			// which have start/end before sndUna and are used to
			// indicate spurious retransmissions.
			if rcvdSeg.ackNumber.LessThan(sb.Start) && s.SndUna.LessThan(sb.Start) && sb.End.LessThanEq(s.SndNxt) && !s.ep.scoreboard.IsSACKED(sb) {
				s.ep.scoreboard.Insert(sb)
				rcvdSeg.hasNewSACKInfo = true
			}
		}

		// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08
		// section-7.2
		// * Step 2: Update RACK stats.
		//   If the ACK is not ignored as invalid, update the RACK.rtt
		//   to be the RTT sample calculated using this ACK, and
		//   continue.  If this ACK or SACK was for the most recently
		//   sent packet, then record the RACK.xmit_ts timestamp and
		//   RACK.end_seq sequence implied by this ACK.
		// * Step 3: Detect packet reordering.
		//   If the ACK selectively or cumulatively acknowledges an
		//   unacknowledged and also never retransmitted sequence below
		//   RACK.fack, then the corresponding packet has been
		//   reordered and RACK.reord is set to TRUE.
		if s.ep.tcpRecovery&tcpip.TCPRACKLossDetection != 0 {
			s.walkSACK(rcvdSeg)
		}
		s.SetPipe()
	}

	ack := rcvdSeg.ackNumber
	fastRetransmit := false
	// Do not leave fast recovery, if the ACK is out of range.
	if s.FastRecovery.Active {
		// Leave fast recovery if it acknowledges all the data covered by
		// this fast recovery session.
		if (ack-1).InRange(s.SndUna, s.SndNxt) && s.FastRecovery.Last.LessThan(ack) {
			s.leaveRecovery()
		}
	} else {
		// Detect loss by counting the duplicates and enter recovery.
		fastRetransmit = s.detectLoss(rcvdSeg)
	}

	// See if TLP based recovery was successful.
	if s.ep.tcpRecovery&tcpip.TCPRACKLossDetection != 0 {
		s.detectTLPRecovery(ack, rcvdSeg)
	}

	// Stash away the current window size.
	s.SndWnd = rcvdSeg.window

	// Disable zero window probing if remote advertises a non-zero receive
	// window. This can be with an ACK to the zero window probe (where the
	// acknumber refers to the already acknowledged byte) OR to any previously
	// unacknowledged segment.
	if s.zeroWindowProbing && rcvdSeg.window > 0 &&
		(ack == s.SndUna || (ack-1).InRange(s.SndUna, s.SndNxt)) {
		s.disableZeroWindowProbing()
	}

	// On receiving the ACK for the zero window probe, account for it and
	// skip trying to send any segment as we are still probing for
	// receive window to become non-zero.
	if s.zeroWindowProbing && s.unackZeroWindowProbes > 0 && ack == s.SndUna {
		s.unackZeroWindowProbes--
		return
	}

	// Ignore ack if it doesn't acknowledge any new data.
	if (ack - 1).InRange(s.SndUna, s.SndNxt) {
		s.DupAckCount = 0

		// See : https://tools.ietf.org/html/rfc1323#section-3.3.
		// Specifically we should only update the RTO using TSEcr if the
		// following condition holds:
		//
		//    A TSecr value received in a segment is used to update the
		//    averaged RTT measurement only if the segment acknowledges
		//    some new data, i.e., only if it advances the left edge of
		//    the send window.
		if s.ep.SendTSOk && rcvdSeg.parsedOptions.TSEcr != 0 {
			// TSVal/Ecr values sent by Netstack are at a millisecond
			// granularity.
			elapsed := time.Duration(s.ep.timestamp()-rcvdSeg.parsedOptions.TSEcr) * time.Millisecond
			s.updateRTO(elapsed)
		}

		if s.shouldSchedulePTO() {
			// Schedule PTO upon receiving an ACK that cumulatively acknowledges data.
			// See https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.1.
			s.schedulePTO()
		} else {
			// When an ack is received we must rearm the timer.
			// RFC 6298 5.3
			s.probeTimer.disable()
			s.resendTimer.enable(s.RTO)
		}

		// Remove all acknowledged data from the write list.
		acked := s.SndUna.Size(ack)
		s.SndUna = ack

		// The remote ACK-ing at least 1 byte is an indication that we have a
		// full-duplex connection to the remote as the only way we will receive an
		// ACK is if the remote received data that we previously sent.
		//
		// As of writing, linux seems to only confirm a route as reachable when
		// forward progress is made which is indicated by an ACK that removes data
		// from the retransmit queue.
		if acked > 0 {
			s.ep.route.ConfirmReachable()
		}

		// Pop fully acknowledged segments off the write list, trimming
		// any partially acknowledged one in place.
		ackLeft := acked
		originalOutstanding := s.Outstanding
		for ackLeft > 0 {
			// We use logicalLen here because we can have FIN
			// segments (which are always at the end of list) that
			// have no data, but do consume a sequence number.
			seg := s.writeList.Front()
			datalen := seg.logicalLen()

			if datalen > ackLeft {
				// Partially acknowledged segment: trim the
				// acked prefix and stop.
				prevCount := s.pCount(seg, s.MaxPayloadSize)
				seg.data.TrimFront(int(ackLeft))
				seg.sequenceNumber.UpdateForward(ackLeft)
				s.Outstanding -= prevCount - s.pCount(seg, s.MaxPayloadSize)
				break
			}

			if s.writeNext == seg {
				s.writeNext = seg.Next()
			}

			// Update the RACK fields if SACK is enabled.
			if s.ep.SACKPermitted && !seg.acked && s.ep.tcpRecovery&tcpip.TCPRACKLossDetection != 0 {
				s.rc.update(seg, rcvdSeg)
				s.rc.detectReorder(seg)
			}

			s.writeList.Remove(seg)

			// If SACK is enabled then only reduce outstanding if
			// the segment was not previously SACKED as these have
			// already been accounted for in SetPipe().
			if !s.ep.SACKPermitted || !s.ep.scoreboard.IsSACKED(seg.sackBlock()) {
				s.Outstanding -= s.pCount(seg, s.MaxPayloadSize)
			} else {
				s.SackedOut -= s.pCount(seg, s.MaxPayloadSize)
			}
			seg.decRef()
			ackLeft -= datalen
		}

		// Update the send buffer usage and notify potential waiters.
		s.ep.updateSndBufferUsage(int(acked))

		// Clear SACK information for all acked data.
		s.ep.scoreboard.Delete(s.SndUna)

		// If we are not in fast recovery then update the congestion
		// window based on the number of acknowledged packets.
		if !s.FastRecovery.Active {
			s.cc.Update(originalOutstanding - s.Outstanding)
			if s.FastRecovery.Last.LessThan(s.SndUna) {
				s.state = tcpip.Open
				// Update RACK when we are exiting fast or RTO
				// recovery as described in the RFC
				// draft-ietf-tcpm-rack-08 Section-7.2 Step 4.
				if s.ep.tcpRecovery&tcpip.TCPRACKLossDetection != 0 {
					s.rc.exitRecovery()
				}
				s.reorderTimer.disable()
			}
		}

		// It is possible for s.outstanding to drop below zero if we get
		// a retransmit timeout, reset outstanding to zero but later
		// get an ack that cover previously sent data.
		if s.Outstanding < 0 {
			s.Outstanding = 0
		}

		s.SetPipe()

		// If all outstanding data was acknowledged then disable the timer.
		// RFC 6298 Rule 5.3
		if s.SndUna == s.SndNxt {
			s.Outstanding = 0
			// Reset firstRetransmittedSegXmitTime to the zero value.
			s.firstRetransmittedSegXmitTime = time.Time{}
			s.resendTimer.disable()
			s.probeTimer.disable()
		}
	}

	if s.ep.SACKPermitted && s.ep.tcpRecovery&tcpip.TCPRACKLossDetection != 0 {
		// Update RACK reorder window.
		// See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.2
		// * Upon receiving an ACK:
		// * Step 4: Update RACK reordering window
		s.rc.updateRACKReorderWindow(rcvdSeg)

		// After the reorder window is calculated, detect any loss by checking
		// if the time elapsed after the segments are sent is greater than the
		// reorder window.
		if numLost := s.rc.detectLoss(rcvdSeg.rcvdTime); numLost > 0 && !s.FastRecovery.Active {
			// If any segment is marked as lost by
			// RACK, enter recovery and retransmit
			// the lost segments.
			s.cc.HandleLossDetected()
			s.enterRecovery()
			fastRetransmit = true
		}

		if s.FastRecovery.Active {
			s.rc.DoRecovery(nil, fastRetransmit)
		}
	}

	// Now that we've popped all acknowledged data from the retransmit
	// queue, retransmit if needed.
	if s.FastRecovery.Active && s.ep.tcpRecovery&tcpip.TCPRACKLossDetection == 0 {
		s.lr.DoRecovery(rcvdSeg, fastRetransmit)
		// When SACK is enabled data sending is governed by steps in
		// RFC 6675 Section 5 recovery steps  A-C.
		// See: https://tools.ietf.org/html/rfc6675#section-5.
		if s.ep.SACKPermitted {
			return
		}
	}

	// Send more data now that some of the pending data has been ack'd, or
	// that the window opened up, or the congestion window was inflated due
	// to a duplicate ack during fast recovery. This will also re-enable
	// the retransmit timer if needed.
	s.sendData()
}

// sendSegment sends the specified segment, updating retransmit statistics and
// per-segment transmit bookkeeping, and (re)arms the retransmit timer on
// failed data sends.
func (s *sender) sendSegment(seg *segment) tcpip.Error {
	// A non-zero xmitCount means this is a retransmission.
	if seg.xmitCount > 0 {
		s.ep.stack.Stats().TCP.Retransmits.Increment()
		s.ep.stats.SendErrors.Retransmits.Increment()
		if s.SndCwnd < s.Ssthresh {
			s.ep.stack.Stats().TCP.SlowStartRetransmits.Increment()
		}
	}
	seg.xmitTime = time.Now()
	seg.xmitCount++
	seg.lost = false
	err := s.sendSegmentFromView(seg.data, seg.flags, seg.sequenceNumber)

	// Every time a packet containing data is sent (including a
	// retransmission), if SACK is enabled and we are retransmitting data
	// then use the conservative timer described in RFC6675 Section 6.0,
	// otherwise follow the standard time described in RFC6298 Section 5.1.
	//
	// NOTE(review): the guard below only runs when the send returned an
	// error, which appears to contradict the comment above ("every time a
	// packet containing data is sent") — confirm whether `err != nil` is
	// intentional here rather than `err == nil` or unconditional.
	if err != nil && seg.data.Size() != 0 {
		if s.FastRecovery.Active && seg.xmitCount > 1 && s.ep.SACKPermitted {
			s.resendTimer.enable(s.RTO)
		} else {
			if !s.resendTimer.enabled() {
				s.resendTimer.enable(s.RTO)
			}
		}
	}

	return err
}

// sendSegmentFromView sends a new segment containing the given payload, flags
// and sequence number, updating the send-time and RTT bookkeeping.
func (s *sender) sendSegmentFromView(data buffer.VectorisedView, flags header.TCPFlags, seq seqnum.Value) tcpip.Error {
	now := time.Now()
	s.LastSendTime = now
	// If this is the segment being timed for an RTT measurement, record
	// its transmit time.
	if seq == s.RTTMeasureSeqNum {
		s.RTTMeasureTime = now
	}

	rcvNxt, rcvWnd := s.ep.rcv.getSendParams()

	// Remember the highest ACK we have sent.
	s.MaxSentAck = rcvNxt

	return s.ep.sendRaw(data, flags, seq, rcvNxt, rcvWnd)
}

// maybeSendOutOfWindowAck sends an ACK for an out-of-window segment unless we
// are currently being rate limited by ACK-loop protection.
func (s *sender) maybeSendOutOfWindowAck(seg *segment) {
	// Segments carrying data are unlikely to be part of an ACK loop, so
	// always acknowledge them; otherwise defer to the rate limiter.
	if seg.payloadSize() == 0 && !s.ep.allowOutOfWindowAck() {
		return
	}
	s.sendAck()
}