Diffstat (limited to 'pkg')
-rw-r--r--  pkg/abi/linux/linux_abi_autogen_unsafe.go          28
-rw-r--r--  pkg/sentry/fsimpl/host/connected_endpoint_refs.go    5
-rw-r--r--  pkg/sentry/fsimpl/host/inode_refs.go                 5
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/inode_refs.go                5
-rw-r--r--  pkg/sentry/platform/ring0/defs_impl_arm64.go         2
-rw-r--r--  pkg/sentry/socket/unix/socket_refs.go                5
-rw-r--r--  pkg/tcpip/transport/tcp/connect.go                  11
-rw-r--r--  pkg/tcpip/transport/tcp/endpoint.go                  4
-rw-r--r--  pkg/tcpip/transport/tcp/segment.go                  23
-rw-r--r--  pkg/tcpip/transport/tcp/snd.go                      41
-rw-r--r--  pkg/tcpip/transport/tcp/tcp_rack_segment_list.go   178
-rw-r--r--  pkg/tcpip/transport/tcp/tcp_segment_list.go         47
-rw-r--r--  pkg/tcpip/transport/tcp/tcp_state_autogen.go        178
13 files changed, 136 insertions, 396 deletions
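In brief, this change drops the RACK-specific rcList/rackSegmentList and the named segEntry/rackSegEntry fields, embedding segmentEntry directly in segment (with an identity segmentElementMapper), so call sites become seg.Next() instead of seg.segEntry.Next(). A minimal, hedged sketch of that embedded-entry promotion pattern — the entry/node/list names are illustrative, not the gVisor types:

package main

import "fmt"

// entry is the intrusive linkage, analogous to the generated segmentEntry.
type entry struct {
	next, prev *node
}

func (e *entry) Next() *node     { return e.next }
func (e *entry) Prev() *node     { return e.prev }
func (e *entry) SetNext(n *node) { e.next = n }
func (e *entry) SetPrev(n *node) { e.prev = n }

// node embeds entry, so Next/Prev/SetNext/SetPrev are promoted onto *node,
// mirroring how segment embeds segmentEntry after this change.
type node struct {
	entry
	seq int
}

// list is a trimmed-down analogue of segmentList (PushBack only).
type list struct {
	head, tail *node
}

func (l *list) PushBack(n *node) {
	n.SetNext(nil)
	n.SetPrev(l.tail)
	if l.tail != nil {
		l.tail.SetNext(n)
	} else {
		l.head = n
	}
	l.tail = n
}

func main() {
	var l list
	for i := 1; i <= 3; i++ {
		l.PushBack(&node{seq: i})
	}
	// Iteration uses the promoted method directly: n.Next(), not n.entry.Next().
	for n := l.head; n != nil; n = n.Next() {
		fmt.Println(n.seq)
	}
}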
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index ddd8dbc91..150993b02 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -167,7 +167,7 @@ func (s *Statx) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *Statx) UnmarshalUnsafe(src []byte) {
- if s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+ if s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() {
safecopy.CopyOut(unsafe.Pointer(s), src)
} else {
// Type Statx doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -178,7 +178,7 @@ func (s *Statx) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (s *Statx) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
- if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+ if !s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
s.MarshalBytes(buf) // escapes: fallback.
@@ -208,7 +208,7 @@ func (s *Statx) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (s *Statx) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
- if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+ if !s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
@@ -683,7 +683,7 @@ func (f *FUSEHeaderIn) CopyOut(task marshal.Task, addr usermem.Addr) (int, error
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (f *FUSEHeaderIn) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
- if !f.Unique.Packed() && f.Opcode.Packed() {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
// Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
@@ -2025,7 +2025,7 @@ func (i *IPTEntry) Packed() bool {
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (i *IPTEntry) MarshalUnsafe(dst []byte) {
- if i.IP.Packed() && i.Counters.Packed() {
+ if i.Counters.Packed() && i.IP.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(i))
} else {
// Type IPTEntry doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -2208,12 +2208,12 @@ func (i *IPTIP) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (i *IPTIP) Packed() bool {
- return i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed()
+ return i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (i *IPTIP) MarshalUnsafe(dst []byte) {
- if i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
+ if i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(i))
} else {
// Type IPTIP doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -2264,7 +2264,7 @@ func (i *IPTIP) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (i *IPTIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
- if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if !i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
// Type IPTIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
@@ -2290,7 +2290,7 @@ func (i *IPTIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
// WriteTo implements io.WriterTo.WriteTo.
func (i *IPTIP) WriteTo(w io.Writer) (int64, error) {
- if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if !i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
// Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, i.SizeBytes())
i.MarshalBytes(buf)
@@ -2999,7 +2999,7 @@ func (i *IP6TEntry) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (i *IP6TEntry) Packed() bool {
- return i.Counters.Packed() && i.IPv6.Packed()
+ return i.IPv6.Packed() && i.Counters.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
@@ -3196,7 +3196,7 @@ func (i *IP6TIP) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (i *IP6TIP) Packed() bool {
- return i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed()
+ return i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
@@ -3211,7 +3211,7 @@ func (i *IP6TIP) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (i *IP6TIP) UnmarshalUnsafe(src []byte) {
- if i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
safecopy.CopyOut(unsafe.Pointer(i), src)
} else {
// Type IP6TIP doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -3252,7 +3252,7 @@ func (i *IP6TIP) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (i *IP6TIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
- if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if !i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
// Type IP6TIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
@@ -3278,7 +3278,7 @@ func (i *IP6TIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
// WriteTo implements io.WriterTo.WriteTo.
func (i *IP6TIP) WriteTo(w io.Writer) (int64, error) {
- if !i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
+ if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
// Type IP6TIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, i.SizeBytes())
i.MarshalBytes(buf)
diff --git a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
index 3b7bf599e..1fcf8ab73 100644
--- a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
+++ b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
@@ -1,11 +1,10 @@
package host
import (
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/host/inode_refs.go b/pkg/sentry/fsimpl/host/inode_refs.go
index 55c0fb3a9..c920a3c4e 100644
--- a/pkg/sentry/fsimpl/host/inode_refs.go
+++ b/pkg/sentry/fsimpl/host/inode_refs.go
@@ -1,11 +1,10 @@
package host
import (
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/tmpfs/inode_refs.go b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
index 8b7ff185f..38eddde7e 100644
--- a/pkg/sentry/fsimpl/tmpfs/inode_refs.go
+++ b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
@@ -1,11 +1,10 @@
package tmpfs
import (
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/platform/ring0/defs_impl_arm64.go b/pkg/sentry/platform/ring0/defs_impl_arm64.go
index 424b66f76..8ebfbfdb6 100644
--- a/pkg/sentry/platform/ring0/defs_impl_arm64.go
+++ b/pkg/sentry/platform/ring0/defs_impl_arm64.go
@@ -1,10 +1,10 @@
package ring0
import (
- "gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables"
"fmt"
+ "gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
diff --git a/pkg/sentry/socket/unix/socket_refs.go b/pkg/sentry/socket/unix/socket_refs.go
index 4c6ec186b..8eb0c9327 100644
--- a/pkg/sentry/socket/unix/socket_refs.go
+++ b/pkg/sentry/socket/unix/socket_refs.go
@@ -1,11 +1,10 @@
package unix
import (
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/tcpip/transport/tcp/connect.go b/pkg/tcpip/transport/tcp/connect.go
index 87980c0a1..290172ac9 100644
--- a/pkg/tcpip/transport/tcp/connect.go
+++ b/pkg/tcpip/transport/tcp/connect.go
@@ -924,18 +924,7 @@ func (e *endpoint) handleWrite() *tcpip.Error {
first := e.sndQueue.Front()
if first != nil {
- lastSeg := e.snd.writeList.Back()
e.snd.writeList.PushBackList(&e.sndQueue)
- if lastSeg == nil {
- lastSeg = e.snd.writeList.Front()
- } else {
- lastSeg = lastSeg.segEntry.Next()
- }
- // Add new segments to rcList, as rcList and writeList should
- // be consistent.
- for seg := lastSeg; seg != nil; seg = seg.segEntry.Next() {
- e.snd.rcList.PushBack(seg)
- }
e.sndBufInQueue = 0
}
diff --git a/pkg/tcpip/transport/tcp/endpoint.go b/pkg/tcpip/transport/tcp/endpoint.go
index 9df22ac84..4ba0ea1c0 100644
--- a/pkg/tcpip/transport/tcp/endpoint.go
+++ b/pkg/tcpip/transport/tcp/endpoint.go
@@ -1428,7 +1428,7 @@ func (e *endpoint) Peek(vec [][]byte) (int64, tcpip.ControlMessages, *tcpip.Erro
vec = append([][]byte(nil), vec...)
var num int64
- for s := e.rcvList.Front(); s != nil; s = s.segEntry.Next() {
+ for s := e.rcvList.Front(); s != nil; s = s.Next() {
views := s.data.Views()
for i := s.viewToDeliver; i < len(views); i++ {
@@ -2249,7 +2249,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tc
if !handshake {
e.segmentQueue.mu.Lock()
for _, l := range []segmentList{e.segmentQueue.list, e.sndQueue, e.snd.writeList} {
- for s := l.Front(); s != nil; s = s.segEntry.Next() {
+ for s := l.Front(); s != nil; s = s.Next() {
s.id = e.ID
s.route = r.Clone()
e.sndWaker.Assert()
diff --git a/pkg/tcpip/transport/tcp/segment.go b/pkg/tcpip/transport/tcp/segment.go
index a20755f78..94307d31a 100644
--- a/pkg/tcpip/transport/tcp/segment.go
+++ b/pkg/tcpip/transport/tcp/segment.go
@@ -30,13 +30,12 @@ import (
//
// +stateify savable
type segment struct {
- segEntry segmentEntry
- rackSegEntry rackSegmentEntry
- refCnt int32
- id stack.TransportEndpointID `state:"manual"`
- route stack.Route `state:"manual"`
- data buffer.VectorisedView `state:".(buffer.VectorisedView)"`
- hdr header.TCP
+ segmentEntry
+ refCnt int32
+ id stack.TransportEndpointID `state:"manual"`
+ route stack.Route `state:"manual"`
+ data buffer.VectorisedView `state:".(buffer.VectorisedView)"`
+ hdr header.TCP
// views is used as buffer for data when its length is large
// enough to store a VectorisedView.
views [8]buffer.View `state:"nosave"`
@@ -62,16 +61,6 @@ type segment struct {
xmitCount uint32
}
-// segmentMapper is the ElementMapper for the writeList.
-type segmentMapper struct{}
-
-func (segmentMapper) linkerFor(seg *segment) *segmentEntry { return &seg.segEntry }
-
-// rackSegmentMapper is the ElementMapper for the rcList.
-type rackSegmentMapper struct{}
-
-func (rackSegmentMapper) linkerFor(seg *segment) *rackSegmentEntry { return &seg.rackSegEntry }
-
func newSegment(r *stack.Route, id stack.TransportEndpointID, pkt *stack.PacketBuffer) *segment {
s := &segment{
refCnt: 1,
diff --git a/pkg/tcpip/transport/tcp/snd.go b/pkg/tcpip/transport/tcp/snd.go
index 31151f23d..c55589c45 100644
--- a/pkg/tcpip/transport/tcp/snd.go
+++ b/pkg/tcpip/transport/tcp/snd.go
@@ -154,7 +154,6 @@ type sender struct {
closed bool
writeNext *segment
writeList segmentList
- rcList rackSegmentList
resendTimer timer `state:"nosave"`
resendWaker sleep.Waker `state:"nosave"`
@@ -368,7 +367,7 @@ func (s *sender) updateMaxPayloadSize(mtu, count int) {
// Rewind writeNext to the first segment exceeding the MTU. Do nothing
// if it is already before such a packet.
- for seg := s.writeList.Front(); seg != nil; seg = seg.segEntry.Next() {
+ for seg := s.writeList.Front(); seg != nil; seg = seg.Next() {
if seg == s.writeNext {
// We got to writeNext before we could find a segment
// exceeding the MTU.
@@ -623,7 +622,6 @@ func (s *sender) splitSeg(seg *segment, size int) {
nSeg.data.TrimFront(size)
nSeg.sequenceNumber.UpdateForward(seqnum.Size(size))
s.writeList.InsertAfter(seg, nSeg)
- s.rcList.InsertAfter(seg, nSeg)
// The segment being split does not carry PUSH flag because it is
// followed by the newly split segment.
@@ -655,7 +653,7 @@ func (s *sender) NextSeg(nextSegHint *segment) (nextSeg, hint *segment, rescueRt
var s3 *segment
var s4 *segment
// Step 1.
- for seg := nextSegHint; seg != nil; seg = seg.segEntry.Next() {
+ for seg := nextSegHint; seg != nil; seg = seg.Next() {
// Stop iteration if we hit a segment that has never been
// transmitted (i.e. either it has no assigned sequence number
// or if it does have one, it's >= the next sequence number
@@ -685,7 +683,7 @@ func (s *sender) NextSeg(nextSegHint *segment) (nextSeg, hint *segment, rescueRt
// NextSeg():
// (1.c) IsLost(S2) returns true.
if s.ep.scoreboard.IsLost(segSeq) {
- return seg, seg.segEntry.Next(), false
+ return seg, seg.Next(), false
}
// NextSeg():
@@ -699,7 +697,7 @@ func (s *sender) NextSeg(nextSegHint *segment) (nextSeg, hint *segment, rescueRt
// SHOULD be returned.
if s3 == nil {
s3 = seg
- hint = seg.segEntry.Next()
+ hint = seg.Next()
}
}
// NextSeg():
@@ -733,7 +731,7 @@ func (s *sender) NextSeg(nextSegHint *segment) (nextSeg, hint *segment, rescueRt
// range of one segment of up to SMSS octets of
// previously unsent data starting with sequence number
// HighData+1 MUST be returned."
- for seg := s.writeNext; seg != nil; seg = seg.segEntry.Next() {
+ for seg := s.writeNext; seg != nil; seg = seg.Next() {
if s.isAssignedSequenceNumber(seg) && seg.sequenceNumber.LessThan(s.sndNxt) {
continue
}
@@ -775,16 +773,15 @@ func (s *sender) maybeSendSegment(seg *segment, limit int, end seqnum.Value) (se
// triggering bugs in poorly written DNS
// implementations.
var nextTooBig bool
- for seg.segEntry.Next() != nil && seg.segEntry.Next().data.Size() != 0 {
- if seg.data.Size()+seg.segEntry.Next().data.Size() > available {
+ for seg.Next() != nil && seg.Next().data.Size() != 0 {
+ if seg.data.Size()+seg.Next().data.Size() > available {
nextTooBig = true
break
}
- seg.data.Append(seg.segEntry.Next().data)
+ seg.data.Append(seg.Next().data)
// Consume the segment that we just merged in.
- s.writeList.Remove(seg.segEntry.Next())
- s.rcList.Remove(seg.rackSegEntry.Next())
+ s.writeList.Remove(seg.Next())
}
if !nextTooBig && seg.data.Size() < available {
// Segment is not full.
@@ -951,7 +948,7 @@ func (s *sender) handleSACKRecovery(limit int, end seqnum.Value) (dataSent bool)
}
dataSent = true
s.outstanding++
- s.writeNext = nextSeg.segEntry.Next()
+ s.writeNext = nextSeg.Next()
continue
}
@@ -964,7 +961,6 @@ func (s *sender) handleSACKRecovery(limit int, end seqnum.Value) (dataSent bool)
// transmitted in (C.1)."
s.outstanding++
dataSent = true
-
s.sendSegment(nextSeg)
segEnd := nextSeg.sequenceNumber.Add(nextSeg.logicalLen())
@@ -1039,7 +1035,7 @@ func (s *sender) sendData() {
if s.fr.active && s.ep.sackPermitted {
dataSent = s.handleSACKRecovery(s.maxPayloadSize, end)
} else {
- for seg := s.writeNext; seg != nil && s.outstanding < s.sndCwnd; seg = seg.segEntry.Next() {
+ for seg := s.writeNext; seg != nil && s.outstanding < s.sndCwnd; seg = seg.Next() {
cwndLimit := (s.sndCwnd - s.outstanding) * s.maxPayloadSize
if cwndLimit < limit {
limit = cwndLimit
@@ -1047,7 +1043,7 @@ func (s *sender) sendData() {
if s.isAssignedSequenceNumber(seg) && s.ep.sackPermitted && s.ep.scoreboard.IsSACKED(seg.sackBlock()) {
// Move writeNext along so that we don't try and scan data that
// has already been SACKED.
- s.writeNext = seg.segEntry.Next()
+ s.writeNext = seg.Next()
continue
}
if sent := s.maybeSendSegment(seg, limit, end); !sent {
@@ -1055,7 +1051,7 @@ func (s *sender) sendData() {
}
dataSent = true
s.outstanding += s.pCount(seg)
- s.writeNext = seg.segEntry.Next()
+ s.writeNext = seg.Next()
}
}
@@ -1186,7 +1182,7 @@ func (s *sender) SetPipe() {
}
pipe := 0
smss := seqnum.Size(s.ep.scoreboard.SMSS())
- for s1 := s.writeList.Front(); s1 != nil && s1.data.Size() != 0 && s.isAssignedSequenceNumber(s1); s1 = s1.segEntry.Next() {
+ for s1 := s.writeList.Front(); s1 != nil && s1.data.Size() != 0 && s.isAssignedSequenceNumber(s1); s1 = s1.Next() {
// With GSO each segment can be much larger than SMSS. So check the segment
// in SMSS sized ranges.
segEnd := s1.sequenceNumber.Add(seqnum.Size(s1.data.Size()))
@@ -1388,7 +1384,7 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) {
}
if s.writeNext == seg {
- s.writeNext = seg.segEntry.Next()
+ s.writeNext = seg.Next()
}
// Update the RACK fields if SACK is enabled.
@@ -1397,7 +1393,6 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) {
}
s.writeList.Remove(seg)
- s.rcList.Remove(seg)
// if SACK is enabled then Only reduce outstanding if
// the segment was not previously SACKED as these have
@@ -1465,12 +1460,6 @@ func (s *sender) sendSegment(seg *segment) *tcpip.Error {
if s.sndCwnd < s.sndSsthresh {
s.ep.stack.Stats().TCP.SlowStartRetransmits.Increment()
}
-
- // Move the segment which has to be retransmitted to the end of the list, as
- // RACK requires the segments in the order of their transmission times.
- // See: https://tools.ietf.org/html/draft-ietf-tcpm-rack-09#section-6.2
- // Step 5
- s.rcList.PushBack(seg)
}
seg.xmitTime = time.Now()
seg.xmitCount++
diff --git a/pkg/tcpip/transport/tcp/tcp_rack_segment_list.go b/pkg/tcpip/transport/tcp/tcp_rack_segment_list.go
deleted file mode 100644
index ccdaab4df..000000000
--- a/pkg/tcpip/transport/tcp/tcp_rack_segment_list.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package tcp
-
-// List is an intrusive list. Entries can be added to or removed from the list
-// in O(1) time and with no additional memory allocations.
-//
-// The zero value for List is an empty list ready to use.
-//
-// To iterate over a list (where l is a List):
-// for e := l.Front(); e != nil; e = e.Next() {
-// // do something with e.
-// }
-//
-// +stateify savable
-type rackSegmentList struct {
- head *segment
- tail *segment
-}
-
-// Reset resets list l to the empty state.
-func (l *rackSegmentList) Reset() {
- l.head = nil
- l.tail = nil
-}
-
-// Empty returns true iff the list is empty.
-func (l *rackSegmentList) Empty() bool {
- return l.head == nil
-}
-
-// Front returns the first element of list l or nil.
-func (l *rackSegmentList) Front() *segment {
- return l.head
-}
-
-// Back returns the last element of list l or nil.
-func (l *rackSegmentList) Back() *segment {
- return l.tail
-}
-
-// Len returns the number of elements in the list.
-//
-// NOTE: This is an O(n) operation.
-func (l *rackSegmentList) Len() (count int) {
- for e := l.Front(); e != nil; e = (rackSegmentMapper{}.linkerFor(e)).Next() {
- count++
- }
- return count
-}
-
-// PushFront inserts the element e at the front of list l.
-func (l *rackSegmentList) PushFront(e *segment) {
- linker := rackSegmentMapper{}.linkerFor(e)
- linker.SetNext(l.head)
- linker.SetPrev(nil)
- if l.head != nil {
- rackSegmentMapper{}.linkerFor(l.head).SetPrev(e)
- } else {
- l.tail = e
- }
-
- l.head = e
-}
-
-// PushBack inserts the element e at the back of list l.
-func (l *rackSegmentList) PushBack(e *segment) {
- linker := rackSegmentMapper{}.linkerFor(e)
- linker.SetNext(nil)
- linker.SetPrev(l.tail)
- if l.tail != nil {
- rackSegmentMapper{}.linkerFor(l.tail).SetNext(e)
- } else {
- l.head = e
- }
-
- l.tail = e
-}
-
-// PushBackList inserts list m at the end of list l, emptying m.
-func (l *rackSegmentList) PushBackList(m *rackSegmentList) {
- if l.head == nil {
- l.head = m.head
- l.tail = m.tail
- } else if m.head != nil {
- rackSegmentMapper{}.linkerFor(l.tail).SetNext(m.head)
- rackSegmentMapper{}.linkerFor(m.head).SetPrev(l.tail)
-
- l.tail = m.tail
- }
- m.head = nil
- m.tail = nil
-}
-
-// InsertAfter inserts e after b.
-func (l *rackSegmentList) InsertAfter(b, e *segment) {
- bLinker := rackSegmentMapper{}.linkerFor(b)
- eLinker := rackSegmentMapper{}.linkerFor(e)
-
- a := bLinker.Next()
-
- eLinker.SetNext(a)
- eLinker.SetPrev(b)
- bLinker.SetNext(e)
-
- if a != nil {
- rackSegmentMapper{}.linkerFor(a).SetPrev(e)
- } else {
- l.tail = e
- }
-}
-
-// InsertBefore inserts e before a.
-func (l *rackSegmentList) InsertBefore(a, e *segment) {
- aLinker := rackSegmentMapper{}.linkerFor(a)
- eLinker := rackSegmentMapper{}.linkerFor(e)
-
- b := aLinker.Prev()
- eLinker.SetNext(a)
- eLinker.SetPrev(b)
- aLinker.SetPrev(e)
-
- if b != nil {
- rackSegmentMapper{}.linkerFor(b).SetNext(e)
- } else {
- l.head = e
- }
-}
-
-// Remove removes e from l.
-func (l *rackSegmentList) Remove(e *segment) {
- linker := rackSegmentMapper{}.linkerFor(e)
- prev := linker.Prev()
- next := linker.Next()
-
- if prev != nil {
- rackSegmentMapper{}.linkerFor(prev).SetNext(next)
- } else if l.head == e {
- l.head = next
- }
-
- if next != nil {
- rackSegmentMapper{}.linkerFor(next).SetPrev(prev)
- } else if l.tail == e {
- l.tail = prev
- }
-
- linker.SetNext(nil)
- linker.SetPrev(nil)
-}
-
-// Entry is a default implementation of Linker. Users can add anonymous fields
-// of this type to their structs to make them automatically implement the
-// methods needed by List.
-//
-// +stateify savable
-type rackSegmentEntry struct {
- next *segment
- prev *segment
-}
-
-// Next returns the entry that follows e in the list.
-func (e *rackSegmentEntry) Next() *segment {
- return e.next
-}
-
-// Prev returns the entry that precedes e in the list.
-func (e *rackSegmentEntry) Prev() *segment {
- return e.prev
-}
-
-// SetNext assigns 'entry' as the entry that follows e in the list.
-func (e *rackSegmentEntry) SetNext(elem *segment) {
- e.next = elem
-}
-
-// SetPrev assigns 'entry' as the entry that precedes e in the list.
-func (e *rackSegmentEntry) SetPrev(elem *segment) {
- e.prev = elem
-}
diff --git a/pkg/tcpip/transport/tcp/tcp_segment_list.go b/pkg/tcpip/transport/tcp/tcp_segment_list.go
index 5c1f7ee41..fcd0c7ec1 100644
--- a/pkg/tcpip/transport/tcp/tcp_segment_list.go
+++ b/pkg/tcpip/transport/tcp/tcp_segment_list.go
@@ -1,5 +1,20 @@
package tcp
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type segmentElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (segmentElementMapper) linkerFor(elem *segment) *segment { return elem }
+
// List is an intrusive list. Entries can be added to or removed from the list
// in O(1) time and with no additional memory allocations.
//
@@ -41,7 +56,7 @@ func (l *segmentList) Back() *segment {
//
// NOTE: This is an O(n) operation.
func (l *segmentList) Len() (count int) {
- for e := l.Front(); e != nil; e = (segmentMapper{}.linkerFor(e)).Next() {
+ for e := l.Front(); e != nil; e = (segmentElementMapper{}.linkerFor(e)).Next() {
count++
}
return count
@@ -49,11 +64,11 @@ func (l *segmentList) Len() (count int) {
// PushFront inserts the element e at the front of list l.
func (l *segmentList) PushFront(e *segment) {
- linker := segmentMapper{}.linkerFor(e)
+ linker := segmentElementMapper{}.linkerFor(e)
linker.SetNext(l.head)
linker.SetPrev(nil)
if l.head != nil {
- segmentMapper{}.linkerFor(l.head).SetPrev(e)
+ segmentElementMapper{}.linkerFor(l.head).SetPrev(e)
} else {
l.tail = e
}
@@ -63,11 +78,11 @@ func (l *segmentList) PushFront(e *segment) {
// PushBack inserts the element e at the back of list l.
func (l *segmentList) PushBack(e *segment) {
- linker := segmentMapper{}.linkerFor(e)
+ linker := segmentElementMapper{}.linkerFor(e)
linker.SetNext(nil)
linker.SetPrev(l.tail)
if l.tail != nil {
- segmentMapper{}.linkerFor(l.tail).SetNext(e)
+ segmentElementMapper{}.linkerFor(l.tail).SetNext(e)
} else {
l.head = e
}
@@ -81,8 +96,8 @@ func (l *segmentList) PushBackList(m *segmentList) {
l.head = m.head
l.tail = m.tail
} else if m.head != nil {
- segmentMapper{}.linkerFor(l.tail).SetNext(m.head)
- segmentMapper{}.linkerFor(m.head).SetPrev(l.tail)
+ segmentElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ segmentElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
l.tail = m.tail
}
@@ -92,8 +107,8 @@ func (l *segmentList) PushBackList(m *segmentList) {
// InsertAfter inserts e after b.
func (l *segmentList) InsertAfter(b, e *segment) {
- bLinker := segmentMapper{}.linkerFor(b)
- eLinker := segmentMapper{}.linkerFor(e)
+ bLinker := segmentElementMapper{}.linkerFor(b)
+ eLinker := segmentElementMapper{}.linkerFor(e)
a := bLinker.Next()
@@ -102,7 +117,7 @@ func (l *segmentList) InsertAfter(b, e *segment) {
bLinker.SetNext(e)
if a != nil {
- segmentMapper{}.linkerFor(a).SetPrev(e)
+ segmentElementMapper{}.linkerFor(a).SetPrev(e)
} else {
l.tail = e
}
@@ -110,8 +125,8 @@ func (l *segmentList) InsertAfter(b, e *segment) {
// InsertBefore inserts e before a.
func (l *segmentList) InsertBefore(a, e *segment) {
- aLinker := segmentMapper{}.linkerFor(a)
- eLinker := segmentMapper{}.linkerFor(e)
+ aLinker := segmentElementMapper{}.linkerFor(a)
+ eLinker := segmentElementMapper{}.linkerFor(e)
b := aLinker.Prev()
eLinker.SetNext(a)
@@ -119,7 +134,7 @@ func (l *segmentList) InsertBefore(a, e *segment) {
aLinker.SetPrev(e)
if b != nil {
- segmentMapper{}.linkerFor(b).SetNext(e)
+ segmentElementMapper{}.linkerFor(b).SetNext(e)
} else {
l.head = e
}
@@ -127,18 +142,18 @@ func (l *segmentList) InsertBefore(a, e *segment) {
// Remove removes e from l.
func (l *segmentList) Remove(e *segment) {
- linker := segmentMapper{}.linkerFor(e)
+ linker := segmentElementMapper{}.linkerFor(e)
prev := linker.Prev()
next := linker.Next()
if prev != nil {
- segmentMapper{}.linkerFor(prev).SetNext(next)
+ segmentElementMapper{}.linkerFor(prev).SetNext(next)
} else if l.head == e {
l.head = next
}
if next != nil {
- segmentMapper{}.linkerFor(next).SetPrev(prev)
+ segmentElementMapper{}.linkerFor(next).SetPrev(prev)
} else if l.tail == e {
l.tail = prev
}
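For context on the segmentElementMapper added above: linkerFor can return the element itself because segment embeds segmentEntry, so the Next/Prev/SetNext/SetPrev methods the generated list code calls are promoted onto *segment. A hedged, self-contained sketch of that identity-mapper shape — elem/elemEntry/elemMapper are illustrative names, not part of the package:

package main

import "fmt"

// elemEntry plays the role of segmentEntry: it owns the links and the
// linker methods.
type elemEntry struct{ next, prev *elem }

func (e *elemEntry) Next() *elem     { return e.next }
func (e *elemEntry) Prev() *elem     { return e.prev }
func (e *elemEntry) SetNext(x *elem) { e.next = x }
func (e *elemEntry) SetPrev(x *elem) { e.prev = x }

// elem embeds elemEntry, so *elem itself satisfies the linker methods.
type elem struct {
	elemEntry
	val string
}

// elemMapper mirrors segmentElementMapper: an identity mapping from the
// element to its linker.
type elemMapper struct{}

func (elemMapper) linkerFor(e *elem) *elem { return e }

func main() {
	a, b := &elem{val: "a"}, &elem{val: "b"}
	// The generated list code always goes through the mapper; with the
	// identity mapping this is just a method call on the element.
	elemMapper{}.linkerFor(a).SetNext(b)
	elemMapper{}.linkerFor(b).SetPrev(a)
	for e := a; e != nil; e = e.Next() {
		fmt.Println(e.val)
	}
}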
diff --git a/pkg/tcpip/transport/tcp/tcp_state_autogen.go b/pkg/tcpip/transport/tcp/tcp_state_autogen.go
index 09f6c6399..bed45e9a1 100644
--- a/pkg/tcpip/transport/tcp/tcp_state_autogen.go
+++ b/pkg/tcpip/transport/tcp/tcp_state_autogen.go
@@ -535,8 +535,7 @@ func (x *segment) StateTypeName() string {
func (x *segment) StateFields() []string {
return []string{
- "segEntry",
- "rackSegEntry",
+ "segmentEntry",
"refCnt",
"data",
"hdr",
@@ -561,50 +560,48 @@ func (x *segment) beforeSave() {}
func (x *segment) StateSave(m state.Sink) {
x.beforeSave()
var data buffer.VectorisedView = x.saveData()
- m.SaveValue(3, data)
+ m.SaveValue(2, data)
var options []byte = x.saveOptions()
- m.SaveValue(13, options)
+ m.SaveValue(12, options)
var rcvdTime unixTime = x.saveRcvdTime()
- m.SaveValue(15, rcvdTime)
+ m.SaveValue(14, rcvdTime)
var xmitTime unixTime = x.saveXmitTime()
- m.SaveValue(16, xmitTime)
- m.Save(0, &x.segEntry)
- m.Save(1, &x.rackSegEntry)
- m.Save(2, &x.refCnt)
- m.Save(4, &x.hdr)
- m.Save(5, &x.viewToDeliver)
- m.Save(6, &x.sequenceNumber)
- m.Save(7, &x.ackNumber)
- m.Save(8, &x.flags)
- m.Save(9, &x.window)
- m.Save(10, &x.csum)
- m.Save(11, &x.csumValid)
- m.Save(12, &x.parsedOptions)
- m.Save(14, &x.hasNewSACKInfo)
- m.Save(17, &x.xmitCount)
+ m.SaveValue(15, xmitTime)
+ m.Save(0, &x.segmentEntry)
+ m.Save(1, &x.refCnt)
+ m.Save(3, &x.hdr)
+ m.Save(4, &x.viewToDeliver)
+ m.Save(5, &x.sequenceNumber)
+ m.Save(6, &x.ackNumber)
+ m.Save(7, &x.flags)
+ m.Save(8, &x.window)
+ m.Save(9, &x.csum)
+ m.Save(10, &x.csumValid)
+ m.Save(11, &x.parsedOptions)
+ m.Save(13, &x.hasNewSACKInfo)
+ m.Save(16, &x.xmitCount)
}
func (x *segment) afterLoad() {}
func (x *segment) StateLoad(m state.Source) {
- m.Load(0, &x.segEntry)
- m.Load(1, &x.rackSegEntry)
- m.Load(2, &x.refCnt)
- m.Load(4, &x.hdr)
- m.Load(5, &x.viewToDeliver)
- m.Load(6, &x.sequenceNumber)
- m.Load(7, &x.ackNumber)
- m.Load(8, &x.flags)
- m.Load(9, &x.window)
- m.Load(10, &x.csum)
- m.Load(11, &x.csumValid)
- m.Load(12, &x.parsedOptions)
- m.Load(14, &x.hasNewSACKInfo)
- m.Load(17, &x.xmitCount)
- m.LoadValue(3, new(buffer.VectorisedView), func(y interface{}) { x.loadData(y.(buffer.VectorisedView)) })
- m.LoadValue(13, new([]byte), func(y interface{}) { x.loadOptions(y.([]byte)) })
- m.LoadValue(15, new(unixTime), func(y interface{}) { x.loadRcvdTime(y.(unixTime)) })
- m.LoadValue(16, new(unixTime), func(y interface{}) { x.loadXmitTime(y.(unixTime)) })
+ m.Load(0, &x.segmentEntry)
+ m.Load(1, &x.refCnt)
+ m.Load(3, &x.hdr)
+ m.Load(4, &x.viewToDeliver)
+ m.Load(5, &x.sequenceNumber)
+ m.Load(6, &x.ackNumber)
+ m.Load(7, &x.flags)
+ m.Load(8, &x.window)
+ m.Load(9, &x.csum)
+ m.Load(10, &x.csumValid)
+ m.Load(11, &x.parsedOptions)
+ m.Load(13, &x.hasNewSACKInfo)
+ m.Load(16, &x.xmitCount)
+ m.LoadValue(2, new(buffer.VectorisedView), func(y interface{}) { x.loadData(y.(buffer.VectorisedView)) })
+ m.LoadValue(12, new([]byte), func(y interface{}) { x.loadOptions(y.([]byte)) })
+ m.LoadValue(14, new(unixTime), func(y interface{}) { x.loadRcvdTime(y.(unixTime)) })
+ m.LoadValue(15, new(unixTime), func(y interface{}) { x.loadXmitTime(y.(unixTime)) })
}
func (x *segmentQueue) StateTypeName() string {
@@ -659,7 +656,6 @@ func (x *sender) StateFields() []string {
"closed",
"writeNext",
"writeList",
- "rcList",
"rtt",
"rto",
"minRTO",
@@ -699,19 +695,18 @@ func (x *sender) StateSave(m state.Sink) {
m.Save(14, &x.closed)
m.Save(15, &x.writeNext)
m.Save(16, &x.writeList)
- m.Save(17, &x.rcList)
- m.Save(18, &x.rtt)
- m.Save(19, &x.rto)
- m.Save(20, &x.minRTO)
- m.Save(21, &x.maxRTO)
- m.Save(22, &x.maxRetries)
- m.Save(23, &x.maxPayloadSize)
- m.Save(24, &x.gso)
- m.Save(25, &x.sndWndScale)
- m.Save(26, &x.maxSentAck)
- m.Save(27, &x.state)
- m.Save(28, &x.cc)
- m.Save(29, &x.rc)
+ m.Save(17, &x.rtt)
+ m.Save(18, &x.rto)
+ m.Save(19, &x.minRTO)
+ m.Save(20, &x.maxRTO)
+ m.Save(21, &x.maxRetries)
+ m.Save(22, &x.maxPayloadSize)
+ m.Save(23, &x.gso)
+ m.Save(24, &x.sndWndScale)
+ m.Save(25, &x.maxSentAck)
+ m.Save(26, &x.state)
+ m.Save(27, &x.cc)
+ m.Save(28, &x.rc)
}
func (x *sender) StateLoad(m state.Source) {
@@ -729,19 +724,18 @@ func (x *sender) StateLoad(m state.Source) {
m.Load(14, &x.closed)
m.Load(15, &x.writeNext)
m.Load(16, &x.writeList)
- m.Load(17, &x.rcList)
- m.Load(18, &x.rtt)
- m.Load(19, &x.rto)
- m.Load(20, &x.minRTO)
- m.Load(21, &x.maxRTO)
- m.Load(22, &x.maxRetries)
- m.Load(23, &x.maxPayloadSize)
- m.Load(24, &x.gso)
- m.Load(25, &x.sndWndScale)
- m.Load(26, &x.maxSentAck)
- m.Load(27, &x.state)
- m.Load(28, &x.cc)
- m.Load(29, &x.rc)
+ m.Load(17, &x.rtt)
+ m.Load(18, &x.rto)
+ m.Load(19, &x.minRTO)
+ m.Load(20, &x.maxRTO)
+ m.Load(21, &x.maxRetries)
+ m.Load(22, &x.maxPayloadSize)
+ m.Load(23, &x.gso)
+ m.Load(24, &x.sndWndScale)
+ m.Load(25, &x.maxSentAck)
+ m.Load(26, &x.state)
+ m.Load(27, &x.cc)
+ m.Load(28, &x.rc)
m.LoadValue(1, new(unixTime), func(y interface{}) { x.loadLastSendTime(y.(unixTime)) })
m.LoadValue(12, new(unixTime), func(y interface{}) { x.loadRttMeasureTime(y.(unixTime)) })
m.LoadValue(13, new(unixTime), func(y interface{}) { x.loadFirstRetransmittedSegXmitTime(y.(unixTime)) })
@@ -893,58 +887,6 @@ func (x *endpointEntry) StateLoad(m state.Source) {
m.Load(1, &x.prev)
}
-func (x *rackSegmentList) StateTypeName() string {
- return "pkg/tcpip/transport/tcp.rackSegmentList"
-}
-
-func (x *rackSegmentList) StateFields() []string {
- return []string{
- "head",
- "tail",
- }
-}
-
-func (x *rackSegmentList) beforeSave() {}
-
-func (x *rackSegmentList) StateSave(m state.Sink) {
- x.beforeSave()
- m.Save(0, &x.head)
- m.Save(1, &x.tail)
-}
-
-func (x *rackSegmentList) afterLoad() {}
-
-func (x *rackSegmentList) StateLoad(m state.Source) {
- m.Load(0, &x.head)
- m.Load(1, &x.tail)
-}
-
-func (x *rackSegmentEntry) StateTypeName() string {
- return "pkg/tcpip/transport/tcp.rackSegmentEntry"
-}
-
-func (x *rackSegmentEntry) StateFields() []string {
- return []string{
- "next",
- "prev",
- }
-}
-
-func (x *rackSegmentEntry) beforeSave() {}
-
-func (x *rackSegmentEntry) StateSave(m state.Sink) {
- x.beforeSave()
- m.Save(0, &x.next)
- m.Save(1, &x.prev)
-}
-
-func (x *rackSegmentEntry) afterLoad() {}
-
-func (x *rackSegmentEntry) StateLoad(m state.Source) {
- m.Load(0, &x.next)
- m.Load(1, &x.prev)
-}
-
func (x *segmentList) StateTypeName() string {
return "pkg/tcpip/transport/tcp.segmentList"
}
@@ -1016,8 +958,6 @@ func init() {
state.Register((*unixTime)(nil))
state.Register((*endpointList)(nil))
state.Register((*endpointEntry)(nil))
- state.Register((*rackSegmentList)(nil))
- state.Register((*rackSegmentEntry)(nil))
state.Register((*segmentList)(nil))
state.Register((*segmentEntry)(nil))
}