-rw-r--r--  pkg/abi/linux/linux_abi_autogen_unsafe.go        | 24
-rw-r--r--  pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go  |  6
-rw-r--r--  pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go  |  6
-rw-r--r--  pkg/tcpip/transport/tcp/protocol.go              |  2
-rw-r--r--  pkg/tcpip/transport/tcp/rcv.go                    |  4
-rw-r--r--  pkg/tcpip/transport/tcp/segment.go                |  6
-rw-r--r--  pkg/tcpip/transport/tcp/segment_unsafe.go         | 23
7 files changed, 50 insertions, 21 deletions
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index f9caaa7bb..c3ecf8b5b 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -168,7 +168,7 @@ func (s *Statx) UnmarshalUnsafe(src []byte) {
 // CopyOutN implements marshal.Marshallable.CopyOutN.
 //go:nosplit
 func (s *Statx) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
-    if !s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() {
+    if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
         // Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
         buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
         s.MarshalBytes(buf) // escapes: fallback.
@@ -617,12 +617,12 @@ func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) {
 // Packed implements marshal.Marshallable.Packed.
 //go:nosplit
 func (f *FUSEHeaderIn) Packed() bool {
-    return f.Opcode.Packed() && f.Unique.Packed()
+    return f.Unique.Packed() && f.Opcode.Packed()
 }
 
 // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
 func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) {
-    if f.Opcode.Packed() && f.Unique.Packed() {
+    if f.Unique.Packed() && f.Opcode.Packed() {
         safecopy.CopyIn(dst, unsafe.Pointer(f))
     } else {
         // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -643,7 +643,7 @@ func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) {
 // CopyOutN implements marshal.Marshallable.CopyOutN.
 //go:nosplit
 func (f *FUSEHeaderIn) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
-    if !f.Opcode.Packed() && f.Unique.Packed() {
+    if !f.Unique.Packed() && f.Opcode.Packed() {
         // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
         buf := task.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
         f.MarshalBytes(buf) // escapes: fallback.
@@ -699,7 +699,7 @@ func (f *FUSEHeaderIn) CopyIn(task marshal.Task, addr usermem.Addr) (int, error)
 
 // WriteTo implements io.WriterTo.WriteTo.
 func (f *FUSEHeaderIn) WriteTo(w io.Writer) (int64, error) {
-    if !f.Unique.Packed() && f.Opcode.Packed() {
+    if !f.Opcode.Packed() && f.Unique.Packed() {
         // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
         buf := make([]byte, f.SizeBytes())
         f.MarshalBytes(buf)
@@ -1396,12 +1396,12 @@ func (i *IPTEntry) UnmarshalBytes(src []byte) {
 // Packed implements marshal.Marshallable.Packed.
 //go:nosplit
 func (i *IPTEntry) Packed() bool {
-    return i.Counters.Packed() && i.IP.Packed()
+    return i.IP.Packed() && i.Counters.Packed()
 }
 
 // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
 func (i *IPTEntry) MarshalUnsafe(dst []byte) {
-    if i.IP.Packed() && i.Counters.Packed() {
+    if i.Counters.Packed() && i.IP.Packed() {
         safecopy.CopyIn(dst, unsafe.Pointer(i))
     } else {
         // Type IPTEntry doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -1411,7 +1411,7 @@ func (i *IPTEntry) MarshalUnsafe(dst []byte) {
 
 // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
 func (i *IPTEntry) UnmarshalUnsafe(src []byte) {
-    if i.IP.Packed() && i.Counters.Packed() {
+    if i.Counters.Packed() && i.IP.Packed() {
         safecopy.CopyOut(unsafe.Pointer(i), src)
     } else {
         // Type IPTEntry doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -1422,7 +1422,7 @@ func (i *IPTEntry) UnmarshalUnsafe(src []byte) {
 // CopyOutN implements marshal.Marshallable.CopyOutN.
 //go:nosplit
 func (i *IPTEntry) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
-    if !i.IP.Packed() && i.Counters.Packed() {
+    if !i.Counters.Packed() && i.IP.Packed() {
         // Type IPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
         buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
         i.MarshalBytes(buf) // escapes: fallback.
@@ -1584,12 +1584,12 @@ func (i *IPTIP) UnmarshalBytes(src []byte) {
 // Packed implements marshal.Marshallable.Packed.
 //go:nosplit
 func (i *IPTIP) Packed() bool {
-    return i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed()
+    return i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed()
 }
 
 // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
 func (i *IPTIP) MarshalUnsafe(dst []byte) {
-    if i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
+    if i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
         safecopy.CopyIn(dst, unsafe.Pointer(i))
     } else {
         // Type IPTIP doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -1599,7 +1599,7 @@ func (i *IPTIP) MarshalUnsafe(dst []byte) {
 
 // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
 func (i *IPTIP) UnmarshalUnsafe(src []byte) {
-    if i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+    if i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
         safecopy.CopyOut(unsafe.Pointer(i), src)
     } else {
         // Type IPTIP doesn't have a packed layout in memory, fallback to UnmarshalBytes.
diff --git a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
index a104e8382..c18da4ef2 100644
--- a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
@@ -288,7 +288,7 @@ func (s *Stat) UnmarshalBytes(src []byte) {
 // Packed implements marshal.Marshallable.Packed.
 //go:nosplit
 func (s *Stat) Packed() bool {
-    return s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed()
+    return s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed()
 }
 
 // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
@@ -314,7 +314,7 @@ func (s *Stat) UnmarshalUnsafe(src []byte) {
 // CopyOutN implements marshal.Marshallable.CopyOutN.
 //go:nosplit
 func (s *Stat) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
-    if !s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
+    if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
         // Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
         buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
         s.MarshalBytes(buf) // escapes: fallback.
@@ -344,7 +344,7 @@ func (s *Stat) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
 // CopyIn implements marshal.Marshallable.CopyIn.
 //go:nosplit
 func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
-    if !s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
+    if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
         // Type Stat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
         buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
         length, err := task.CopyInBytes(addr, buf) // escapes: okay.
diff --git a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
index ad3e95bc2..071292720 100644
--- a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
@@ -310,7 +310,7 @@ func (s *Stat) MarshalUnsafe(dst []byte) {
 
 // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
 func (s *Stat) UnmarshalUnsafe(src []byte) {
-    if s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
+    if s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
         safecopy.CopyOut(unsafe.Pointer(s), src)
     } else {
         // Type Stat doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -351,7 +351,7 @@ func (s *Stat) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
 // CopyIn implements marshal.Marshallable.CopyIn.
 //go:nosplit
 func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
-    if !s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
+    if !s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
         // Type Stat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
         buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
         length, err := task.CopyInBytes(addr, buf) // escapes: okay.
@@ -377,7 +377,7 @@ func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
 
 // WriteTo implements io.WriterTo.WriteTo.
 func (s *Stat) WriteTo(w io.Writer) (int64, error) {
-    if !s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
+    if !s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
         // Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
         buf := make([]byte, s.SizeBytes())
         s.MarshalBytes(buf)
diff --git a/pkg/tcpip/transport/tcp/protocol.go b/pkg/tcpip/transport/tcp/protocol.go
index b34e47bbd..5d6174a59 100644
--- a/pkg/tcpip/transport/tcp/protocol.go
+++ b/pkg/tcpip/transport/tcp/protocol.go
@@ -49,7 +49,7 @@ const (
 
     // DefaultReceiveBufferSize is the default size of the receive buffer
     // for an endpoint.
-    DefaultReceiveBufferSize = 1 << 20 // 1MB
+    DefaultReceiveBufferSize = 32 << 10 // 32KB
 
     // MaxBufferSize is the largest size a receive/send buffer can grow to.
     MaxBufferSize = 4 << 20 // 4MB
diff --git a/pkg/tcpip/transport/tcp/rcv.go b/pkg/tcpip/transport/tcp/rcv.go
index dd89a292a..5e0bfe585 100644
--- a/pkg/tcpip/transport/tcp/rcv.go
+++ b/pkg/tcpip/transport/tcp/rcv.go
@@ -372,7 +372,7 @@ func (r *receiver) handleRcvdSegment(s *segment) (drop bool, err *tcpip.Error) {
         // We only store the segment if it's within our buffer
         // size limit.
         if r.pendingBufUsed < r.pendingBufSize {
-            r.pendingBufUsed += s.logicalLen()
+            r.pendingBufUsed += seqnum.Size(s.segMemSize())
             s.incRef()
             heap.Push(&r.pendingRcvdSegments, s)
             UpdateSACKBlocks(&r.ep.sack, segSeq, segSeq.Add(segLen), r.rcvNxt)
@@ -406,7 +406,7 @@ func (r *receiver) handleRcvdSegment(s *segment) (drop bool, err *tcpip.Error) {
         }
 
         heap.Pop(&r.pendingRcvdSegments)
-        r.pendingBufUsed -= s.logicalLen()
+        r.pendingBufUsed -= seqnum.Size(s.segMemSize())
         s.decRef()
     }
     return false, nil
diff --git a/pkg/tcpip/transport/tcp/segment.go b/pkg/tcpip/transport/tcp/segment.go
index 0280892a8..bb60dc29d 100644
--- a/pkg/tcpip/transport/tcp/segment.go
+++ b/pkg/tcpip/transport/tcp/segment.go
@@ -138,6 +138,12 @@ func (s *segment) logicalLen() seqnum.Size {
     return l
 }
 
+// segMemSize is the amount of memory used to hold the segment data and
+// the associated metadata.
+func (s *segment) segMemSize() int {
+    return segSize + s.data.Size()
+}
+
 // parse populates the sequence & ack numbers, flags, and window fields of the
 // segment from the TCP header stored in the data. It then updates the view to
 // skip the header.
diff --git a/pkg/tcpip/transport/tcp/segment_unsafe.go b/pkg/tcpip/transport/tcp/segment_unsafe.go
new file mode 100644
index 000000000..0ab7b8f56
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/segment_unsafe.go
@@ -0,0 +1,23 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tcp
+
+import (
+    "unsafe"
+)
+
+const (
+    segSize = int(unsafe.Sizeof(segment{}))
+)
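
The substance of the TCP portion of this patch is the switch from charging a queued out-of-order segment for its TCP logical length to charging it for its in-memory footprint: the fixed size of the segment struct, taken once with unsafe.Sizeof in segment_unsafe.go, plus the payload bytes it currently holds. The standalone sketch below illustrates that accounting pattern; the pendingSegment and reassemblyQueue types and their fields are simplified stand-ins invented for illustration, not the actual gVisor types.

package main

import (
    "fmt"
    "unsafe"
)

// pendingSegment is a simplified stand-in for a queued out-of-order TCP
// segment: a fixed-size header plus a variable-length payload. It is not
// the gVisor segment type.
type pendingSegment struct {
    seq     uint32
    flags   uint8
    payload []byte
}

// pendingSegSize mirrors segSize in segment_unsafe.go: the fixed per-segment
// overhead, computed once from the struct layout. unsafe.Sizeof counts only
// the slice header here, not the payload backing array, so the payload is
// added separately below.
const pendingSegSize = int(unsafe.Sizeof(pendingSegment{}))

// memSize mirrors segMemSize: struct overhead plus the payload bytes held.
func (s *pendingSegment) memSize() int {
    return pendingSegSize + len(s.payload)
}

// reassemblyQueue charges each stored segment for its full memory footprint,
// so many tiny out-of-order segments cannot blow past the budget even though
// their combined logical length is small.
type reassemblyQueue struct {
    used  int
    limit int
    segs  []*pendingSegment
}

// push queues s only if doing so stays within the memory budget.
func (q *reassemblyQueue) push(s *pendingSegment) bool {
    if q.used+s.memSize() > q.limit {
        return false // over budget: drop rather than queue
    }
    q.used += s.memSize()
    q.segs = append(q.segs, s)
    return true
}

// pop releases the oldest segment and returns its memory to the budget,
// matching the paired -= in rcv.go.
func (q *reassemblyQueue) pop() {
    if len(q.segs) == 0 {
        return
    }
    q.used -= q.segs[0].memSize()
    q.segs = q.segs[1:]
}

func main() {
    q := &reassemblyQueue{limit: 4096}
    ok := q.push(&pendingSegment{seq: 100, payload: make([]byte, 1)})
    fmt.Println(ok, q.used) // even a 1-byte payload costs pendingSegSize+1 bytes
    q.pop()
    fmt.Println(q.used) // back to 0
}

The push/pop symmetry is the important design point: whatever is charged when a segment is queued must be returned when it is popped and released, exactly as the paired += and -= of seqnum.Size(s.segMemSize()) do in rcv.go.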