Diffstat (limited to 'pkg')
-rw-r--r--  pkg/tcpip/buffer/buffer_unsafe_state_autogen.go  |  3
-rw-r--r--  pkg/tcpip/buffer/view.go                         | 10
-rw-r--r--  pkg/tcpip/buffer/view_unsafe.go                  | 22
-rw-r--r--  pkg/tcpip/network/fragmentation/fragmentation.go | 32
-rw-r--r--  pkg/tcpip/network/fragmentation/reassembler.go   | 39
-rw-r--r--  pkg/tcpip/network/ipv4/ipv4.go                   |  5
-rw-r--r--  pkg/tcpip/network/ipv6/ipv6.go                   |  4
-rw-r--r--  pkg/tcpip/stack/packet_buffer.go                 |  6
-rw-r--r--  pkg/tcpip/stack/packet_buffer_unsafe.go          | 19
-rw-r--r--  pkg/tcpip/stack/stack_unsafe_state_autogen.go    |  3
10 files changed, 103 insertions(+), 40 deletions(-)
diff --git a/pkg/tcpip/buffer/buffer_unsafe_state_autogen.go b/pkg/tcpip/buffer/buffer_unsafe_state_autogen.go
new file mode 100644
index 000000000..5a5c40722
--- /dev/null
+++ b/pkg/tcpip/buffer/buffer_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package buffer
diff --git a/pkg/tcpip/buffer/view.go b/pkg/tcpip/buffer/view.go
index 91cc62cc8..b05e81526 100644
--- a/pkg/tcpip/buffer/view.go
+++ b/pkg/tcpip/buffer/view.go
@@ -239,6 +239,16 @@ func (vv *VectorisedView) Size() int {
return vv.size
}
+// MemSize returns the estimated size of vv in memory, including backing
+// buffer data.
+func (vv *VectorisedView) MemSize() int {
+ var size int
+ for _, v := range vv.views {
+ size += cap(v)
+ }
+ return size + cap(vv.views)*viewStructSize + vectorisedViewStructSize
+}
+
// ToView returns a single view containing the content of the vectorised view.
//
// If the vectorised view contains a single view, that view will be returned
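The new MemSize above estimates the memory footprint rather than the payload length: it sums the capacity of every backing array, plus one slice header per view slot, plus the VectorisedView struct itself. Below is a minimal runnable sketch of that accounting, using simplified stand-in types rather than the real gvisor.dev/gvisor/pkg/tcpip/buffer package.

package main

import (
	"fmt"
	"unsafe"
)

// Simplified stand-ins for buffer.View / buffer.VectorisedView; the field
// layout here is an assumption for illustration only.
type view []byte

type vectorisedView struct {
	views []view
	size  int
}

const (
	viewStructSize           = int(unsafe.Sizeof(view{}))
	vectorisedViewStructSize = int(unsafe.Sizeof(vectorisedView{}))
)

// memSize mirrors the accounting in VectorisedView.MemSize: capacity of every
// backing buffer, plus the slice header for each view slot, plus the
// VectorisedView struct itself.
func (vv *vectorisedView) memSize() int {
	var size int
	for _, v := range vv.views {
		size += cap(v)
	}
	return size + cap(vv.views)*viewStructSize + vectorisedViewStructSize
}

func main() {
	vv := vectorisedView{
		views: []view{make(view, 10, 64), make(view, 20, 32)},
		size:  30,
	}
	fmt.Println(vv.size)      // 30: payload bytes only
	fmt.Println(vv.memSize()) // 64+32 backing bytes + 2*24 view headers + 32 struct, on 64-bit
}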
diff --git a/pkg/tcpip/buffer/view_unsafe.go b/pkg/tcpip/buffer/view_unsafe.go
new file mode 100644
index 000000000..75ccd40f8
--- /dev/null
+++ b/pkg/tcpip/buffer/view_unsafe.go
@@ -0,0 +1,22 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package buffer
+
+import "unsafe"
+
+const (
+ vectorisedViewStructSize = int(unsafe.Sizeof(VectorisedView{}))
+ viewStructSize = int(unsafe.Sizeof(View{}))
+)
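A note on the pattern in this new file: unsafe.Sizeof is folded at compile time, so the struct-size values can be declared as true constants in a single *_unsafe.go file while the rest of the package never imports unsafe. A small standalone sketch with a hypothetical struct:

package main

import (
	"fmt"
	"unsafe"
)

// sampleHeader is a hypothetical struct used only to demonstrate the pattern.
type sampleHeader struct {
	src, dst uint32
	proto    uint8
}

// sampleHeaderStructSize is an untyped constant evaluated at compile time; it
// can appear in other constant expressions and costs nothing at runtime.
const sampleHeaderStructSize = int(unsafe.Sizeof(sampleHeader{}))

func main() {
	fmt.Println(sampleHeaderStructSize) // 12: two uint32 fields plus a padded uint8
}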
diff --git a/pkg/tcpip/network/fragmentation/fragmentation.go b/pkg/tcpip/network/fragmentation/fragmentation.go
index 1af87d713..243738951 100644
--- a/pkg/tcpip/network/fragmentation/fragmentation.go
+++ b/pkg/tcpip/network/fragmentation/fragmentation.go
@@ -84,7 +84,7 @@ type Fragmentation struct {
lowLimit int
reassemblers map[FragmentID]*reassembler
rList reassemblerList
- size int
+ memSize int
timeout time.Duration
blockSize uint16
clock tcpip.Clock
@@ -156,22 +156,22 @@ func NewFragmentation(blockSize uint16, highMemoryLimit, lowMemoryLimit int, rea
// the protocol to identify a fragment.
func (f *Fragmentation) Process(
id FragmentID, first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) (
- buffer.VectorisedView, uint8, bool, error) {
+ *stack.PacketBuffer, uint8, bool, error) {
if first > last {
- return buffer.VectorisedView{}, 0, false, fmt.Errorf("first=%d is greater than last=%d: %w", first, last, ErrInvalidArgs)
+ return nil, 0, false, fmt.Errorf("first=%d is greater than last=%d: %w", first, last, ErrInvalidArgs)
}
if first%f.blockSize != 0 {
- return buffer.VectorisedView{}, 0, false, fmt.Errorf("first=%d is not a multiple of block size=%d: %w", first, f.blockSize, ErrInvalidArgs)
+ return nil, 0, false, fmt.Errorf("first=%d is not a multiple of block size=%d: %w", first, f.blockSize, ErrInvalidArgs)
}
fragmentSize := last - first + 1
if more && fragmentSize%f.blockSize != 0 {
- return buffer.VectorisedView{}, 0, false, fmt.Errorf("fragment size=%d bytes is not a multiple of block size=%d on non-final fragment: %w", fragmentSize, f.blockSize, ErrInvalidArgs)
+ return nil, 0, false, fmt.Errorf("fragment size=%d bytes is not a multiple of block size=%d on non-final fragment: %w", fragmentSize, f.blockSize, ErrInvalidArgs)
}
if l := pkt.Data.Size(); l != int(fragmentSize) {
- return buffer.VectorisedView{}, 0, false, fmt.Errorf("got fragment size=%d bytes not equal to the expected fragment size=%d bytes (first=%d last=%d): %w", l, fragmentSize, first, last, ErrInvalidArgs)
+ return nil, 0, false, fmt.Errorf("got fragment size=%d bytes not equal to the expected fragment size=%d bytes (first=%d last=%d): %w", l, fragmentSize, first, last, ErrInvalidArgs)
}
f.mu.Lock()
@@ -190,24 +190,24 @@ func (f *Fragmentation) Process(
}
f.mu.Unlock()
- res, firstFragmentProto, done, consumed, err := r.process(first, last, more, proto, pkt)
+ resPkt, firstFragmentProto, done, memConsumed, err := r.process(first, last, more, proto, pkt)
if err != nil {
// We probably got an invalid sequence of fragments. Just
// discard the reassembler and move on.
f.mu.Lock()
f.release(r, false /* timedOut */)
f.mu.Unlock()
- return buffer.VectorisedView{}, 0, false, fmt.Errorf("fragmentation processing error: %w", err)
+ return nil, 0, false, fmt.Errorf("fragmentation processing error: %w", err)
}
f.mu.Lock()
- f.size += consumed
+ f.memSize += memConsumed
if done {
f.release(r, false /* timedOut */)
}
// Evict reassemblers if we are consuming more memory than highLimit until
// we reach lowLimit.
- if f.size > f.highLimit {
- for f.size > f.lowLimit {
+ if f.memSize > f.highLimit {
+ for f.memSize > f.lowLimit {
tail := f.rList.Back()
if tail == nil {
break
@@ -216,7 +216,7 @@ func (f *Fragmentation) Process(
}
}
f.mu.Unlock()
- return res, firstFragmentProto, done, nil
+ return resPkt, firstFragmentProto, done, nil
}
func (f *Fragmentation) release(r *reassembler, timedOut bool) {
@@ -228,10 +228,10 @@ func (f *Fragmentation) release(r *reassembler, timedOut bool) {
delete(f.reassemblers, r.id)
f.rList.Remove(r)
- f.size -= r.size
- if f.size < 0 {
- log.Printf("memory counter < 0 (%d), this is an accounting bug that requires investigation", f.size)
- f.size = 0
+ f.memSize -= r.memSize
+ if f.memSize < 0 {
+ log.Printf("memory counter < 0 (%d), this is an accounting bug that requires investigation", f.memSize)
+ f.memSize = 0
}
if h := f.timeoutHandler; timedOut && h != nil {
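For context on the renamed counter: Process charges each fragment's MemSize to f.memSize and, once the total crosses highLimit, evicts least-recently-used reassemblers until it drops back to lowLimit. The following is a self-contained sketch of that watermark eviction, with simplified stand-ins rather than the real fragmentation internals.

package main

import (
	"container/list"
	"fmt"
)

// reassembler and fragmentation are simplified stand-ins for illustration.
type reassembler struct {
	id      int
	memSize int
}

type fragmentation struct {
	memSize, highLimit, lowLimit int
	rList                        *list.List // front: most recent, back: eviction victim
}

func (f *fragmentation) release(r *reassembler) {
	f.memSize -= r.memSize
	fmt.Printf("evicted reassembler %d, memSize now %d\n", r.id, f.memSize)
}

// charge adds memConsumed to the counter and, once highLimit is exceeded,
// evicts from the back of the LRU list until memSize falls to lowLimit.
func (f *fragmentation) charge(memConsumed int) {
	f.memSize += memConsumed
	if f.memSize > f.highLimit {
		for f.memSize > f.lowLimit {
			tail := f.rList.Back()
			if tail == nil {
				break
			}
			f.release(tail.Value.(*reassembler))
			f.rList.Remove(tail)
		}
	}
}

func main() {
	f := &fragmentation{highLimit: 100, lowLimit: 60, rList: list.New()}
	for i := 0; i < 4; i++ {
		r := &reassembler{id: i, memSize: 30}
		f.rList.PushFront(r)
		f.charge(r.memSize)
	}
	// Reassemblers 0 and 1 are evicted once the total reaches 120.
}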
diff --git a/pkg/tcpip/network/fragmentation/reassembler.go b/pkg/tcpip/network/fragmentation/reassembler.go
index 9b20bb1d8..933d63d32 100644
--- a/pkg/tcpip/network/fragmentation/reassembler.go
+++ b/pkg/tcpip/network/fragmentation/reassembler.go
@@ -20,7 +20,6 @@ import (
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
"gvisor.dev/gvisor/pkg/tcpip/stack"
)
@@ -29,13 +28,15 @@ type hole struct {
last uint16
filled bool
final bool
- data buffer.View
+ // pkt is the fragment packet if the hole is filled. We keep the whole pkt rather
+ // than just the fragmented payload to avoid binding to specific buffer types.
+ pkt *stack.PacketBuffer
}
type reassembler struct {
reassemblerEntry
id FragmentID
- size int
+ memSize int
proto uint8
mu sync.Mutex
holes []hole
@@ -59,18 +60,18 @@ func newReassembler(id FragmentID, clock tcpip.Clock) *reassembler {
return r
}
-func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) (buffer.VectorisedView, uint8, bool, int, error) {
+func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) (*stack.PacketBuffer, uint8, bool, int, error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.done {
// A concurrent goroutine might have already reassembled
// the packet and emptied the heap while this goroutine
// was waiting on the mutex. We don't have to do anything in this case.
- return buffer.VectorisedView{}, 0, false, 0, nil
+ return nil, 0, false, 0, nil
}
var holeFound bool
- var consumed int
+ var memConsumed int
for i := range r.holes {
currentHole := &r.holes[i]
@@ -90,12 +91,12 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *s
// https://github.com/torvalds/linux/blob/38525c6/net/ipv4/inet_fragment.c#L349
if first < currentHole.first || currentHole.last < last {
// Incoming fragment only partially fits in the free hole.
- return buffer.VectorisedView{}, 0, false, 0, ErrFragmentOverlap
+ return nil, 0, false, 0, ErrFragmentOverlap
}
if !more {
if !currentHole.final || currentHole.filled && currentHole.last != last {
// We have another final fragment, which does not perfectly overlap.
- return buffer.VectorisedView{}, 0, false, 0, ErrFragmentConflict
+ return nil, 0, false, 0, ErrFragmentConflict
}
}
@@ -124,16 +125,15 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *s
})
currentHole.final = false
}
- v := pkt.Data.ToOwnedView()
- consumed = v.Size()
- r.size += consumed
+ memConsumed = pkt.MemSize()
+ r.memSize += memConsumed
// Update the current hole to precisely match the incoming fragment.
r.holes[i] = hole{
first: first,
last: last,
filled: true,
final: currentHole.final,
- data: v,
+ pkt: pkt,
}
r.filled++
// For IPv6, it is possible to have different Protocol values between
@@ -153,25 +153,24 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *s
}
if !holeFound {
// Incoming fragment is beyond end.
- return buffer.VectorisedView{}, 0, false, 0, ErrFragmentConflict
+ return nil, 0, false, 0, ErrFragmentConflict
}
// Check if all the holes have been filled and we are ready to reassemble.
if r.filled < len(r.holes) {
- return buffer.VectorisedView{}, 0, false, consumed, nil
+ return nil, 0, false, memConsumed, nil
}
sort.Slice(r.holes, func(i, j int) bool {
return r.holes[i].first < r.holes[j].first
})
- var size int
- views := make([]buffer.View, 0, len(r.holes))
- for _, hole := range r.holes {
- views = append(views, hole.data)
- size += hole.data.Size()
+ resPkt := r.holes[0].pkt
+ for i := 1; i < len(r.holes); i++ {
+ fragPkt := r.holes[i].pkt
+ fragPkt.Data.ReadToVV(&resPkt.Data, fragPkt.Data.Size())
}
- return buffer.NewVectorisedView(size, views), r.proto, true, consumed, nil
+ return resPkt, r.proto, true, memConsumed, nil
}
func (r *reassembler) checkDoneOrMark() bool {
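The reassembly step above now keeps each fragment's whole packet in its hole and, once every hole is filled, folds the later payloads into the first fragment's Data with ReadToVV, so the first packet becomes the reassembled one. A simplified, runnable sketch of that merge order follows; the packet type below is a hypothetical stand-in for stack.PacketBuffer.

package main

import (
	"fmt"
	"sort"
)

// packet stands in for stack.PacketBuffer; only the payload matters here.
type packet struct {
	data []byte
}

type hole struct {
	first, last uint16
	pkt         *packet
}

// reassemble sorts the filled holes by offset and appends every later payload
// onto the first fragment's packet, mirroring the ReadToVV loop above (which
// moves views between packets rather than copying bytes).
func reassemble(holes []hole) *packet {
	sort.Slice(holes, func(i, j int) bool { return holes[i].first < holes[j].first })
	resPkt := holes[0].pkt
	for i := 1; i < len(holes); i++ {
		resPkt.data = append(resPkt.data, holes[i].pkt.data...)
	}
	return resPkt
}

func main() {
	holes := []hole{
		{first: 7, last: 12, pkt: &packet{data: []byte("world!")}},
		{first: 0, last: 6, pkt: &packet{data: []byte("hello, ")}},
	}
	fmt.Println(string(reassemble(holes).data)) // hello, world!
}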
diff --git a/pkg/tcpip/network/ipv4/ipv4.go b/pkg/tcpip/network/ipv4/ipv4.go
index b0703715a..04c6a6708 100644
--- a/pkg/tcpip/network/ipv4/ipv4.go
+++ b/pkg/tcpip/network/ipv4/ipv4.go
@@ -740,7 +740,7 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
}
proto := h.Protocol()
- data, _, ready, err := e.protocol.fragmentation.Process(
+ resPkt, _, ready, err := e.protocol.fragmentation.Process(
// As per RFC 791 section 2.3, the identification value is unique
// for a source-destination pair and protocol.
fragmentation.FragmentID{
@@ -763,7 +763,8 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
if !ready {
return
}
- pkt.Data = data
+ pkt = resPkt
+ h = header.IPv4(pkt.NetworkHeader().View())
// The reassembler doesn't take care of fixing up the header, so we need
// to do it here.
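One subtlety in the IPv4 change: once pkt is replaced by the reassembled packet, the previously parsed header h still points at the first fragment's buffer, so it has to be re-derived from the new pkt before the header is fixed up. A minimal sketch of that pitfall, using hypothetical stand-in types rather than the real header.IPv4 / stack.PacketBuffer API:

package main

import "fmt"

// ipv4Header and packetBuffer are hypothetical stand-ins for illustration.
type ipv4Header []byte

func (h ipv4Header) totalLength() int { return int(h[2])<<8 | int(h[3]) }

type packetBuffer struct {
	networkHeader []byte
}

func main() {
	fragPkt := &packetBuffer{networkHeader: []byte{0x45, 0x00, 0x00, 40}}  // last fragment
	resPkt := &packetBuffer{networkHeader: []byte{0x45, 0x00, 0x00, 120}} // reassembled packet

	pkt := fragPkt
	h := ipv4Header(pkt.networkHeader)

	pkt = resPkt                      // swap to the reassembled packet
	fmt.Println(h.totalLength())      // 40: stale, still the fragment's header
	h = ipv4Header(pkt.networkHeader) // re-acquire the view from the new pkt
	fmt.Println(h.totalLength())      // 120
}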
diff --git a/pkg/tcpip/network/ipv6/ipv6.go b/pkg/tcpip/network/ipv6/ipv6.go
index 94043ed4e..caa62b3a2 100644
--- a/pkg/tcpip/network/ipv6/ipv6.go
+++ b/pkg/tcpip/network/ipv6/ipv6.go
@@ -1167,7 +1167,7 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
// Note that pkt doesn't have its transport header set after reassembly,
// and won't until DeliverNetworkPacket sets it.
- data, proto, ready, err := e.protocol.fragmentation.Process(
+ resPkt, proto, ready, err := e.protocol.fragmentation.Process(
// IPv6 ignores the Protocol field since the ID only needs to be unique
// across source-destination pairs, as per RFC 8200 section 4.5.
fragmentation.FragmentID{
@@ -1188,7 +1188,7 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
}
if ready {
- pkt.Data = data
+ pkt = resPkt
// We create a new iterator with the reassembled packet because we could
// have more extension headers in the reassembled payload, as per RFC
diff --git a/pkg/tcpip/stack/packet_buffer.go b/pkg/tcpip/stack/packet_buffer.go
index 9d4fc3e48..4f013b212 100644
--- a/pkg/tcpip/stack/packet_buffer.go
+++ b/pkg/tcpip/stack/packet_buffer.go
@@ -187,6 +187,12 @@ func (pk *PacketBuffer) Size() int {
return pk.HeaderSize() + pk.Data.Size()
}
+// MemSize returns the estimated size of pk in memory, including backing
+// buffer data.
+func (pk *PacketBuffer) MemSize() int {
+ return pk.HeaderSize() + pk.Data.MemSize() + packetBufferStructSize
+}
+
// Views returns the underlying storage of the whole packet.
func (pk *PacketBuffer) Views() []buffer.View {
// Optimization for outbound packets that headers are in pk.header.
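PacketBuffer.MemSize above differs from Size in two ways: it adds the struct overhead and it counts backing-buffer capacity via Data.MemSize rather than payload length. Below is a small sketch contrasting the two, again with a hypothetical stand-in type rather than the real stack.PacketBuffer.

package main

import (
	"fmt"
	"unsafe"
)

// packetBuffer is a hypothetical stand-in; the real type has many more fields,
// so the real packetBufferStructSize is correspondingly larger.
type packetBuffer struct {
	header []byte // serialized headers
	data   []byte // payload backing buffer, often over-allocated
}

const packetBufferStructSize = int(unsafe.Sizeof(packetBuffer{}))

// Size counts the bytes that would go on the wire.
func (pk *packetBuffer) Size() int { return len(pk.header) + len(pk.data) }

// MemSize counts bytes held in memory: backing capacity plus struct overhead.
func (pk *packetBuffer) MemSize() int {
	return len(pk.header) + cap(pk.data) + packetBufferStructSize
}

func main() {
	pk := &packetBuffer{
		header: make([]byte, 40),
		data:   make([]byte, 100, 1500), // 100-byte payload in a 1500-byte buffer
	}
	fmt.Println(pk.Size())    // 140
	fmt.Println(pk.MemSize()) // 40 + 1500 + 48 (two slice headers) on 64-bit
}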
diff --git a/pkg/tcpip/stack/packet_buffer_unsafe.go b/pkg/tcpip/stack/packet_buffer_unsafe.go
new file mode 100644
index 000000000..ee3d47270
--- /dev/null
+++ b/pkg/tcpip/stack/packet_buffer_unsafe.go
@@ -0,0 +1,19 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stack
+
+import "unsafe"
+
+const packetBufferStructSize = int(unsafe.Sizeof(PacketBuffer{}))
diff --git a/pkg/tcpip/stack/stack_unsafe_state_autogen.go b/pkg/tcpip/stack/stack_unsafe_state_autogen.go
new file mode 100644
index 000000000..758ab3457
--- /dev/null
+++ b/pkg/tcpip/stack/stack_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package stack