author    Toshi Kikuchi <toshik@google.com>      2020-11-16 13:05:01 -0800
committer gVisor bot <gvisor-bot@google.com>     2020-11-16 13:06:38 -0800
commit    758e45618f3d663a092b31baf29f24b3e4dc4d54 (patch)
tree      a5096027088f04b0323f35624376a3922b666908 /pkg
parent    a73877ac944cf3f6b7763c62a5c4a9a4a32455d5 (diff)
Clean up fragmentation.Process
- Pass a PacketBuffer directly instead of releaseCB
- No longer pass a VectorisedView, which is included in the PacketBuffer
- Make it an error if data size is not equal to (last - first + 1)
- Set the callback for the reassembly timeout on NewFragmentation

PiperOrigin-RevId: 342702432
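The rework replaces the per-fragment releaseCB argument with a TimeoutHandler fixed at construction time, and Process now receives the fragment's PacketBuffer directly. A minimal sketch of the resulting call shape, inferred from the patch below (the package name example and the helper reassemble are illustrative, not part of the change):

package example

import (
	"gvisor.dev/gvisor/pkg/tcpip/network/fragmentation"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

// reassemble is an illustrative caller of the reworked Process: the fragment
// payload travels inside pkt (no VectorisedView or releaseCB arguments), and
// pkt.Data.Size() must equal last-first+1 or ErrInvalidArgs is returned.
func reassemble(f *fragmentation.Fragmentation, id fragmentation.FragmentID,
	first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) bool {
	data, firstProto, done, err := f.Process(id, first, last, more, proto, pkt)
	if err != nil {
		// Invalid fragment (bad range or size); the reassembler for this ID
		// has been discarded.
		return false
	}
	if done {
		// Reassembly finished: data holds the reassembled payload and
		// firstProto the protocol of the first fragment (used by IPv6).
		pkt.Data = data
		_ = firstProto
	}
	return done
}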
Diffstat (limited to 'pkg')
-rw-r--r--  pkg/tcpip/header/parse/parse.go                          3
-rw-r--r--  pkg/tcpip/network/fragmentation/BUILD                    1
-rw-r--r--  pkg/tcpip/network/fragmentation/fragmentation.go        71
-rw-r--r--  pkg/tcpip/network/fragmentation/fragmentation_test.go  207
-rw-r--r--  pkg/tcpip/network/fragmentation/reassembler.go           50
-rw-r--r--  pkg/tcpip/network/fragmentation/reassembler_test.go      23
-rw-r--r--  pkg/tcpip/network/ipv4/icmp.go                           15
-rw-r--r--  pkg/tcpip/network/ipv4/ipv4.go                           41
-rw-r--r--  pkg/tcpip/network/ipv6/icmp.go                           13
-rw-r--r--  pkg/tcpip/network/ipv6/ipv6.go                           27
10 files changed, 227 insertions, 224 deletions
diff --git a/pkg/tcpip/header/parse/parse.go b/pkg/tcpip/header/parse/parse.go
index 5ca75c834..2042f214a 100644
--- a/pkg/tcpip/header/parse/parse.go
+++ b/pkg/tcpip/header/parse/parse.go
@@ -109,6 +109,9 @@ traverseExtensions:
fragOffset = extHdr.FragmentOffset()
fragMore = extHdr.More()
}
+ rawPayload := it.AsRawHeader(true /* consume */)
+ extensionsSize = dataClone.Size() - rawPayload.Buf.Size()
+ break traverseExtensions
case header.IPv6RawPayloadHeader:
// We've found the payload after any extensions.
diff --git a/pkg/tcpip/network/fragmentation/BUILD b/pkg/tcpip/network/fragmentation/BUILD
index 47fb63290..d8e4a3b54 100644
--- a/pkg/tcpip/network/fragmentation/BUILD
+++ b/pkg/tcpip/network/fragmentation/BUILD
@@ -47,6 +47,7 @@ go_test(
"//pkg/tcpip/buffer",
"//pkg/tcpip/faketime",
"//pkg/tcpip/network/testutil",
+ "//pkg/tcpip/stack",
"@com_github_google_go_cmp//cmp:go_default_library",
],
)
diff --git a/pkg/tcpip/network/fragmentation/fragmentation.go b/pkg/tcpip/network/fragmentation/fragmentation.go
index 936601287..c75ca7d71 100644
--- a/pkg/tcpip/network/fragmentation/fragmentation.go
+++ b/pkg/tcpip/network/fragmentation/fragmentation.go
@@ -71,16 +71,25 @@ type FragmentID struct {
// Fragmentation is the main structure that other modules
// of the stack should use to implement IP Fragmentation.
type Fragmentation struct {
- mu sync.Mutex
- highLimit int
- lowLimit int
- reassemblers map[FragmentID]*reassembler
- rList reassemblerList
- size int
- timeout time.Duration
- blockSize uint16
- clock tcpip.Clock
- releaseJob *tcpip.Job
+ mu sync.Mutex
+ highLimit int
+ lowLimit int
+ reassemblers map[FragmentID]*reassembler
+ rList reassemblerList
+ size int
+ timeout time.Duration
+ blockSize uint16
+ clock tcpip.Clock
+ releaseJob *tcpip.Job
+ timeoutHandler TimeoutHandler
+}
+
+// TimeoutHandler is consulted if a packet reassembly has timed out.
+type TimeoutHandler interface {
+ // OnReassemblyTimeout will be called with the first fragment (or nil, if the
+ // first fragment has not been received) of a packet whose reassembly has
+ // timed out.
+ OnReassemblyTimeout(pkt *stack.PacketBuffer)
}
// NewFragmentation creates a new Fragmentation.
@@ -97,7 +106,7 @@ type Fragmentation struct {
// reassemblingTimeout specifies the maximum time allowed to reassemble a packet.
// Fragments are lazily evicted only when a new a packet with an
// already existing fragmentation-id arrives after the timeout.
-func NewFragmentation(blockSize uint16, highMemoryLimit, lowMemoryLimit int, reassemblingTimeout time.Duration, clock tcpip.Clock) *Fragmentation {
+func NewFragmentation(blockSize uint16, highMemoryLimit, lowMemoryLimit int, reassemblingTimeout time.Duration, clock tcpip.Clock, timeoutHandler TimeoutHandler) *Fragmentation {
if lowMemoryLimit >= highMemoryLimit {
lowMemoryLimit = highMemoryLimit
}
@@ -111,12 +120,13 @@ func NewFragmentation(blockSize uint16, highMemoryLimit, lowMemoryLimit int, rea
}
f := &Fragmentation{
- reassemblers: make(map[FragmentID]*reassembler),
- highLimit: highMemoryLimit,
- lowLimit: lowMemoryLimit,
- timeout: reassemblingTimeout,
- blockSize: blockSize,
- clock: clock,
+ reassemblers: make(map[FragmentID]*reassembler),
+ highLimit: highMemoryLimit,
+ lowLimit: lowMemoryLimit,
+ timeout: reassemblingTimeout,
+ blockSize: blockSize,
+ clock: clock,
+ timeoutHandler: timeoutHandler,
}
f.releaseJob = tcpip.NewJob(f.clock, &f.mu, f.releaseReassemblersLocked)
@@ -136,16 +146,8 @@ func NewFragmentation(blockSize uint16, highMemoryLimit, lowMemoryLimit int, rea
// proto is the protocol number marked in the fragment being processed. It has
// to be given here outside of the FragmentID struct because IPv6 should not use
// the protocol to identify a fragment.
-//
-// releaseCB is a callback that will run when the fragment reassembly of a
-// packet is complete or cancelled. releaseCB take a a boolean argument which is
-// true iff the reassembly is cancelled due to timeout. releaseCB should be
-// passed only with the first fragment of a packet. If more than one releaseCB
-// are passed for the same packet, only the first releaseCB will be saved for
-// the packet and the succeeding ones will be dropped by running them
-// immediately with a false argument.
func (f *Fragmentation) Process(
- id FragmentID, first, last uint16, more bool, proto uint8, vv buffer.VectorisedView, releaseCB func(bool)) (
+ id FragmentID, first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) (
buffer.VectorisedView, uint8, bool, error) {
if first > last {
return buffer.VectorisedView{}, 0, false, fmt.Errorf("first=%d is greater than last=%d: %w", first, last, ErrInvalidArgs)
@@ -160,10 +162,9 @@ func (f *Fragmentation) Process(
return buffer.VectorisedView{}, 0, false, fmt.Errorf("fragment size=%d bytes is not a multiple of block size=%d on non-final fragment: %w", fragmentSize, f.blockSize, ErrInvalidArgs)
}
- if l := vv.Size(); l < int(fragmentSize) {
- return buffer.VectorisedView{}, 0, false, fmt.Errorf("got fragment size=%d bytes less than the expected fragment size=%d bytes (first=%d last=%d): %w", l, fragmentSize, first, last, ErrInvalidArgs)
+ if l := pkt.Data.Size(); l != int(fragmentSize) {
+ return buffer.VectorisedView{}, 0, false, fmt.Errorf("got fragment size=%d bytes not equal to the expected fragment size=%d bytes (first=%d last=%d): %w", l, fragmentSize, first, last, ErrInvalidArgs)
}
- vv.CapLength(int(fragmentSize))
f.mu.Lock()
r, ok := f.reassemblers[id]
@@ -179,15 +180,9 @@ func (f *Fragmentation) Process(
f.releaseReassemblersLocked()
}
}
- if releaseCB != nil {
- if !r.setCallback(releaseCB) {
- // We got a duplicate callback. Release it immediately.
- releaseCB(false /* timedOut */)
- }
- }
f.mu.Unlock()
- res, firstFragmentProto, done, consumed, err := r.process(first, last, more, proto, vv)
+ res, firstFragmentProto, done, consumed, err := r.process(first, last, more, proto, pkt)
if err != nil {
// We probably got an invalid sequence of fragments. Just
// discard the reassembler and move on.
@@ -231,7 +226,9 @@ func (f *Fragmentation) release(r *reassembler, timedOut bool) {
f.size = 0
}
- r.release(timedOut) // releaseCB may run.
+ if h := f.timeoutHandler; timedOut && h != nil {
+ h.OnReassemblyTimeout(r.pkt)
+ }
}
// releaseReassemblersLocked releases already-expired reassemblers, then
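The IPv4 and IPv6 changes further down wire their protocol structs in as this TimeoutHandler. A reduced sketch of that wiring under the new constructor signature (the constants blockSize and reassembleTimeout and the function newProtocol are illustrative stand-ins for the per-protocol values):

package example

import (
	"time"

	"gvisor.dev/gvisor/pkg/tcpip/network/fragmentation"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

const (
	blockSize         = 8                // illustrative fragment block size
	reassembleTimeout = 30 * time.Second // illustrative; real protocols define their own
)

type protocol struct {
	fragmentation *fragmentation.Fragmentation
}

var _ fragmentation.TimeoutHandler = (*protocol)(nil)

// OnReassemblyTimeout implements fragmentation.TimeoutHandler. pkt is the
// first fragment of the timed-out packet, or nil if fragment zero was never
// received.
func (p *protocol) OnReassemblyTimeout(pkt *stack.PacketBuffer) {
	if pkt != nil {
		// React only when the first fragment is available, e.g. by sending an
		// ICMP Time Exceeded message derived from pkt.
	}
}

func newProtocol(s *stack.Stack) *protocol {
	p := &protocol{}
	// The protocol registers itself as the TimeoutHandler at construction,
	// replacing the per-call releaseCB that Process used to take.
	p.fragmentation = fragmentation.NewFragmentation(
		blockSize, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold,
		reassembleTimeout, s.Clock(), p)
	return p
}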
diff --git a/pkg/tcpip/network/fragmentation/fragmentation_test.go b/pkg/tcpip/network/fragmentation/fragmentation_test.go
index 5dcd10730..3a79688a8 100644
--- a/pkg/tcpip/network/fragmentation/fragmentation_test.go
+++ b/pkg/tcpip/network/fragmentation/fragmentation_test.go
@@ -24,6 +24,7 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/buffer"
"gvisor.dev/gvisor/pkg/tcpip/faketime"
"gvisor.dev/gvisor/pkg/tcpip/network/testutil"
+ "gvisor.dev/gvisor/pkg/tcpip/stack"
)
// reassembleTimeout is dummy timeout used for testing, where the clock never
@@ -40,13 +41,19 @@ func vv(size int, pieces ...string) buffer.VectorisedView {
return buffer.NewVectorisedView(size, views)
}
+func pkt(size int, pieces ...string) *stack.PacketBuffer {
+ return stack.NewPacketBuffer(stack.PacketBufferOptions{
+ Data: vv(size, pieces...),
+ })
+}
+
type processInput struct {
id FragmentID
first uint16
last uint16
more bool
proto uint8
- vv buffer.VectorisedView
+ pkt *stack.PacketBuffer
}
type processOutput struct {
@@ -63,8 +70,8 @@ var processTestCases = []struct {
{
comment: "One ID",
in: []processInput{
- {id: FragmentID{ID: 0}, first: 0, last: 1, more: true, vv: vv(2, "01")},
- {id: FragmentID{ID: 0}, first: 2, last: 3, more: false, vv: vv(2, "23")},
+ {id: FragmentID{ID: 0}, first: 0, last: 1, more: true, pkt: pkt(2, "01")},
+ {id: FragmentID{ID: 0}, first: 2, last: 3, more: false, pkt: pkt(2, "23")},
},
out: []processOutput{
{vv: buffer.VectorisedView{}, done: false},
@@ -74,8 +81,8 @@ var processTestCases = []struct {
{
comment: "Next Header protocol mismatch",
in: []processInput{
- {id: FragmentID{ID: 0}, first: 0, last: 1, more: true, proto: 6, vv: vv(2, "01")},
- {id: FragmentID{ID: 0}, first: 2, last: 3, more: false, proto: 17, vv: vv(2, "23")},
+ {id: FragmentID{ID: 0}, first: 0, last: 1, more: true, proto: 6, pkt: pkt(2, "01")},
+ {id: FragmentID{ID: 0}, first: 2, last: 3, more: false, proto: 17, pkt: pkt(2, "23")},
},
out: []processOutput{
{vv: buffer.VectorisedView{}, done: false},
@@ -85,10 +92,10 @@ var processTestCases = []struct {
{
comment: "Two IDs",
in: []processInput{
- {id: FragmentID{ID: 0}, first: 0, last: 1, more: true, vv: vv(2, "01")},
- {id: FragmentID{ID: 1}, first: 0, last: 1, more: true, vv: vv(2, "ab")},
- {id: FragmentID{ID: 1}, first: 2, last: 3, more: false, vv: vv(2, "cd")},
- {id: FragmentID{ID: 0}, first: 2, last: 3, more: false, vv: vv(2, "23")},
+ {id: FragmentID{ID: 0}, first: 0, last: 1, more: true, pkt: pkt(2, "01")},
+ {id: FragmentID{ID: 1}, first: 0, last: 1, more: true, pkt: pkt(2, "ab")},
+ {id: FragmentID{ID: 1}, first: 2, last: 3, more: false, pkt: pkt(2, "cd")},
+ {id: FragmentID{ID: 0}, first: 2, last: 3, more: false, pkt: pkt(2, "23")},
},
out: []processOutput{
{vv: buffer.VectorisedView{}, done: false},
@@ -102,17 +109,17 @@ var processTestCases = []struct {
func TestFragmentationProcess(t *testing.T) {
for _, c := range processTestCases {
t.Run(c.comment, func(t *testing.T) {
- f := NewFragmentation(minBlockSize, 1024, 512, reassembleTimeout, &faketime.NullClock{})
+ f := NewFragmentation(minBlockSize, 1024, 512, reassembleTimeout, &faketime.NullClock{}, nil)
firstFragmentProto := c.in[0].proto
for i, in := range c.in {
- vv, proto, done, err := f.Process(in.id, in.first, in.last, in.more, in.proto, in.vv, nil)
+ vv, proto, done, err := f.Process(in.id, in.first, in.last, in.more, in.proto, in.pkt)
if err != nil {
- t.Fatalf("f.Process(%+v, %d, %d, %t, %d, %X) failed: %s",
- in.id, in.first, in.last, in.more, in.proto, in.vv.ToView(), err)
+ t.Fatalf("f.Process(%+v, %d, %d, %t, %d, %#v) failed: %s",
+ in.id, in.first, in.last, in.more, in.proto, in.pkt, err)
}
if !reflect.DeepEqual(vv, c.out[i].vv) {
- t.Errorf("got Process(%+v, %d, %d, %t, %d, %X) = (%X, _, _, _), want = (%X, _, _, _)",
- in.id, in.first, in.last, in.more, in.proto, in.vv.ToView(), vv.ToView(), c.out[i].vv.ToView())
+ t.Errorf("got Process(%+v, %d, %d, %t, %d, %#v) = (%X, _, _, _), want = (%X, _, _, _)",
+ in.id, in.first, in.last, in.more, in.proto, in.pkt, vv.ToView(), c.out[i].vv.ToView())
}
if done != c.out[i].done {
t.Errorf("got Process(%+v, %d, %d, %t, %d, _) = (_, _, %t, _), want = (_, _, %t, _)",
@@ -236,11 +243,11 @@ func TestReassemblingTimeout(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
clock := faketime.NewManualClock()
- f := NewFragmentation(minBlockSize, HighFragThreshold, LowFragThreshold, reassemblyTimeout, clock)
+ f := NewFragmentation(minBlockSize, HighFragThreshold, LowFragThreshold, reassemblyTimeout, clock, nil)
for _, event := range test.events {
clock.Advance(event.clockAdvance)
if frag := event.fragment; frag != nil {
- _, _, done, err := f.Process(FragmentID{}, frag.first, frag.last, frag.more, protocol, vv(len(frag.data), frag.data), nil)
+ _, _, done, err := f.Process(FragmentID{}, frag.first, frag.last, frag.more, protocol, pkt(len(frag.data), frag.data))
if err != nil {
t.Fatalf("%s: f.Process failed: %s", event.name, err)
}
@@ -257,17 +264,17 @@ func TestReassemblingTimeout(t *testing.T) {
}
func TestMemoryLimits(t *testing.T) {
- f := NewFragmentation(minBlockSize, 3, 1, reassembleTimeout, &faketime.NullClock{})
+ f := NewFragmentation(minBlockSize, 3, 1, reassembleTimeout, &faketime.NullClock{}, nil)
// Send first fragment with id = 0.
- f.Process(FragmentID{ID: 0}, 0, 0, true, 0xFF, vv(1, "0"), nil)
+ f.Process(FragmentID{ID: 0}, 0, 0, true, 0xFF, pkt(1, "0"))
// Send first fragment with id = 1.
- f.Process(FragmentID{ID: 1}, 0, 0, true, 0xFF, vv(1, "1"), nil)
+ f.Process(FragmentID{ID: 1}, 0, 0, true, 0xFF, pkt(1, "1"))
// Send first fragment with id = 2.
- f.Process(FragmentID{ID: 2}, 0, 0, true, 0xFF, vv(1, "2"), nil)
+ f.Process(FragmentID{ID: 2}, 0, 0, true, 0xFF, pkt(1, "2"))
// Send first fragment with id = 3. This should caused id = 0 and id = 1 to be
// evicted.
- f.Process(FragmentID{ID: 3}, 0, 0, true, 0xFF, vv(1, "3"), nil)
+ f.Process(FragmentID{ID: 3}, 0, 0, true, 0xFF, pkt(1, "3"))
if _, ok := f.reassemblers[FragmentID{ID: 0}]; ok {
t.Errorf("Memory limits are not respected: id=0 has not been evicted.")
@@ -281,11 +288,11 @@ func TestMemoryLimits(t *testing.T) {
}
func TestMemoryLimitsIgnoresDuplicates(t *testing.T) {
- f := NewFragmentation(minBlockSize, 1, 0, reassembleTimeout, &faketime.NullClock{})
+ f := NewFragmentation(minBlockSize, 1, 0, reassembleTimeout, &faketime.NullClock{}, nil)
// Send first fragment with id = 0.
- f.Process(FragmentID{}, 0, 0, true, 0xFF, vv(1, "0"), nil)
+ f.Process(FragmentID{}, 0, 0, true, 0xFF, pkt(1, "0"))
// Send the same packet again.
- f.Process(FragmentID{}, 0, 0, true, 0xFF, vv(1, "0"), nil)
+ f.Process(FragmentID{}, 0, 0, true, 0xFF, pkt(1, "0"))
got := f.size
want := 1
@@ -327,6 +334,7 @@ func TestErrors(t *testing.T) {
last: 3,
more: true,
data: "012",
+ err: ErrInvalidArgs,
},
{
name: "exact block size with more and too little data",
@@ -376,8 +384,8 @@ func TestErrors(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- f := NewFragmentation(test.blockSize, HighFragThreshold, LowFragThreshold, reassembleTimeout, &faketime.NullClock{})
- _, _, done, err := f.Process(FragmentID{}, test.first, test.last, test.more, 0, vv(len(test.data), test.data), nil)
+ f := NewFragmentation(test.blockSize, HighFragThreshold, LowFragThreshold, reassembleTimeout, &faketime.NullClock{}, nil)
+ _, _, done, err := f.Process(FragmentID{}, test.first, test.last, test.more, 0, pkt(len(test.data), test.data))
if !errors.Is(err, test.err) {
t.Errorf("got Process(_, %d, %d, %t, _, %q) = (_, _, _, %v), want = (_, _, _, %v)", test.first, test.last, test.more, test.data, err, test.err)
}
@@ -498,57 +506,92 @@ func TestPacketFragmenter(t *testing.T) {
}
}
-func TestReleaseCallback(t *testing.T) {
+type testTimeoutHandler struct {
+ pkt *stack.PacketBuffer
+}
+
+func (h *testTimeoutHandler) OnReassemblyTimeout(pkt *stack.PacketBuffer) {
+ h.pkt = pkt
+}
+
+func TestTimeoutHandler(t *testing.T) {
const (
proto = 99
)
- var result int
- var callbackReasonIsTimeout bool
- cb1 := func(timedOut bool) { result = 1; callbackReasonIsTimeout = timedOut }
- cb2 := func(timedOut bool) { result = 2; callbackReasonIsTimeout = timedOut }
+ pk1 := pkt(1, "1")
+ pk2 := pkt(1, "2")
+
+ type processParam struct {
+ first uint16
+ last uint16
+ more bool
+ pkt *stack.PacketBuffer
+ }
tests := []struct {
- name string
- callbacks []func(bool)
- timeout bool
- wantResult int
- wantCallbackReasonIsTimeout bool
+ name string
+ params []processParam
+ wantError bool
+ wantPkt *stack.PacketBuffer
}{
{
- name: "callback runs on release",
- callbacks: []func(bool){cb1},
- timeout: false,
- wantResult: 1,
- wantCallbackReasonIsTimeout: false,
- },
- {
- name: "first callback is nil",
- callbacks: []func(bool){nil, cb2},
- timeout: false,
- wantResult: 2,
- wantCallbackReasonIsTimeout: false,
+ name: "onTimeout runs",
+ params: []processParam{
+ {
+ first: 0,
+ last: 0,
+ more: true,
+ pkt: pk1,
+ },
+ },
+ wantError: false,
+ wantPkt: pk1,
},
{
- name: "two callbacks - first one is set",
- callbacks: []func(bool){cb1, cb2},
- timeout: false,
- wantResult: 1,
- wantCallbackReasonIsTimeout: false,
+ name: "no first fragment",
+ params: []processParam{
+ {
+ first: 1,
+ last: 1,
+ more: true,
+ pkt: pk1,
+ },
+ },
+ wantError: false,
+ wantPkt: nil,
},
{
- name: "callback runs on timeout",
- callbacks: []func(bool){cb1},
- timeout: true,
- wantResult: 1,
- wantCallbackReasonIsTimeout: true,
+ name: "second pkt is ignored",
+ params: []processParam{
+ {
+ first: 0,
+ last: 0,
+ more: true,
+ pkt: pk1,
+ },
+ {
+ first: 0,
+ last: 0,
+ more: true,
+ pkt: pk2,
+ },
+ },
+ wantError: false,
+ wantPkt: pk1,
},
{
- name: "no callbacks",
- callbacks: []func(bool){nil},
- timeout: false,
- wantResult: 0,
- wantCallbackReasonIsTimeout: false,
+ name: "invalid args - first is greater than last",
+ params: []processParam{
+ {
+ first: 1,
+ last: 0,
+ more: true,
+ pkt: pk1,
+ },
+ },
+ wantError: true,
+ wantPkt: nil,
},
}
@@ -556,29 +599,31 @@ func TestReleaseCallback(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- result = 0
- callbackReasonIsTimeout = false
+ handler := &testTimeoutHandler{pkt: nil}
- f := NewFragmentation(minBlockSize, HighFragThreshold, LowFragThreshold, reassembleTimeout, &faketime.NullClock{})
+ f := NewFragmentation(minBlockSize, HighFragThreshold, LowFragThreshold, reassembleTimeout, &faketime.NullClock{}, handler)
- for i, cb := range test.callbacks {
- _, _, _, err := f.Process(id, uint16(i), uint16(i), true, proto, vv(1, "0"), cb)
- if err != nil {
+ for _, p := range test.params {
+ if _, _, _, err := f.Process(id, p.first, p.last, p.more, proto, p.pkt); err != nil && !test.wantError {
t.Errorf("f.Process error = %s", err)
}
}
-
- r, ok := f.reassemblers[id]
- if !ok {
- t.Fatalf("Reassemberr not found")
- }
- f.release(r, test.timeout)
-
- if result != test.wantResult {
- t.Errorf("got result = %d, want = %d", result, test.wantResult)
+ if !test.wantError {
+ r, ok := f.reassemblers[id]
+ if !ok {
+ t.Fatal("Reassembler not found")
+ }
+ f.release(r, true)
}
- if callbackReasonIsTimeout != test.wantCallbackReasonIsTimeout {
- t.Errorf("got callbackReasonIsTimeout = %t, want = %t", callbackReasonIsTimeout, test.wantCallbackReasonIsTimeout)
+ switch {
+ case handler.pkt != nil && test.wantPkt == nil:
+ t.Errorf("got handler.pkt = not nil (pkt.Data = %x), want = nil", handler.pkt.Data.ToView())
+ case handler.pkt == nil && test.wantPkt != nil:
+ t.Errorf("got handler.pkt = nil, want = not nil (pkt.Data = %x)", test.wantPkt.Data.ToView())
+ case handler.pkt != nil && test.wantPkt != nil:
+ if diff := cmp.Diff(test.wantPkt.Data.ToView(), handler.pkt.Data.ToView()); diff != "" {
+ t.Errorf("pkt.Data mismatch (-want, +got):\n%s", diff)
+ }
}
})
}
diff --git a/pkg/tcpip/network/fragmentation/reassembler.go b/pkg/tcpip/network/fragmentation/reassembler.go
index c0cc0bde0..19f4920b3 100644
--- a/pkg/tcpip/network/fragmentation/reassembler.go
+++ b/pkg/tcpip/network/fragmentation/reassembler.go
@@ -22,6 +22,7 @@ import (
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/buffer"
+ "gvisor.dev/gvisor/pkg/tcpip/stack"
)
type hole struct {
@@ -41,7 +42,7 @@ type reassembler struct {
heap fragHeap
done bool
creationTime int64
- callback func(bool)
+ pkt *stack.PacketBuffer
}
func newReassembler(id FragmentID, clock tcpip.Clock) *reassembler {
@@ -79,7 +80,7 @@ func (r *reassembler) updateHoles(first, last uint16, more bool) bool {
return used
}
-func (r *reassembler) process(first, last uint16, more bool, proto uint8, vv buffer.VectorisedView) (buffer.VectorisedView, uint8, bool, int, error) {
+func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) (buffer.VectorisedView, uint8, bool, int, error) {
r.mu.Lock()
defer r.mu.Unlock()
consumed := 0
@@ -89,18 +90,20 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, vv buf
// was waiting on the mutex. We don't have to do anything in this case.
return buffer.VectorisedView{}, 0, false, consumed, nil
}
- // For IPv6, it is possible to have different Protocol values between
- // fragments of a packet (because, unlike IPv4, the Protocol is not used to
- // identify a fragment). In this case, only the Protocol of the first
- // fragment must be used as per RFC 8200 Section 4.5.
- //
- // TODO(gvisor.dev/issue/3648): The entire first IP header should be recorded
- // here (instead of just the protocol) because most IP options should be
- // derived from the first fragment.
- if first == 0 {
- r.proto = proto
- }
if r.updateHoles(first, last, more) {
+ // For IPv6, it is possible to have different Protocol values between
+ // fragments of a packet (because, unlike IPv4, the Protocol is not used to
+ // identify a fragment). In this case, only the Protocol of the first
+ // fragment must be used as per RFC 8200 Section 4.5.
+ //
+ // TODO(gvisor.dev/issue/3648): During reassembly of an IPv6 packet, IP
+ // options received in the first fragment should be used - and they should
+ // override options from following fragments.
+ if first == 0 {
+ r.pkt = pkt
+ r.proto = proto
+ }
+ vv := pkt.Data
// We store the incoming packet only if it filled some holes.
heap.Push(&r.heap, fragment{offset: first, vv: vv.Clone(nil)})
consumed = vv.Size()
@@ -124,24 +127,3 @@ func (r *reassembler) checkDoneOrMark() bool {
r.mu.Unlock()
return prev
}
-
-func (r *reassembler) setCallback(c func(bool)) bool {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.callback != nil {
- return false
- }
- r.callback = c
- return true
-}
-
-func (r *reassembler) release(timedOut bool) {
- r.mu.Lock()
- callback := r.callback
- r.callback = nil
- r.mu.Unlock()
-
- if callback != nil {
- callback(timedOut)
- }
-}
diff --git a/pkg/tcpip/network/fragmentation/reassembler_test.go b/pkg/tcpip/network/fragmentation/reassembler_test.go
index fa2a70dc8..a0a04a027 100644
--- a/pkg/tcpip/network/fragmentation/reassembler_test.go
+++ b/pkg/tcpip/network/fragmentation/reassembler_test.go
@@ -105,26 +105,3 @@ func TestUpdateHoles(t *testing.T) {
}
}
}
-
-func TestSetCallback(t *testing.T) {
- result := 0
- reasonTimeout := false
-
- cb1 := func(timedOut bool) { result = 1; reasonTimeout = timedOut }
- cb2 := func(timedOut bool) { result = 2; reasonTimeout = timedOut }
-
- r := newReassembler(FragmentID{}, &faketime.NullClock{})
- if !r.setCallback(cb1) {
- t.Errorf("setCallback failed")
- }
- if r.setCallback(cb2) {
- t.Errorf("setCallback should fail if one is already set")
- }
- r.release(true)
- if result != 1 {
- t.Errorf("got result = %d, want = 1", result)
- }
- if !reasonTimeout {
- t.Errorf("got reasonTimeout = %t, want = true", reasonTimeout)
- }
-}
diff --git a/pkg/tcpip/network/ipv4/icmp.go b/pkg/tcpip/network/ipv4/icmp.go
index 204b182e6..488945226 100644
--- a/pkg/tcpip/network/ipv4/icmp.go
+++ b/pkg/tcpip/network/ipv4/icmp.go
@@ -514,3 +514,18 @@ func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer) *tcpi
counter.Increment()
return nil
}
+
+// OnReassemblyTimeout implements fragmentation.TimeoutHandler.
+func (p *protocol) OnReassemblyTimeout(pkt *stack.PacketBuffer) {
+ // OnReassemblyTimeout sends a Time Exceeded Message, as per RFC 792:
+ //
+ // If a host reassembling a fragmented datagram cannot complete the
+ // reassembly due to missing fragments within its time limit it discards the
+ // datagram, and it may send a time exceeded message.
+ //
+ // If fragment zero is not available then no time exceeded need be sent at
+ // all.
+ if pkt != nil {
+ p.returnError(&icmpReasonReassemblyTimeout{}, pkt)
+ }
+}
diff --git a/pkg/tcpip/network/ipv4/ipv4.go b/pkg/tcpip/network/ipv4/ipv4.go
index a9a38b851..1efe6297a 100644
--- a/pkg/tcpip/network/ipv4/ipv4.go
+++ b/pkg/tcpip/network/ipv4/ipv4.go
@@ -650,29 +650,8 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
return
}
- // Set up a callback in case we need to send a Time Exceeded Message, as per
- // RFC 792:
- //
- // If a host reassembling a fragmented datagram cannot complete the
- // reassembly due to missing fragments within its time limit it discards
- // the datagram, and it may send a time exceeded message.
- //
- // If fragment zero is not available then no time exceeded need be sent at
- // all.
- var releaseCB func(bool)
- if start == 0 {
- pkt := pkt.Clone()
- releaseCB = func(timedOut bool) {
- if timedOut {
- _ = e.protocol.returnError(&icmpReasonReassemblyTimeout{}, pkt)
- }
- }
- }
-
- var ready bool
- var err error
proto := h.Protocol()
- pkt.Data, _, ready, err = e.protocol.fragmentation.Process(
+ data, _, ready, err := e.protocol.fragmentation.Process(
// As per RFC 791 section 2.3, the identification value is unique
// for a source-destination pair and protocol.
fragmentation.FragmentID{
@@ -685,8 +664,7 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
start+uint16(pkt.Data.Size())-1,
h.More(),
proto,
- pkt.Data,
- releaseCB,
+ pkt,
)
if err != nil {
stats.IP.MalformedPacketsReceived.Increment()
@@ -696,6 +674,7 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
if !ready {
return
}
+ pkt.Data = data
// The reassembler doesn't take care of fixing up the header, so we need
// to do it here.
@@ -863,6 +842,7 @@ func (e *endpoint) IsInGroup(addr tcpip.Address) bool {
var _ stack.ForwardingNetworkProtocol = (*protocol)(nil)
var _ stack.NetworkProtocol = (*protocol)(nil)
+var _ fragmentation.TimeoutHandler = (*protocol)(nil)
type protocol struct {
stack *stack.Stack
@@ -1027,13 +1007,14 @@ func NewProtocol(s *stack.Stack) stack.NetworkProtocol {
}
hashIV := r[buckets]
- return &protocol{
- stack: s,
- ids: ids,
- hashIV: hashIV,
- defaultTTL: DefaultTTL,
- fragmentation: fragmentation.NewFragmentation(fragmentblockSize, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, ReassembleTimeout, s.Clock()),
+ p := &protocol{
+ stack: s,
+ ids: ids,
+ hashIV: hashIV,
+ defaultTTL: DefaultTTL,
}
+ p.fragmentation = fragmentation.NewFragmentation(fragmentblockSize, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, ReassembleTimeout, s.Clock(), p)
+ return p
}
func buildNextFragment(pf *fragmentation.PacketFragmenter, originalIPHeader header.IPv4) (*stack.PacketBuffer, bool) {
diff --git a/pkg/tcpip/network/ipv6/icmp.go b/pkg/tcpip/network/ipv6/icmp.go
index 8d788af80..beb8f562e 100644
--- a/pkg/tcpip/network/ipv6/icmp.go
+++ b/pkg/tcpip/network/ipv6/icmp.go
@@ -922,3 +922,16 @@ func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer) *tcpi
counter.Increment()
return nil
}
+
+// OnReassemblyTimeout implements fragmentation.TimeoutHandler.
+func (p *protocol) OnReassemblyTimeout(pkt *stack.PacketBuffer) {
+ // OnReassemblyTimeout sends a Time Exceeded Message as per RFC 2460 Section
+ // 4.5:
+ //
+ // If the first fragment (i.e., the one with a Fragment Offset of zero) has
+ // been received, an ICMP Time Exceeded -- Fragment Reassembly Time Exceeded
+ // message should be sent to the source of that fragment.
+ if pkt != nil {
+ p.returnError(&icmpReasonReassemblyTimeout{}, pkt)
+ }
+}
diff --git a/pkg/tcpip/network/ipv6/ipv6.go b/pkg/tcpip/network/ipv6/ipv6.go
index 7697ff987..7a00f6314 100644
--- a/pkg/tcpip/network/ipv6/ipv6.go
+++ b/pkg/tcpip/network/ipv6/ipv6.go
@@ -967,18 +967,6 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
return
}
- // Set up a callback in case we need to send a Time Exceeded Message as
- // per RFC 2460 Section 4.5.
- var releaseCB func(bool)
- if start == 0 {
- pkt := pkt.Clone()
- releaseCB = func(timedOut bool) {
- if timedOut {
- _ = e.protocol.returnError(&icmpReasonReassemblyTimeout{}, pkt)
- }
- }
- }
-
// Note that pkt doesn't have its transport header set after reassembly,
// and won't until DeliverNetworkPacket sets it.
data, proto, ready, err := e.protocol.fragmentation.Process(
@@ -993,17 +981,17 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
start+uint16(fragmentPayloadLen)-1,
extHdr.More(),
uint8(rawPayload.Identifier),
- rawPayload.Buf,
- releaseCB,
+ pkt,
)
if err != nil {
stats.IP.MalformedPacketsReceived.Increment()
stats.IP.MalformedFragmentsReceived.Increment()
return
}
- pkt.Data = data
if ready {
+ pkt.Data = data
+
// We create a new iterator with the reassembled packet because we could
// have more extension headers in the reassembled payload, as per RFC
// 8200 section 4.5. We also use the NextHeader value from the first
@@ -1414,6 +1402,7 @@ func (e *endpoint) IsInGroup(addr tcpip.Address) bool {
var _ stack.ForwardingNetworkProtocol = (*protocol)(nil)
var _ stack.NetworkProtocol = (*protocol)(nil)
+var _ fragmentation.TimeoutHandler = (*protocol)(nil)
type protocol struct {
stack *stack.Stack
@@ -1669,10 +1658,9 @@ func NewProtocolWithOptions(opts Options) stack.NetworkProtocolFactory {
return func(s *stack.Stack) stack.NetworkProtocol {
p := &protocol{
- stack: s,
- fragmentation: fragmentation.NewFragmentation(header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, ReassembleTimeout, s.Clock()),
- ids: ids,
- hashIV: hashIV,
+ stack: s,
+ ids: ids,
+ hashIV: hashIV,
ndpDisp: opts.NDPDisp,
ndpConfigs: opts.NDPConfigs,
@@ -1680,6 +1668,7 @@ func NewProtocolWithOptions(opts Options) stack.NetworkProtocolFactory {
tempIIDSeed: opts.TempIIDSeed,
autoGenIPv6LinkLocal: opts.AutoGenIPv6LinkLocal,
}
+ p.fragmentation = fragmentation.NewFragmentation(header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, ReassembleTimeout, s.Clock(), p)
p.mu.eps = make(map[*endpoint]struct{})
p.SetDefaultTTL(DefaultTTL)
return p