Diffstat (limited to 'test/packetimpact')
25 files changed, 1299 insertions, 774 deletions
diff --git a/test/packetimpact/runner/defs.bzl b/test/packetimpact/runner/defs.bzl index afe73a69a..19f6cc0e0 100644 --- a/test/packetimpact/runner/defs.bzl +++ b/test/packetimpact/runner/defs.bzl @@ -115,7 +115,7 @@ def packetimpact_netstack_test( **kwargs ) -def packetimpact_go_test(name, expect_native_failure = False, expect_netstack_failure = False, num_duts = 1): +def packetimpact_go_test(name, expect_native_failure = False, expect_netstack_failure = False, num_duts = 1, **kwargs): """Add packetimpact tests written in go. Args: @@ -123,6 +123,7 @@ def packetimpact_go_test(name, expect_native_failure = False, expect_netstack_fa expect_native_failure: the test must fail natively expect_netstack_failure: the test must fail for Netstack num_duts: how many DUTs are needed for the test + **kwargs: all the other args, forwarded to packetimpact_native_test and packetimpact_netstack_test """ testbench_binary = name + "_test" packetimpact_native_test( @@ -130,12 +131,14 @@ def packetimpact_go_test(name, expect_native_failure = False, expect_netstack_fa expect_failure = expect_native_failure, num_duts = num_duts, testbench_binary = testbench_binary, + **kwargs ) packetimpact_netstack_test( name = name, expect_failure = expect_netstack_failure, num_duts = num_duts, testbench_binary = testbench_binary, + **kwargs ) def packetimpact_testbench(name, size = "small", pure = True, **kwargs): @@ -163,6 +166,7 @@ PacketimpactTestInfo = provider( doc = "Provide information for packetimpact tests", fields = [ "name", + "timeout", "expect_netstack_failure", "num_duts", ], @@ -271,9 +275,6 @@ ALL_TESTS = [ num_duts = 3, ), PacketimpactTestInfo( - name = "udp_send_recv_dgram", - ), - PacketimpactTestInfo( name = "tcp_linger", ), PacketimpactTestInfo( @@ -289,6 +290,10 @@ ALL_TESTS = [ PacketimpactTestInfo( name = "tcp_fin_retransmission", ), + PacketimpactTestInfo( + name = "generic_dgram_socket_send_recv", + timeout = "long", + ), ] def validate_all_tests(): diff --git a/test/packetimpact/runner/dut.go b/test/packetimpact/runner/dut.go index 4fb2f5c4b..02678a76a 100644 --- a/test/packetimpact/runner/dut.go +++ b/test/packetimpact/runner/dut.go @@ -363,6 +363,9 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co // and receives packets and also sends POSIX socket commands to the // posix_server to be executed on the DUT. testArgs := []string{containerTestbenchBinary} + if testing.Verbose() { + testArgs = append(testArgs, "-test.v") + } testArgs = append(testArgs, extraTestArgs...) 
testArgs = append(testArgs, fmt.Sprintf("--native=%t", native), @@ -395,6 +398,8 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co } else if expectFailure { t.Logf(`test failed as expected: %v %s`, err, testLogs) + } else if testing.Verbose() { + t.Log(testLogs) } } diff --git a/test/packetimpact/testbench/BUILD b/test/packetimpact/testbench/BUILD index 616215dc3..d8059ab98 100644 --- a/test/packetimpact/testbench/BUILD +++ b/test/packetimpact/testbench/BUILD @@ -16,6 +16,8 @@ go_library( ], visibility = ["//test/packetimpact:__subpackages__"], deps = [ + "//pkg/abi/linux", + "//pkg/binary", "//pkg/hostarch", "//pkg/tcpip", "//pkg/tcpip/buffer", diff --git a/test/packetimpact/testbench/connections.go b/test/packetimpact/testbench/connections.go index 8ad9040ff..ed56f9ac7 100644 --- a/test/packetimpact/testbench/connections.go +++ b/test/packetimpact/testbench/connections.go @@ -594,32 +594,50 @@ func (conn *Connection) Expect(t *testing.T, layer Layer, timeout time.Duration) func (conn *Connection) ExpectFrame(t *testing.T, layers Layers, timeout time.Duration) (Layers, error) { t.Helper() - deadline := time.Now().Add(timeout) + frames, ok := conn.ListenForFrame(t, layers, timeout) + if ok { + return frames[len(frames)-1], nil + } + if len(frames) == 0 { + return nil, fmt.Errorf("got no frames matching %s during %s", layers, timeout) + } + var errs error + for _, got := range frames { + want := conn.incoming(layers) + if err := want.merge(layers); err != nil { + errs = multierr.Combine(errs, err) + } else { + errs = multierr.Combine(errs, &layersError{got: got, want: want}) + } + } + return nil, fmt.Errorf("got frames:\n%w want %s during %s", errs, layers, timeout) +} + +// ListenForFrame captures all frames until a frame matches the provided Layers, +// or until the timeout specified. Returns all captured frames, including the +// matched frame, and true if the desired frame was found. +func (conn *Connection) ListenForFrame(t *testing.T, layers Layers, timeout time.Duration) ([]Layers, bool) { + t.Helper() + + deadline := time.Now().Add(timeout) + var frames []Layers for { - var gotLayers Layers + var got Layers if timeout := time.Until(deadline); timeout > 0 { - gotLayers = conn.recvFrame(t, timeout) + got = conn.recvFrame(t, timeout) } - if gotLayers == nil { - if errs == nil { - return nil, fmt.Errorf("got no frames matching %s during %s", layers, timeout) - } - return nil, fmt.Errorf("got frames:\n%w want %s during %s", errs, layers, timeout) + if got == nil { + return frames, false } - if conn.match(layers, gotLayers) { + frames = append(frames, got) + if conn.match(layers, got) { for i, s := range conn.layerStates { - if err := s.received(gotLayers[i]); err != nil { + if err := s.received(got[i]); err != nil { t.Fatalf("failed to update test connection's layer states based on received frame: %s", err) } } - return gotLayers, nil - } - want := conn.incoming(layers) - if err := want.merge(layers); err != nil { - errs = multierr.Combine(errs, err) - } else { - errs = multierr.Combine(errs, &layersError{got: gotLayers, want: want}) + return frames, true } } } @@ -1025,6 +1043,8 @@ func (conn *UDPIPv4) SendIP(t *testing.T, ip IPv4, udp UDP, additionalLayers ... // SendFrame sends a frame on the wire and updates the state of all layers. func (conn *UDPIPv4) SendFrame(t *testing.T, overrideLayers Layers, additionalLayers ...Layer) { + t.Helper() + conn.send(t, overrideLayers, additionalLayers...) 
} diff --git a/test/packetimpact/testbench/dut.go b/test/packetimpact/testbench/dut.go index eabdc8cb3..7e89ba2b3 100644 --- a/test/packetimpact/testbench/dut.go +++ b/test/packetimpact/testbench/dut.go @@ -22,11 +22,13 @@ import ( "testing" "time" - pb "gvisor.dev/gvisor/test/packetimpact/proto/posix_server_go_proto" - "golang.org/x/sys/unix" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" + "gvisor.dev/gvisor/pkg/abi/linux" + bin "gvisor.dev/gvisor/pkg/binary" + "gvisor.dev/gvisor/pkg/hostarch" + pb "gvisor.dev/gvisor/test/packetimpact/proto/posix_server_go_proto" ) // DUT communicates with the DUT to force it to make POSIX calls. @@ -178,9 +180,7 @@ func (dut *DUT) CreateListener(t *testing.T, typ, proto, backlog int32) (int32, func (dut *DUT) Accept(t *testing.T, sockfd int32) (int32, unix.Sockaddr) { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - fd, sa, err := dut.AcceptWithErrno(ctx, t, sockfd) + fd, sa, err := dut.AcceptWithErrno(context.Background(), t, sockfd) if fd < 0 { t.Fatalf("failed to accept: %s", err) } @@ -207,9 +207,7 @@ func (dut *DUT) AcceptWithErrno(ctx context.Context, t *testing.T, sockfd int32) func (dut *DUT) Bind(t *testing.T, fd int32, sa unix.Sockaddr) { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, err := dut.BindWithErrno(ctx, t, fd, sa) + ret, err := dut.BindWithErrno(context.Background(), t, fd, sa) if ret != 0 { t.Fatalf("failed to bind socket: %s", err) } @@ -236,9 +234,7 @@ func (dut *DUT) BindWithErrno(ctx context.Context, t *testing.T, fd int32, sa un func (dut *DUT) Close(t *testing.T, fd int32) { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, err := dut.CloseWithErrno(ctx, t, fd) + ret, err := dut.CloseWithErrno(context.Background(), t, fd) if ret != 0 { t.Fatalf("failed to close: %s", err) } @@ -264,9 +260,7 @@ func (dut *DUT) CloseWithErrno(ctx context.Context, t *testing.T, fd int32) (int func (dut *DUT) Connect(t *testing.T, fd int32, sa unix.Sockaddr) { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, err := dut.ConnectWithErrno(ctx, t, fd, sa) + ret, err := dut.ConnectWithErrno(context.Background(), t, fd, sa) // Ignore 'operation in progress' error that can be returned when the socket // is non-blocking. 
if err != unix.EINPROGRESS && ret != 0 { @@ -295,9 +289,7 @@ func (dut *DUT) ConnectWithErrno(ctx context.Context, t *testing.T, fd int32, sa func (dut *DUT) GetSockName(t *testing.T, sockfd int32) unix.Sockaddr { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, sa, err := dut.GetSockNameWithErrno(ctx, t, sockfd) + ret, sa, err := dut.GetSockNameWithErrno(context.Background(), t, sockfd) if ret != 0 { t.Fatalf("failed to getsockname: %s", err) } @@ -347,9 +339,7 @@ func (dut *DUT) getSockOpt(ctx context.Context, t *testing.T, sockfd, level, opt func (dut *DUT) GetSockOpt(t *testing.T, sockfd, level, optname, optlen int32) []byte { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, optval, err := dut.GetSockOptWithErrno(ctx, t, sockfd, level, optname, optlen) + ret, optval, err := dut.GetSockOptWithErrno(context.Background(), t, sockfd, level, optname, optlen) if ret != 0 { t.Fatalf("failed to GetSockOpt: %s", err) } @@ -376,9 +366,7 @@ func (dut *DUT) GetSockOptWithErrno(ctx context.Context, t *testing.T, sockfd, l func (dut *DUT) GetSockOptInt(t *testing.T, sockfd, level, optname int32) int32 { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, intval, err := dut.GetSockOptIntWithErrno(ctx, t, sockfd, level, optname) + ret, intval, err := dut.GetSockOptIntWithErrno(context.Background(), t, sockfd, level, optname) if ret != 0 { t.Fatalf("failed to GetSockOptInt: %s", err) } @@ -403,9 +391,7 @@ func (dut *DUT) GetSockOptIntWithErrno(ctx context.Context, t *testing.T, sockfd func (dut *DUT) GetSockOptTimeval(t *testing.T, sockfd, level, optname int32) unix.Timeval { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, timeval, err := dut.GetSockOptTimevalWithErrno(ctx, t, sockfd, level, optname) + ret, timeval, err := dut.GetSockOptTimevalWithErrno(context.Background(), t, sockfd, level, optname) if ret != 0 { t.Fatalf("failed to GetSockOptTimeval: %s", err) } @@ -428,15 +414,38 @@ func (dut *DUT) GetSockOptTimevalWithErrno(ctx context.Context, t *testing.T, so return ret, timeval, errno } +// GetSockOptTCPInfo retreives TCPInfo for the given socket descriptor. +func (dut *DUT) GetSockOptTCPInfo(t *testing.T, sockfd int32) linux.TCPInfo { + t.Helper() + + ret, info, err := dut.GetSockOptTCPInfoWithErrno(context.Background(), t, sockfd) + if ret != 0 || err != unix.Errno(0) { + t.Fatalf("failed to GetSockOptTCPInfo: %s", err) + } + return info +} + +// GetSockOptTCPInfoWithErrno retreives TCPInfo with any errno. +func (dut *DUT) GetSockOptTCPInfoWithErrno(ctx context.Context, t *testing.T, sockfd int32) (int32, linux.TCPInfo, error) { + t.Helper() + + info := linux.TCPInfo{} + ret, infoBytes, errno := dut.GetSockOptWithErrno(ctx, t, sockfd, unix.SOL_TCP, unix.TCP_INFO, int32(linux.SizeOfTCPInfo)) + if got, want := len(infoBytes), linux.SizeOfTCPInfo; got != want { + t.Fatalf("expected %T, got %d bytes want %d bytes", info, got, want) + } + bin.Unmarshal(infoBytes, hostarch.ByteOrder, &info) + + return ret, info, errno +} + // Listen calls listen on the DUT and causes a fatal test failure if it doesn't // succeed. If more control over the timeout or error handling is needed, use // ListenWithErrno. 
func (dut *DUT) Listen(t *testing.T, sockfd, backlog int32) { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, err := dut.ListenWithErrno(ctx, t, sockfd, backlog) + ret, err := dut.ListenWithErrno(context.Background(), t, sockfd, backlog) if ret != 0 { t.Fatalf("failed to listen: %s", err) } @@ -469,8 +478,8 @@ func (dut *DUT) PollOne(t *testing.T, fd int32, events int16, timeout time.Durat if readyFd := pfds[0].Fd; readyFd != fd { t.Fatalf("Poll returned an fd %d that was not requested (%d)", readyFd, fd) } - if got, want := pfds[0].Revents, int16(events); got&want == 0 { - t.Fatalf("Poll returned no events in our interest, got: %#b, want: %#b", got, want) + if got, want := pfds[0].Revents, int16(events); got&want != want { + t.Fatalf("Poll returned events does not include all of the interested events, got: %#b, want: %#b", got, want) } } @@ -481,13 +490,7 @@ func (dut *DUT) PollOne(t *testing.T, fd int32, events int16, timeout time.Durat func (dut *DUT) Poll(t *testing.T, pfds []unix.PollFd, timeout time.Duration) []unix.PollFd { t.Helper() - ctx := context.Background() - var cancel context.CancelFunc - if timeout >= 0 { - ctx, cancel = context.WithTimeout(ctx, timeout+RPCTimeout) - defer cancel() - } - ret, result, err := dut.PollWithErrno(ctx, t, pfds, timeout) + ret, result, err := dut.PollWithErrno(context.Background(), t, pfds, timeout) if ret < 0 { t.Fatalf("failed to poll: %s", err) } @@ -530,9 +533,7 @@ func (dut *DUT) PollWithErrno(ctx context.Context, t *testing.T, pfds []unix.Pol func (dut *DUT) Send(t *testing.T, sockfd int32, buf []byte, flags int32) int32 { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, err := dut.SendWithErrno(ctx, t, sockfd, buf, flags) + ret, err := dut.SendWithErrno(context.Background(), t, sockfd, buf, flags) if ret == -1 { t.Fatalf("failed to send: %s", err) } @@ -561,9 +562,7 @@ func (dut *DUT) SendWithErrno(ctx context.Context, t *testing.T, sockfd int32, b func (dut *DUT) SendTo(t *testing.T, sockfd int32, buf []byte, flags int32, destAddr unix.Sockaddr) int32 { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, err := dut.SendToWithErrno(ctx, t, sockfd, buf, flags, destAddr) + ret, err := dut.SendToWithErrno(context.Background(), t, sockfd, buf, flags, destAddr) if ret == -1 { t.Fatalf("failed to sendto: %s", err) } @@ -596,10 +595,8 @@ func (dut *DUT) SetNonBlocking(t *testing.T, fd int32, nonblocking bool) { Fd: fd, Nonblocking: nonblocking, } - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - resp, err := dut.posixServer.SetNonblocking(ctx, req) + resp, err := dut.posixServer.SetNonblocking(context.Background(), req) if err != nil { t.Fatalf("failed to call SetNonblocking: %s", err) } @@ -632,9 +629,7 @@ func (dut *DUT) setSockOpt(ctx context.Context, t *testing.T, sockfd, level, opt func (dut *DUT) SetSockOpt(t *testing.T, sockfd, level, optname int32, optval []byte) { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, err := dut.SetSockOptWithErrno(ctx, t, sockfd, level, optname, optval) + ret, err := dut.SetSockOptWithErrno(context.Background(), t, sockfd, level, optname, optval) if ret != 0 { t.Fatalf("failed to SetSockOpt: %s", err) } @@ -655,9 +650,7 @@ func (dut *DUT) SetSockOptWithErrno(ctx context.Context, t *testing.T, sockfd, l func (dut *DUT) SetSockOptInt(t *testing.T, 
sockfd, level, optname, optval int32) { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, err := dut.SetSockOptIntWithErrno(ctx, t, sockfd, level, optname, optval) + ret, err := dut.SetSockOptIntWithErrno(context.Background(), t, sockfd, level, optname, optval) if ret != 0 { t.Fatalf("failed to SetSockOptInt: %s", err) } @@ -676,9 +669,7 @@ func (dut *DUT) SetSockOptIntWithErrno(ctx context.Context, t *testing.T, sockfd func (dut *DUT) SetSockOptTimeval(t *testing.T, sockfd, level, optname int32, tv *unix.Timeval) { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, err := dut.SetSockOptTimevalWithErrno(ctx, t, sockfd, level, optname, tv) + ret, err := dut.SetSockOptTimevalWithErrno(context.Background(), t, sockfd, level, optname, tv) if ret != 0 { t.Fatalf("failed to SetSockOptTimeval: %s", err) } @@ -717,8 +708,7 @@ func (dut *DUT) SocketWithErrno(t *testing.T, domain, typ, proto int32) (int32, Type: typ, Protocol: proto, } - ctx := context.Background() - resp, err := dut.posixServer.Socket(ctx, req) + resp, err := dut.posixServer.Socket(context.Background(), req) if err != nil { t.Fatalf("failed to call Socket: %s", err) } @@ -731,9 +721,7 @@ func (dut *DUT) SocketWithErrno(t *testing.T, domain, typ, proto int32) (int32, func (dut *DUT) Recv(t *testing.T, sockfd, len, flags int32) []byte { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, buf, err := dut.RecvWithErrno(ctx, t, sockfd, len, flags) + ret, buf, err := dut.RecvWithErrno(context.Background(), t, sockfd, len, flags) if ret == -1 { t.Fatalf("failed to recv: %s", err) } @@ -776,9 +764,7 @@ func (dut *DUT) SetSockLingerOption(t *testing.T, sockfd int32, timeout time.Dur func (dut *DUT) Shutdown(t *testing.T, fd, how int32) { t.Helper() - ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) - defer cancel() - ret, err := dut.ShutdownWithErrno(ctx, t, fd, how) + ret, err := dut.ShutdownWithErrno(context.Background(), t, fd, how) if ret != 0 { t.Fatalf("failed to shutdown(%d, %d): %s", fd, how, err) } diff --git a/test/packetimpact/testbench/layers.go b/test/packetimpact/testbench/layers.go index 2311f7686..2644b3248 100644 --- a/test/packetimpact/testbench/layers.go +++ b/test/packetimpact/testbench/layers.go @@ -357,7 +357,7 @@ func (l *IPv4) ToBytes() ([]byte, error) { case *ICMPv4: fields.Protocol = uint8(header.ICMPv4ProtocolNumber) default: - // TODO(b/150301488): Support more protocols as needed. + // We can add support for more protocols as needed. return nil, fmt.Errorf("ipv4 header's next layer is unrecognized: %#v", n) } } @@ -824,6 +824,8 @@ type ICMPv6 struct { Type *header.ICMPv6Type Code *header.ICMPv6Code Checksum *uint16 + Ident *uint16 // Only in Echo Request/Reply. + Pointer *uint32 // Only in Parameter Problem. Payload []byte } @@ -835,7 +837,7 @@ func (l *ICMPv6) String() string { // ToBytes implements Layer.ToBytes. 
func (l *ICMPv6) ToBytes() ([]byte, error) { - b := make([]byte, header.ICMPv6HeaderSize+len(l.Payload)) + b := make([]byte, header.ICMPv6MinimumSize+len(l.Payload)) h := header.ICMPv6(b) if l.Type != nil { h.SetType(*l.Type) @@ -843,27 +845,34 @@ func (l *ICMPv6) ToBytes() ([]byte, error) { if l.Code != nil { h.SetCode(*l.Code) } - if n := copy(h.MessageBody(), l.Payload); n != len(l.Payload) { + if n := copy(h.Payload(), l.Payload); n != len(l.Payload) { panic(fmt.Sprintf("copied %d bytes, expected to copy %d bytes", n, len(l.Payload))) } + typ := h.Type() + switch typ { + case header.ICMPv6EchoRequest, header.ICMPv6EchoReply: + if l.Ident != nil { + h.SetIdent(*l.Ident) + } + case header.ICMPv6ParamProblem: + if l.Pointer != nil { + h.SetTypeSpecific(*l.Pointer) + } + } if l.Checksum != nil { h.SetChecksum(*l.Checksum) } else { // It is possible that the ICMPv6 header does not follow the IPv6 header // immediately, there could be one or more extension headers in between. - // We need to search forward to find the IPv6 header. - for prev := l.Prev(); prev != nil; prev = prev.Prev() { - if ipv6, ok := prev.(*IPv6); ok { - payload, err := payload(l) - if err != nil { - return nil, err - } + // We need to search backwards to find the IPv6 header. + for layer := l.Prev(); layer != nil; layer = layer.Prev() { + if ipv6, ok := layer.(*IPv6); ok { h.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{ - Header: h, + Header: h[:header.ICMPv6PayloadOffset], Src: *ipv6.SrcAddr, Dst: *ipv6.DstAddr, - PayloadCsum: header.ChecksumVV(payload, 0 /* initial */), - PayloadLen: payload.Size(), + PayloadCsum: header.Checksum(l.Payload, 0 /* initial */), + PayloadLen: len(l.Payload), })) break } @@ -884,20 +893,21 @@ func ICMPv6Code(v header.ICMPv6Code) *header.ICMPv6Code { return &v } -// Byte is a helper routine that allocates a new byte value to store -// v and returns a pointer to it. -func Byte(v byte) *byte { - return &v -} - // parseICMPv6 parses the bytes assuming that they start with an ICMPv6 header. func parseICMPv6(b []byte) (Layer, layerParser) { h := header.ICMPv6(b) + msgType := h.Type() icmpv6 := ICMPv6{ - Type: ICMPv6Type(h.Type()), + Type: ICMPv6Type(msgType), Code: ICMPv6Code(h.Code()), Checksum: Uint16(h.Checksum()), - Payload: h.MessageBody(), + Payload: h.Payload(), + } + switch msgType { + case header.ICMPv6EchoRequest, header.ICMPv6EchoReply: + icmpv6.Ident = Uint16(h.Ident()) + case header.ICMPv6ParamProblem: + icmpv6.Pointer = Uint32(h.TypeSpecific()) } return &icmpv6, nil } @@ -907,7 +917,7 @@ func (l *ICMPv6) match(other Layer) bool { } func (l *ICMPv6) length() int { - return header.ICMPv6HeaderSize + len(l.Payload) + return header.ICMPv6MinimumSize + len(l.Payload) } // merge overrides the values in l with the values from other but only in fields @@ -954,8 +964,8 @@ func (l *ICMPv4) ToBytes() ([]byte, error) { if l.Code != nil { h.SetCode(*l.Code) } - if copied := copy(h.Payload(), l.Payload); copied != len(l.Payload) { - panic(fmt.Sprintf("wrong number of bytes copied into h.Payload(): got = %d, want = %d", len(h.Payload()), len(l.Payload))) + if n := copy(h.Payload(), l.Payload); n != len(l.Payload) { + panic(fmt.Sprintf("wrong number of bytes copied into h.Payload(): got = %d, want = %d", n, len(l.Payload))) } typ := h.Type() switch typ { @@ -977,16 +987,7 @@ func (l *ICMPv4) ToBytes() ([]byte, error) { if l.Checksum != nil { h.SetChecksum(*l.Checksum) } else { - // Compute the checksum based on the ICMPv4.Payload and also the subsequent - // layers. 
- payload, err := payload(l) - if err != nil { - return nil, err - } - var vv buffer.VectorisedView - vv.AppendView(buffer.View(l.Payload)) - vv.Append(payload) - h.SetChecksum(header.ICMPv4Checksum(h, header.ChecksumVV(vv, 0 /* initial */))) + h.SetChecksum(^header.Checksum(h, 0)) } return h, nil @@ -1019,7 +1020,7 @@ func (l *ICMPv4) match(other Layer) bool { } func (l *ICMPv4) length() int { - return header.ICMPv4MinimumSize + return header.ICMPv4MinimumSize + len(l.Payload) } // merge overrides the values in l with the values from other but only in fields diff --git a/test/packetimpact/testbench/layers_test.go b/test/packetimpact/testbench/layers_test.go index 614a5de1e..bc96e0c88 100644 --- a/test/packetimpact/testbench/layers_test.go +++ b/test/packetimpact/testbench/layers_test.go @@ -596,7 +596,7 @@ func TestIPv6ExtHdrOptions(t *testing.T) { Type: ICMPv6Type(header.ICMPv6ParamProblem), Code: ICMPv6Code(header.ICMPv6ErroneousHeader), Checksum: Uint16(0x5f98), - Payload: []byte{0x00, 0x00, 0x00, 0x06}, + Pointer: Uint32(6), }, }, }, diff --git a/test/packetimpact/testbench/rawsockets.go b/test/packetimpact/testbench/rawsockets.go index feeb0888a..6d95c033d 100644 --- a/test/packetimpact/testbench/rawsockets.go +++ b/test/packetimpact/testbench/rawsockets.go @@ -17,7 +17,6 @@ package testbench import ( "encoding/binary" "fmt" - "math" "net" "testing" "time" @@ -81,19 +80,20 @@ func (s *Sniffer) Recv(t *testing.T, timeout time.Duration) []byte { deadline := time.Now().Add(timeout) for { - timeout = deadline.Sub(time.Now()) + timeout = time.Until(deadline) if timeout <= 0 { return nil } - whole, frac := math.Modf(timeout.Seconds()) - tv := unix.Timeval{ - Sec: int64(whole), - Usec: int64(frac * float64(time.Second/time.Microsecond)), + usec := timeout.Microseconds() + if usec == 0 { + // Timeout is less than a microsecond; set usec to 1 to avoid + // blocking indefinitely. + usec = 1 } - // The following should never happen, but having this guard here is better - // than blocking indefinitely in the future. - if tv.Sec == 0 && tv.Usec == 0 { - t.Fatal("setting SO_RCVTIMEO to 0 means blocking indefinitely") + const microsInOne = 1e6 + tv := unix.Timeval{ + Sec: usec / microsInOne, + Usec: usec % microsInOne, } if err := unix.SetsockoptTimeval(s.fd, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &tv); err != nil { t.Fatalf("can't setsockopt SO_RCVTIMEO: %s", err) diff --git a/test/packetimpact/testbench/testbench.go b/test/packetimpact/testbench/testbench.go index 37d02365a..38ae9c1d7 100644 --- a/test/packetimpact/testbench/testbench.go +++ b/test/packetimpact/testbench/testbench.go @@ -31,8 +31,6 @@ var ( Native = false // RPCKeepalive is the gRPC keepalive. RPCKeepalive = 10 * time.Second - // RPCTimeout is the gRPC timeout. - RPCTimeout = 100 * time.Millisecond // dutInfosJSON is the json string that describes information about all the // duts available to use. @@ -57,11 +55,21 @@ type DUTUname struct { OperatingSystem string } -// IsLinux returns true if we are running natively on Linux. +// IsLinux returns true if the DUT is running Linux. func (n *DUTUname) IsLinux() bool { return Native && n.OperatingSystem == "GNU/Linux" } +// IsGvisor returns true if the DUT is running gVisor. +func (*DUTUname) IsGvisor() bool { + return !Native +} + +// IsFuchsia returns true if the DUT is running Fuchsia. 
+func (n *DUTUname) IsFuchsia() bool { + return Native && n.OperatingSystem == "Fuchsia" +} + // DUTTestNet describes the test network setup on dut and how the testbench // should connect with an existing DUT. type DUTTestNet struct { @@ -99,12 +107,21 @@ type DUTTestNet struct { POSIXServerPort uint16 } +// SubnetBroadcast returns the test network's subnet broadcast address. +func (n *DUTTestNet) SubnetBroadcast() net.IP { + addr := append([]byte(nil), n.RemoteIPv4...) + mask := net.CIDRMask(n.IPv4PrefixLength, net.IPv4len*8) + for i := range addr { + addr[i] |= ^mask[i] + } + return addr +} + // registerFlags defines flags and associates them with the package-level // exported variables above. It should be called by tests in their init // functions. func registerFlags(fs *flag.FlagSet) { fs.BoolVar(&Native, "native", Native, "whether the test is running natively") - fs.DurationVar(&RPCTimeout, "rpc_timeout", RPCTimeout, "gRPC timeout") fs.DurationVar(&RPCKeepalive, "rpc_keepalive", RPCKeepalive, "gRPC keepalive") fs.StringVar(&dutInfosJSON, "dut_infos_json", dutInfosJSON, "json that describes the DUTs") } @@ -112,6 +129,7 @@ func registerFlags(fs *flag.FlagSet) { // Initialize initializes the testbench, it parse the flags and sets up the // pool of test networks for testbench's later use. func Initialize(fs *flag.FlagSet) { + testing.Init() registerFlags(fs) flag.Parse() if err := loadDUTInfos(); err != nil { diff --git a/test/packetimpact/tests/BUILD b/test/packetimpact/tests/BUILD index c4fe293e0..4cff0cf4c 100644 --- a/test/packetimpact/tests/BUILD +++ b/test/packetimpact/tests/BUILD @@ -104,8 +104,6 @@ packetimpact_testbench( srcs = ["tcp_retransmits_test.go"], deps = [ "//pkg/abi/linux", - "//pkg/binary", - "//pkg/hostarch", "//pkg/tcpip/header", "//test/packetimpact/testbench", "@org_golang_x_sys//unix:go_default_library", @@ -189,6 +187,7 @@ packetimpact_testbench( name = "tcp_synsent_reset", srcs = ["tcp_synsent_reset_test.go"], deps = [ + "//pkg/abi/linux", "//pkg/tcpip/header", "//test/packetimpact/testbench", "@org_golang_x_sys//unix:go_default_library", @@ -307,18 +306,6 @@ packetimpact_testbench( ) packetimpact_testbench( - name = "udp_send_recv_dgram", - srcs = ["udp_send_recv_dgram_test.go"], - deps = [ - "//pkg/tcpip", - "//pkg/tcpip/header", - "//test/packetimpact/testbench", - "@com_github_google_go_cmp//cmp:go_default_library", - "@org_golang_x_sys//unix:go_default_library", - ], -) - -packetimpact_testbench( name = "tcp_linger", srcs = ["tcp_linger_test.go"], deps = [ @@ -353,8 +340,6 @@ packetimpact_testbench( srcs = ["tcp_rack_test.go"], deps = [ "//pkg/abi/linux", - "//pkg/binary", - "//pkg/hostarch", "//pkg/tcpip/header", "//pkg/tcpip/seqnum", "//test/packetimpact/testbench", @@ -367,8 +352,6 @@ packetimpact_testbench( srcs = ["tcp_info_test.go"], deps = [ "//pkg/abi/linux", - "//pkg/binary", - "//pkg/hostarch", "//pkg/tcpip/header", "//test/packetimpact/testbench", "@org_golang_x_sys//unix:go_default_library", @@ -401,6 +384,7 @@ packetimpact_testbench( deps = [ "//pkg/tcpip/header", "//test/packetimpact/testbench", + "@com_github_google_go_cmp//cmp:go_default_library", "@org_golang_x_sys//unix:go_default_library", ], ) @@ -415,10 +399,23 @@ packetimpact_testbench( ], ) +packetimpact_testbench( + name = "generic_dgram_socket_send_recv", + srcs = ["generic_dgram_socket_send_recv_test.go"], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/header", + "//test/packetimpact/testbench", + "@com_github_google_go_cmp//cmp:go_default_library", + 
"@org_golang_x_sys//unix:go_default_library", + ], +) + validate_all_tests() [packetimpact_go_test( name = t.name, + timeout = t.timeout if hasattr(t, "timeout") else "moderate", expect_netstack_failure = hasattr(t, "expect_netstack_failure"), num_duts = t.num_duts if hasattr(t, "num_duts") else 1, ) for t in ALL_TESTS] diff --git a/test/packetimpact/tests/generic_dgram_socket_send_recv_test.go b/test/packetimpact/tests/generic_dgram_socket_send_recv_test.go new file mode 100644 index 000000000..a9ffafc74 --- /dev/null +++ b/test/packetimpact/tests/generic_dgram_socket_send_recv_test.go @@ -0,0 +1,781 @@ +// Copyright 2021 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package generic_dgram_socket_send_recv_test + +import ( + "context" + "flag" + "fmt" + "net" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/test/packetimpact/testbench" +) + +const ( + // Even though sockets allow larger datagrams we don't test it here as they + // need to be fragmented and written out as individual frames. + + maxICMPv4PayloadSize = header.IPv4MinimumMTU - header.EthernetMinimumSize - header.IPv4MinimumSize - header.ICMPv4MinimumSize + maxICMPv6PayloadSize = header.IPv6MinimumMTU - header.EthernetMinimumSize - header.IPv6MinimumSize - header.ICMPv6MinimumSize + maxUDPv4PayloadSize = header.IPv4MinimumMTU - header.EthernetMinimumSize - header.IPv4MinimumSize - header.UDPMinimumSize + maxUDPv6PayloadSize = header.IPv6MinimumMTU - header.EthernetMinimumSize - header.IPv6MinimumSize - header.UDPMinimumSize +) + +func maxUDPPayloadSize(addr net.IP) int { + if addr.To4() != nil { + return maxUDPv4PayloadSize + } + return maxUDPv6PayloadSize +} + +func init() { + testbench.Initialize(flag.CommandLine) +} + +func expectedEthLayer(t *testing.T, dut testbench.DUT, socketFD int32, sendTo net.IP) testbench.Layer { + t.Helper() + dst := func() tcpip.LinkAddress { + if isBroadcast(dut, sendTo) { + dut.SetSockOptInt(t, socketFD, unix.SOL_SOCKET, unix.SO_BROADCAST, 1) + + // When sending to broadcast (subnet or limited), the expected ethernet + // address is also broadcast. 
+ return header.EthernetBroadcastAddress + } + if sendTo.IsMulticast() { + if sendTo4 := sendTo.To4(); sendTo4 != nil { + return header.EthernetAddressFromMulticastIPv4Address(tcpip.Address(sendTo4)) + } + return header.EthernetAddressFromMulticastIPv6Address(tcpip.Address(sendTo.To16())) + } + return "" + }() + var ether testbench.Ether + if len(dst) != 0 { + ether.DstAddr = &dst + } + return ðer +} + +type protocolTest interface { + Name() string + Send(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) + Receive(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) +} + +func TestSocket(t *testing.T) { + dut := testbench.NewDUT(t) + subnetBroadcast := dut.Net.SubnetBroadcast() + + for _, proto := range []protocolTest{ + &icmpV4Test{}, + &icmpV6Test{}, + &udpTest{}, + } { + t.Run(proto.Name(), func(t *testing.T) { + // Test every combination of bound/unbound, broadcast/multicast/unicast + // bound/destination address, and bound/not-bound to device. + for _, bindTo := range []net.IP{ + nil, // Do not bind. + net.IPv4zero, + net.IPv4bcast, + net.IPv4allsys, + net.IPv6zero, + subnetBroadcast, + dut.Net.RemoteIPv4, + dut.Net.RemoteIPv6, + } { + t.Run(fmt.Sprintf("bindTo=%s", bindTo), func(t *testing.T) { + for _, sendTo := range []net.IP{ + net.IPv4bcast, + net.IPv4allsys, + subnetBroadcast, + dut.Net.LocalIPv4, + dut.Net.LocalIPv6, + dut.Net.RemoteIPv4, + dut.Net.RemoteIPv6, + } { + t.Run(fmt.Sprintf("sendTo=%s", sendTo), func(t *testing.T) { + for _, bindToDevice := range []bool{true, false} { + t.Run(fmt.Sprintf("bindToDevice=%t", bindToDevice), func(t *testing.T) { + t.Run("Send", func(t *testing.T) { + proto.Send(t, dut, bindTo, sendTo, bindToDevice) + }) + t.Run("Receive", func(t *testing.T) { + proto.Receive(t, dut, bindTo, sendTo, bindToDevice) + }) + }) + } + }) + } + }) + } + }) + } +} + +type icmpV4TestEnv struct { + socketFD int32 + ident uint16 + conn testbench.IPv4Conn + layers testbench.Layers +} + +type icmpV4Test struct{} + +func (test *icmpV4Test) setup(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) icmpV4TestEnv { + t.Helper() + + // Tell the DUT to create a socket. + var socketFD int32 + var ident uint16 + + if bindTo != nil { + socketFD, ident = dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_ICMP, bindTo) + } else { + // An unbound socket will auto-bind to INADDR_ANY. + socketFD = dut.Socket(t, unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_ICMP) + } + t.Cleanup(func() { + dut.Close(t, socketFD) + }) + + if bindToDevice { + dut.SetSockOpt(t, socketFD, unix.SOL_SOCKET, unix.SO_BINDTODEVICE, []byte(dut.Net.RemoteDevName)) + } + + // Create a socket on the test runner. + conn := dut.Net.NewIPv4Conn(t, testbench.IPv4{}, testbench.IPv4{}) + t.Cleanup(func() { + conn.Close(t) + }) + + return icmpV4TestEnv{ + socketFD: socketFD, + ident: ident, + conn: conn, + layers: testbench.Layers{ + expectedEthLayer(t, dut, socketFD, sendTo), + &testbench.IPv4{ + DstAddr: testbench.Address(tcpip.Address(sendTo.To4())), + }, + }, + } +} + +var _ protocolTest = (*icmpV4Test)(nil) + +func (*icmpV4Test) Name() string { return "icmpv4" } + +func (test *icmpV4Test) Send(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) { + if bindTo.To4() == nil || isBroadcastOrMulticast(dut, bindTo) { + // ICMPv4 sockets cannot bind to IPv6, broadcast, or multicast + // addresses. 
+ return + } + + isV4 := sendTo.To4() != nil + + // TODO(gvisor.dev/issue/5681): Remove this case once ICMP sockets allow + // sending to broadcast and multicast addresses. + if (dut.Uname.IsGvisor() || dut.Uname.IsFuchsia()) && isV4 && isBroadcastOrMulticast(dut, sendTo) { + // expectPacket cannot be false. In some cases the packet will send, but + // with IPv4 destination incorrectly set to RemoteIPv4. It's all bad and + // not worth the effort to create a special case when this occurs. + t.Skip("TODO(gvisor.dev/issue/5681): Allow sending to broadcast and multicast addresses with ICMP sockets.") + } + + expectPacket := isV4 && !sendTo.Equal(dut.Net.RemoteIPv4) + switch { + case bindTo.Equal(dut.Net.RemoteIPv4): + // If we're explicitly bound to an interface's unicast address, + // packets are always sent on that interface. + case bindToDevice: + // If we're explicitly bound to an interface, packets are always + // sent on that interface. + case !sendTo.Equal(net.IPv4bcast) && !sendTo.IsMulticast(): + // If we're not sending to limited broadcast or multicast, the route + // table will be consulted and packets will be sent on the correct + // interface. + default: + expectPacket = false + } + + env := test.setup(t, dut, bindTo, sendTo, bindToDevice) + + for name, payload := range map[string][]byte{ + "empty": nil, + "small": []byte("hello world"), + "random1k": testbench.GenerateRandomPayload(t, maxICMPv4PayloadSize), + } { + t.Run(name, func(t *testing.T) { + icmpLayer := &testbench.ICMPv4{ + Type: testbench.ICMPv4Type(header.ICMPv4Echo), + Payload: payload, + } + bytes, err := icmpLayer.ToBytes() + if err != nil { + t.Fatalf("icmpLayer.ToBytes() = %s", err) + } + destSockaddr := unix.SockaddrInet4{} + copy(destSockaddr.Addr[:], sendTo.To4()) + + // Tell the DUT to send a packet out the ICMP socket. + if got, want := dut.SendTo(t, env.socketFD, bytes, 0, &destSockaddr), len(bytes); int(got) != want { + t.Fatalf("got dut.SendTo = %d, want %d", got, want) + } + + // Verify the test runner received an ICMP packet with the correctly + // set "ident". + if env.ident != 0 { + icmpLayer.Ident = &env.ident + } + want := append(env.layers, icmpLayer) + if got, ok := env.conn.ListenForFrame(t, want, time.Second); !ok && expectPacket { + t.Fatalf("did not receive expected frame matching %s\nGot frames: %s", want, got) + } else if ok && !expectPacket { + matchedFrame := got[len(got)-1] + t.Fatalf("got unexpected frame matching %s\nGot frame: %s", want, matchedFrame) + } + }) + } +} + +func (test *icmpV4Test) Receive(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) { + if bindTo.To4() == nil || isBroadcastOrMulticast(dut, bindTo) { + // ICMPv4 sockets cannot bind to IPv6, broadcast, or multicast + // addresses. + return + } + + expectPacket := (bindTo.Equal(dut.Net.RemoteIPv4) || bindTo.Equal(net.IPv4zero)) && sendTo.Equal(dut.Net.RemoteIPv4) + + // TODO(gvisor.dev/issue/5763): Remove this if statement once gVisor + // restricts ICMP sockets to receive only from unicast addresses. 
+ if (dut.Uname.IsGvisor() || dut.Uname.IsFuchsia()) && bindTo.Equal(net.IPv4zero) && isBroadcastOrMulticast(dut, sendTo) { + expectPacket = true + } + + env := test.setup(t, dut, bindTo, sendTo, bindToDevice) + + for name, payload := range map[string][]byte{ + "empty": nil, + "small": []byte("hello world"), + "random1k": testbench.GenerateRandomPayload(t, maxICMPv4PayloadSize), + } { + t.Run(name, func(t *testing.T) { + icmpLayer := &testbench.ICMPv4{ + Type: testbench.ICMPv4Type(header.ICMPv4EchoReply), + Payload: payload, + } + if env.ident != 0 { + icmpLayer.Ident = &env.ident + } + + // Send an ICMPv4 packet from the test runner to the DUT. + frame := env.conn.CreateFrame(t, env.layers, icmpLayer) + env.conn.SendFrame(t, frame) + + // Verify the behavior of the ICMP socket on the DUT. + if expectPacket { + payload, err := icmpLayer.ToBytes() + if err != nil { + t.Fatalf("icmpLayer.ToBytes() = %s", err) + } + + // Receive one extra byte to assert the length of the + // packet received in the case where the packet contains + // more data than expected. + len := int32(len(payload)) + 1 + got, want := dut.Recv(t, env.socketFD, len, 0), payload + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("received payload does not match sent payload, diff (-want, +got):\n%s", diff) + } + } else { + // Expected receive error, set a short receive timeout. + dut.SetSockOptTimeval( + t, + env.socketFD, + unix.SOL_SOCKET, + unix.SO_RCVTIMEO, + &unix.Timeval{ + Sec: 1, + Usec: 0, + }, + ) + ret, recvPayload, errno := dut.RecvWithErrno(context.Background(), t, env.socketFD, maxICMPv4PayloadSize, 0) + if errno != unix.EAGAIN || errno != unix.EWOULDBLOCK { + t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, recvPayload, errno) + } + } + }) + } +} + +type icmpV6TestEnv struct { + socketFD int32 + ident uint16 + conn testbench.IPv6Conn + layers testbench.Layers +} + +// icmpV6Test and icmpV4Test look substantially similar at first look, but have +// enough subtle differences in setup and test expectations to discourage +// refactoring: +// - Different IP layers +// - Different testbench.Connections +// - Different UNIX domain and proto arguments +// - Different expectPacket and wantErrno for send and receive +type icmpV6Test struct{} + +func (test *icmpV6Test) setup(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) icmpV6TestEnv { + t.Helper() + + // Tell the DUT to create a socket. + var socketFD int32 + var ident uint16 + + if bindTo != nil { + socketFD, ident = dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_ICMPV6, bindTo) + } else { + // An unbound socket will auto-bind to IN6ADDR_ANY_INIT. + socketFD = dut.Socket(t, unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_ICMPV6) + } + t.Cleanup(func() { + dut.Close(t, socketFD) + }) + + if bindToDevice { + dut.SetSockOpt(t, socketFD, unix.SOL_SOCKET, unix.SO_BINDTODEVICE, []byte(dut.Net.RemoteDevName)) + } + + // Create a socket on the test runner. 
+ conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{}) + t.Cleanup(func() { + conn.Close(t) + }) + + return icmpV6TestEnv{ + socketFD: socketFD, + ident: ident, + conn: conn, + layers: testbench.Layers{ + expectedEthLayer(t, dut, socketFD, sendTo), + &testbench.IPv6{ + DstAddr: testbench.Address(tcpip.Address(sendTo.To16())), + }, + }, + } +} + +var _ protocolTest = (*icmpV6Test)(nil) + +func (*icmpV6Test) Name() string { return "icmpv6" } + +func (test *icmpV6Test) Send(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) { + if bindTo.To4() != nil || bindTo.IsMulticast() { + // ICMPv6 sockets cannot bind to IPv4 or multicast addresses. + return + } + + expectPacket := sendTo.Equal(dut.Net.LocalIPv6) + wantErrno := unix.Errno(0) + + if sendTo.To4() != nil { + wantErrno = unix.EINVAL + } + + // TODO(gvisor.dev/issue/5966): Remove this if statement once ICMPv6 sockets + // return EINVAL after calling sendto with an IPv4 address. + if (dut.Uname.IsGvisor() || dut.Uname.IsFuchsia()) && sendTo.To4() != nil { + switch { + case bindTo.Equal(dut.Net.RemoteIPv6): + wantErrno = unix.ENETUNREACH + case bindTo.Equal(net.IPv6zero) || bindTo == nil: + wantErrno = unix.Errno(0) + } + } + + env := test.setup(t, dut, bindTo, sendTo, bindToDevice) + + for name, payload := range map[string][]byte{ + "empty": nil, + "small": []byte("hello world"), + "random1k": testbench.GenerateRandomPayload(t, maxICMPv6PayloadSize), + } { + t.Run(name, func(t *testing.T) { + icmpLayer := &testbench.ICMPv6{ + Type: testbench.ICMPv6Type(header.ICMPv6EchoRequest), + Payload: payload, + } + bytes, err := icmpLayer.ToBytes() + if err != nil { + t.Fatalf("icmpLayer.ToBytes() = %s", err) + } + destSockaddr := unix.SockaddrInet6{ + ZoneId: dut.Net.RemoteDevID, + } + copy(destSockaddr.Addr[:], sendTo.To16()) + + // Tell the DUT to send a packet out the ICMPv6 socket. + gotRet, gotErrno := dut.SendToWithErrno(context.Background(), t, env.socketFD, bytes, 0, &destSockaddr) + + if gotErrno != wantErrno { + t.Fatalf("got dut.SendToWithErrno(_, _, %d, _, _, %s) = (_, %s), want = (_, %s)", env.socketFD, sendTo, gotErrno, wantErrno) + } + if wantErrno != 0 { + return + } + if got, want := int(gotRet), len(bytes); got != want { + t.Fatalf("got dut.SendToWithErrno(_, _, %d, _, _, %s) = (%d, _), want = (%d, _)", env.socketFD, sendTo, got, want) + } + + // Verify the test runner received an ICMPv6 packet with the + // correctly set "ident". + if env.ident != 0 { + icmpLayer.Ident = &env.ident + } + want := append(env.layers, icmpLayer) + if got, ok := env.conn.ListenForFrame(t, want, time.Second); !ok && expectPacket { + t.Fatalf("did not receive expected frame matching %s\nGot frames: %s", want, got) + } else if ok && !expectPacket { + matchedFrame := got[len(got)-1] + t.Fatalf("got unexpected frame matching %s\nGot frame: %s", want, matchedFrame) + } + }) + } +} + +func (test *icmpV6Test) Receive(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) { + if bindTo.To4() != nil || bindTo.IsMulticast() { + // ICMPv6 sockets cannot bind to IPv4 or multicast addresses. 
+ return + } + + expectPacket := true + switch { + case bindTo.Equal(dut.Net.RemoteIPv6) && sendTo.Equal(dut.Net.RemoteIPv6): + case bindTo.Equal(net.IPv6zero) && sendTo.Equal(dut.Net.RemoteIPv6): + case bindTo.Equal(net.IPv6zero) && sendTo.Equal(net.IPv6linklocalallnodes): + default: + expectPacket = false + } + + // TODO(gvisor.dev/issue/5763): Remove this if statement once gVisor + // restricts ICMP sockets to receive only from unicast addresses. + if (dut.Uname.IsGvisor() || dut.Uname.IsFuchsia()) && bindTo.Equal(net.IPv6zero) && isBroadcastOrMulticast(dut, sendTo) { + expectPacket = false + } + + env := test.setup(t, dut, bindTo, sendTo, bindToDevice) + + for name, payload := range map[string][]byte{ + "empty": nil, + "small": []byte("hello world"), + "random1k": testbench.GenerateRandomPayload(t, maxICMPv6PayloadSize), + } { + t.Run(name, func(t *testing.T) { + icmpLayer := &testbench.ICMPv6{ + Type: testbench.ICMPv6Type(header.ICMPv6EchoReply), + Payload: payload, + } + if env.ident != 0 { + icmpLayer.Ident = &env.ident + } + + // Send an ICMPv6 packet from the test runner to the DUT. + frame := env.conn.CreateFrame(t, env.layers, icmpLayer) + env.conn.SendFrame(t, frame) + + // Verify the behavior of the ICMPv6 socket on the DUT. + if expectPacket { + payload, err := icmpLayer.ToBytes() + if err != nil { + t.Fatalf("icmpLayer.ToBytes() = %s", err) + } + + // Receive one extra byte to assert the length of the + // packet received in the case where the packet contains + // more data than expected. + len := int32(len(payload)) + 1 + got, want := dut.Recv(t, env.socketFD, len, 0), payload + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("received payload does not match sent payload, diff (-want, +got):\n%s", diff) + } + } else { + // Expected receive error, set a short receive timeout. + dut.SetSockOptTimeval( + t, + env.socketFD, + unix.SOL_SOCKET, + unix.SO_RCVTIMEO, + &unix.Timeval{ + Sec: 1, + Usec: 0, + }, + ) + ret, recvPayload, errno := dut.RecvWithErrno(context.Background(), t, env.socketFD, maxICMPv6PayloadSize, 0) + if errno != unix.EAGAIN || errno != unix.EWOULDBLOCK { + t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, recvPayload, errno) + } + } + }) + } +} + +type udpConn interface { + SrcPort(*testing.T) uint16 + SendFrame(*testing.T, testbench.Layers, ...testbench.Layer) + ListenForFrame(*testing.T, testbench.Layers, time.Duration) ([]testbench.Layers, bool) + Close(*testing.T) +} + +type udpTestEnv struct { + socketFD int32 + conn udpConn + layers testbench.Layers +} + +type udpTest struct{} + +func (test *udpTest) setup(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) udpTestEnv { + t.Helper() + + var ( + socketFD int32 + outgoingUDP, incomingUDP testbench.UDP + ) + + // Tell the DUT to create a socket. + if bindTo != nil { + var remotePort uint16 + socketFD, remotePort = dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, bindTo) + outgoingUDP.DstPort = &remotePort + incomingUDP.SrcPort = &remotePort + } else { + // An unbound socket will auto-bind to INADDR_ANY. + socketFD = dut.Socket(t, unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_UDP) + } + t.Cleanup(func() { + dut.Close(t, socketFD) + }) + + if bindToDevice { + dut.SetSockOpt(t, socketFD, unix.SOL_SOCKET, unix.SO_BINDTODEVICE, []byte(dut.Net.RemoteDevName)) + } + + // Create a socket on the test runner. 
+ var conn udpConn + var ipLayer testbench.Layer + if addr := sendTo.To4(); addr != nil { + udpConn := dut.Net.NewUDPIPv4(t, outgoingUDP, incomingUDP) + conn = &udpConn + ipLayer = &testbench.IPv4{ + DstAddr: testbench.Address(tcpip.Address(addr)), + } + } else { + udpConn := dut.Net.NewUDPIPv6(t, outgoingUDP, incomingUDP) + conn = &udpConn + ipLayer = &testbench.IPv6{ + DstAddr: testbench.Address(tcpip.Address(sendTo.To16())), + } + } + t.Cleanup(func() { + conn.Close(t) + }) + + return udpTestEnv{ + socketFD: socketFD, + conn: conn, + layers: testbench.Layers{ + expectedEthLayer(t, dut, socketFD, sendTo), + ipLayer, + &incomingUDP, + }, + } +} + +var _ protocolTest = (*udpTest)(nil) + +func (*udpTest) Name() string { return "udp" } + +func (test *udpTest) Send(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) { + canSend := bindTo == nil || bindTo.Equal(net.IPv6zero) || sameIPVersion(sendTo, bindTo) + expectPacket := canSend && !isRemoteAddr(dut, sendTo) + switch { + case bindTo.Equal(dut.Net.RemoteIPv4): + // If we're explicitly bound to an interface's unicast address, + // packets are always sent on that interface. + case bindToDevice: + // If we're explicitly bound to an interface, packets are always + // sent on that interface. + case !sendTo.Equal(net.IPv4bcast) && !sendTo.IsMulticast(): + // If we're not sending to limited broadcast, multicast, or local, the + // route table will be consulted and packets will be sent on the correct + // interface. + default: + expectPacket = false + } + + wantErrno := unix.Errno(0) + switch { + case !canSend && bindTo.To4() != nil: + wantErrno = unix.EAFNOSUPPORT + case !canSend && bindTo.To4() == nil: + wantErrno = unix.ENETUNREACH + } + + // TODO(gvisor.dev/issue/5967): Remove this if statement once UDPv4 sockets + // returns EAFNOSUPPORT after calling sendto with an IPv6 address. + if dut.Uname.IsGvisor() && !canSend && bindTo.To4() != nil { + wantErrno = unix.EINVAL + } + + env := test.setup(t, dut, bindTo, sendTo, bindToDevice) + + for name, payload := range map[string][]byte{ + "empty": nil, + "small": []byte("hello world"), + "random1k": testbench.GenerateRandomPayload(t, maxUDPPayloadSize(bindTo)), + } { + t.Run(name, func(t *testing.T) { + var destSockaddr unix.Sockaddr + if sendTo4 := sendTo.To4(); sendTo4 != nil { + addr := unix.SockaddrInet4{ + Port: int(env.conn.SrcPort(t)), + } + copy(addr.Addr[:], sendTo4) + destSockaddr = &addr + } else { + addr := unix.SockaddrInet6{ + Port: int(env.conn.SrcPort(t)), + ZoneId: dut.Net.RemoteDevID, + } + copy(addr.Addr[:], sendTo.To16()) + destSockaddr = &addr + } + + // Tell the DUT to send a packet out the UDP socket. + gotRet, gotErrno := dut.SendToWithErrno(context.Background(), t, env.socketFD, payload, 0, destSockaddr) + + if gotErrno != wantErrno { + t.Fatalf("got dut.SendToWithErrno(_, _, %d, _, _, %s) = (_, %s), want = (_, %s)", env.socketFD, sendTo, gotErrno, wantErrno) + } + if wantErrno != 0 { + return + } + if got, want := int(gotRet), len(payload); got != want { + t.Fatalf("got dut.SendToWithErrno(_, _, %d, _, _, %s) = (%d, _), want = (%d, _)", env.socketFD, sendTo, got, want) + } + + // Verify the test runner received a UDP packet with the + // correct payload. 
+ want := append(env.layers, &testbench.Payload{ + Bytes: payload, + }) + if got, ok := env.conn.ListenForFrame(t, want, time.Second); !ok && expectPacket { + t.Fatalf("did not receive expected frame matching %s\nGot frames: %s", want, got) + } else if ok && !expectPacket { + matchedFrame := got[len(got)-1] + t.Fatalf("got unexpected frame matching %s\nGot frame: %s", want, matchedFrame) + } + }) + } +} + +func (test *udpTest) Receive(t *testing.T, dut testbench.DUT, bindTo, sendTo net.IP, bindToDevice bool) { + subnetBroadcast := dut.Net.SubnetBroadcast() + + expectPacket := true + switch { + case bindTo.Equal(sendTo): + case bindTo.Equal(net.IPv4zero) && sameIPVersion(bindTo, sendTo) && !sendTo.Equal(dut.Net.LocalIPv4): + case bindTo.Equal(net.IPv6zero) && isBroadcast(dut, sendTo): + case bindTo.Equal(net.IPv6zero) && isRemoteAddr(dut, sendTo): + case bindTo.Equal(subnetBroadcast) && sendTo.Equal(subnetBroadcast): + default: + expectPacket = false + } + + // TODO(gvisor.dev/issue/5956): Remove this if statement once gVisor + // restricts ICMP sockets to receive only from unicast addresses. + if (dut.Uname.IsGvisor() || dut.Uname.IsFuchsia()) && bindTo.Equal(net.IPv6zero) && sendTo.Equal(net.IPv4allsys) { + expectPacket = true + } + + env := test.setup(t, dut, bindTo, sendTo, bindToDevice) + maxPayloadSize := maxUDPPayloadSize(bindTo) + + for name, payload := range map[string][]byte{ + "empty": nil, + "small": []byte("hello world"), + "random1k": testbench.GenerateRandomPayload(t, maxPayloadSize), + } { + t.Run(name, func(t *testing.T) { + // Send a UDP packet from the test runner to the DUT. + env.conn.SendFrame(t, env.layers, &testbench.Payload{Bytes: payload}) + + // Verify the behavior of the ICMP socket on the DUT. + if expectPacket { + // Receive one extra byte to assert the length of the + // packet received in the case where the packet contains + // more data than expected. + len := int32(len(payload)) + 1 + got, want := dut.Recv(t, env.socketFD, len, 0), payload + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("received payload does not match sent payload, diff (-want, +got):\n%s", diff) + } + } else { + // Expected receive error, set a short receive timeout. 
+ dut.SetSockOptTimeval( + t, + env.socketFD, + unix.SOL_SOCKET, + unix.SO_RCVTIMEO, + &unix.Timeval{ + Sec: 1, + Usec: 0, + }, + ) + ret, recvPayload, errno := dut.RecvWithErrno(context.Background(), t, env.socketFD, int32(maxPayloadSize), 0) + if errno != unix.EAGAIN || errno != unix.EWOULDBLOCK { + t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, recvPayload, errno) + } + } + }) + } +} + +func isBroadcast(dut testbench.DUT, ip net.IP) bool { + return ip.Equal(net.IPv4bcast) || ip.Equal(dut.Net.SubnetBroadcast()) +} + +func isBroadcastOrMulticast(dut testbench.DUT, ip net.IP) bool { + return isBroadcast(dut, ip) || ip.IsMulticast() +} + +func sameIPVersion(a, b net.IP) bool { + return (a.To4() == nil) == (b.To4() == nil) +} + +func isRemoteAddr(dut testbench.DUT, ip net.IP) bool { + return ip.Equal(dut.Net.RemoteIPv4) || ip.Equal(dut.Net.RemoteIPv6) +} diff --git a/test/packetimpact/tests/icmpv6_param_problem_test.go b/test/packetimpact/tests/icmpv6_param_problem_test.go index 1beccb6cf..fdabcf1ad 100644 --- a/test/packetimpact/tests/icmpv6_param_problem_test.go +++ b/test/packetimpact/tests/icmpv6_param_problem_test.go @@ -15,7 +15,6 @@ package icmpv6_param_problem_test import ( - "encoding/binary" "flag" "testing" "time" @@ -56,13 +55,11 @@ func TestICMPv6ParamProblemTest(t *testing.T) { t.Fatalf("can't convert %s to bytes: %s", ipv6Sent, err) } - // The problematic field is the NextHeader. - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, header.IPv6NextHeaderOffset) - expectedPayload = append(b, expectedPayload...) expectedICMPv6 := testbench.ICMPv6{ Type: testbench.ICMPv6Type(header.ICMPv6ParamProblem), Payload: expectedPayload, + // The problematic field is the NextHeader. + Pointer: testbench.Uint32(header.IPv6NextHeaderOffset), } paramProblem := testbench.Layers{ diff --git a/test/packetimpact/tests/ipv6_unknown_options_action_test.go b/test/packetimpact/tests/ipv6_unknown_options_action_test.go index f999d13d2..d762c43a7 100644 --- a/test/packetimpact/tests/ipv6_unknown_options_action_test.go +++ b/test/packetimpact/tests/ipv6_unknown_options_action_test.go @@ -15,7 +15,6 @@ package ipv6_unknown_options_action_test import ( - "encoding/binary" "flag" "net" "testing" @@ -154,23 +153,22 @@ func TestIPv6UnknownOptionAction(t *testing.T) { outgoing := conn.CreateFrame(t, outgoingOverride, tt.mkExtHdr(optionTypeFromAction(tt.action))) conn.SendFrame(t, outgoing) ipv6Sent := outgoing[1:] - invokingPacket, err := ipv6Sent.ToBytes() + icmpv6Payload, err := ipv6Sent.ToBytes() if err != nil { t.Fatalf("failed to serialize the outgoing packet: %s", err) } - icmpv6Payload := make([]byte, 4) - // The pointer in the ICMPv6 parameter problem message should point to - // the option type of the unknown option. In our test case, it is the - // first option in the extension header whose option type is 2 bytes - // after the IPv6 header (after NextHeader and ExtHdrLen). - binary.BigEndian.PutUint32(icmpv6Payload, header.IPv6MinimumSize+2) - icmpv6Payload = append(icmpv6Payload, invokingPacket...) gotICMPv6, err := conn.ExpectFrame(t, testbench.Layers{ &testbench.Ether{}, &testbench.IPv6{}, &testbench.ICMPv6{ - Type: testbench.ICMPv6Type(header.ICMPv6ParamProblem), - Code: testbench.ICMPv6Code(header.ICMPv6UnknownOption), + Type: testbench.ICMPv6Type(header.ICMPv6ParamProblem), + Code: testbench.ICMPv6Code(header.ICMPv6UnknownOption), + // The pointer in the ICMPv6 parameter problem message + // should point to the option type of the unknown option. 
In + // our test case, it is the first option in the extension + // header whose option type is 2 bytes after the IPv6 header + // (after NextHeader and ExtHdrLen). + Pointer: testbench.Uint32(header.IPv6MinimumSize + 2), Payload: icmpv6Payload, }, }, time.Second) diff --git a/test/packetimpact/tests/tcp_connect_icmp_error_test.go b/test/packetimpact/tests/tcp_connect_icmp_error_test.go index 79bfe9eb7..15d603328 100644 --- a/test/packetimpact/tests/tcp_connect_icmp_error_test.go +++ b/test/packetimpact/tests/tcp_connect_icmp_error_test.go @@ -15,9 +15,7 @@ package tcp_connect_icmp_error_test import ( - "context" "flag" - "sync" "testing" "time" @@ -33,17 +31,18 @@ func init() { func sendICMPError(t *testing.T, conn *testbench.TCPIPv4, tcp *testbench.TCP) { t.Helper() - layers := conn.CreateFrame(t, nil) - layers = layers[:len(layers)-1] - ip, ok := tcp.Prev().(*testbench.IPv4) - if !ok { - t.Fatalf("expected %s to be IPv4", tcp.Prev()) + icmpPayload := testbench.Layers{tcp.Prev(), tcp} + bytes, err := icmpPayload.ToBytes() + if err != nil { + t.Fatalf("got icmpPayload.ToBytes() = (_, %s), want = (_, nil)", err) } - icmpErr := &testbench.ICMPv4{ - Type: testbench.ICMPv4Type(header.ICMPv4DstUnreachable), - Code: testbench.ICMPv4Code(header.ICMPv4HostUnreachable)} - layers = append(layers, icmpErr, ip, tcp) + layers := conn.CreateFrame(t, nil) + layers[len(layers)-1] = &testbench.ICMPv4{ + Type: testbench.ICMPv4Type(header.ICMPv4DstUnreachable), + Code: testbench.ICMPv4Code(header.ICMPv4HostUnreachable), + Payload: bytes, + } conn.SendFrameStateless(t, layers) } @@ -65,35 +64,38 @@ func TestTCPConnectICMPError(t *testing.T) { t.Fatalf("expected SYN, %s", err) } - done := make(chan bool) - defer close(done) - var wg sync.WaitGroup - defer wg.Wait() - wg.Add(1) - var block sync.WaitGroup - block.Add(1) + // Continuously try to read the ICMP error in an attempt to trigger a race + // condition. 
+ start := make(chan struct{}) + done := make(chan struct{}) go func() { - defer wg.Done() - _, cancel := context.WithTimeout(context.Background(), time.Second*3) - defer cancel() + defer close(done) - block.Done() + close(start) for { select { case <-done: return default: - if errno := dut.GetSockOptInt(t, clientFD, unix.SOL_SOCKET, unix.SO_ERROR); errno != 0 { - return - } } + const want = unix.EHOSTUNREACH + switch got := unix.Errno(dut.GetSockOptInt(t, clientFD, unix.SOL_SOCKET, unix.SO_ERROR)); got { + case unix.Errno(0): + continue + case want: + return + default: + t.Fatalf("got SO_ERROR = %s, want %s", got, want) + } + } }() - block.Wait() + <-start sendICMPError(t, &conn, tcp) dut.PollOne(t, clientFD, unix.POLLHUP, time.Second) + <-done conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}) // The DUT should reply with RST to our ACK as the state should have diff --git a/test/packetimpact/tests/tcp_info_test.go b/test/packetimpact/tests/tcp_info_test.go index 93f58ec49..5410cc368 100644 --- a/test/packetimpact/tests/tcp_info_test.go +++ b/test/packetimpact/tests/tcp_info_test.go @@ -21,8 +21,6 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" - "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/test/packetimpact/testbench" ) @@ -49,31 +47,31 @@ func TestTCPInfo(t *testing.T) { samplePayload := &testbench.Payload{Bytes: sampleData} dut.Send(t, acceptFD, sampleData, 0) if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil { - t.Fatalf("expected a packet with payload %v: %s", samplePayload, err) + t.Fatalf("expected a packet with payload %s: %s", samplePayload, err) } conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}) - info := linux.TCPInfo{} - infoBytes := dut.GetSockOpt(t, acceptFD, unix.SOL_TCP, unix.TCP_INFO, int32(linux.SizeOfTCPInfo)) - if got, want := len(infoBytes), linux.SizeOfTCPInfo; got != want { - t.Fatalf("expected %T, got %d bytes want %d bytes", info, got, want) + info := dut.GetSockOptTCPInfo(t, acceptFD) + if got, want := uint32(info.State), linux.TCP_ESTABLISHED; got != want { + t.Fatalf("got %d want %d", got, want) } - binary.Unmarshal(infoBytes, hostarch.ByteOrder, &info) - - rtt := time.Duration(info.RTT) * time.Microsecond - rttvar := time.Duration(info.RTTVar) * time.Microsecond - rto := time.Duration(info.RTO) * time.Microsecond - if rtt == 0 || rttvar == 0 || rto == 0 { - t.Errorf("expected rtt(%v), rttvar(%v) and rto(%v) to be greater than zero", rtt, rttvar, rto) + if info.RTT == 0 { + t.Errorf("got RTT=0, want nonzero") + } + if info.RTTVar == 0 { + t.Errorf("got RTTVar=0, want nonzero") + } + if info.RTO == 0 { + t.Errorf("got RTO=0, want nonzero") } if info.ReordSeen != 0 { - t.Errorf("expected the connection to not have any reordering, got: %v want: 0", info.ReordSeen) + t.Errorf("expected the connection to not have any reordering, got: %d want: 0", info.ReordSeen) } if info.SndCwnd == 0 { t.Errorf("expected send congestion window to be greater than zero") } if info.CaState != linux.TCP_CA_Open { - t.Errorf("expected the connection to be in open state, got: %v want: %v", info.CaState, linux.TCP_CA_Open) + t.Errorf("expected the connection to be in open state, got: %d want: %d", info.CaState, linux.TCP_CA_Open) } if t.Failed() { @@ -85,25 +83,20 @@ func TestTCPInfo(t *testing.T) { seq := testbench.Uint32(uint32(*conn.RemoteSeqNum(t))) dut.Send(t, acceptFD, sampleData, 0) if _, err := 
conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil { - t.Fatalf("expected a packet with payload %v: %s", samplePayload, err) + t.Fatalf("expected a packet with payload %s: %s", samplePayload, err) } - // Expect retransmission of the packet within 1.5*RTO. - timeout := time.Duration(float64(info.RTO)*1.5) * time.Microsecond + // Given a generous retransmission timeout. + timeout := time.Duration(info.RTO) * 2 * time.Microsecond if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: seq}, samplePayload, timeout); err != nil { - t.Fatalf("expected a packet with payload %v: %s", samplePayload, err) + t.Fatalf("expected a packet with payload %s: %s", samplePayload, err) } - info = linux.TCPInfo{} - infoBytes = dut.GetSockOpt(t, acceptFD, unix.SOL_TCP, unix.TCP_INFO, int32(linux.SizeOfTCPInfo)) - if got, want := len(infoBytes), linux.SizeOfTCPInfo; got != want { - t.Fatalf("expected %T, got %d bytes want %d bytes", info, got, want) - } - binary.Unmarshal(infoBytes, hostarch.ByteOrder, &info) + info = dut.GetSockOptTCPInfo(t, acceptFD) if info.CaState != linux.TCP_CA_Loss { - t.Errorf("expected the connection to be in loss recovery, got: %v want: %v", info.CaState, linux.TCP_CA_Loss) + t.Errorf("expected the connection to be in loss recovery, got: %d want: %d", info.CaState, linux.TCP_CA_Loss) } if info.SndCwnd != 1 { - t.Errorf("expected send congestion window to be 1, got: %v %v", info.SndCwnd) + t.Errorf("expected send congestion window to be 1, got: %d", info.SndCwnd) } } diff --git a/test/packetimpact/tests/tcp_linger_test.go b/test/packetimpact/tests/tcp_linger_test.go index 88942904d..46b5ca5d8 100644 --- a/test/packetimpact/tests/tcp_linger_test.go +++ b/test/packetimpact/tests/tcp_linger_test.go @@ -98,20 +98,19 @@ func TestTCPLingerNonZeroTimeout(t *testing.T) { dut.SetSockLingerOption(t, acceptFD, lingerDuration, tt.lingerOn) - // Increase timeout as Close will take longer time to - // return when SO_LINGER is set with non-zero timeout. - timeout := lingerDuration + 1*time.Second - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() start := time.Now() - dut.CloseWithErrno(ctx, t, acceptFD) - end := time.Now() - diff := end.Sub(start) - - if tt.lingerOn && diff < lingerDuration { - t.Errorf("expected close to return after %v seconds, but returned sooner", lingerDuration) - } else if !tt.lingerOn && diff > 1*time.Second { - t.Errorf("expected close to return within a second, but returned later") + dut.CloseWithErrno(context.Background(), t, acceptFD) + elapsed := time.Since(start) + + expectedMaximum := time.Second + if tt.lingerOn { + expectedMaximum += lingerDuration + if elapsed < lingerDuration { + t.Errorf("expected close to take at least %s, but took %s", lingerDuration, elapsed) + } + } + if elapsed >= expectedMaximum { + t.Errorf("expected close to take at most %s, but took %s", expectedMaximum, elapsed) } if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil { @@ -144,20 +143,19 @@ func TestTCPLingerSendNonZeroTimeout(t *testing.T) { sampleData := []byte("Sample Data") dut.Send(t, acceptFD, sampleData, 0) - // Increase timeout as Close will take longer time to - // return when SO_LINGER is set with non-zero timeout. 
- timeout := lingerDuration + 1*time.Second - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() start := time.Now() - dut.CloseWithErrno(ctx, t, acceptFD) - end := time.Now() - diff := end.Sub(start) - - if tt.lingerOn && diff < lingerDuration { - t.Errorf("expected close to return after %v seconds, but returned sooner", lingerDuration) - } else if !tt.lingerOn && diff > 1*time.Second { - t.Errorf("expected close to return within a second, but returned later") + dut.CloseWithErrno(context.Background(), t, acceptFD) + elapsed := time.Since(start) + + expectedMaximum := time.Second + if tt.lingerOn { + expectedMaximum += lingerDuration + if elapsed < lingerDuration { + t.Errorf("expected close to take at least %s, but took %s", lingerDuration, elapsed) + } + } + if elapsed >= expectedMaximum { + t.Errorf("expected close to take at most %s, but took %s", expectedMaximum, elapsed) } samplePayload := &testbench.Payload{Bytes: sampleData} @@ -221,20 +219,19 @@ func TestTCPLingerShutdownSendNonZeroTimeout(t *testing.T) { dut.Shutdown(t, acceptFD, unix.SHUT_RDWR) - // Increase timeout as Close will take longer time to - // return when SO_LINGER is set with non-zero timeout. - timeout := lingerDuration + 1*time.Second - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() start := time.Now() - dut.CloseWithErrno(ctx, t, acceptFD) - end := time.Now() - diff := end.Sub(start) - - if tt.lingerOn && diff < lingerDuration { - t.Errorf("expected close to return after %v seconds, but returned sooner", lingerDuration) - } else if !tt.lingerOn && diff > 1*time.Second { - t.Errorf("expected close to return within a second, but returned later") + dut.CloseWithErrno(context.Background(), t, acceptFD) + elapsed := time.Since(start) + + expectedMaximum := time.Second + if tt.lingerOn { + expectedMaximum += lingerDuration + if elapsed < lingerDuration { + t.Errorf("expected close to take at least %s, but took %s", lingerDuration, elapsed) + } + } + if elapsed >= expectedMaximum { + t.Errorf("expected close to take at most %s, but took %s", expectedMaximum, elapsed) } samplePayload := &testbench.Payload{Bytes: sampleData} @@ -259,9 +256,10 @@ func TestTCPLingerNonEstablished(t *testing.T) { // and return immediately. start := time.Now() dut.CloseWithErrno(context.Background(), t, newFD) - diff := time.Since(start) + elapsed := time.Since(start) - if diff > lingerDuration { - t.Errorf("expected close to return within %s, but returned after %s", lingerDuration, diff) + expectedMaximum := time.Second + if elapsed >= time.Second { + t.Errorf("expected close to take at most %s, but took %s", expectedMaximum, elapsed) } } diff --git a/test/packetimpact/tests/tcp_listen_backlog_test.go b/test/packetimpact/tests/tcp_listen_backlog_test.go index 26c812d0a..fea7d5b6f 100644 --- a/test/packetimpact/tests/tcp_listen_backlog_test.go +++ b/test/packetimpact/tests/tcp_listen_backlog_test.go @@ -55,15 +55,23 @@ func TestTCPListenBacklog(t *testing.T) { // Send the ACK to complete handshake. establishedConn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}) + + // Poll for the established connection ready for accept. dut.PollOne(t, listenFd, unix.POLLIN, time.Second) - // Send the ACK to complete handshake, expect this to be ignored by the - // listener. + // Send the ACK to complete handshake, expect this to be dropped by the + // listener as the accept queue would be full because of the previous + // handshake. 
incompleteConn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}) + // Let the test wait for sometime so that the ACK is indeed dropped by + // the listener. Without such a wait, the DUT accept can race with + // ACK handling (dropping) causing the test to be flaky. + time.Sleep(100 * time.Millisecond) // Drain the accept queue to enable poll for subsequent connections on the // listener. - dut.Accept(t, listenFd) + fd, _ := dut.Accept(t, listenFd) + dut.Close(t, fd) // The ACK for the incomplete connection should be ignored by the // listening endpoint and the poll on listener should now time out. diff --git a/test/packetimpact/tests/tcp_network_unreachable_test.go b/test/packetimpact/tests/tcp_network_unreachable_test.go index 5168450ad..e92e6aa9b 100644 --- a/test/packetimpact/tests/tcp_network_unreachable_test.go +++ b/test/packetimpact/tests/tcp_network_unreachable_test.go @@ -41,39 +41,31 @@ func TestTCPSynSentUnreachable(t *testing.T) { defer conn.Close(t) // Bring the DUT to SYN-SENT state with a non-blocking connect. - ctx, cancel := context.WithTimeout(context.Background(), testbench.RPCTimeout) - defer cancel() sa := unix.SockaddrInet4{Port: int(port)} copy(sa.Addr[:], dut.Net.LocalIPv4) - if _, err := dut.ConnectWithErrno(ctx, t, clientFD, &sa); err != unix.EINPROGRESS { + if _, err := dut.ConnectWithErrno(context.Background(), t, clientFD, &sa); err != unix.EINPROGRESS { t.Errorf("got connect() = %v, want EINPROGRESS", err) } // Get the SYN. - tcpLayers, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, nil, time.Second) + tcp, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, time.Second) if err != nil { t.Fatalf("expected SYN: %s", err) } // Send a host unreachable message. - layers := conn.CreateFrame(t, nil) - layers = layers[:len(layers)-1] - const ipLayer = 1 - const tcpLayer = ipLayer + 1 - ip, ok := tcpLayers[ipLayer].(*testbench.IPv4) - if !ok { - t.Fatalf("expected %s to be IPv4", tcpLayers[ipLayer]) - } - tcp, ok := tcpLayers[tcpLayer].(*testbench.TCP) - if !ok { - t.Fatalf("expected %s to be TCP", tcpLayers[tcpLayer]) - } - var icmpv4 testbench.ICMPv4 = testbench.ICMPv4{ - Type: testbench.ICMPv4Type(header.ICMPv4DstUnreachable), - Code: testbench.ICMPv4Code(header.ICMPv4HostUnreachable), + icmpPayload := testbench.Layers{tcp.Prev(), tcp} + bytes, err := icmpPayload.ToBytes() + if err != nil { + t.Fatalf("got icmpPayload.ToBytes() = (_, %s), want = (_, nil)", err) } - layers = append(layers, &icmpv4, ip, tcp) + layers := conn.CreateFrame(t, nil) + layers[len(layers)-1] = &testbench.ICMPv4{ + Type: testbench.ICMPv4Type(header.ICMPv4DstUnreachable), + Code: testbench.ICMPv4Code(header.ICMPv4HostUnreachable), + Payload: bytes, + } conn.SendFrameStateless(t, layers) if err := getConnectError(t, &dut, clientFD); err != unix.EHOSTUNREACH { @@ -92,43 +84,34 @@ func TestTCPSynSentUnreachable6(t *testing.T) { defer conn.Close(t) // Bring the DUT to SYN-SENT state with a non-blocking connect. - ctx, cancel := context.WithTimeout(context.Background(), testbench.RPCTimeout) - defer cancel() sa := unix.SockaddrInet6{ Port: int(conn.SrcPort()), ZoneId: dut.Net.RemoteDevID, } copy(sa.Addr[:], dut.Net.LocalIPv6) - if _, err := dut.ConnectWithErrno(ctx, t, clientFD, &sa); err != unix.EINPROGRESS { + if _, err := dut.ConnectWithErrno(context.Background(), t, clientFD, &sa); err != unix.EINPROGRESS { t.Errorf("got connect() = %v, want EINPROGRESS", err) } // Get the SYN. 
- tcpLayers, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, nil, time.Second) + tcp, err := conn.Expect(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, time.Second) if err != nil { t.Fatalf("expected SYN: %s", err) } // Send a host unreachable message. - layers := conn.CreateFrame(t, nil) - layers = layers[:len(layers)-1] - const ipLayer = 1 - const tcpLayer = ipLayer + 1 - ip, ok := tcpLayers[ipLayer].(*testbench.IPv6) - if !ok { - t.Fatalf("expected %s to be IPv6", tcpLayers[ipLayer]) - } - tcp, ok := tcpLayers[tcpLayer].(*testbench.TCP) - if !ok { - t.Fatalf("expected %s to be TCP", tcpLayers[tcpLayer]) + icmpPayload := testbench.Layers{tcp.Prev(), tcp} + bytes, err := icmpPayload.ToBytes() + if err != nil { + t.Fatalf("got icmpPayload.ToBytes() = (_, %s), want = (_, nil)", err) } - var icmpv6 testbench.ICMPv6 = testbench.ICMPv6{ - Type: testbench.ICMPv6Type(header.ICMPv6DstUnreachable), - Code: testbench.ICMPv6Code(header.ICMPv6NetworkUnreachable), - // Per RFC 4443 3.1, the payload contains 4 zeroed bytes. - Payload: []byte{0, 0, 0, 0}, + + layers := conn.CreateFrame(t, nil) + layers[len(layers)-1] = &testbench.ICMPv6{ + Type: testbench.ICMPv6Type(header.ICMPv6DstUnreachable), + Code: testbench.ICMPv6Code(header.ICMPv6NetworkUnreachable), + Payload: bytes, } - layers = append(layers, &icmpv6, ip, tcp) conn.SendFrameStateless(t, layers) if err := getConnectError(t, &dut, clientFD); err != unix.ENETUNREACH { diff --git a/test/packetimpact/tests/tcp_queue_send_recv_in_syn_sent_test.go b/test/packetimpact/tests/tcp_queue_send_recv_in_syn_sent_test.go index 1c8b72ebe..974c15384 100644 --- a/test/packetimpact/tests/tcp_queue_send_recv_in_syn_sent_test.go +++ b/test/packetimpact/tests/tcp_queue_send_recv_in_syn_sent_test.go @@ -20,7 +20,6 @@ import ( "encoding/hex" "errors" "flag" - "sync" "testing" "time" @@ -54,37 +53,39 @@ func TestQueueSendInSynSentHandshake(t *testing.T) { // Test blocking send. dut.SetNonBlocking(t, socket, false) - var wg sync.WaitGroup - defer wg.Wait() - wg.Add(1) - var block sync.WaitGroup - block.Add(1) + start := make(chan struct{}) + done := make(chan struct{}) go func() { - defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) - defer cancel() + defer close(done) - block.Done() + close(start) // Issue SEND call in SYN-SENT, this should be queued for // process until the connection is established. - n, err := dut.SendWithErrno(ctx, t, socket, sampleData, 0) - if n == -1 { + if _, err := dut.SendWithErrno(context.Background(), t, socket, sampleData, 0); err != unix.Errno(0) { t.Errorf("failed to send on DUT: %s", err) - return } }() // Wait for the goroutine to be scheduled and before it // blocks on endpoint send/receive. - block.Wait() + <-start // The following sleep is used to prevent the connection // from being established before we are blocked: there is // still a small time window between we sending the RPC // request and the system actually being blocked. time.Sleep(100 * time.Millisecond) + select { + case <-done: + t.Fatal("expected send to be blocked in SYN-SENT") + default: + } + // Bring the connection to Established. conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}) + + <-done + // Expect the data from the DUT's enqueued send request. // // On Linux, this can be piggybacked with the ACK completing the @@ -126,21 +127,16 @@ func TestQueueRecvInSynSentHandshake(t *testing.T) { // Test blocking read. 
dut.SetNonBlocking(t, socket, false) - var wg sync.WaitGroup - defer wg.Wait() - wg.Add(1) - var block sync.WaitGroup - block.Add(1) + start := make(chan struct{}) + done := make(chan struct{}) go func() { - defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) - defer cancel() + defer close(done) - block.Done() + close(start) // Issue RECEIVE call in SYN-SENT, this should be queued for // process until the connection is established. - n, buff, err := dut.RecvWithErrno(ctx, t, socket, int32(len(sampleData)), 0) - if n == -1 { + n, buff, err := dut.RecvWithErrno(context.Background(), t, socket, int32(len(sampleData)), 0) + if err != unix.Errno(0) { t.Errorf("failed to recv on DUT: %s", err) return } @@ -151,7 +147,8 @@ func TestQueueRecvInSynSentHandshake(t *testing.T) { // Wait for the goroutine to be scheduled and before it // blocks on endpoint send/receive. - block.Wait() + <-start + // The following sleep is used to prevent the connection // from being established before we are blocked: there is // still a small time window between we sending the RPC @@ -169,6 +166,8 @@ func TestQueueRecvInSynSentHandshake(t *testing.T) { if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil { t.Fatalf("expected an ACK from DUT, but got none: %s", err) } + + <-done } // TestQueueSendInSynSentRST tests send behavior when the TCP state @@ -192,20 +191,15 @@ func TestQueueSendInSynSentRST(t *testing.T) { // Test blocking send. dut.SetNonBlocking(t, socket, false) - var wg sync.WaitGroup - defer wg.Wait() - wg.Add(1) - var block sync.WaitGroup - block.Add(1) + start := make(chan struct{}) + done := make(chan struct{}) go func() { - defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) - defer cancel() + defer close(done) - block.Done() + close(start) // Issue SEND call in SYN-SENT, this should be queued for // process until the connection is established. - n, err := dut.SendWithErrno(ctx, t, socket, sampleData, 0) + n, err := dut.SendWithErrno(context.Background(), t, socket, sampleData, 0) if err != unix.ECONNREFUSED { t.Errorf("expected error %s, got %s", unix.ECONNREFUSED, err) } @@ -216,14 +210,23 @@ func TestQueueSendInSynSentRST(t *testing.T) { // Wait for the goroutine to be scheduled and before it // blocks on endpoint send/receive. - block.Wait() + <-start + // The following sleep is used to prevent the connection // from being established before we are blocked: there is // still a small time window between we sending the RPC // request and the system actually being blocked. time.Sleep(100 * time.Millisecond) + select { + case <-done: + t.Fatal("expected send to be blocked in SYN-SENT") + default: + } + conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)}) + + <-done } // TestQueueRecvInSynSentRST tests recv behavior when the TCP state @@ -251,20 +254,15 @@ func TestQueueRecvInSynSentRST(t *testing.T) { // Test blocking read. dut.SetNonBlocking(t, socket, false) - var wg sync.WaitGroup - defer wg.Wait() - wg.Add(1) - var block sync.WaitGroup - block.Add(1) + start := make(chan struct{}) + done := make(chan struct{}) go func() { - defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) - defer cancel() + defer close(done) - block.Done() + close(start) // Issue RECEIVE call in SYN-SENT, this should be queued for // process until the connection is established. 
- n, _, err := dut.RecvWithErrno(ctx, t, socket, int32(len(sampleData)), 0) + n, _, err := dut.RecvWithErrno(context.Background(), t, socket, int32(len(sampleData)), 0) if err != unix.ECONNREFUSED { t.Errorf("expected error %s, got %s", unix.ECONNREFUSED, err) } @@ -275,7 +273,8 @@ func TestQueueRecvInSynSentRST(t *testing.T) { // Wait for the goroutine to be scheduled and before it // blocks on endpoint send/receive. - block.Wait() + <-start + // The following sleep is used to prevent the connection // from being established before we are blocked: there is // still a small time window between we sending the RPC @@ -283,4 +282,5 @@ func TestQueueRecvInSynSentRST(t *testing.T) { time.Sleep(100 * time.Millisecond) conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)}) + <-done } diff --git a/test/packetimpact/tests/tcp_rack_test.go b/test/packetimpact/tests/tcp_rack_test.go index ff1431bbf..5a60bf712 100644 --- a/test/packetimpact/tests/tcp_rack_test.go +++ b/test/packetimpact/tests/tcp_rack_test.go @@ -21,8 +21,6 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" - "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/seqnum" "gvisor.dev/gvisor/test/packetimpact/testbench" @@ -69,12 +67,7 @@ func closeSACKConnection(t *testing.T, dut testbench.DUT, conn testbench.TCPIPv4 } func getRTTAndRTO(t *testing.T, dut testbench.DUT, acceptFd int32) (rtt, rto time.Duration) { - info := linux.TCPInfo{} - infoBytes := dut.GetSockOpt(t, acceptFd, unix.SOL_TCP, unix.TCP_INFO, int32(linux.SizeOfTCPInfo)) - if got, want := len(infoBytes), linux.SizeOfTCPInfo; got != want { - t.Fatalf("expected %T, got %d bytes want %d bytes", info, got, want) - } - binary.Unmarshal(infoBytes, hostarch.ByteOrder, &info) + info := dut.GetSockOptTCPInfo(t, acceptFd) return time.Duration(info.RTT) * time.Microsecond, time.Duration(info.RTO) * time.Microsecond } @@ -402,12 +395,7 @@ func TestRACKWithLostRetransmission(t *testing.T) { } // Check the congestion control state. 
- info := linux.TCPInfo{} - infoBytes := dut.GetSockOpt(t, acceptFd, unix.SOL_TCP, unix.TCP_INFO, int32(linux.SizeOfTCPInfo)) - if got, want := len(infoBytes), linux.SizeOfTCPInfo; got != want { - t.Fatalf("expected %T, got %d bytes want %d bytes", info, got, want) - } - binary.Unmarshal(infoBytes, hostarch.ByteOrder, &info) + info := dut.GetSockOptTCPInfo(t, acceptFd) if info.CaState != linux.TCP_CA_Recovery { t.Fatalf("expected connection to be in fast recovery, want: %v got: %v", linux.TCP_CA_Recovery, info.CaState) } diff --git a/test/packetimpact/tests/tcp_retransmits_test.go b/test/packetimpact/tests/tcp_retransmits_test.go index 1eafe20c3..d3fb789f4 100644 --- a/test/packetimpact/tests/tcp_retransmits_test.go +++ b/test/packetimpact/tests/tcp_retransmits_test.go @@ -21,9 +21,6 @@ import ( "time" "golang.org/x/sys/unix" - "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" - "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/test/packetimpact/testbench" ) @@ -33,12 +30,7 @@ func init() { } func getRTO(t *testing.T, dut testbench.DUT, acceptFd int32) (rto time.Duration) { - info := linux.TCPInfo{} - infoBytes := dut.GetSockOpt(t, acceptFd, unix.SOL_TCP, unix.TCP_INFO, int32(linux.SizeOfTCPInfo)) - if got, want := len(infoBytes), linux.SizeOfTCPInfo; got != want { - t.Fatalf("unexpected size for TCP_INFO, got %d bytes want %d bytes", got, want) - } - binary.Unmarshal(infoBytes, hostarch.ByteOrder, &info) + info := dut.GetSockOptTCPInfo(t, acceptFd) return time.Duration(info.RTO) * time.Microsecond } diff --git a/test/packetimpact/tests/tcp_syncookie_test.go b/test/packetimpact/tests/tcp_syncookie_test.go index 1c21c62ff..6be09996b 100644 --- a/test/packetimpact/tests/tcp_syncookie_test.go +++ b/test/packetimpact/tests/tcp_syncookie_test.go @@ -16,9 +16,12 @@ package tcp_syncookie_test import ( "flag" + "fmt" + "math" "testing" "time" + "github.com/google/go-cmp/cmp" "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/test/packetimpact/testbench" @@ -28,43 +31,121 @@ func init() { testbench.Initialize(flag.CommandLine) } -// TestSynCookie test if the DUT listener is replying back using syn cookies. -// The test does not complete the handshake by not sending the ACK to SYNACK. -// When syncookies are not used, this forces the listener to retransmit SYNACK. -// And when syncookies are being used, there is no such retransmit. +// TestTCPSynCookie tests for ACK handling for connections in SYNRCVD state +// connections with and without syncookies. It verifies if the passive open +// connection is indeed using syncookies before proceeding. func TestTCPSynCookie(t *testing.T) { dut := testbench.NewDUT(t) + for _, tt := range []struct { + accept bool + flags header.TCPFlags + }{ + {accept: true, flags: header.TCPFlagAck}, + {accept: true, flags: header.TCPFlagAck | header.TCPFlagPsh}, + {accept: false, flags: header.TCPFlagAck | header.TCPFlagSyn}, + {accept: true, flags: header.TCPFlagAck | header.TCPFlagFin}, + {accept: false, flags: header.TCPFlagAck | header.TCPFlagRst}, + {accept: false, flags: header.TCPFlagRst}, + } { + t.Run(fmt.Sprintf("flags=%s", tt.flags), func(t *testing.T) { + // Make a copy before parallelizing the test and refer to that + // within the test. Otherwise, the test reference could be pointing + // to an incorrect variant based on how it is scheduled. + test := tt - // Listening endpoint accepts one more connection than the listen backlog. 
- _, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/) + t.Parallel() - var withoutSynCookieConn testbench.TCPIPv4 - var withSynCookieConn testbench.TCPIPv4 + // Listening endpoint accepts one more connection than the listen + // backlog. Listener starts using syncookies when it sees a new SYN + // and has backlog size of connections in SYNRCVD state. Keep the + // listen backlog 1, so that the test can define 2 connections + // without and with using syncookies. + listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/) + defer dut.Close(t, listenFD) - // Test if the DUT listener replies to more SYNs than listen backlog+1 - for _, conn := range []*testbench.TCPIPv4{&withoutSynCookieConn, &withSynCookieConn} { - *conn = dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort}) - } - defer withoutSynCookieConn.Close(t) - defer withSynCookieConn.Close(t) + var withoutSynCookieConn testbench.TCPIPv4 + var withSynCookieConn testbench.TCPIPv4 - checkSynAck := func(t *testing.T, conn *testbench.TCPIPv4, expectRetransmit bool) { - // Expect dut connection to have transitioned to SYN-RCVD state. - conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}) - if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil { - t.Fatalf("expected SYN-ACK, but got %s", err) - } + for _, conn := range []*testbench.TCPIPv4{&withoutSynCookieConn, &withSynCookieConn} { + *conn = dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort}) + } + defer withoutSynCookieConn.Close(t) + defer withSynCookieConn.Close(t) - // If the DUT listener is using syn cookies, it will not retransmit SYNACK - got, err := conn.ExpectData(t, &testbench.TCP{SeqNum: testbench.Uint32(uint32(*conn.RemoteSeqNum(t) - 1)), Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, 2*time.Second) - if expectRetransmit && err != nil { - t.Fatalf("expected retransmitted SYN-ACK, but got %s", err) - } - if !expectRetransmit && err == nil { - t.Fatalf("expected no retransmitted SYN-ACK, but got %s", got) - } - } + // Setup the 2 connections in SYNRCVD state and verify if one of the + // connection is indeed using syncookies by checking for absence of + // SYNACK retransmits. + for _, c := range []struct { + desc string + conn *testbench.TCPIPv4 + expectRetransmit bool + }{ + {desc: "without syncookies", conn: &withoutSynCookieConn, expectRetransmit: true}, + {desc: "with syncookies", conn: &withSynCookieConn, expectRetransmit: false}, + } { + t.Run(c.desc, func(t *testing.T) { + // Expect dut connection to have transitioned to SYNRCVD state. + c.conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}) + if _, err := c.conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil { + t.Fatalf("expected SYNACK, but got %s", err) + } + + // If the DUT listener is using syn cookies, it will not retransmit SYNACK. 
+ got, err := c.conn.ExpectData(t, &testbench.TCP{SeqNum: testbench.Uint32(uint32(*c.conn.RemoteSeqNum(t) - 1)), Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, 2*time.Second) + if c.expectRetransmit && err != nil { + t.Fatalf("expected retransmitted SYNACK, but got %s", err) + } + if !c.expectRetransmit && err == nil { + t.Fatalf("expected no retransmitted SYNACK, but got %s", got) + } + }) + } - t.Run("without syncookies", func(t *testing.T) { checkSynAck(t, &withoutSynCookieConn, true /*expectRetransmit*/) }) - t.Run("with syncookies", func(t *testing.T) { checkSynAck(t, &withSynCookieConn, false /*expectRetransmit*/) }) + // Check whether ACKs with the given flags completes the handshake. + for _, c := range []struct { + desc string + conn *testbench.TCPIPv4 + }{ + {desc: "with syncookies", conn: &withSynCookieConn}, + {desc: "without syncookies", conn: &withoutSynCookieConn}, + } { + t.Run(c.desc, func(t *testing.T) { + pfds := dut.Poll(t, []unix.PollFd{{Fd: listenFD, Events: math.MaxInt16}}, 0 /*timeout*/) + if got, want := len(pfds), 0; got != want { + t.Fatalf("dut.Poll(...) = %d, want = %d", got, want) + } + + sampleData := []byte("Sample Data") + samplePayload := &testbench.Payload{Bytes: sampleData} + + c.conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(test.flags)}, samplePayload) + pfds = dut.Poll(t, []unix.PollFd{{Fd: listenFD, Events: unix.POLLIN}}, time.Second) + want := 0 + if test.accept { + want = 1 + } + if got := len(pfds); got != want { + t.Fatalf("got dut.Poll(...) = %d, want = %d", got, want) + } + // Accept the connection to enable poll on any subsequent connection. + if test.accept { + fd, _ := dut.Accept(t, listenFD) + if test.flags.Contains(header.TCPFlagFin) { + if dut.Uname.IsLinux() { + dut.PollOne(t, fd, unix.POLLIN|unix.POLLRDHUP, time.Second) + } else { + // TODO(gvisor.dev/issue/6015): Notify POLLIN|POLLRDHUP on incoming FIN. + dut.PollOne(t, fd, unix.POLLIN, time.Second) + } + } + got := dut.Recv(t, fd, int32(len(sampleData)), 0) + if diff := cmp.Diff(got, sampleData); diff != "" { + t.Fatalf("dut.Recv: data mismatch (-want +got):\n%s", diff) + } + dut.Close(t, fd) + } + }) + } + }) + } } diff --git a/test/packetimpact/tests/tcp_synsent_reset_test.go b/test/packetimpact/tests/tcp_synsent_reset_test.go index cccb0abc6..fe53e7061 100644 --- a/test/packetimpact/tests/tcp_synsent_reset_test.go +++ b/test/packetimpact/tests/tcp_synsent_reset_test.go @@ -20,6 +20,7 @@ import ( "time" "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/test/packetimpact/testbench" ) @@ -29,7 +30,7 @@ func init() { } // dutSynSentState sets up the dut connection in SYN-SENT state. -func dutSynSentState(t *testing.T) (*testbench.DUT, *testbench.TCPIPv4, uint16, uint16) { +func dutSynSentState(t *testing.T) (*testbench.DUT, *testbench.TCPIPv4, int32, uint16, uint16) { t.Helper() dut := testbench.NewDUT(t) @@ -46,26 +47,29 @@ func dutSynSentState(t *testing.T) (*testbench.DUT, *testbench.TCPIPv4, uint16, t.Fatalf("expected SYN\n") } - return &dut, &conn, port, clientPort + return &dut, &conn, clientFD, port, clientPort } // TestTCPSynSentReset tests RFC793, p67: SYN-SENT to CLOSED transition. func TestTCPSynSentReset(t *testing.T) { - _, conn, _, _ := dutSynSentState(t) + dut, conn, fd, _, _ := dutSynSentState(t) defer conn.Close(t) conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)}) // Expect the connection to have closed. 
- // TODO(gvisor.dev/issue/478): Check for TCP_INFO on the dut side. conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}) if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, nil, time.Second); err != nil { t.Fatalf("expected a TCP RST") } + info := dut.GetSockOptTCPInfo(t, fd) + if got, want := uint32(info.State), linux.TCP_CLOSE; got != want { + t.Fatalf("got %d want %d", got, want) + } } // TestTCPSynSentRcvdReset tests RFC793, p70, SYN-SENT to SYN-RCVD to CLOSED // transitions. func TestTCPSynSentRcvdReset(t *testing.T) { - dut, c, remotePort, clientPort := dutSynSentState(t) + dut, c, fd, remotePort, clientPort := dutSynSentState(t) defer c.Close(t) conn := dut.Net.NewTCPIPv4(t, testbench.TCP{SrcPort: &remotePort, DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort, DstPort: &remotePort}) @@ -79,9 +83,12 @@ func TestTCPSynSentRcvdReset(t *testing.T) { } conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}) // Expect the connection to have transitioned SYN-RCVD to CLOSED. - // TODO(gvisor.dev/issue/478): Check for TCP_INFO on the dut side. conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}) if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, nil, time.Second); err != nil { t.Fatalf("expected a TCP RST") } + info := dut.GetSockOptTCPInfo(t, fd) + if got, want := uint32(info.State), linux.TCP_CLOSE; got != want { + t.Fatalf("got %d want %d", got, want) + } } diff --git a/test/packetimpact/tests/udp_icmp_error_propagation_test.go b/test/packetimpact/tests/udp_icmp_error_propagation_test.go index 3159d5b89..bb33ca4b3 100644 --- a/test/packetimpact/tests/udp_icmp_error_propagation_test.go +++ b/test/packetimpact/tests/udp_icmp_error_propagation_test.go @@ -58,16 +58,20 @@ func (e icmpError) String() string { return "Unknown ICMP error" } -func (e icmpError) ToICMPv4() *testbench.ICMPv4 { +func (e icmpError) ToICMPv4(payload []byte) *testbench.ICMPv4 { switch e { case portUnreachable: return &testbench.ICMPv4{ - Type: testbench.ICMPv4Type(header.ICMPv4DstUnreachable), - Code: testbench.ICMPv4Code(header.ICMPv4PortUnreachable)} + Type: testbench.ICMPv4Type(header.ICMPv4DstUnreachable), + Code: testbench.ICMPv4Code(header.ICMPv4PortUnreachable), + Payload: payload, + } case timeToLiveExceeded: return &testbench.ICMPv4{ - Type: testbench.ICMPv4Type(header.ICMPv4TimeExceeded), - Code: testbench.ICMPv4Code(header.ICMPv4TTLExceeded)} + Type: testbench.ICMPv4Type(header.ICMPv4TimeExceeded), + Code: testbench.ICMPv4Code(header.ICMPv4TTLExceeded), + Payload: payload, + } } return nil } @@ -101,8 +105,6 @@ func wantErrno(c connectionMode, icmpErr icmpError) unix.Errno { func sendICMPError(t *testing.T, conn *testbench.UDPIPv4, icmpErr icmpError, udp *testbench.UDP) { t.Helper() - layers := conn.CreateFrame(t, nil) - layers = layers[:len(layers)-1] ip, ok := udp.Prev().(*testbench.IPv4) if !ok { t.Fatalf("expected %s to be IPv4", udp.Prev()) @@ -113,12 +115,15 @@ func sendICMPError(t *testing.T, conn *testbench.UDPIPv4, icmpErr icmpError, udp // to 1. ip.Checksum = nil } - // Note that the ICMP payload is valid in this case because the UDP - // payload is empty. If the UDP payload were not empty, the packet - // length during serialization may not be calculated correctly, - // resulting in a mal-formed packet. 
- layers = append(layers, icmpErr.ToICMPv4(), ip, udp) + icmpPayload := testbench.Layers{ip, udp} + bytes, err := icmpPayload.ToBytes() + if err != nil { + t.Fatalf("got icmpPayload.ToBytes() = (_, %s), want = (_, nil)", err) + } + + layers := conn.CreateFrame(t, nil) + layers[len(layers)-1] = icmpErr.ToICMPv4(bytes) conn.SendFrameStateless(t, layers) } @@ -136,8 +141,6 @@ func testRecv(ctx context.Context, t *testing.T, d testData) { d.conn.Send(t, testbench.UDP{}) if d.wantErrno != unix.Errno(0) { - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() ret, _, err := d.dut.RecvWithErrno(ctx, t, d.remoteFD, 100, 0) if ret != -1 { t.Fatalf("recv after ICMP error succeeded unexpectedly, expected (%[1]d) %[1]v", d.wantErrno) @@ -162,8 +165,6 @@ func testSendTo(ctx context.Context, t *testing.T, d testData) { } if d.wantErrno != unix.Errno(0) { - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() ret, err := d.dut.SendToWithErrno(ctx, t, d.remoteFD, nil, 0, d.conn.LocalAddr(t)) if ret != -1 { @@ -310,10 +311,7 @@ func TestICMPErrorDuringUDPRecv(t *testing.T) { defer wg.Done() if wantErrno != unix.Errno(0) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - ret, _, err := dut.RecvWithErrno(ctx, t, remoteFD, 100, 0) + ret, _, err := dut.RecvWithErrno(context.Background(), t, remoteFD, 100, 0) if ret != -1 { t.Errorf("recv during ICMP error succeeded unexpectedly, expected (%[1]d) %[1]v", wantErrno) return @@ -324,10 +322,7 @@ func TestICMPErrorDuringUDPRecv(t *testing.T) { } } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - if ret, _, err := dut.RecvWithErrno(ctx, t, remoteFD, 100, 0); ret == -1 { + if ret, _, err := dut.RecvWithErrno(context.Background(), t, remoteFD, 100, 0); ret == -1 { t.Errorf("recv after ICMP error failed with (%[1]d) %[1]", err) } }() @@ -335,10 +330,7 @@ func TestICMPErrorDuringUDPRecv(t *testing.T) { go func() { defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - if ret, _, err := dut.RecvWithErrno(ctx, t, cleanFD, 100, 0); ret == -1 { + if ret, _, err := dut.RecvWithErrno(context.Background(), t, cleanFD, 100, 0); ret == -1 { t.Errorf("recv on clean socket failed with (%[1]d) %[1]", err) } }() diff --git a/test/packetimpact/tests/udp_send_recv_dgram_test.go b/test/packetimpact/tests/udp_send_recv_dgram_test.go deleted file mode 100644 index 230b012c7..000000000 --- a/test/packetimpact/tests/udp_send_recv_dgram_test.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright 2020 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package udp_send_recv_dgram_test - -import ( - "context" - "flag" - "fmt" - "net" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "golang.org/x/sys/unix" - "gvisor.dev/gvisor/pkg/tcpip" - "gvisor.dev/gvisor/pkg/tcpip/header" - "gvisor.dev/gvisor/test/packetimpact/testbench" -) - -func init() { - testbench.Initialize(flag.CommandLine) - testbench.RPCTimeout = 500 * time.Millisecond -} - -type udpConn interface { - SrcPort(*testing.T) uint16 - SendFrame(*testing.T, testbench.Layers, ...testbench.Layer) - ExpectFrame(*testing.T, testbench.Layers, time.Duration) (testbench.Layers, error) - Close(*testing.T) -} - -type testCase struct { - bindTo, sendTo net.IP - sendToBroadcast, bindToDevice, expectData bool -} - -func TestUDP(t *testing.T) { - dut := testbench.NewDUT(t) - subnetBcast := func() net.IP { - subnet := (&tcpip.AddressWithPrefix{ - Address: tcpip.Address(dut.Net.RemoteIPv4.To4()), - PrefixLen: dut.Net.IPv4PrefixLength, - }).Subnet() - return net.IP(subnet.Broadcast()) - }() - - t.Run("Send", func(t *testing.T) { - var testCases []testCase - // Test every valid combination of bound/unbound, broadcast/multicast/unicast - // bound/destination address, and bound/not-bound to device. - for _, bindTo := range []net.IP{ - nil, // Do not bind. - net.IPv4zero, - net.IPv4bcast, - net.IPv4allsys, - subnetBcast, - dut.Net.RemoteIPv4, - dut.Net.RemoteIPv6, - } { - for _, sendTo := range []net.IP{ - net.IPv4bcast, - net.IPv4allsys, - subnetBcast, - dut.Net.LocalIPv4, - dut.Net.LocalIPv6, - } { - // Cannot send to an IPv4 address from a socket bound to IPv6 (except for IPv4-mapped IPv6), - // and viceversa. - if bindTo != nil && ((bindTo.To4() == nil) != (sendTo.To4() == nil)) { - continue - } - for _, bindToDevice := range []bool{true, false} { - expectData := true - switch { - case bindTo.Equal(dut.Net.RemoteIPv4): - // If we're explicitly bound to an interface's unicast address, - // packets are always sent on that interface. - case bindToDevice: - // If we're explicitly bound to an interface, packets are always - // sent on that interface. - case !sendTo.Equal(net.IPv4bcast) && !sendTo.IsMulticast(): - // If we're not sending to limited broadcast or multicast, the route table - // will be consulted and packets will be sent on the correct interface. 
- default: - expectData = false - } - testCases = append( - testCases, - testCase{ - bindTo: bindTo, - sendTo: sendTo, - sendToBroadcast: sendTo.Equal(subnetBcast) || sendTo.Equal(net.IPv4bcast), - bindToDevice: bindToDevice, - expectData: expectData, - }, - ) - } - } - } - for _, tc := range testCases { - boundTestCaseName := "unbound" - if tc.bindTo != nil { - boundTestCaseName = fmt.Sprintf("bindTo=%s", tc.bindTo) - } - t.Run(fmt.Sprintf("%s/sendTo=%s/bindToDevice=%t/expectData=%t", boundTestCaseName, tc.sendTo, tc.bindToDevice, tc.expectData), func(t *testing.T) { - runTestCase( - t, - dut, - tc, - func(t *testing.T, dut testbench.DUT, conn udpConn, socketFD int32, tc testCase, payload []byte, layers testbench.Layers) { - var destSockaddr unix.Sockaddr - if sendTo4 := tc.sendTo.To4(); sendTo4 != nil { - addr := unix.SockaddrInet4{ - Port: int(conn.SrcPort(t)), - } - copy(addr.Addr[:], sendTo4) - destSockaddr = &addr - } else { - addr := unix.SockaddrInet6{ - Port: int(conn.SrcPort(t)), - ZoneId: dut.Net.RemoteDevID, - } - copy(addr.Addr[:], tc.sendTo.To16()) - destSockaddr = &addr - } - if got, want := dut.SendTo(t, socketFD, payload, 0, destSockaddr), len(payload); int(got) != want { - t.Fatalf("got dut.SendTo = %d, want %d", got, want) - } - layers = append(layers, &testbench.Payload{ - Bytes: payload, - }) - _, err := conn.ExpectFrame(t, layers, time.Second) - - if !tc.expectData && err == nil { - t.Fatal("received unexpected packet, socket is not bound to device") - } - if err != nil && tc.expectData { - t.Fatal(err) - } - }, - ) - }) - } - }) - t.Run("Recv", func(t *testing.T) { - // Test every valid combination of broadcast/multicast/unicast - // bound/destination address, and bound/not-bound to device. - var testCases []testCase - for _, addr := range []net.IP{ - net.IPv4bcast, - net.IPv4allsys, - dut.Net.RemoteIPv4, - dut.Net.RemoteIPv6, - } { - for _, bindToDevice := range []bool{true, false} { - testCases = append( - testCases, - testCase{ - bindTo: addr, - sendTo: addr, - sendToBroadcast: addr.Equal(subnetBcast) || addr.Equal(net.IPv4bcast), - bindToDevice: bindToDevice, - expectData: true, - }, - ) - } - } - for _, bindTo := range []net.IP{ - net.IPv4zero, - subnetBcast, - dut.Net.RemoteIPv4, - } { - for _, sendTo := range []net.IP{ - subnetBcast, - net.IPv4bcast, - net.IPv4allsys, - } { - // TODO(gvisor.dev/issue/4896): Add bindTo=subnetBcast/sendTo=IPv4bcast - // and bindTo=subnetBcast/sendTo=IPv4allsys test cases. - if bindTo.Equal(subnetBcast) && (sendTo.Equal(net.IPv4bcast) || sendTo.IsMulticast()) { - continue - } - // Expect that a socket bound to a unicast address does not receive - // packets sent to an address other than the bound unicast address. - // - // Note: we cannot use net.IP.IsGlobalUnicast to test this condition - // because IsGlobalUnicast does not check whether the address is the - // subnet broadcast, and returns true in that case. 
- expectData := !bindTo.Equal(dut.Net.RemoteIPv4) || sendTo.Equal(dut.Net.RemoteIPv4) - for _, bindToDevice := range []bool{true, false} { - testCases = append( - testCases, - testCase{ - bindTo: bindTo, - sendTo: sendTo, - sendToBroadcast: sendTo.Equal(subnetBcast) || sendTo.Equal(net.IPv4bcast), - bindToDevice: bindToDevice, - expectData: expectData, - }, - ) - } - } - } - for _, tc := range testCases { - t.Run(fmt.Sprintf("bindTo=%s/sendTo=%s/bindToDevice=%t/expectData=%t", tc.bindTo, tc.sendTo, tc.bindToDevice, tc.expectData), func(t *testing.T) { - runTestCase( - t, - dut, - tc, - func(t *testing.T, dut testbench.DUT, conn udpConn, socketFD int32, tc testCase, payload []byte, layers testbench.Layers) { - conn.SendFrame(t, layers, &testbench.Payload{Bytes: payload}) - - if tc.expectData { - got, want := dut.Recv(t, socketFD, int32(len(payload)+1), 0), payload - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("received payload does not match sent payload, diff (-want, +got):\n%s", diff) - } - } else { - // Expected receive error, set a short receive timeout. - dut.SetSockOptTimeval( - t, - socketFD, - unix.SOL_SOCKET, - unix.SO_RCVTIMEO, - &unix.Timeval{ - Sec: 1, - Usec: 0, - }, - ) - ret, recvPayload, errno := dut.RecvWithErrno(context.Background(), t, socketFD, 100, 0) - if errno != unix.EAGAIN || errno != unix.EWOULDBLOCK { - t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, recvPayload, errno) - } - } - }, - ) - }) - } - }) -} - -func runTestCase( - t *testing.T, - dut testbench.DUT, - tc testCase, - runTc func(t *testing.T, dut testbench.DUT, conn udpConn, socketFD int32, tc testCase, payload []byte, layers testbench.Layers), -) { - var ( - socketFD int32 - outgoingUDP, incomingUDP testbench.UDP - ) - if tc.bindTo != nil { - var remotePort uint16 - socketFD, remotePort = dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, tc.bindTo) - outgoingUDP.DstPort = &remotePort - incomingUDP.SrcPort = &remotePort - } else { - // An unbound socket will auto-bind to INNADDR_ANY and a random - // port on sendto. - socketFD = dut.Socket(t, unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_UDP) - } - defer dut.Close(t, socketFD) - if tc.bindToDevice { - dut.SetSockOpt(t, socketFD, unix.SOL_SOCKET, unix.SO_BINDTODEVICE, []byte(dut.Net.RemoteDevName)) - } - - var ethernetLayer testbench.Ether - if tc.sendToBroadcast { - dut.SetSockOptInt(t, socketFD, unix.SOL_SOCKET, unix.SO_BROADCAST, 1) - - // When sending to broadcast (subnet or limited), the expected ethernet - // address is also broadcast. 
- ethernetBroadcastAddress := header.EthernetBroadcastAddress - ethernetLayer.DstAddr = ðernetBroadcastAddress - } else if tc.sendTo.IsMulticast() { - ethernetMulticastAddress := header.EthernetAddressFromMulticastIPv4Address(tcpip.Address(tc.sendTo.To4())) - ethernetLayer.DstAddr = ðernetMulticastAddress - } - expectedLayers := testbench.Layers{ðernetLayer} - - var conn udpConn - if sendTo4 := tc.sendTo.To4(); sendTo4 != nil { - v4Conn := dut.Net.NewUDPIPv4(t, outgoingUDP, incomingUDP) - conn = &v4Conn - expectedLayers = append( - expectedLayers, - &testbench.IPv4{ - DstAddr: testbench.Address(tcpip.Address(sendTo4)), - }, - ) - } else { - v6Conn := dut.Net.NewUDPIPv6(t, outgoingUDP, incomingUDP) - conn = &v6Conn - expectedLayers = append( - expectedLayers, - &testbench.IPv6{ - DstAddr: testbench.Address(tcpip.Address(tc.sendTo)), - }, - ) - } - defer conn.Close(t) - - expectedLayers = append(expectedLayers, &incomingUDP) - for _, v := range []struct { - name string - payload []byte - }{ - {"emptypayload", nil}, - {"small payload", []byte("hello world")}, - {"1kPayload", testbench.GenerateRandomPayload(t, 1<<10)}, - // Even though UDP allows larger dgrams we don't test it here as - // they need to be fragmented and written out as individual - // frames. - } { - runTc(t, dut, conn, socketFD, tc, v.payload, expectedLayers) - } -} |
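For reference, a minimal sketch of the ICMP-error injection pattern that tcp_connect_icmp_error_test, tcp_network_unreachable_test, and udp_icmp_error_propagation_test converge on in this change: serialize the layers observed on the wire with Layers.ToBytes and carry the bytes as the ICMP payload, instead of appending the IP/TCP layers to the outgoing frame by hand. The helper name below is illustrative; the testbench and header calls are the ones used in the hunks above.

import (
	"testing"

	"gvisor.dev/gvisor/pkg/tcpip/header"
	"gvisor.dev/gvisor/test/packetimpact/testbench"
)

// sendHostUnreachable injects an ICMPv4 host-unreachable error that quotes
// the TCP segment most recently seen on conn, mirroring sendICMPError in
// tcp_connect_icmp_error_test.go.
func sendHostUnreachable(t *testing.T, conn *testbench.TCPIPv4, tcp *testbench.TCP) {
	t.Helper()

	// ICMP errors carry the IP header and leading bytes of the datagram
	// that provoked them, so serialize the observed IPv4+TCP layers and
	// use the result as the ICMP payload.
	icmpPayload := testbench.Layers{tcp.Prev(), tcp}
	bytes, err := icmpPayload.ToBytes()
	if err != nil {
		t.Fatalf("got icmpPayload.ToBytes() = (_, %s), want = (_, nil)", err)
	}

	// Build a frame from the connection's current state, replace its
	// innermost layer with the ICMP error, and send it statelessly so the
	// connection's own sequence tracking is left untouched.
	layers := conn.CreateFrame(t, nil)
	layers[len(layers)-1] = &testbench.ICMPv4{
		Type:    testbench.ICMPv4Type(header.ICMPv4DstUnreachable),
		Code:    testbench.ICMPv4Code(header.ICMPv4HostUnreachable),
		Payload: bytes,
	}
	conn.SendFrameStateless(t, layers)
}

The IPv6 variant in tcp_network_unreachable_test.go follows the same shape with testbench.ICMPv6 and header.ICMPv6DstUnreachable/ICMPv6NetworkUnreachable, which is why the hand-built 4-zero-byte payload could be dropped there.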