Diffstat (limited to 'test/packetimpact')
21 files changed, 1632 insertions, 562 deletions
diff --git a/test/packetimpact/README.md b/test/packetimpact/README.md index ffa96ba98..fe0976ba5 100644 --- a/test/packetimpact/README.md +++ b/test/packetimpact/README.md @@ -694,6 +694,13 @@ func TestMyTcpTest(t *testing.T) { } ``` +### Adding a new packetimpact test + +* Create a go test in the [tests directory](tests/) +* Add a `packetimpact_testbench` rule in [BUILD](tests/BUILD) +* Add the test into the `ALL_TESTS` list in [defs.bzl](runner/defs.bzl), + otherwise you will see an error message complaining about a missing test. + ## Other notes * The time between receiving a SYN-ACK and replying with an ACK in `Handshake` diff --git a/test/packetimpact/dut/BUILD b/test/packetimpact/dut/BUILD index 3ce63c2c6..ccf1c735f 100644 --- a/test/packetimpact/dut/BUILD +++ b/test/packetimpact/dut/BUILD @@ -16,3 +16,13 @@ cc_binary( "//test/packetimpact/proto:posix_server_cc_proto", ], ) + +cc_binary( + name = "posix_server_dynamic", + srcs = ["posix_server.cc"], + deps = [ + grpcpp, + "//test/packetimpact/proto:posix_server_cc_grpc_proto", + "//test/packetimpact/proto:posix_server_cc_proto", + ], +) diff --git a/test/packetimpact/dut/posix_server.cc b/test/packetimpact/dut/posix_server.cc index 29d4cc6fe..4de8540f6 100644 --- a/test/packetimpact/dut/posix_server.cc +++ b/test/packetimpact/dut/posix_server.cc @@ -21,6 +21,7 @@ #include <string.h> #include <sys/socket.h> #include <sys/types.h> +#include <time.h> #include <unistd.h> #include <iostream> @@ -28,6 +29,7 @@ #include "include/grpcpp/security/server_credentials.h" #include "include/grpcpp/server_builder.h" +#include "include/grpcpp/server_context.h" #include "test/packetimpact/proto/posix_server.grpc.pb.h" #include "test/packetimpact/proto/posix_server.pb.h" @@ -108,18 +110,20 @@ } class PosixImpl final : public posix_server::Posix::Service { - ::grpc::Status Accept(grpc_impl::ServerContext *context, + ::grpc::Status Accept(grpc::ServerContext *context, const ::posix_server::AcceptRequest *request, ::posix_server::AcceptResponse *response) override { sockaddr_storage addr; socklen_t addrlen = sizeof(addr); response->set_fd(accept(request->sockfd(), reinterpret_cast<sockaddr *>(&addr), &addrlen)); - response->set_errno_(errno); + if (response->fd() < 0) { + response->set_errno_(errno); + } return sockaddr_to_proto(addr, addrlen, response->mutable_addr()); } - ::grpc::Status Bind(grpc_impl::ServerContext *context, + ::grpc::Status Bind(grpc::ServerContext *context, const ::posix_server::BindRequest *request, ::posix_server::BindResponse *response) override { if (!request->has_addr()) { @@ -136,19 +140,23 @@ class PosixImpl final : public posix_server::Posix::Service { response->set_ret( bind(request->sockfd(), reinterpret_cast<sockaddr *>(&addr), addr_len)); - response->set_errno_(errno); + if (response->ret() < 0) { + response->set_errno_(errno); + } return ::grpc::Status::OK; } - ::grpc::Status Close(grpc_impl::ServerContext *context, + ::grpc::Status Close(grpc::ServerContext *context, const ::posix_server::CloseRequest *request, ::posix_server::CloseResponse *response) override { response->set_ret(close(request->fd())); - response->set_errno_(errno); + if (response->ret() < 0) { + response->set_errno_(errno); + } return ::grpc::Status::OK; } - ::grpc::Status Connect(grpc_impl::ServerContext *context, + ::grpc::Status Connect(grpc::ServerContext *context, const ::posix_server::ConnectRequest *request, ::posix_server::ConnectResponse *response) override { if (!request->has_addr()) { @@ -164,32 +172,38 @@ class PosixImpl final : public 
posix_server::Posix::Service { response->set_ret(connect(request->sockfd(), reinterpret_cast<sockaddr *>(&addr), addr_len)); - response->set_errno_(errno); + if (response->ret() < 0) { + response->set_errno_(errno); + } return ::grpc::Status::OK; } - ::grpc::Status Fcntl(grpc_impl::ServerContext *context, + ::grpc::Status Fcntl(grpc::ServerContext *context, const ::posix_server::FcntlRequest *request, ::posix_server::FcntlResponse *response) override { response->set_ret(::fcntl(request->fd(), request->cmd(), request->arg())); - response->set_errno_(errno); + if (response->ret() < 0) { + response->set_errno_(errno); + } return ::grpc::Status::OK; } ::grpc::Status GetSockName( - grpc_impl::ServerContext *context, + grpc::ServerContext *context, const ::posix_server::GetSockNameRequest *request, ::posix_server::GetSockNameResponse *response) override { sockaddr_storage addr; socklen_t addrlen = sizeof(addr); response->set_ret(getsockname( request->sockfd(), reinterpret_cast<sockaddr *>(&addr), &addrlen)); - response->set_errno_(errno); + if (response->ret() < 0) { + response->set_errno_(errno); + } return sockaddr_to_proto(addr, addrlen, response->mutable_addr()); } ::grpc::Status GetSockOpt( - grpc_impl::ServerContext *context, + grpc::ServerContext *context, const ::posix_server::GetSockOptRequest *request, ::posix_server::GetSockOptResponse *response) override { switch (request->type()) { @@ -226,15 +240,19 @@ class PosixImpl final : public posix_server::Posix::Service { return ::grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "Unknown SockOpt Type"); } - response->set_errno_(errno); + if (response->ret() < 0) { + response->set_errno_(errno); + } return ::grpc::Status::OK; } - ::grpc::Status Listen(grpc_impl::ServerContext *context, + ::grpc::Status Listen(grpc::ServerContext *context, const ::posix_server::ListenRequest *request, ::posix_server::ListenResponse *response) override { response->set_ret(listen(request->sockfd(), request->backlog())); - response->set_errno_(errno); + if (response->ret() < 0) { + response->set_errno_(errno); + } return ::grpc::Status::OK; } @@ -243,7 +261,9 @@ class PosixImpl final : public posix_server::Posix::Service { ::posix_server::SendResponse *response) override { response->set_ret(::send(request->sockfd(), request->buf().data(), request->buf().size(), request->flags())); - response->set_errno_(errno); + if (response->ret() < 0) { + response->set_errno_(errno); + } return ::grpc::Status::OK; } @@ -264,12 +284,14 @@ class PosixImpl final : public posix_server::Posix::Service { response->set_ret(::sendto(request->sockfd(), request->buf().data(), request->buf().size(), request->flags(), reinterpret_cast<sockaddr *>(&addr), addr_len)); - response->set_errno_(errno); + if (response->ret() < 0) { + response->set_errno_(errno); + } return ::grpc::Status::OK; } ::grpc::Status SetSockOpt( - grpc_impl::ServerContext *context, + grpc::ServerContext *context, const ::posix_server::SetSockOptRequest *request, ::posix_server::SetSockOptResponse *response) override { switch (request->optval().val_case()) { @@ -286,9 +308,9 @@ class PosixImpl final : public posix_server::Posix::Service { break; } case ::posix_server::SockOptVal::kTimeval: { - timeval tv = {.tv_sec = static_cast<__time_t>( + timeval tv = {.tv_sec = static_cast<time_t>( request->optval().timeval().seconds()), - .tv_usec = static_cast<__suseconds_t>( + .tv_usec = static_cast<suseconds_t>( request->optval().timeval().microseconds())}; response->set_ret(setsockopt(request->sockfd(), request->level(), 
request->optname(), &tv, sizeof(tv))); @@ -298,16 +320,29 @@ class PosixImpl final : public posix_server::Posix::Service { return ::grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "Unknown SockOpt Type"); } - response->set_errno_(errno); + if (response->ret() < 0) { + response->set_errno_(errno); + } return ::grpc::Status::OK; } - ::grpc::Status Socket(grpc_impl::ServerContext *context, + ::grpc::Status Socket(grpc::ServerContext *context, const ::posix_server::SocketRequest *request, ::posix_server::SocketResponse *response) override { response->set_fd( socket(request->domain(), request->type(), request->protocol())); - response->set_errno_(errno); + if (response->fd() < 0) { + response->set_errno_(errno); + } + return ::grpc::Status::OK; + } + + ::grpc::Status Shutdown(grpc::ServerContext *context, + const ::posix_server::ShutdownRequest *request, + ::posix_server::ShutdownResponse *response) override { + if (shutdown(request->fd(), request->how()) < 0) { + response->set_errno_(errno); + } return ::grpc::Status::OK; } @@ -320,7 +355,9 @@ class PosixImpl final : public posix_server::Posix::Service { if (response->ret() >= 0) { response->set_buf(buf.data(), response->ret()); } - response->set_errno_(errno); + if (response->ret() < 0) { + response->set_errno_(errno); + } return ::grpc::Status::OK; } }; diff --git a/test/packetimpact/proto/posix_server.proto b/test/packetimpact/proto/posix_server.proto index ccd20b10d..f32ed54ef 100644 --- a/test/packetimpact/proto/posix_server.proto +++ b/test/packetimpact/proto/posix_server.proto @@ -188,6 +188,15 @@ message SocketResponse { int32 errno_ = 2; // "errno" may fail to compile in c++. } +message ShutdownRequest { + int32 fd = 1; + int32 how = 2; +} + +message ShutdownResponse { + int32 errno_ = 1; // "errno" may fail to compile in c++. +} + message RecvRequest { int32 sockfd = 1; int32 len = 2; @@ -225,6 +234,8 @@ service Posix { rpc SetSockOpt(SetSockOptRequest) returns (SetSockOptResponse); // Call socket() on the DUT. rpc Socket(SocketRequest) returns (SocketResponse); + // Call shutdown() on the DUT. + rpc Shutdown(ShutdownRequest) returns (ShutdownResponse); // Call recv() on the DUT. rpc Recv(RecvRequest) returns (RecvResponse); } diff --git a/test/packetimpact/runner/BUILD b/test/packetimpact/runner/BUILD index ff2be9b30..605dd4972 100644 --- a/test/packetimpact/runner/BUILD +++ b/test/packetimpact/runner/BUILD @@ -1,4 +1,4 @@ -load("//tools:defs.bzl", "bzl_library", "go_test") +load("//tools:defs.bzl", "bzl_library", "go_library", "go_test") package( default_visibility = ["//test/packetimpact:__subpackages__"], @@ -7,21 +7,31 @@ package( go_test( name = "packetimpact_test", - srcs = ["packetimpact_test.go"], + srcs = [ + "packetimpact_test.go", + ], tags = [ # Not intended to be run directly. 
"local", "manual", ], - deps = [ - "//pkg/test/dockerutil", - "//test/packetimpact/netdevs", - "@com_github_docker_docker//api/types/mount:go_default_library", - ], + deps = [":runner"], ) bzl_library( name = "defs_bzl", srcs = ["defs.bzl"], - visibility = ["//visibility:private"], + visibility = ["//test/packetimpact:__subpackages__"], +) + +go_library( + name = "runner", + testonly = True, + srcs = ["dut.go"], + visibility = ["//test/packetimpact:__subpackages__"], + deps = [ + "//pkg/test/dockerutil", + "//test/packetimpact/netdevs", + "@com_github_docker_docker//api/types/mount:go_default_library", + ], ) diff --git a/test/packetimpact/runner/defs.bzl b/test/packetimpact/runner/defs.bzl index 93a36c6c2..1546d0d51 100644 --- a/test/packetimpact/runner/defs.bzl +++ b/test/packetimpact/runner/defs.bzl @@ -23,8 +23,9 @@ def _packetimpact_test_impl(ctx): transitive_files = [] if hasattr(ctx.attr._test_runner, "data_runfiles"): transitive_files.append(ctx.attr._test_runner.data_runfiles.files) + files = [test_runner] + ctx.files.testbench_binary + ctx.files._posix_server runfiles = ctx.runfiles( - files = [test_runner] + ctx.files.testbench_binary + ctx.files._posix_server_binary, + files = files, transitive_files = depset(transitive = transitive_files), collect_default = True, collect_data = True, @@ -38,7 +39,7 @@ _packetimpact_test = rule( cfg = "target", default = ":packetimpact_test", ), - "_posix_server_binary": attr.label( + "_posix_server": attr.label( cfg = "target", default = "//test/packetimpact/dut:posix_server", ), @@ -109,28 +110,15 @@ def packetimpact_netstack_test( **kwargs ) -def packetimpact_go_test(name, size = "small", pure = True, expect_native_failure = False, expect_netstack_failure = False, **kwargs): +def packetimpact_go_test(name, expect_native_failure = False, expect_netstack_failure = False): """Add packetimpact tests written in go. Args: name: name of the test - size: size of the test - pure: make a static go binary expect_native_failure: the test must fail natively expect_netstack_failure: the test must fail for Netstack - **kwargs: all the other args, forwarded to go_test """ testbench_binary = name + "_test" - go_test( - name = testbench_binary, - size = size, - pure = pure, - tags = [ - "local", - "manual", - ], - **kwargs - ) packetimpact_native_test( name = name, expect_failure = expect_native_failure, @@ -141,3 +129,156 @@ def packetimpact_go_test(name, size = "small", pure = True, expect_native_failur expect_failure = expect_netstack_failure, testbench_binary = testbench_binary, ) + +def packetimpact_testbench(name, size = "small", pure = True, **kwargs): + """Build packetimpact testbench written in go. + + Args: + name: name of the test + size: size of the test + pure: make a static go binary + **kwargs: all the other args, forwarded to go_test + """ + go_test( + name = name + "_test", + size = size, + pure = pure, + nogo = False, # FIXME(gvisor.dev/issue/3374): Not working with all build systems. 
+ tags = [ + "local", + "manual", + ], + **kwargs + ) + +PacketimpactTestInfo = provider( + doc = "Provide information for packetimpact tests", + fields = ["name", "expect_netstack_failure"], +) + +ALL_TESTS = [ + PacketimpactTestInfo( + name = "fin_wait2_timeout", + ), + PacketimpactTestInfo( + name = "ipv4_id_uniqueness", + ), + PacketimpactTestInfo( + name = "udp_discard_mcast_source_addr", + ), + PacketimpactTestInfo( + name = "udp_recv_mcast_bcast", + ), + PacketimpactTestInfo( + name = "udp_any_addr_recv_unicast", + ), + PacketimpactTestInfo( + name = "udp_icmp_error_propagation", + ), + PacketimpactTestInfo( + name = "tcp_reordering", + # TODO(b/139368047): Fix netstack then remove the line below. + expect_netstack_failure = True, + ), + PacketimpactTestInfo( + name = "tcp_window_shrink", + ), + PacketimpactTestInfo( + name = "tcp_zero_window_probe", + ), + PacketimpactTestInfo( + name = "tcp_zero_window_probe_retransmit", + ), + PacketimpactTestInfo( + name = "tcp_zero_window_probe_usertimeout", + ), + PacketimpactTestInfo( + name = "tcp_retransmits", + ), + PacketimpactTestInfo( + name = "tcp_outside_the_window", + ), + PacketimpactTestInfo( + name = "tcp_noaccept_close_rst", + ), + PacketimpactTestInfo( + name = "tcp_send_window_sizes_piggyback", + ), + PacketimpactTestInfo( + name = "tcp_unacc_seq_ack", + ), + PacketimpactTestInfo( + name = "tcp_paws_mechanism", + # TODO(b/156682000): Fix netstack then remove the line below. + expect_netstack_failure = True, + ), + PacketimpactTestInfo( + name = "tcp_user_timeout", + ), + PacketimpactTestInfo( + name = "tcp_queue_receive_in_syn_sent", + ), + PacketimpactTestInfo( + name = "tcp_synsent_reset", + ), + PacketimpactTestInfo( + name = "tcp_synrcvd_reset", + ), + PacketimpactTestInfo( + name = "tcp_network_unreachable", + ), + PacketimpactTestInfo( + name = "tcp_cork_mss", + ), + PacketimpactTestInfo( + name = "tcp_handshake_window_size", + ), + PacketimpactTestInfo( + name = "tcp_timewait_reset", + # TODO(b/168523247): Fix netstack then remove the line below. + expect_netstack_failure = True, + ), + PacketimpactTestInfo( + name = "tcp_queue_send_in_syn_sent", + ), + PacketimpactTestInfo( + name = "icmpv6_param_problem", + # TODO(b/153485026): Fix netstack then remove the line below. + expect_netstack_failure = True, + ), + PacketimpactTestInfo( + name = "ipv6_unknown_options_action", + # TODO(b/159928940): Fix netstack then remove the line below. + expect_netstack_failure = True, + ), + PacketimpactTestInfo( + name = "ipv6_fragment_reassembly", + ), + PacketimpactTestInfo( + name = "udp_send_recv_dgram", + ), + PacketimpactTestInfo( + name = "tcp_linger", + ), + PacketimpactTestInfo( + name = "tcp_rcv_buf_space", + ), +] + +def validate_all_tests(): + """ + Make sure that ALL_TESTS list is in sync with the rules in BUILD. + + This function is order-dependent, it is intended to be used after + all packetimpact_testbench rules and before using ALL_TESTS list + at the end of BUILD. + """ + all_tests_dict = {} # there is no set, using dict to approximate. 
+ for test in ALL_TESTS: + rule_name = test.name + "_test" + all_tests_dict[rule_name] = True + if not native.existing_rule(rule_name): + fail("%s does not have a packetimpact_testbench rule in BUILD" % test.name) + for name in native.existing_rules(): + if name.endswith("_test") and name not in all_tests_dict: + fail("%s is not declared in ALL_TESTS list in defs.bzl" % name[:-5]) diff --git a/test/packetimpact/runner/dut.go b/test/packetimpact/runner/dut.go new file mode 100644 index 000000000..59bb68eb1 --- /dev/null +++ b/test/packetimpact/runner/dut.go @@ -0,0 +1,442 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package runner starts docker containers and networking for a packetimpact test. +package runner + +import ( + "context" + "flag" + "fmt" + "io/ioutil" + "log" + "math/rand" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/mount" + "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/test/packetimpact/netdevs" +) + +// stringList implements flag.Value. +type stringList []string + +// String implements flag.Value.String. +func (l *stringList) String() string { + return strings.Join(*l, ",") +} + +// Set implements flag.Value.Set. +func (l *stringList) Set(value string) error { + *l = append(*l, value) + return nil +} + +var ( + native = false + testbenchBinary = "" + tshark = false + extraTestArgs = stringList{} + expectFailure = false + + // DutAddr is the IP addres for DUT. + DutAddr = net.IPv4(0, 0, 0, 10) + testbenchAddr = net.IPv4(0, 0, 0, 20) +) + +// RegisterFlags defines flags and associates them with the package-level +// exported variables above. It should be called by tests in their init +// functions. +func RegisterFlags(fs *flag.FlagSet) { + fs.BoolVar(&native, "native", false, "whether the test should be run natively") + fs.StringVar(&testbenchBinary, "testbench_binary", "", "path to the testbench binary") + fs.BoolVar(&tshark, "tshark", false, "use more verbose tshark in logs instead of tcpdump") + fs.Var(&extraTestArgs, "extra_test_arg", "extra arguments to pass to the testbench") + fs.BoolVar(&expectFailure, "expect_failure", false, "expect that the test will fail when run") +} + +// CtrlPort is the port that posix_server listens on. +const CtrlPort = "40000" + +// logger implements testutil.Logger. +// +// Labels logs based on their source and formats multi-line logs. +type logger string + +// Name implements testutil.Logger.Name. +func (l logger) Name() string { + return string(l) +} + +// Logf implements testutil.Logger.Logf. +func (l logger) Logf(format string, args ...interface{}) { + lines := strings.Split(fmt.Sprintf(format, args...), "\n") + log.Printf("%s: %s", l, lines[0]) + for _, line := range lines[1:] { + log.Printf("%*s %s", len(l), "", line) + } +} + +// TestWithDUT runs a packetimpact test with the given information. 
+func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Container) DUT, containerAddr net.IP) { + if testbenchBinary == "" { + t.Fatal("--testbench_binary is missing") + } + dockerutil.EnsureSupportedDockerVersion() + + // Create the networks needed for the test. One control network is needed for + // the gRPC control packets and one test network on which to transmit the test + // packets. + ctrlNet := dockerutil.NewNetwork(ctx, logger("ctrlNet")) + testNet := dockerutil.NewNetwork(ctx, logger("testNet")) + for _, dn := range []*dockerutil.Network{ctrlNet, testNet} { + for { + if err := createDockerNetwork(ctx, dn); err != nil { + t.Log("creating docker network:", err) + const wait = 100 * time.Millisecond + t.Logf("sleeping %s and will try creating docker network again", wait) + // This can fail if another docker network claimed the same IP so we'll + // just try again. + time.Sleep(wait) + continue + } + break + } + dn := dn + t.Cleanup(func() { + if err := dn.Cleanup(ctx); err != nil { + t.Errorf("unable to cleanup container %s: %s", dn.Name, err) + } + }) + // Sanity check. + if inspect, err := dn.Inspect(ctx); err != nil { + t.Fatalf("failed to inspect network %s: %v", dn.Name, err) + } else if inspect.Name != dn.Name { + t.Fatalf("name mismatch for network want: %s got: %s", dn.Name, inspect.Name) + } + } + + tmpDir, err := ioutil.TempDir("", "container-output") + if err != nil { + t.Fatal("creating temp dir:", err) + } + t.Cleanup(func() { + if err := exec.Command("/bin/cp", "-r", tmpDir, os.Getenv("TEST_UNDECLARED_OUTPUTS_DIR")).Run(); err != nil { + t.Errorf("unable to copy container output files: %s", err) + } + if err := os.RemoveAll(tmpDir); err != nil { + t.Errorf("failed to remove tmpDir %s: %s", tmpDir, err) + } + }) + + const testOutputDir = "/tmp/testoutput" + + // Create the Docker container for the DUT. + var dut *dockerutil.Container + if native { + dut = dockerutil.MakeNativeContainer(ctx, logger("dut")) + } else { + dut = dockerutil.MakeContainer(ctx, logger("dut")) + } + t.Cleanup(func() { + dut.CleanUp(ctx) + }) + + runOpts := dockerutil.RunOpts{ + Image: "packetimpact", + CapAdd: []string{"NET_ADMIN"}, + Mounts: []mount.Mount{{ + Type: mount.TypeBind, + Source: tmpDir, + Target: testOutputDir, + ReadOnly: false, + }}, + } + + device := mkDevice(dut) + remoteIPv6, remoteMAC, dutDeviceID, dutTestNetDev := device.Prepare(ctx, t, runOpts, ctrlNet, testNet, containerAddr) + + // Create the Docker container for the testbench. + testbench := dockerutil.MakeNativeContainer(ctx, logger("testbench")) + + tbb := path.Base(testbenchBinary) + containerTestbenchBinary := filepath.Join("/packetimpact", tbb) + testbench.CopyFiles(&runOpts, "/packetimpact", filepath.Join("test/packetimpact/tests", tbb)) + + // snifferNetDev is a network device on the test orchestrator that we will + // run sniffer (tcpdump or tshark) on and inject traffic to, not to be + // confused with the device on the DUT. + const snifferNetDev = "eth2" + // Run tcpdump in the test bench unbuffered, without DNS resolution, just on + // the interface with the test packets. + snifferArgs := []string{ + "tcpdump", + "-S", "-vvv", "-U", "-n", + "-i", snifferNetDev, + "-w", testOutputDir + "/dump.pcap", + } + snifferRegex := "tcpdump: listening.*\n" + if tshark { + // Run tshark in the test bench unbuffered, without DNS resolution, just on + // the interface with the test packets. 
+ snifferArgs = []string{ + "tshark", "-V", "-l", "-n", "-i", snifferNetDev, + "-o", "tcp.check_checksum:TRUE", + "-o", "udp.check_checksum:TRUE", + } + snifferRegex = "Capturing on.*\n" + } + + if err := StartContainer( + ctx, + runOpts, + testbench, + testbenchAddr, + []*dockerutil.Network{ctrlNet, testNet}, + snifferArgs..., + ); err != nil { + t.Fatalf("failed to start docker container for testbench sniffer: %s", err) + } + // Kill so that it will flush output. + t.Cleanup(func() { + time.Sleep(1 * time.Second) + testbench.Exec(ctx, dockerutil.ExecOpts{}, "killall", snifferArgs[0]) + }) + + if _, err := testbench.WaitForOutput(ctx, snifferRegex, 60*time.Second); err != nil { + t.Fatalf("sniffer on %s never listened: %s", dut.Name, err) + } + + // When the Linux kernel receives a SYN-ACK for a SYN it didn't send, it + // will respond with an RST. In most packetimpact tests, the SYN is sent + // by the raw socket and the kernel knows nothing about the connection, this + // behavior will break lots of TCP related packetimpact tests. To prevent + // this, we can install the following iptables rules. The raw socket that + // packetimpact tests use will still be able to see everything. + for _, bin := range []string{"iptables", "ip6tables"} { + if logs, err := testbench.Exec(ctx, dockerutil.ExecOpts{}, bin, "-A", "INPUT", "-i", snifferNetDev, "-p", "tcp", "-j", "DROP"); err != nil { + t.Fatalf("unable to Exec %s on container %s: %s, logs from testbench:\n%s", bin, testbench.Name, err, logs) + } + } + + // FIXME(b/156449515): Some piece of the system has a race. The old + // bash script version had a sleep, so we have one too. The race should + // be fixed and this sleep removed. + time.Sleep(time.Second) + + // Start a packetimpact test on the test bench. The packetimpact test sends + // and receives packets and also sends POSIX socket commands to the + // posix_server to be executed on the DUT. + testArgs := []string{containerTestbenchBinary} + testArgs = append(testArgs, extraTestArgs...) + testArgs = append(testArgs, + "--posix_server_ip", AddressInSubnet(DutAddr, *ctrlNet.Subnet).String(), + "--posix_server_port", CtrlPort, + "--remote_ipv4", AddressInSubnet(DutAddr, *testNet.Subnet).String(), + "--local_ipv4", AddressInSubnet(testbenchAddr, *testNet.Subnet).String(), + "--remote_ipv6", remoteIPv6.String(), + "--remote_mac", remoteMAC.String(), + "--remote_interface_id", fmt.Sprintf("%d", dutDeviceID), + "--local_device", snifferNetDev, + "--remote_device", dutTestNetDev, + fmt.Sprintf("--native=%t", native), + ) + testbenchLogs, err := testbench.Exec(ctx, dockerutil.ExecOpts{}, testArgs...) + if (err != nil) != expectFailure { + var dutLogs string + if logs, err := device.Logs(ctx); err != nil { + dutLogs = fmt.Sprintf("failed to fetch DUT logs: %s", err) + } else { + dutLogs = logs + } + + t.Errorf(`test error: %v, expect failure: %t + +%s + +====== Begin of Testbench Logs ====== + +%s + +====== End of Testbench Logs ======`, + err, expectFailure, dutLogs, testbenchLogs) + } +} + +// DUT describes how to setup/teardown the dut for packetimpact tests. +type DUT interface { + // Prepare prepares the dut, starts posix_server and returns the IPv6, MAC + // address, the interface ID, and the interface name for the testNet on DUT. + Prepare(ctx context.Context, t *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network, containerAddr net.IP) (net.IP, net.HardwareAddr, uint32, string) + // Logs retrieves the logs from the dut. 
+ Logs(ctx context.Context) (string, error) +} + +// DockerDUT describes a docker based DUT. +type DockerDUT struct { + c *dockerutil.Container +} + +// NewDockerDUT creates a docker based DUT. +func NewDockerDUT(c *dockerutil.Container) DUT { + return &DockerDUT{ + c: c, + } +} + +// Prepare implements DUT.Prepare. +func (dut *DockerDUT) Prepare(ctx context.Context, t *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network, containerAddr net.IP) (net.IP, net.HardwareAddr, uint32, string) { + const containerPosixServerBinary = "/packetimpact/posix_server" + dut.c.CopyFiles(&runOpts, "/packetimpact", "test/packetimpact/dut/posix_server") + + if err := StartContainer( + ctx, + runOpts, + dut.c, + containerAddr, + []*dockerutil.Network{ctrlNet, testNet}, + containerPosixServerBinary, + "--ip=0.0.0.0", + "--port="+CtrlPort, + ); err != nil { + t.Fatalf("failed to start docker container for DUT: %s", err) + } + + if _, err := dut.c.WaitForOutput(ctx, "Server listening.*\n", 60*time.Second); err != nil { + t.Fatalf("%s on container %s never listened: %s", containerPosixServerBinary, dut.c.Name, err) + } + + dutTestDevice, dutDeviceInfo, err := deviceByIP(ctx, dut.c, AddressInSubnet(containerAddr, *testNet.Subnet)) + if err != nil { + t.Fatal(err) + } + + remoteMAC := dutDeviceInfo.MAC + remoteIPv6 := dutDeviceInfo.IPv6Addr + // Netstack as DUT doesn't assign IPv6 addresses automatically so do it if + // needed. + if remoteIPv6 == nil { + if _, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "add", netdevs.MACToIP(remoteMAC).String(), "scope", "link", "dev", dutTestDevice); err != nil { + t.Fatalf("unable to ip addr add on container %s: %s", dut.c.Name, err) + } + // Now try again, to make sure that it worked. + _, dutDeviceInfo, err = deviceByIP(ctx, dut.c, AddressInSubnet(containerAddr, *testNet.Subnet)) + if err != nil { + t.Fatal(err) + } + remoteIPv6 = dutDeviceInfo.IPv6Addr + if remoteIPv6 == nil { + t.Fatalf("unable to set IPv6 address on container %s", dut.c.Name) + } + } + const testNetDev = "eth2" + + return remoteIPv6, dutDeviceInfo.MAC, dutDeviceInfo.ID, testNetDev +} + +// Logs implements DUT.Logs. +func (dut *DockerDUT) Logs(ctx context.Context) (string, error) { + logs, err := dut.c.Logs(ctx) + if err != nil { + return "", err + } + return fmt.Sprintf(`====== Begin of DUT Logs ====== + +%s + +====== End of DUT Logs ======`, logs), nil +} + +// AddNetworks connects docker network with the container and assigns the specific IP. +func AddNetworks(ctx context.Context, d *dockerutil.Container, addr net.IP, networks []*dockerutil.Network) error { + for _, dn := range networks { + ip := AddressInSubnet(addr, *dn.Subnet) + // Connect to the network with the specified IP address. + if err := dn.Connect(ctx, d, ip.String(), ""); err != nil { + return fmt.Errorf("unable to connect container %s to network %s: %w", d.Name, dn.Name, err) + } + } + return nil +} + +// AddressInSubnet combines the subnet provided with the address and returns a +// new address. The return address bits come from the subnet where the mask is 1 +// and from the ip address where the mask is 0. +func AddressInSubnet(addr net.IP, subnet net.IPNet) net.IP { + var octets []byte + for i := 0; i < 4; i++ { + octets = append(octets, (subnet.IP.To4()[i]&subnet.Mask[i])+(addr.To4()[i]&(^subnet.Mask[i]))) + } + return net.IP(octets) +} + +// deviceByIP finds a deviceInfo and device name from an IP address. 
+func deviceByIP(ctx context.Context, d *dockerutil.Container, ip net.IP) (string, netdevs.DeviceInfo, error) { + out, err := d.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "show") + if err != nil { + return "", netdevs.DeviceInfo{}, fmt.Errorf("listing devices on %s container: %w\n%s", d.Name, err, out) + } + devs, err := netdevs.ParseDevices(out) + if err != nil { + return "", netdevs.DeviceInfo{}, fmt.Errorf("parsing devices from %s container: %w\n%s", d.Name, err, out) + } + testDevice, deviceInfo, err := netdevs.FindDeviceByIP(ip, devs) + if err != nil { + return "", netdevs.DeviceInfo{}, fmt.Errorf("can't find deviceInfo for container %s: %w", d.Name, err) + } + return testDevice, deviceInfo, nil +} + +// createDockerNetwork makes a randomly-named network that will start with the +// namePrefix. The network will be a random /24 subnet. +func createDockerNetwork(ctx context.Context, n *dockerutil.Network) error { + randSource := rand.NewSource(time.Now().UnixNano()) + r1 := rand.New(randSource) + // Class C, 192.0.0.0 to 223.255.255.255, transitionally has mask 24. + ip := net.IPv4(byte(r1.Intn(224-192)+192), byte(r1.Intn(256)), byte(r1.Intn(256)), 0) + n.Subnet = &net.IPNet{ + IP: ip, + Mask: ip.DefaultMask(), + } + return n.Create(ctx) +} + +// StartContainer will create a container instance from runOpts, connect it +// with the specified docker networks and start executing the specified cmd. +func StartContainer(ctx context.Context, runOpts dockerutil.RunOpts, c *dockerutil.Container, containerAddr net.IP, ns []*dockerutil.Network, cmd ...string) error { + conf, hostconf, netconf := c.ConfigsFrom(runOpts, cmd...) + _ = netconf + hostconf.AutoRemove = true + hostconf.Sysctls = map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"} + + if err := c.CreateFrom(ctx, conf, hostconf, nil); err != nil { + return fmt.Errorf("unable to create container %s: %w", c.Name, err) + } + + if err := AddNetworks(ctx, c, containerAddr, ns); err != nil { + return fmt.Errorf("unable to connect the container with the networks: %w", err) + } + + if err := c.Start(ctx); err != nil { + return fmt.Errorf("unable to start container %s: %w", c.Name, err) + } + return nil +} diff --git a/test/packetimpact/runner/packetimpact_test.go b/test/packetimpact/runner/packetimpact_test.go index e8c183977..c598bfc29 100644 --- a/test/packetimpact/runner/packetimpact_test.go +++ b/test/packetimpact/runner/packetimpact_test.go @@ -18,366 +18,15 @@ package packetimpact_test import ( "context" "flag" - "fmt" - "io/ioutil" - "log" - "math/rand" - "net" - "os" - "os/exec" - "path" - "strings" "testing" - "time" - "github.com/docker/docker/api/types/mount" - "gvisor.dev/gvisor/pkg/test/dockerutil" - "gvisor.dev/gvisor/test/packetimpact/netdevs" + "gvisor.dev/gvisor/test/packetimpact/runner" ) -// stringList implements flag.Value. -type stringList []string - -// String implements flag.Value.String. -func (l *stringList) String() string { - return strings.Join(*l, ",") -} - -// Set implements flag.Value.Set. 
-func (l *stringList) Set(value string) error { - *l = append(*l, value) - return nil -} - -var ( - native = flag.Bool("native", false, "whether the test should be run natively") - testbenchBinary = flag.String("testbench_binary", "", "path to the testbench binary") - tshark = flag.Bool("tshark", false, "use more verbose tshark in logs instead of tcpdump") - extraTestArgs = stringList{} - expectFailure = flag.Bool("expect_failure", false, "expect that the test will fail when run") - - dutAddr = net.IPv4(0, 0, 0, 10) - testbenchAddr = net.IPv4(0, 0, 0, 20) -) - -const ctrlPort = "40000" - -// logger implements testutil.Logger. -// -// Labels logs based on their source and formats multi-line logs. -type logger string - -// Name implements testutil.Logger.Name. -func (l logger) Name() string { - return string(l) -} - -// Logf implements testutil.Logger.Logf. -func (l logger) Logf(format string, args ...interface{}) { - lines := strings.Split(fmt.Sprintf(format, args...), "\n") - log.Printf("%s: %s", l, lines[0]) - for _, line := range lines[1:] { - log.Printf("%*s %s", len(l), "", line) - } +func init() { + runner.RegisterFlags(flag.CommandLine) } func TestOne(t *testing.T) { - flag.Var(&extraTestArgs, "extra_test_arg", "extra arguments to pass to the testbench") - flag.Parse() - if *testbenchBinary == "" { - t.Fatal("--testbench_binary is missing") - } - dockerutil.EnsureSupportedDockerVersion() - ctx := context.Background() - - // Create the networks needed for the test. One control network is needed for - // the gRPC control packets and one test network on which to transmit the test - // packets. - ctrlNet := dockerutil.NewNetwork(ctx, logger("ctrlNet")) - testNet := dockerutil.NewNetwork(ctx, logger("testNet")) - for _, dn := range []*dockerutil.Network{ctrlNet, testNet} { - for { - if err := createDockerNetwork(ctx, dn); err != nil { - t.Log("creating docker network:", err) - const wait = 100 * time.Millisecond - t.Logf("sleeping %s and will try creating docker network again", wait) - // This can fail if another docker network claimed the same IP so we'll - // just try again. - time.Sleep(wait) - continue - } - break - } - defer func(dn *dockerutil.Network) { - if err := dn.Cleanup(ctx); err != nil { - t.Errorf("unable to cleanup container %s: %s", dn.Name, err) - } - }(dn) - // Sanity check. - inspect, err := dn.Inspect(ctx) - if err != nil { - t.Fatalf("failed to inspect network %s: %v", dn.Name, err) - } else if inspect.Name != dn.Name { - t.Fatalf("name mismatch for network want: %s got: %s", dn.Name, inspect.Name) - } - - } - - tmpDir, err := ioutil.TempDir("", "container-output") - if err != nil { - t.Fatal("creating temp dir:", err) - } - defer os.RemoveAll(tmpDir) - - const testOutputDir = "/tmp/testoutput" - - // Create the Docker container for the DUT. 
- var dut *dockerutil.Container - if *native { - dut = dockerutil.MakeNativeContainer(ctx, logger("dut")) - } else { - dut = dockerutil.MakeContainer(ctx, logger("dut")) - } - - runOpts := dockerutil.RunOpts{ - Image: "packetimpact", - CapAdd: []string{"NET_ADMIN"}, - Mounts: []mount.Mount{mount.Mount{ - Type: mount.TypeBind, - Source: tmpDir, - Target: testOutputDir, - ReadOnly: false, - }}, - } - - const containerPosixServerBinary = "/packetimpact/posix_server" - dut.CopyFiles(&runOpts, "/packetimpact", "/test/packetimpact/dut/posix_server") - - conf, hostconf, _ := dut.ConfigsFrom(runOpts, containerPosixServerBinary, "--ip=0.0.0.0", "--port="+ctrlPort) - hostconf.AutoRemove = true - hostconf.Sysctls = map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"} - - if err := dut.CreateFrom(ctx, conf, hostconf, nil); err != nil { - t.Fatalf("unable to create container %s: %v", dut.Name, err) - } - - defer dut.CleanUp(ctx) - - // Add ctrlNet as eth1 and testNet as eth2. - const testNetDev = "eth2" - if err := addNetworks(ctx, dut, dutAddr, []*dockerutil.Network{ctrlNet, testNet}); err != nil { - t.Fatal(err) - } - - if err := dut.Start(ctx); err != nil { - t.Fatalf("unable to start container %s: %s", dut.Name, err) - } - - if _, err := dut.WaitForOutput(ctx, "Server listening.*\n", 60*time.Second); err != nil { - t.Fatalf("%s on container %s never listened: %s", containerPosixServerBinary, dut.Name, err) - } - - dutTestDevice, dutDeviceInfo, err := deviceByIP(ctx, dut, addressInSubnet(dutAddr, *testNet.Subnet)) - if err != nil { - t.Fatal(err) - } - - remoteMAC := dutDeviceInfo.MAC - remoteIPv6 := dutDeviceInfo.IPv6Addr - // Netstack as DUT doesn't assign IPv6 addresses automatically so do it if - // needed. - if remoteIPv6 == nil { - if _, err := dut.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "add", netdevs.MACToIP(remoteMAC).String(), "scope", "link", "dev", dutTestDevice); err != nil { - t.Fatalf("unable to ip addr add on container %s: %s", dut.Name, err) - } - // Now try again, to make sure that it worked. - _, dutDeviceInfo, err = deviceByIP(ctx, dut, addressInSubnet(dutAddr, *testNet.Subnet)) - if err != nil { - t.Fatal(err) - } - remoteIPv6 = dutDeviceInfo.IPv6Addr - if remoteIPv6 == nil { - t.Fatal("unable to set IPv6 address on container", dut.Name) - } - } - - // Create the Docker container for the testbench. - testbench := dockerutil.MakeNativeContainer(ctx, logger("testbench")) - - tbb := path.Base(*testbenchBinary) - containerTestbenchBinary := "/packetimpact/" + tbb - runOpts = dockerutil.RunOpts{ - Image: "packetimpact", - CapAdd: []string{"NET_ADMIN"}, - Mounts: []mount.Mount{mount.Mount{ - Type: mount.TypeBind, - Source: tmpDir, - Target: testOutputDir, - ReadOnly: false, - }}, - } - testbench.CopyFiles(&runOpts, "/packetimpact", "/test/packetimpact/tests/"+tbb) - - // Run tcpdump in the test bench unbuffered, without DNS resolution, just on - // the interface with the test packets. - snifferArgs := []string{ - "tcpdump", - "-S", "-vvv", "-U", "-n", - "-i", testNetDev, - "-w", testOutputDir + "/dump.pcap", - } - snifferRegex := "tcpdump: listening.*\n" - if *tshark { - // Run tshark in the test bench unbuffered, without DNS resolution, just on - // the interface with the test packets. 
- snifferArgs = []string{ - "tshark", "-V", "-l", "-n", "-i", testNetDev, - "-o", "tcp.check_checksum:TRUE", - "-o", "udp.check_checksum:TRUE", - } - snifferRegex = "Capturing on.*\n" - } - - defer func() { - if err := exec.Command("/bin/cp", "-r", tmpDir, os.Getenv("TEST_UNDECLARED_OUTPUTS_DIR")).Run(); err != nil { - t.Error("unable to copy container output files:", err) - } - }() - - conf, hostconf, _ = testbench.ConfigsFrom(runOpts, snifferArgs...) - hostconf.AutoRemove = true - hostconf.Sysctls = map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"} - - if err := testbench.CreateFrom(ctx, conf, hostconf, nil); err != nil { - t.Fatalf("unable to create container %s: %s", testbench.Name, err) - } - defer testbench.CleanUp(ctx) - - // Add ctrlNet as eth1 and testNet as eth2. - if err := addNetworks(ctx, testbench, testbenchAddr, []*dockerutil.Network{ctrlNet, testNet}); err != nil { - t.Fatal(err) - } - - if err := testbench.Start(ctx); err != nil { - t.Fatalf("unable to start container %s: %s", testbench.Name, err) - } - - // Kill so that it will flush output. - defer func() { - time.Sleep(1 * time.Second) - testbench.Exec(ctx, dockerutil.ExecOpts{}, "killall", snifferArgs[0]) - }() - - if _, err := testbench.WaitForOutput(ctx, snifferRegex, 60*time.Second); err != nil { - t.Fatalf("sniffer on %s never listened: %s", dut.Name, err) - } - - // Because the Linux kernel receives the SYN-ACK but didn't send the SYN it - // will issue an RST. To prevent this IPtables can be used to filter out all - // incoming packets. The raw socket that packetimpact tests use will still see - // everything. - for _, bin := range []string{"iptables", "ip6tables"} { - if logs, err := testbench.Exec(ctx, dockerutil.ExecOpts{}, bin, "-A", "INPUT", "-i", testNetDev, "-p", "tcp", "-j", "DROP"); err != nil { - t.Fatalf("unable to Exec %s on container %s: %s, logs from testbench:\n%s", bin, testbench.Name, err, logs) - } - } - - // FIXME(b/156449515): Some piece of the system has a race. The old - // bash script version had a sleep, so we have one too. The race should - // be fixed and this sleep removed. - time.Sleep(time.Second) - - // Start a packetimpact test on the test bench. The packetimpact test sends - // and receives packets and also sends POSIX socket commands to the - // posix_server to be executed on the DUT. - testArgs := []string{containerTestbenchBinary} - testArgs = append(testArgs, extraTestArgs...) - testArgs = append(testArgs, - "--posix_server_ip", addressInSubnet(dutAddr, *ctrlNet.Subnet).String(), - "--posix_server_port", ctrlPort, - "--remote_ipv4", addressInSubnet(dutAddr, *testNet.Subnet).String(), - "--local_ipv4", addressInSubnet(testbenchAddr, *testNet.Subnet).String(), - "--remote_ipv6", remoteIPv6.String(), - "--remote_mac", remoteMAC.String(), - "--remote_interface_id", fmt.Sprintf("%d", dutDeviceInfo.ID), - "--device", testNetDev, - fmt.Sprintf("--native=%t", *native), - ) - testbenchLogs, err := testbench.Exec(ctx, dockerutil.ExecOpts{}, testArgs...) 
- if (err != nil) != *expectFailure { - var dutLogs string - if logs, err := dut.Logs(ctx); err != nil { - dutLogs = fmt.Sprintf("failed to fetch DUT logs: %s", err) - } else { - dutLogs = logs - } - - t.Errorf(`test error: %v, expect failure: %t - -====== Begin of DUT Logs ====== - -%s - -====== End of DUT Logs ====== - -====== Begin of Testbench Logs ====== - -%s - -====== End of Testbench Logs ======`, - err, *expectFailure, dutLogs, testbenchLogs) - } -} - -func addNetworks(ctx context.Context, d *dockerutil.Container, addr net.IP, networks []*dockerutil.Network) error { - for _, dn := range networks { - ip := addressInSubnet(addr, *dn.Subnet) - // Connect to the network with the specified IP address. - if err := dn.Connect(ctx, d, ip.String(), ""); err != nil { - return fmt.Errorf("unable to connect container %s to network %s: %w", d.Name, dn.Name, err) - } - } - return nil -} - -// addressInSubnet combines the subnet provided with the address and returns a -// new address. The return address bits come from the subnet where the mask is 1 -// and from the ip address where the mask is 0. -func addressInSubnet(addr net.IP, subnet net.IPNet) net.IP { - var octets []byte - for i := 0; i < 4; i++ { - octets = append(octets, (subnet.IP.To4()[i]&subnet.Mask[i])+(addr.To4()[i]&(^subnet.Mask[i]))) - } - return net.IP(octets) -} - -// createDockerNetwork makes a randomly-named network that will start with the -// namePrefix. The network will be a random /24 subnet. -func createDockerNetwork(ctx context.Context, n *dockerutil.Network) error { - randSource := rand.NewSource(time.Now().UnixNano()) - r1 := rand.New(randSource) - // Class C, 192.0.0.0 to 223.255.255.255, transitionally has mask 24. - ip := net.IPv4(byte(r1.Intn(224-192)+192), byte(r1.Intn(256)), byte(r1.Intn(256)), 0) - n.Subnet = &net.IPNet{ - IP: ip, - Mask: ip.DefaultMask(), - } - return n.Create(ctx) -} - -// deviceByIP finds a deviceInfo and device name from an IP address. 
-func deviceByIP(ctx context.Context, d *dockerutil.Container, ip net.IP) (string, netdevs.DeviceInfo, error) { - out, err := d.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "show") - if err != nil { - return "", netdevs.DeviceInfo{}, fmt.Errorf("listing devices on %s container: %w", d.Name, err) - } - devs, err := netdevs.ParseDevices(out) - if err != nil { - return "", netdevs.DeviceInfo{}, fmt.Errorf("parsing devices from %s container: %w", d.Name, err) - } - testDevice, deviceInfo, err := netdevs.FindDeviceByIP(ip, devs) - if err != nil { - return "", netdevs.DeviceInfo{}, fmt.Errorf("can't find deviceInfo for container %s: %w", d.Name, err) - } - return testDevice, deviceInfo, nil + runner.TestWithDUT(context.Background(), t, runner.NewDockerDUT, runner.DutAddr) } diff --git a/test/packetimpact/testbench/connections.go b/test/packetimpact/testbench/connections.go index 3af5f83fd..a90046f69 100644 --- a/test/packetimpact/testbench/connections.go +++ b/test/packetimpact/testbench/connections.go @@ -615,7 +615,7 @@ func (conn *Connection) ExpectFrame(t *testing.T, layers Layers, timeout time.Du if errs == nil { return nil, fmt.Errorf("got no frames matching %v during %s", layers, timeout) } - return nil, fmt.Errorf("got no frames matching %v during %s: got %w", layers, timeout, errs) + return nil, fmt.Errorf("got frames %w want %v during %s", errs, layers, timeout) } if conn.match(layers, gotLayers) { for i, s := range conn.layerStates { diff --git a/test/packetimpact/testbench/dut.go b/test/packetimpact/testbench/dut.go index 73c532e75..6165ab293 100644 --- a/test/packetimpact/testbench/dut.go +++ b/test/packetimpact/testbench/dut.go @@ -16,11 +16,13 @@ package testbench import ( "context" + "encoding/binary" "flag" "net" "strconv" "syscall" "testing" + "time" pb "gvisor.dev/gvisor/test/packetimpact/proto/posix_server_go_proto" @@ -700,3 +702,43 @@ func (dut *DUT) RecvWithErrno(ctx context.Context, t *testing.T, sockfd, len, fl } return resp.GetRet(), resp.GetBuf(), syscall.Errno(resp.GetErrno_()) } + +// SetSockLingerOption sets SO_LINGER socket option on the DUT. +func (dut *DUT) SetSockLingerOption(t *testing.T, sockfd int32, timeout time.Duration, enable bool) { + var linger unix.Linger + if enable { + linger.Onoff = 1 + } + linger.Linger = int32(timeout / time.Second) + + buf := make([]byte, 8) + binary.LittleEndian.PutUint32(buf, uint32(linger.Onoff)) + binary.LittleEndian.PutUint32(buf[4:], uint32(linger.Linger)) + dut.SetSockOpt(t, sockfd, unix.SOL_SOCKET, unix.SO_LINGER, buf) +} + +// Shutdown calls shutdown on the DUT and causes a fatal test failure if it doesn't +// succeed. If more control over the timeout or error handling is needed, use +// ShutdownWithErrno. +func (dut *DUT) Shutdown(t *testing.T, fd, how int32) error { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) + defer cancel() + return dut.ShutdownWithErrno(ctx, t, fd, how) +} + +// ShutdownWithErrno calls shutdown on the DUT. 
+func (dut *DUT) ShutdownWithErrno(ctx context.Context, t *testing.T, fd, how int32) error { + t.Helper() + + req := pb.ShutdownRequest{ + Fd: fd, + How: how, + } + resp, err := dut.posixServer.Shutdown(ctx, &req) + if err != nil { + t.Fatalf("failed to call Shutdown: %s", err) + } + return syscall.Errno(resp.GetErrno_()) +} diff --git a/test/packetimpact/testbench/rawsockets.go b/test/packetimpact/testbench/rawsockets.go index 57e822725..193bb2dc8 100644 --- a/test/packetimpact/testbench/rawsockets.go +++ b/test/packetimpact/testbench/rawsockets.go @@ -139,7 +139,7 @@ type Injector struct { func NewInjector(t *testing.T) (Injector, error) { t.Helper() - ifInfo, err := net.InterfaceByName(Device) + ifInfo, err := net.InterfaceByName(LocalDevice) if err != nil { return Injector{}, err } diff --git a/test/packetimpact/testbench/testbench.go b/test/packetimpact/testbench/testbench.go index e3629e1f3..3c85ebbee 100644 --- a/test/packetimpact/testbench/testbench.go +++ b/test/packetimpact/testbench/testbench.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package testbench is the packetimpact test API. package testbench import ( @@ -29,8 +30,11 @@ import ( var ( // Native indicates that the test is being run natively. Native = false - // Device is the local device on the test network. - Device = "" + // LocalDevice is the device that testbench uses to inject traffic. + LocalDevice = "" + // RemoteDevice is the device name on the DUT, individual tests can + // use the name to construct tests. + RemoteDevice = "" // LocalIPv4 is the local IPv4 address on the test network. LocalIPv4 = "" @@ -80,7 +84,8 @@ func RegisterFlags(fs *flag.FlagSet) { fs.StringVar(&RemoteIPv4, "remote_ipv4", RemoteIPv4, "remote IPv4 address for test packets") fs.StringVar(&RemoteIPv6, "remote_ipv6", RemoteIPv6, "remote IPv6 address for test packets") fs.StringVar(&RemoteMAC, "remote_mac", RemoteMAC, "remote mac address for test packets") - fs.StringVar(&Device, "device", Device, "local device for test packets") + fs.StringVar(&LocalDevice, "local_device", LocalDevice, "local device to inject traffic") + fs.StringVar(&RemoteDevice, "remote_device", RemoteDevice, "remote device on the DUT") fs.BoolVar(&Native, "native", Native, "whether the test is running natively") fs.Uint64Var(&RemoteInterfaceID, "remote_interface_id", RemoteInterfaceID, "remote interface ID for test packets") } diff --git a/test/packetimpact/tests/BUILD b/test/packetimpact/tests/BUILD index 74658fea0..8c2de5a9f 100644 --- a/test/packetimpact/tests/BUILD +++ b/test/packetimpact/tests/BUILD @@ -1,11 +1,11 @@ -load("//test/packetimpact/runner:defs.bzl", "packetimpact_go_test") +load("//test/packetimpact/runner:defs.bzl", "ALL_TESTS", "packetimpact_go_test", "packetimpact_testbench", "validate_all_tests") package( default_visibility = ["//test/packetimpact:__subpackages__"], licenses = ["notice"], ) -packetimpact_go_test( +packetimpact_testbench( name = "fin_wait2_timeout", srcs = ["fin_wait2_timeout_test.go"], deps = [ @@ -15,7 +15,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "ipv4_id_uniqueness", srcs = ["ipv4_id_uniqueness_test.go"], deps = [ @@ -26,7 +26,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "udp_discard_mcast_source_addr", srcs = ["udp_discard_mcast_source_addr_test.go"], deps = [ @@ -37,7 +37,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = 
"udp_recv_mcast_bcast", srcs = ["udp_recv_mcast_bcast_test.go"], deps = [ @@ -49,7 +49,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "udp_any_addr_recv_unicast", srcs = ["udp_any_addr_recv_unicast_test.go"], deps = [ @@ -60,7 +60,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "udp_icmp_error_propagation", srcs = ["udp_icmp_error_propagation_test.go"], deps = [ @@ -71,11 +71,9 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_reordering", srcs = ["tcp_reordering_test.go"], - # TODO(b/139368047): Fix netstack then remove the line below. - expect_netstack_failure = True, deps = [ "//pkg/tcpip/header", "//pkg/tcpip/seqnum", @@ -84,7 +82,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_window_shrink", srcs = ["tcp_window_shrink_test.go"], deps = [ @@ -94,7 +92,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_zero_window_probe", srcs = ["tcp_zero_window_probe_test.go"], deps = [ @@ -104,7 +102,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_zero_window_probe_retransmit", srcs = ["tcp_zero_window_probe_retransmit_test.go"], deps = [ @@ -114,7 +112,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_zero_window_probe_usertimeout", srcs = ["tcp_zero_window_probe_usertimeout_test.go"], deps = [ @@ -124,7 +122,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_retransmits", srcs = ["tcp_retransmits_test.go"], deps = [ @@ -134,7 +132,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_outside_the_window", srcs = ["tcp_outside_the_window_test.go"], deps = [ @@ -145,7 +143,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_noaccept_close_rst", srcs = ["tcp_noaccept_close_rst_test.go"], deps = [ @@ -155,7 +153,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_send_window_sizes_piggyback", srcs = ["tcp_send_window_sizes_piggyback_test.go"], deps = [ @@ -165,9 +163,9 @@ packetimpact_go_test( ], ) -packetimpact_go_test( - name = "tcp_close_wait_ack", - srcs = ["tcp_close_wait_ack_test.go"], +packetimpact_testbench( + name = "tcp_unacc_seq_ack", + srcs = ["tcp_unacc_seq_ack_test.go"], deps = [ "//pkg/tcpip/header", "//pkg/tcpip/seqnum", @@ -176,11 +174,9 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_paws_mechanism", srcs = ["tcp_paws_mechanism_test.go"], - # TODO(b/156682000): Fix netstack then remove the line below. 
- expect_netstack_failure = True, deps = [ "//pkg/tcpip/header", "//pkg/tcpip/seqnum", @@ -189,7 +185,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_user_timeout", srcs = ["tcp_user_timeout_test.go"], deps = [ @@ -199,7 +195,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_queue_receive_in_syn_sent", srcs = ["tcp_queue_receive_in_syn_sent_test.go"], deps = [ @@ -209,7 +205,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_synsent_reset", srcs = ["tcp_synsent_reset_test.go"], deps = [ @@ -219,7 +215,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_synrcvd_reset", srcs = ["tcp_synrcvd_reset_test.go"], deps = [ @@ -229,7 +225,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_network_unreachable", srcs = ["tcp_network_unreachable_test.go"], deps = [ @@ -239,7 +235,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_cork_mss", srcs = ["tcp_cork_mss_test.go"], deps = [ @@ -249,7 +245,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "tcp_handshake_window_size", srcs = ["tcp_handshake_window_size_test.go"], deps = [ @@ -259,11 +255,29 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( + name = "tcp_timewait_reset", + srcs = ["tcp_timewait_reset_test.go"], + deps = [ + "//pkg/tcpip/header", + "//test/packetimpact/testbench", + "@org_golang_x_sys//unix:go_default_library", + ], +) + +packetimpact_testbench( + name = "tcp_queue_send_in_syn_sent", + srcs = ["tcp_queue_send_in_syn_sent_test.go"], + deps = [ + "//pkg/tcpip/header", + "//test/packetimpact/testbench", + "@org_golang_x_sys//unix:go_default_library", + ], +) + +packetimpact_testbench( name = "icmpv6_param_problem", srcs = ["icmpv6_param_problem_test.go"], - # TODO(b/153485026): Fix netstack then remove the line below. - expect_netstack_failure = True, deps = [ "//pkg/tcpip", "//pkg/tcpip/header", @@ -272,11 +286,9 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "ipv6_unknown_options_action", srcs = ["ipv6_unknown_options_action_test.go"], - # TODO(b/159928940): Fix netstack then remove the line below. - expect_netstack_failure = True, deps = [ "//pkg/tcpip", "//pkg/tcpip/header", @@ -285,11 +297,9 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "ipv6_fragment_reassembly", srcs = ["ipv6_fragment_reassembly_test.go"], - # TODO(b/160919104): Fix netstack then remove the line below. 
- expect_netstack_failure = True, deps = [ "//pkg/tcpip", "//pkg/tcpip/buffer", @@ -299,7 +309,7 @@ packetimpact_go_test( ], ) -packetimpact_go_test( +packetimpact_testbench( name = "udp_send_recv_dgram", srcs = ["udp_send_recv_dgram_test.go"], deps = [ @@ -308,3 +318,30 @@ packetimpact_go_test( "@org_golang_x_sys//unix:go_default_library", ], ) + +packetimpact_testbench( + name = "tcp_linger", + srcs = ["tcp_linger_test.go"], + deps = [ + "//pkg/tcpip/header", + "//test/packetimpact/testbench", + "@org_golang_x_sys//unix:go_default_library", + ], +) + +packetimpact_testbench( + name = "tcp_rcv_buf_space", + srcs = ["tcp_rcv_buf_space_test.go"], + deps = [ + "//pkg/tcpip/header", + "//test/packetimpact/testbench", + "@org_golang_x_sys//unix:go_default_library", + ], +) + +validate_all_tests() + +[packetimpact_go_test( + name = t.name, + expect_netstack_failure = hasattr(t, "expect_netstack_failure"), +) for t in ALL_TESTS] diff --git a/test/packetimpact/tests/ipv4_id_uniqueness_test.go b/test/packetimpact/tests/ipv4_id_uniqueness_test.go index cf881418c..7f7a768d3 100644 --- a/test/packetimpact/tests/ipv4_id_uniqueness_test.go +++ b/test/packetimpact/tests/ipv4_id_uniqueness_test.go @@ -88,7 +88,8 @@ func TestIPv4RetransmitIdentificationUniqueness(t *testing.T) { // this test. Once the socket option is supported, the following call // can be changed to simply assert success. ret, errno := dut.SetSockOptIntWithErrno(context.Background(), t, remoteFD, unix.IPPROTO_IP, linux.IP_MTU_DISCOVER, linux.IP_PMTUDISC_DONT) - if ret == -1 && errno != unix.ENOTSUP { + // Fuchsia will return ENOPROTOPT errno. + if ret == -1 && errno != unix.ENOPROTOOPT { t.Fatalf("failed to set IP_MTU_DISCOVER socket option to IP_PMTUDISC_DONT: %s", errno) } diff --git a/test/packetimpact/tests/tcp_close_wait_ack_test.go b/test/packetimpact/tests/tcp_close_wait_ack_test.go deleted file mode 100644 index e6a96f214..000000000 --- a/test/packetimpact/tests/tcp_close_wait_ack_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2020 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tcp_close_wait_ack_test - -import ( - "flag" - "fmt" - "testing" - "time" - - "golang.org/x/sys/unix" - "gvisor.dev/gvisor/pkg/tcpip/header" - "gvisor.dev/gvisor/pkg/tcpip/seqnum" - "gvisor.dev/gvisor/test/packetimpact/testbench" -) - -func init() { - testbench.RegisterFlags(flag.CommandLine) -} - -func TestCloseWaitAck(t *testing.T) { - for _, tt := range []struct { - description string - makeTestingTCP func(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset, windowSize seqnum.Size) testbench.TCP - seqNumOffset seqnum.Size - expectAck bool - }{ - {"OTW", generateOTWSeqSegment, 0, false}, - {"OTW", generateOTWSeqSegment, 1, true}, - {"OTW", generateOTWSeqSegment, 2, true}, - {"ACK", generateUnaccACKSegment, 0, false}, - {"ACK", generateUnaccACKSegment, 1, true}, - {"ACK", generateUnaccACKSegment, 2, true}, - } { - t.Run(fmt.Sprintf("%s%d", tt.description, tt.seqNumOffset), func(t *testing.T) { - dut := testbench.NewDUT(t) - defer dut.TearDown() - listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1) - defer dut.Close(t, listenFd) - conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort}) - defer conn.Close(t) - - conn.Connect(t) - acceptFd, _ := dut.Accept(t, listenFd) - - // Send a FIN to DUT to intiate the active close - conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagFin)}) - gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second) - if err != nil { - t.Fatalf("expected an ACK for our fin and DUT should enter CLOSE_WAIT: %s", err) - } - windowSize := seqnum.Size(*gotTCP.WindowSize) - - // Send a segment with OTW Seq / unacc ACK and expect an ACK back - conn.Send(t, tt.makeTestingTCP(t, &conn, tt.seqNumOffset, windowSize), &testbench.Payload{Bytes: []byte("Sample Data")}) - gotAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second) - if tt.expectAck && err != nil { - t.Fatalf("expected an ack but got none: %s", err) - } - if !tt.expectAck && gotAck != nil { - t.Fatalf("expected no ack but got one: %s", gotAck) - } - - // Now let's verify DUT is indeed in CLOSE_WAIT - dut.Close(t, acceptFd) - if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagFin)}, time.Second); err != nil { - t.Fatalf("expected DUT to send a FIN: %s", err) - } - // Ack the FIN from DUT - conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}) - // Send some extra data to DUT - conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, &testbench.Payload{Bytes: []byte("Sample Data")}) - if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, time.Second); err != nil { - t.Fatalf("expected DUT to send an RST: %s", err) - } - }) - } -} - -// generateOTWSeqSegment generates an segment with -// seqnum = RCV.NXT + RCV.WND + seqNumOffset, the generated segment is only -// acceptable when seqNumOffset is 0, otherwise an ACK is expected from the -// receiver. 
-func generateOTWSeqSegment(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset seqnum.Size, windowSize seqnum.Size) testbench.TCP { - lastAcceptable := conn.LocalSeqNum(t).Add(windowSize) - otwSeq := uint32(lastAcceptable.Add(seqNumOffset)) - return testbench.TCP{SeqNum: testbench.Uint32(otwSeq), Flags: testbench.Uint8(header.TCPFlagAck)} -} - -// generateUnaccACKSegment generates an segment with -// acknum = SND.NXT + seqNumOffset, the generated segment is only acceptable -// when seqNumOffset is 0, otherwise an ACK is expected from the receiver. -func generateUnaccACKSegment(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset seqnum.Size, windowSize seqnum.Size) testbench.TCP { - lastAcceptable := conn.RemoteSeqNum(t) - unaccAck := uint32(lastAcceptable.Add(seqNumOffset)) - return testbench.TCP{AckNum: testbench.Uint32(unaccAck), Flags: testbench.Uint8(header.TCPFlagAck)} -} diff --git a/test/packetimpact/tests/tcp_linger_test.go b/test/packetimpact/tests/tcp_linger_test.go new file mode 100644 index 000000000..b9a0409aa --- /dev/null +++ b/test/packetimpact/tests/tcp_linger_test.go @@ -0,0 +1,270 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcp_linger_test + +import ( + "context" + "flag" + "syscall" + "testing" + "time" + + "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/test/packetimpact/testbench" +) + +func init() { + testbench.RegisterFlags(flag.CommandLine) +} + +func createSocket(t *testing.T, dut testbench.DUT) (int32, int32, testbench.TCPIPv4) { + listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1) + conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort}) + conn.Connect(t) + acceptFD, _ := dut.Accept(t, listenFD) + return acceptFD, listenFD, conn +} + +func closeAll(t *testing.T, dut testbench.DUT, listenFD int32, conn testbench.TCPIPv4) { + conn.Close(t) + dut.Close(t, listenFD) + dut.TearDown() +} + +// lingerDuration is the timeout value used with SO_LINGER socket option. +const lingerDuration = 3 * time.Second + +// TestTCPLingerZeroTimeout tests when SO_LINGER is set with zero timeout. DUT +// should send RST-ACK when socket is closed. +func TestTCPLingerZeroTimeout(t *testing.T) { + // Create a socket, listen, TCP connect, and accept. + dut := testbench.NewDUT(t) + acceptFD, listenFD, conn := createSocket(t, dut) + defer closeAll(t, dut, listenFD, conn) + + dut.SetSockLingerOption(t, acceptFD, 0, true) + dut.Close(t, acceptFD) + + // If the linger timeout is set to zero, the DUT should send a RST. + if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)}, time.Second); err != nil { + t.Errorf("expected RST-ACK packet within a second but got none: %s", err) + } + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}) +} + +// TestTCPLingerOff tests when SO_LINGER is not set. DUT should send FIN-ACK +// when socket is closed. 
+func TestTCPLingerOff(t *testing.T) { + // Create a socket, listen, TCP connect, and accept. + dut := testbench.NewDUT(t) + acceptFD, listenFD, conn := createSocket(t, dut) + defer closeAll(t, dut, listenFD, conn) + + dut.Close(t, acceptFD) + + // If SO_LINGER is not set, DUT should send a FIN-ACK. + if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil { + t.Errorf("expected FIN-ACK packet within a second but got none: %s", err) + } + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}) +} + +// TestTCPLingerNonZeroTimeout tests when SO_LINGER is set with non-zero timeout. +// DUT should close the socket after timeout. +func TestTCPLingerNonZeroTimeout(t *testing.T) { + for _, tt := range []struct { + description string + lingerOn bool + }{ + {"WithNonZeroLinger", true}, + {"WithoutLinger", false}, + } { + t.Run(tt.description, func(t *testing.T) { + // Create a socket, listen, TCP connect, and accept. + dut := testbench.NewDUT(t) + acceptFD, listenFD, conn := createSocket(t, dut) + defer closeAll(t, dut, listenFD, conn) + + dut.SetSockLingerOption(t, acceptFD, lingerDuration, tt.lingerOn) + + // Increase timeout as Close will take longer time to + // return when SO_LINGER is set with non-zero timeout. + timeout := lingerDuration + 1*time.Second + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + start := time.Now() + dut.CloseWithErrno(ctx, t, acceptFD) + end := time.Now() + diff := end.Sub(start) + + if tt.lingerOn && diff < lingerDuration { + t.Errorf("expected close to return after %v seconds, but returned sooner", lingerDuration) + } else if !tt.lingerOn && diff > 1*time.Second { + t.Errorf("expected close to return within a second, but returned later") + } + + if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil { + t.Errorf("expected FIN-ACK packet within a second but got none: %s", err) + } + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}) + }) + } +} + +// TestTCPLingerSendNonZeroTimeout tests when SO_LINGER is set with non-zero +// timeout and send a packet. DUT should close the socket after timeout. +func TestTCPLingerSendNonZeroTimeout(t *testing.T) { + for _, tt := range []struct { + description string + lingerOn bool + }{ + {"WithSendNonZeroLinger", true}, + {"WithoutLinger", false}, + } { + t.Run(tt.description, func(t *testing.T) { + // Create a socket, listen, TCP connect, and accept. + dut := testbench.NewDUT(t) + acceptFD, listenFD, conn := createSocket(t, dut) + defer closeAll(t, dut, listenFD, conn) + + dut.SetSockLingerOption(t, acceptFD, lingerDuration, tt.lingerOn) + + // Send data. + sampleData := []byte("Sample Data") + dut.Send(t, acceptFD, sampleData, 0) + + // Increase timeout as Close will take longer time to + // return when SO_LINGER is set with non-zero timeout. 
+ timeout := lingerDuration + 1*time.Second + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + start := time.Now() + dut.CloseWithErrno(ctx, t, acceptFD) + end := time.Now() + diff := end.Sub(start) + + if tt.lingerOn && diff < lingerDuration { + t.Errorf("expected close to return after %v seconds, but returned sooner", lingerDuration) + } else if !tt.lingerOn && diff > 1*time.Second { + t.Errorf("expected close to return within a second, but returned later") + } + + samplePayload := &testbench.Payload{Bytes: sampleData} + if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil { + t.Fatalf("expected a packet with payload %v: %s", samplePayload, err) + } + + if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil { + t.Errorf("expected FIN-ACK packet within a second but got none: %s", err) + } + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}) + }) + } +} + +// TestTCPLingerShutdownZeroTimeout tests SO_LINGER with shutdown() and zero +// timeout. DUT should send RST-ACK when socket is closed. +func TestTCPLingerShutdownZeroTimeout(t *testing.T) { + // Create a socket, listen, TCP connect, and accept. + dut := testbench.NewDUT(t) + acceptFD, listenFD, conn := createSocket(t, dut) + defer closeAll(t, dut, listenFD, conn) + + dut.SetSockLingerOption(t, acceptFD, 0, true) + dut.Shutdown(t, acceptFD, syscall.SHUT_RDWR) + dut.Close(t, acceptFD) + + // Shutdown will send FIN-ACK with read/write option. + if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil { + t.Errorf("expected FIN-ACK packet within a second but got none: %s", err) + } + + // If the linger timeout is set to zero, the DUT should send a RST. + if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)}, time.Second); err != nil { + t.Errorf("expected RST-ACK packet within a second but got none: %s", err) + } + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}) +} + +// TestTCPLingerShutdownSendNonZeroTimeout tests SO_LINGER with shutdown() and +// non-zero timeout. DUT should close the socket after timeout. +func TestTCPLingerShutdownSendNonZeroTimeout(t *testing.T) { + for _, tt := range []struct { + description string + lingerOn bool + }{ + {"shutdownRDWR", true}, + {"shutdownRDWR", false}, + } { + t.Run(tt.description, func(t *testing.T) { + // Create a socket, listen, TCP connect, and accept. + dut := testbench.NewDUT(t) + acceptFD, listenFD, conn := createSocket(t, dut) + defer closeAll(t, dut, listenFD, conn) + + dut.SetSockLingerOption(t, acceptFD, lingerDuration, tt.lingerOn) + + // Send data. + sampleData := []byte("Sample Data") + dut.Send(t, acceptFD, sampleData, 0) + + dut.Shutdown(t, acceptFD, syscall.SHUT_RDWR) + + // Increase timeout as Close will take longer time to + // return when SO_LINGER is set with non-zero timeout. 
+ timeout := lingerDuration + 1*time.Second + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + start := time.Now() + dut.CloseWithErrno(ctx, t, acceptFD) + end := time.Now() + diff := end.Sub(start) + + if tt.lingerOn && diff < lingerDuration { + t.Errorf("expected close to return after %v seconds, but returned sooner", lingerDuration) + } else if !tt.lingerOn && diff > 1*time.Second { + t.Errorf("expected close to return within a second, but returned later") + } + + samplePayload := &testbench.Payload{Bytes: sampleData} + if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil { + t.Fatalf("expected a packet with payload %v: %s", samplePayload, err) + } + + if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil { + t.Errorf("expected FIN-ACK packet within a second but got none: %s", err) + } + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}) + }) + } +} + +func TestTCPLingerNonEstablished(t *testing.T) { + dut := testbench.NewDUT(t) + newFD := dut.Socket(t, unix.AF_INET, unix.SOCK_STREAM, unix.IPPROTO_TCP) + dut.SetSockLingerOption(t, newFD, lingerDuration, true) + + // As the socket is in the initial state, Close() should not linger + // and return immediately. + start := time.Now() + dut.CloseWithErrno(context.Background(), t, newFD) + diff := time.Since(start) + + if diff > lingerDuration { + t.Errorf("expected close to return within %s, but returned after %s", lingerDuration, diff) + } + dut.TearDown() +} diff --git a/test/packetimpact/tests/tcp_queue_send_in_syn_sent_test.go b/test/packetimpact/tests/tcp_queue_send_in_syn_sent_test.go new file mode 100644 index 000000000..0ec8fd748 --- /dev/null +++ b/test/packetimpact/tests/tcp_queue_send_in_syn_sent_test.go @@ -0,0 +1,133 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcp_queue_send_in_syn_sent_test + +import ( + "context" + "errors" + "flag" + "net" + "sync" + "syscall" + "testing" + "time" + + "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/test/packetimpact/testbench" +) + +func init() { + testbench.RegisterFlags(flag.CommandLine) +} + +// TestQueueSendInSynSent tests send behavior when the TCP state +// is SYN-SENT. +// It tests for 2 variants when in SYN_SENT state and: +// (1) DUT blocks on send and complete handshake +// (2) DUT blocks on send and receive a TCP RST. 
+func TestQueueSendInSynSent(t *testing.T) { + for _, tt := range []struct { + description string + reset bool + }{ + {description: "Complete handshake", reset: false}, + {description: "Send RST", reset: true}, + } { + t.Run(tt.description, func(t *testing.T) { + dut := testbench.NewDUT(t) + defer dut.TearDown() + + socket, remotePort := dut.CreateBoundSocket(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, net.ParseIP(testbench.RemoteIPv4)) + conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort}) + defer conn.Close(t) + + sampleData := []byte("Sample Data") + samplePayload := &testbench.Payload{Bytes: sampleData} + dut.SetNonBlocking(t, socket, true) + if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, syscall.EINPROGRESS) { + t.Fatalf("failed to bring DUT to SYN-SENT, got: %s, want EINPROGRESS", err) + } + if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, time.Second); err != nil { + t.Fatalf("expected a SYN from DUT, but got none: %s", err) + } + if _, err := dut.SendWithErrno(context.Background(), t, socket, sampleData, 0); err != syscall.Errno(unix.EWOULDBLOCK) { + t.Fatalf("expected error %s, got %s", syscall.Errno(unix.EWOULDBLOCK), err) + } + + // Test blocking write. + dut.SetNonBlocking(t, socket, false) + + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(1) + var block sync.WaitGroup + block.Add(1) + go func() { + defer wg.Done() + ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) + defer cancel() + + block.Done() + // Issue SEND call in SYN-SENT, this should be queued for + // process until the connection is established. + n, err := dut.SendWithErrno(ctx, t, socket, sampleData, 0) + if tt.reset { + if err != syscall.Errno(unix.ECONNREFUSED) { + t.Errorf("expected error %s, got %s", syscall.Errno(unix.ECONNREFUSED), err) + } + if n != -1 { + t.Errorf("expected return value %d, got %d", -1, n) + } + return + } + if n != int32(len(sampleData)) { + t.Errorf("failed to send on DUT: %s", err) + } + }() + + // Wait for the goroutine to be scheduled and before it + // blocks on endpoint send. + block.Wait() + // The following sleep is used to prevent the connection + // from being established before we are blocked on send. + time.Sleep(100 * time.Millisecond) + + if tt.reset { + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)}) + return + } + + // Bring the connection to Established. + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}) + + // Expect the data from the DUT's enqueued send request. + // + // On Linux, this can be piggybacked with the ACK completing the + // handshake. On gVisor, getting such a piggyback is a bit more + // complicated because the actual data enqueuing occurs in the + // callers of endpoint Write. + if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagPsh | header.TCPFlagAck)}, samplePayload, time.Second); err != nil { + t.Fatalf("expected payload was not received: %s", err) + } + + // Send sample payload and expect an ACK to ensure connection is still ESTABLISHED. 
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagPsh | header.TCPFlagAck)}, &testbench.Payload{Bytes: sampleData}) + if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil { + t.Fatalf("expected an ACK from DUT, but got none: %s", err) + } + }) + } +} diff --git a/test/packetimpact/tests/tcp_rcv_buf_space_test.go b/test/packetimpact/tests/tcp_rcv_buf_space_test.go new file mode 100644 index 000000000..cfbba1e8e --- /dev/null +++ b/test/packetimpact/tests/tcp_rcv_buf_space_test.go @@ -0,0 +1,80 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcp_rcv_buf_space_test + +import ( + "context" + "flag" + "syscall" + "testing" + + "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/test/packetimpact/testbench" +) + +func init() { + testbench.RegisterFlags(flag.CommandLine) +} + +// TestReduceRecvBuf tests that a packet within window is still dropped +// if the available buffer space drops below the size of the incoming +// segment. +func TestReduceRecvBuf(t *testing.T) { + dut := testbench.NewDUT(t) + defer dut.TearDown() + listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1) + defer dut.Close(t, listenFd) + conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort}) + defer conn.Close(t) + + conn.Connect(t) + acceptFd, _ := dut.Accept(t, listenFd) + defer dut.Close(t, acceptFd) + + // Set a small receive buffer for the test. + const rcvBufSz = 4096 + dut.SetSockOptInt(t, acceptFd, unix.SOL_SOCKET, unix.SO_RCVBUF, rcvBufSz) + + // Retrieve the actual buffer. + bufSz := dut.GetSockOptInt(t, acceptFd, unix.SOL_SOCKET, unix.SO_RCVBUF) + + // Generate a payload of 1 more than the actual buffer size used by the + // DUT. + sampleData := testbench.GenerateRandomPayload(t, int(bufSz)+1) + // Send and receive sample data to the dut. + const pktSize = 1400 + for payload := sampleData; len(payload) != 0; { + payloadBytes := pktSize + if l := len(payload); l < payloadBytes { + payloadBytes = l + } + + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, []testbench.Layer{&testbench.Payload{Bytes: payload[:payloadBytes]}}...) + payload = payload[payloadBytes:] + } + + // First read should read < len(sampleData) + if ret, _, err := dut.RecvWithErrno(context.Background(), t, acceptFd, int32(len(sampleData)), 0); ret == -1 || int(ret) == len(sampleData) { + t.Fatalf("dut.RecvWithErrno(ctx, t, %d, %d, 0) = %d,_, %s", acceptFd, int32(len(sampleData)), ret, err) + } + + // Second read should return EAGAIN as the last segment should have been + // dropped due to it exceeding the receive buffer space available in the + // socket. 
+ if ret, got, err := dut.RecvWithErrno(context.Background(), t, acceptFd, int32(len(sampleData)), syscall.MSG_DONTWAIT); got != nil || ret != -1 || err != syscall.EAGAIN { + t.Fatalf("expected no packets but got: %s", got) + } +} diff --git a/test/packetimpact/tests/tcp_timewait_reset_test.go b/test/packetimpact/tests/tcp_timewait_reset_test.go new file mode 100644 index 000000000..2f76a6531 --- /dev/null +++ b/test/packetimpact/tests/tcp_timewait_reset_test.go @@ -0,0 +1,68 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcp_timewait_reset_test + +import ( + "flag" + "testing" + "time" + + "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/test/packetimpact/testbench" +) + +func init() { + testbench.RegisterFlags(flag.CommandLine) +} + +// TestTimeWaitReset tests handling of RST when in TIME_WAIT state. +func TestTimeWaitReset(t *testing.T) { + dut := testbench.NewDUT(t) + defer dut.TearDown() + listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/) + defer dut.Close(t, listenFD) + conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort}) + defer conn.Close(t) + + conn.Connect(t) + acceptFD, _ := dut.Accept(t, listenFD) + + // Trigger active close. + dut.Close(t, acceptFD) + + _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second) + if err != nil { + t.Fatalf("expected a FIN: %s", err) + } + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}) + // Send a FIN, DUT should transition to TIME_WAIT from FIN_WAIT2. + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}) + if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil { + t.Fatalf("expected an ACK for our FIN: %s", err) + } + + // Send a RST, the DUT should transition to CLOSED from TIME_WAIT. + // This is the default Linux behavior, it can be changed to ignore RSTs via + // sysctl net.ipv4.tcp_rfc1337. + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}) + + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}) + // The DUT should reply with RST to our ACK as the state should have + // transitioned to CLOSED. + if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, time.Second); err != nil { + t.Fatalf("expected a RST: %s", err) + } +} diff --git a/test/packetimpact/tests/tcp_unacc_seq_ack_test.go b/test/packetimpact/tests/tcp_unacc_seq_ack_test.go new file mode 100644 index 000000000..d078bbf15 --- /dev/null +++ b/test/packetimpact/tests/tcp_unacc_seq_ack_test.go @@ -0,0 +1,234 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcp_unacc_seq_ack_test + +import ( + "flag" + "fmt" + "syscall" + "testing" + "time" + + "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/pkg/tcpip/seqnum" + "gvisor.dev/gvisor/test/packetimpact/testbench" +) + +func init() { + testbench.RegisterFlags(flag.CommandLine) +} + +func TestEstablishedUnaccSeqAck(t *testing.T) { + for _, tt := range []struct { + description string + makeTestingTCP func(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset, windowSize seqnum.Size) testbench.TCP + seqNumOffset seqnum.Size + expectAck bool + restoreSeq bool + }{ + {description: "OTWSeq", makeTestingTCP: generateOTWSeqSegment, seqNumOffset: 0, expectAck: true, restoreSeq: true}, + {description: "OTWSeq", makeTestingTCP: generateOTWSeqSegment, seqNumOffset: 1, expectAck: true, restoreSeq: true}, + {description: "OTWSeq", makeTestingTCP: generateOTWSeqSegment, seqNumOffset: 2, expectAck: true, restoreSeq: true}, + {description: "UnaccAck", makeTestingTCP: generateUnaccACKSegment, seqNumOffset: 0, expectAck: true, restoreSeq: false}, + {description: "UnaccAck", makeTestingTCP: generateUnaccACKSegment, seqNumOffset: 1, expectAck: false, restoreSeq: true}, + {description: "UnaccAck", makeTestingTCP: generateUnaccACKSegment, seqNumOffset: 2, expectAck: false, restoreSeq: true}, + } { + t.Run(fmt.Sprintf("%s:offset=%d", tt.description, tt.seqNumOffset), func(t *testing.T) { + dut := testbench.NewDUT(t) + defer dut.TearDown() + listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/) + defer dut.Close(t, listenFD) + conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort}) + defer conn.Close(t) + + conn.Connect(t) + dut.Accept(t, listenFD) + + sampleData := []byte("Sample Data") + samplePayload := &testbench.Payload{Bytes: sampleData} + + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload) + gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second) + if err != nil { + t.Fatalf("expected ack %s", err) + } + windowSize := seqnum.Size(*gotTCP.WindowSize) + + origSeq := *conn.LocalSeqNum(t) + // Send a segment with OTW Seq / unacc ACK. + conn.Send(t, tt.makeTestingTCP(t, &conn, tt.seqNumOffset, windowSize), samplePayload) + if tt.restoreSeq { + // Restore the local sequence number to ensure that the incoming + // ACK matches the TCP layer state. 
+				*conn.LocalSeqNum(t) = origSeq
+			}
+			gotAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+			if tt.expectAck && err != nil {
+				t.Fatalf("expected an ack but got none: %s", err)
+			}
+			if err == nil && !tt.expectAck && gotAck != nil {
+				t.Fatalf("expected no ack but got one: %s", gotAck)
+			}
+		})
+	}
+}
+
+func TestPassiveCloseUnaccSeqAck(t *testing.T) {
+	for _, tt := range []struct {
+		description    string
+		makeTestingTCP func(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset, windowSize seqnum.Size) testbench.TCP
+		seqNumOffset   seqnum.Size
+		expectAck      bool
+	}{
+		{description: "OTWSeq", makeTestingTCP: generateOTWSeqSegment, seqNumOffset: 0, expectAck: false},
+		{description: "OTWSeq", makeTestingTCP: generateOTWSeqSegment, seqNumOffset: 1, expectAck: true},
+		{description: "OTWSeq", makeTestingTCP: generateOTWSeqSegment, seqNumOffset: 2, expectAck: true},
+		{description: "UnaccAck", makeTestingTCP: generateUnaccACKSegment, seqNumOffset: 0, expectAck: false},
+		{description: "UnaccAck", makeTestingTCP: generateUnaccACKSegment, seqNumOffset: 1, expectAck: true},
+		{description: "UnaccAck", makeTestingTCP: generateUnaccACKSegment, seqNumOffset: 2, expectAck: true},
+	} {
+		t.Run(fmt.Sprintf("%s:offset=%d", tt.description, tt.seqNumOffset), func(t *testing.T) {
+			dut := testbench.NewDUT(t)
+			defer dut.TearDown()
+			listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)
+			defer dut.Close(t, listenFD)
+			conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+			defer conn.Close(t)
+
+			conn.Connect(t)
+			acceptFD, _ := dut.Accept(t, listenFD)
+
+			// Send a FIN to DUT to initiate the passive close.
+			conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagFin)})
+			gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+			if err != nil {
+				t.Fatalf("expected an ACK for our FIN and DUT should enter CLOSE_WAIT: %s", err)
+			}
+			windowSize := seqnum.Size(*gotTCP.WindowSize)
+
+			sampleData := []byte("Sample Data")
+			samplePayload := &testbench.Payload{Bytes: sampleData}
+
+			// Send a segment with OTW Seq / unacc ACK.
+ conn.Send(t, tt.makeTestingTCP(t, &conn, tt.seqNumOffset, windowSize), samplePayload) + gotAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second) + if tt.expectAck && err != nil { + t.Errorf("expected an ack but got none: %s", err) + } + if err == nil && !tt.expectAck && gotAck != nil { + t.Errorf("expected no ack but got one: %s", gotAck) + } + + // Now let's verify DUT is indeed in CLOSE_WAIT + dut.Close(t, acceptFD) + if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagFin)}, time.Second); err != nil { + t.Fatalf("expected DUT to send a FIN: %s", err) + } + // Ack the FIN from DUT + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}) + // Send some extra data to DUT + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, samplePayload) + if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, time.Second); err != nil { + t.Fatalf("expected DUT to send an RST: %s", err) + } + }) + } +} + +func TestActiveCloseUnaccpSeqAck(t *testing.T) { + for _, tt := range []struct { + description string + makeTestingTCP func(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset, windowSize seqnum.Size) testbench.TCP + seqNumOffset seqnum.Size + restoreSeq bool + }{ + {description: "OTWSeq", makeTestingTCP: generateOTWSeqSegment, seqNumOffset: 0, restoreSeq: true}, + {description: "OTWSeq", makeTestingTCP: generateOTWSeqSegment, seqNumOffset: 1, restoreSeq: true}, + {description: "OTWSeq", makeTestingTCP: generateOTWSeqSegment, seqNumOffset: 2, restoreSeq: true}, + {description: "UnaccAck", makeTestingTCP: generateUnaccACKSegment, seqNumOffset: 0, restoreSeq: false}, + {description: "UnaccAck", makeTestingTCP: generateUnaccACKSegment, seqNumOffset: 1, restoreSeq: true}, + {description: "UnaccAck", makeTestingTCP: generateUnaccACKSegment, seqNumOffset: 2, restoreSeq: true}, + } { + t.Run(fmt.Sprintf("%s:offset=%d", tt.description, tt.seqNumOffset), func(t *testing.T) { + dut := testbench.NewDUT(t) + defer dut.TearDown() + listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/) + defer dut.Close(t, listenFD) + conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort}) + defer conn.Close(t) + + conn.Connect(t) + acceptFD, _ := dut.Accept(t, listenFD) + + // Trigger active close. + dut.Shutdown(t, acceptFD, syscall.SHUT_WR) + + // Get to FIN_WAIT2 + gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second) + if err != nil { + t.Fatalf("expected a FIN: %s", err) + } + conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}) + + sendUnaccSeqAck := func(state string) { + t.Helper() + sampleData := []byte("Sample Data") + samplePayload := &testbench.Payload{Bytes: sampleData} + + origSeq := *conn.LocalSeqNum(t) + // Send a segment with OTW Seq / unacc ACK. + conn.Send(t, tt.makeTestingTCP(t, &conn, tt.seqNumOffset, seqnum.Size(*gotTCP.WindowSize)), samplePayload) + if tt.restoreSeq { + // Restore the local sequence number to ensure that the + // incoming ACK matches the TCP layer state. 
+					*conn.LocalSeqNum(t) = origSeq
+				}
+				if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil {
+					t.Errorf("expected an ack in %s state, but got none: %s", state, err)
+				}
+			}
+
+			sendUnaccSeqAck("FIN_WAIT2")
+
+			// Send a FIN to DUT to get to TIME_WAIT.
+			conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)})
+			if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil {
+				t.Fatalf("expected an ACK for our FIN and DUT should enter TIME_WAIT: %s", err)
+			}
+
+			sendUnaccSeqAck("TIME_WAIT")
+		})
+	}
+}
+
+// generateOTWSeqSegment generates a segment with
+// seqnum = RCV.NXT + RCV.WND + seqNumOffset. The generated segment is only
+// acceptable when seqNumOffset is 0; otherwise an ACK is expected from the
+// receiver.
+func generateOTWSeqSegment(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset seqnum.Size, windowSize seqnum.Size) testbench.TCP {
+	lastAcceptable := conn.LocalSeqNum(t).Add(windowSize)
+	otwSeq := uint32(lastAcceptable.Add(seqNumOffset))
+	return testbench.TCP{SeqNum: testbench.Uint32(otwSeq), Flags: testbench.Uint8(header.TCPFlagAck)}
+}
+
+// generateUnaccACKSegment generates a segment with
+// acknum = SND.NXT + seqNumOffset. The generated segment is only acceptable
+// when seqNumOffset is 0; otherwise an ACK is expected from the receiver.
+func generateUnaccACKSegment(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset seqnum.Size, windowSize seqnum.Size) testbench.TCP {
+	lastAcceptable := conn.RemoteSeqNum(t)
+	unaccAck := uint32(lastAcceptable.Add(seqNumOffset))
+	return testbench.TCP{AckNum: testbench.Uint32(unaccAck), Flags: testbench.Uint8(header.TCPFlagAck)}
+}
diff --git a/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go b/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
index d30177e64..3d2791a6e 100644
--- a/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
+++ b/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
@@ -53,6 +53,7 @@ func TestDiscardsUDPPacketsWithMcastSourceAddressV4(t *testing.T) {
 				t,
 				testbench.IPv4{SrcAddr: testbench.Address(tcpip.Address(mcastAddr.To4()))},
 				testbench.UDP{},
+				&testbench.Payload{Bytes: []byte("test payload")},
 			)
 			ret, payload, errno := dut.RecvWithErrno(context.Background(), t, remoteFD, 100, 0)
@@ -76,14 +77,15 @@ func TestDiscardsUDPPacketsWithMcastSourceAddressV6(t *testing.T) {
 		net.IPv6interfacelocalallnodes,
 		net.IPv6linklocalallnodes,
 		net.IPv6linklocalallrouters,
-		net.ParseIP("fe01::42"),
-		net.ParseIP("fe02::4242"),
+		net.ParseIP("ff01::42"),
+		net.ParseIP("ff02::4242"),
 	} {
 		t.Run(fmt.Sprintf("srcaddr=%s", mcastAddr), func(t *testing.T) {
 			conn.SendIPv6(
 				t,
 				testbench.IPv6{SrcAddr: testbench.Address(tcpip.Address(mcastAddr.To16()))},
 				testbench.UDP{},
+				&testbench.Payload{Bytes: []byte("test payload")},
 			)
 			ret, payload, errno := dut.RecvWithErrno(context.Background(), t, remoteFD, 100, 0)
 			if errno != syscall.EAGAIN || errno != syscall.EWOULDBLOCK {