Diffstat (limited to 'test/packetimpact')
-rw-r--r--  test/packetimpact/runner/BUILD  1
-rw-r--r--  test/packetimpact/runner/defs.bzl  20
-rw-r--r--  test/packetimpact/runner/dut.go  421
-rw-r--r--  test/packetimpact/testbench/BUILD  1
-rw-r--r--  test/packetimpact/testbench/connections.go  120
-rw-r--r--  test/packetimpact/testbench/dut.go  36
-rw-r--r--  test/packetimpact/testbench/layers.go  45
-rw-r--r--  test/packetimpact/testbench/rawsockets.go  33
-rw-r--r--  test/packetimpact/testbench/testbench.go  147
-rw-r--r--  test/packetimpact/tests/BUILD  11
-rw-r--r--  test/packetimpact/tests/fin_wait2_timeout_test.go  5
-rw-r--r--  test/packetimpact/tests/icmpv6_param_problem_test.go  5
-rw-r--r--  test/packetimpact/tests/ipv4_fragment_reassembly_test.go  70
-rw-r--r--  test/packetimpact/tests/ipv4_id_uniqueness_test.go  6
-rw-r--r--  test/packetimpact/tests/ipv6_fragment_icmp_error_test.go  27
-rw-r--r--  test/packetimpact/tests/ipv6_fragment_reassembly_test.go  62
-rw-r--r--  test/packetimpact/tests/ipv6_unknown_options_action_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_cork_mss_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_handshake_window_size_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_linger_test.go  6
-rw-r--r--  test/packetimpact/tests/tcp_network_unreachable_test.go  19
-rw-r--r--  test/packetimpact/tests/tcp_noaccept_close_rst_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_outside_the_window_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_paws_mechanism_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_queue_receive_in_syn_sent_test.go  8
-rw-r--r--  test/packetimpact/tests/tcp_queue_send_in_syn_sent_test.go  8
-rw-r--r--  test/packetimpact/tests/tcp_rcv_buf_space_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_reordering_test.go  45
-rw-r--r--  test/packetimpact/tests/tcp_retransmits_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go  8
-rw-r--r--  test/packetimpact/tests/tcp_synrcvd_reset_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_synsent_reset_test.go  39
-rw-r--r--  test/packetimpact/tests/tcp_timewait_reset_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_unacc_seq_ack_test.go  11
-rw-r--r--  test/packetimpact/tests/tcp_user_timeout_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_window_shrink_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_zero_receive_window_test.go  113
-rw-r--r--  test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_zero_window_probe_test.go  5
-rw-r--r--  test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go  5
-rw-r--r--  test/packetimpact/tests/udp_any_addr_recv_unicast_test.go  7
-rw-r--r--  test/packetimpact/tests/udp_discard_mcast_source_addr_test.go  12
-rw-r--r--  test/packetimpact/tests/udp_icmp_error_propagation_test.go  10
-rw-r--r--  test/packetimpact/tests/udp_recv_mcast_bcast_test.go  31
-rw-r--r--  test/packetimpact/tests/udp_send_recv_dgram_test.go  17
45 files changed, 879 insertions, 540 deletions
diff --git a/test/packetimpact/runner/BUILD b/test/packetimpact/runner/BUILD
index 605dd4972..888c44343 100644
--- a/test/packetimpact/runner/BUILD
+++ b/test/packetimpact/runner/BUILD
@@ -32,6 +32,7 @@ go_library(
deps = [
"//pkg/test/dockerutil",
"//test/packetimpact/netdevs",
+ "//test/packetimpact/testbench",
"@com_github_docker_docker//api/types/mount:go_default_library",
],
)
diff --git a/test/packetimpact/runner/defs.bzl b/test/packetimpact/runner/defs.bzl
index 494a7f96c..c6c95546a 100644
--- a/test/packetimpact/runner/defs.bzl
+++ b/test/packetimpact/runner/defs.bzl
@@ -12,10 +12,11 @@ def _packetimpact_test_impl(ctx):
# current user, and no other users will be mapped in that namespace.
# Make sure that everything is readable here.
"find . -type f -or -type d -exec chmod a+rx {} \\;",
- "%s %s --testbench_binary %s $@\n" % (
+ "%s %s --testbench_binary %s --num_duts %d $@\n" % (
test_runner.short_path,
" ".join(ctx.attr.flags),
ctx.files.testbench_binary[0].short_path,
+ ctx.attr.num_duts,
),
])
ctx.actions.write(bench, bench_content, is_executable = True)
@@ -51,6 +52,10 @@ _packetimpact_test = rule(
mandatory = False,
default = [],
),
+ "num_duts": attr.int(
+ mandatory = False,
+ default = 1,
+ ),
},
test = True,
implementation = _packetimpact_test_impl,
@@ -110,24 +115,27 @@ def packetimpact_netstack_test(
**kwargs
)
-def packetimpact_go_test(name, expect_native_failure = False, expect_netstack_failure = False):
+def packetimpact_go_test(name, expect_native_failure = False, expect_netstack_failure = False, num_duts = 1):
"""Add packetimpact tests written in go.
Args:
name: name of the test
expect_native_failure: the test must fail natively
expect_netstack_failure: the test must fail for Netstack
+ num_duts: how many DUTs are needed for the test
"""
testbench_binary = name + "_test"
packetimpact_native_test(
name = name,
expect_failure = expect_native_failure,
testbench_binary = testbench_binary,
+ num_duts = num_duts,
)
packetimpact_netstack_test(
name = name,
expect_failure = expect_netstack_failure,
testbench_binary = testbench_binary,
+ num_duts = num_duts,
)
def packetimpact_testbench(name, size = "small", pure = True, **kwargs):
@@ -153,7 +161,7 @@ def packetimpact_testbench(name, size = "small", pure = True, **kwargs):
PacketimpactTestInfo = provider(
doc = "Provide information for packetimpact tests",
- fields = ["name", "expect_netstack_failure"],
+ fields = ["name", "expect_netstack_failure", "num_duts"],
)
ALL_TESTS = [
@@ -216,6 +224,9 @@ ALL_TESTS = [
name = "tcp_user_timeout",
),
PacketimpactTestInfo(
+ name = "tcp_zero_receive_window",
+ ),
+ PacketimpactTestInfo(
name = "tcp_queue_receive_in_syn_sent",
),
PacketimpactTestInfo(
@@ -243,8 +254,6 @@ ALL_TESTS = [
),
PacketimpactTestInfo(
name = "icmpv6_param_problem",
- # TODO(b/153485026): Fix netstack then remove the line below.
- expect_netstack_failure = True,
),
PacketimpactTestInfo(
name = "ipv6_unknown_options_action",
@@ -257,6 +266,7 @@ ALL_TESTS = [
),
PacketimpactTestInfo(
name = "ipv6_fragment_icmp_error",
+ num_duts = 3,
),
PacketimpactTestInfo(
name = "udp_send_recv_dgram",
diff --git a/test/packetimpact/runner/dut.go b/test/packetimpact/runner/dut.go
index ad1d73de2..ebbf17504 100644
--- a/test/packetimpact/runner/dut.go
+++ b/test/packetimpact/runner/dut.go
@@ -17,6 +17,7 @@ package runner
import (
"context"
+ "encoding/json"
"flag"
"fmt"
"io/ioutil"
@@ -34,6 +35,7 @@ import (
"github.com/docker/docker/api/types/mount"
"gvisor.dev/gvisor/pkg/test/dockerutil"
"gvisor.dev/gvisor/test/packetimpact/netdevs"
+ "gvisor.dev/gvisor/test/packetimpact/testbench"
)
// stringList implements flag.Value.
@@ -56,9 +58,10 @@ var (
tshark = false
extraTestArgs = stringList{}
expectFailure = false
+ numDUTs = 1
- // DutAddr is the IP addres for DUT.
- DutAddr = net.IPv4(0, 0, 0, 10)
+ // DUTAddr is the IP address for the DUT.
+ DUTAddr = net.IPv4(0, 0, 0, 10)
testbenchAddr = net.IPv4(0, 0, 0, 20)
)
@@ -71,10 +74,15 @@ func RegisterFlags(fs *flag.FlagSet) {
fs.BoolVar(&tshark, "tshark", false, "use more verbose tshark in logs instead of tcpdump")
fs.Var(&extraTestArgs, "extra_test_arg", "extra arguments to pass to the testbench")
fs.BoolVar(&expectFailure, "expect_failure", false, "expect that the test will fail when run")
+ fs.IntVar(&numDUTs, "num_duts", numDUTs, "the number of duts to create")
}
-// CtrlPort is the port that posix_server listens on.
-const CtrlPort = "40000"
+const (
+ // CtrlPort is the port that posix_server listens on.
+ CtrlPort uint16 = 40000
+ // testOutputDir is the directory in each container that holds test output.
+ testOutputDir = "/tmp/testoutput"
+)
// logger implements testutil.Logger.
//
@@ -95,16 +103,21 @@ func (l logger) Logf(format string, args ...interface{}) {
}
}
-// TestWithDUT runs a packetimpact test with the given information.
-func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Container) DUT) {
- if testbenchBinary == "" {
- t.Fatal("--testbench_binary is missing")
- }
- dockerutil.EnsureSupportedDockerVersion()
+// dutInfo encapsulates all the essential information needed to set up the
+// testbench container.
+type dutInfo struct {
+ dut DUT
+ ctrlNet, testNet *dockerutil.Network
+ netInfo *testbench.DUTTestNet
+}
- // Create the networks needed for the test. One control network is needed for
- // the gRPC control packets and one test network on which to transmit the test
- // packets.
+// setUpDUT will set up one DUT and return the information needed to set up
+// the testbench container.
+func setUpDUT(ctx context.Context, t *testing.T, id int, mkDevice func(*dockerutil.Container) DUT) (dutInfo, error) {
+ // Create the networks needed for the test. One control network is needed
+ // for the gRPC control packets and one test network on which to transmit
+ // the test packets.
+ var info dutInfo
ctrlNet := dockerutil.NewNetwork(ctx, logger("ctrlNet"))
testNet := dockerutil.NewNetwork(ctx, logger("testNet"))
for _, dn := range []*dockerutil.Network{ctrlNet, testNet} {
@@ -113,8 +126,8 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co
t.Log("creating docker network:", err)
const wait = 100 * time.Millisecond
t.Logf("sleeping %s and will try creating docker network again", wait)
- // This can fail if another docker network claimed the same IP so we'll
- // just try again.
+ // This can fail if another docker network claimed the same IP so we
+ // will just try again.
time.Sleep(wait)
continue
}
@@ -128,115 +141,161 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co
})
// Sanity check.
if inspect, err := dn.Inspect(ctx); err != nil {
- t.Fatalf("failed to inspect network %s: %v", dn.Name, err)
+ return dutInfo{}, fmt.Errorf("failed to inspect network %s: %w", dn.Name, err)
} else if inspect.Name != dn.Name {
- t.Fatalf("name mismatch for network want: %s got: %s", dn.Name, inspect.Name)
+ return dutInfo{}, fmt.Errorf("name mismatch for network want: %s got: %s", dn.Name, inspect.Name)
}
}
-
- tmpDir, err := ioutil.TempDir("", "container-output")
- if err != nil {
- t.Fatal("creating temp dir:", err)
- }
- t.Cleanup(func() {
- if err := exec.Command("/bin/cp", "-r", tmpDir, os.Getenv("TEST_UNDECLARED_OUTPUTS_DIR")).Run(); err != nil {
- t.Errorf("unable to copy container output files: %s", err)
- }
- if err := os.RemoveAll(tmpDir); err != nil {
- t.Errorf("failed to remove tmpDir %s: %s", tmpDir, err)
- }
- })
-
- const testOutputDir = "/tmp/testoutput"
+ info.ctrlNet = ctrlNet
+ info.testNet = testNet
// Create the Docker container for the DUT.
- var dut *dockerutil.Container
+ var dut DUT
if native {
- dut = dockerutil.MakeNativeContainer(ctx, logger("dut"))
+ dut = mkDevice(dockerutil.MakeNativeContainer(ctx, logger(fmt.Sprintf("dut-%d", id))))
} else {
- dut = dockerutil.MakeContainer(ctx, logger("dut"))
+ dut = mkDevice(dockerutil.MakeContainer(ctx, logger(fmt.Sprintf("dut-%d", id))))
}
- t.Cleanup(func() {
- dut.CleanUp(ctx)
- })
+ info.dut = dut
runOpts := dockerutil.RunOpts{
Image: "packetimpact",
CapAdd: []string{"NET_ADMIN"},
- Mounts: []mount.Mount{{
- Type: mount.TypeBind,
- Source: tmpDir,
- Target: testOutputDir,
- ReadOnly: false,
- }},
+ }
+ if _, err := mountTempDirectory(t, &runOpts, "dut-output", testOutputDir); err != nil {
+ return dutInfo{}, err
}
- device := mkDevice(dut)
- remoteIPv6, remoteMAC, dutDeviceID, dutTestNetDev := device.Prepare(ctx, t, runOpts, ctrlNet, testNet)
+ ipv4PrefixLength, _ := testNet.Subnet.Mask.Size()
+ remoteIPv6, remoteMAC, dutDeviceID, dutTestNetDev, err := dut.Prepare(ctx, t, runOpts, ctrlNet, testNet)
+ if err != nil {
+ return dutInfo{}, err
+ }
+ info.netInfo = &testbench.DUTTestNet{
+ RemoteMAC: remoteMAC,
+ RemoteIPv4: AddressInSubnet(DUTAddr, *testNet.Subnet),
+ RemoteIPv6: remoteIPv6,
+ RemoteDevID: dutDeviceID,
+ RemoteDevName: dutTestNetDev,
+ LocalIPv4: AddressInSubnet(testbenchAddr, *testNet.Subnet),
+ IPv4PrefixLength: ipv4PrefixLength,
+ POSIXServerIP: AddressInSubnet(DUTAddr, *ctrlNet.Subnet),
+ POSIXServerPort: CtrlPort,
+ }
+ return info, nil
+}
- // Create the Docker container for the testbench.
- testbench := dockerutil.MakeNativeContainer(ctx, logger("testbench"))
+// TestWithDUT runs a packetimpact test with the given information.
+func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Container) DUT) {
+ if testbenchBinary == "" {
+ t.Fatal("--testbench_binary is missing")
+ }
+ dockerutil.EnsureSupportedDockerVersion()
- tbb := path.Base(testbenchBinary)
- containerTestbenchBinary := filepath.Join("/packetimpact", tbb)
- testbench.CopyFiles(&runOpts, "/packetimpact", filepath.Join("test/packetimpact/tests", tbb))
-
- // snifferNetDev is a network device on the test orchestrator that we will
- // run sniffer (tcpdump or tshark) on and inject traffic to, not to be
- // confused with the device on the DUT.
- const snifferNetDev = "eth2"
- // Run tcpdump in the test bench unbuffered, without DNS resolution, just on
- // the interface with the test packets.
- snifferArgs := []string{
- "tcpdump",
- "-S", "-vvv", "-U", "-n",
- "-i", snifferNetDev,
- "-w", testOutputDir + "/dump.pcap",
+ dutInfoChan := make(chan dutInfo, numDUTs)
+ errChan := make(chan error, numDUTs)
+ var dockerNetworks []*dockerutil.Network
+ var dutTestNets []*testbench.DUTTestNet
+ var duts []DUT
+
+ setUpCtx, cancelSetup := context.WithCancel(ctx)
+ t.Cleanup(cancelSetup)
+ for i := 0; i < numDUTs; i++ {
+ go func(i int) {
+ info, err := setUpDUT(setUpCtx, t, i, mkDevice)
+ if err != nil {
+ errChan <- err
+ } else {
+ dutInfoChan <- info
+ }
+ }(i)
}
- snifferRegex := "tcpdump: listening.*\n"
- if tshark {
- // Run tshark in the test bench unbuffered, without DNS resolution, just on
- // the interface with the test packets.
- snifferArgs = []string{
- "tshark", "-V", "-l", "-n", "-i", snifferNetDev,
- "-o", "tcp.check_checksum:TRUE",
- "-o", "udp.check_checksum:TRUE",
+ for i := 0; i < numDUTs; i++ {
+ select {
+ case info := <-dutInfoChan:
+ dockerNetworks = append(dockerNetworks, info.ctrlNet, info.testNet)
+ dutTestNets = append(dutTestNets, info.netInfo)
+ duts = append(duts, info.dut)
+ case err := <-errChan:
+ t.Fatal(err)
}
- snifferRegex = "Capturing on.*\n"
}
+ // Create the Docker container for the testbench.
+ testbenchContainer := dockerutil.MakeNativeContainer(ctx, logger("testbench"))
+
+ const containerDUTTestNetsDir = "/tmp/dut-test-nets"
+ const dutTestNetsFileName = "pool.json"
+ runOpts := dockerutil.RunOpts{
+ Image: "packetimpact",
+ CapAdd: []string{"NET_ADMIN"},
+ }
+ if _, err := mountTempDirectory(t, &runOpts, "testbench-output", testOutputDir); err != nil {
+ t.Fatal(err)
+ }
+ tbb := path.Base(testbenchBinary)
+ containerTestbenchBinary := filepath.Join("/packetimpact", tbb)
+ testbenchContainer.CopyFiles(&runOpts, "/packetimpact", filepath.Join("test/packetimpact/tests", tbb))
+
if err := StartContainer(
ctx,
runOpts,
- testbench,
+ testbenchContainer,
testbenchAddr,
- []*dockerutil.Network{ctrlNet, testNet},
- snifferArgs...,
+ dockerNetworks,
+ "tail", "-f", "/dev/null",
); err != nil {
- t.Fatalf("failed to start docker container for testbench sniffer: %s", err)
+ t.Fatalf("cannot start testbench container: %s", err)
}
- // Kill so that it will flush output.
- t.Cleanup(func() {
- time.Sleep(1 * time.Second)
- testbench.Exec(ctx, dockerutil.ExecOpts{}, "killall", snifferArgs[0])
- })
- if _, err := testbench.WaitForOutput(ctx, snifferRegex, 60*time.Second); err != nil {
- t.Fatalf("sniffer on %s never listened: %s", dut.Name, err)
+ for i := range dutTestNets {
+ name, info, err := deviceByIP(ctx, testbenchContainer, dutTestNets[i].LocalIPv4)
+ if err != nil {
+ t.Fatalf("failed to get the device name associated with %s: %s", dutTestNets[i].LocalIPv4, err)
+ }
+ dutTestNets[i].LocalDevName = name
+ dutTestNets[i].LocalDevID = info.ID
+ dutTestNets[i].LocalMAC = info.MAC
+ localIPv6, err := getOrAssignIPv6Addr(ctx, testbenchContainer, name)
+ if err != nil {
+ t.Fatalf("failed to get IPV6 address on %s: %s", testbenchContainer.Name, err)
+ }
+ dutTestNets[i].LocalIPv6 = localIPv6
+ }
+ dutTestNetsBytes, err := json.Marshal(dutTestNets)
+ if err != nil {
+ t.Fatalf("failed to marshal %v into json: %s", dutTestNets, err)
}
- // When the Linux kernel receives a SYN-ACK for a SYN it didn't send, it
- // will respond with an RST. In most packetimpact tests, the SYN is sent
- // by the raw socket and the kernel knows nothing about the connection, this
- // behavior will break lots of TCP related packetimpact tests. To prevent
- // this, we can install the following iptables rules. The raw socket that
- // packetimpact tests use will still be able to see everything.
- for _, bin := range []string{"iptables", "ip6tables"} {
- if logs, err := testbench.Exec(ctx, dockerutil.ExecOpts{}, bin, "-A", "INPUT", "-i", snifferNetDev, "-p", "tcp", "-j", "DROP"); err != nil {
- t.Fatalf("unable to Exec %s on container %s: %s, logs from testbench:\n%s", bin, testbench.Name, err, logs)
+ snifferProg := "tcpdump"
+ if tshark {
+ snifferProg = "tshark"
+ }
+ for _, n := range dutTestNets {
+ _, err := testbenchContainer.ExecProcess(ctx, dockerutil.ExecOpts{}, snifferArgs(n.LocalDevName)...)
+ if err != nil {
+ t.Fatalf("failed to start exec a sniffer on %s: %s", n.LocalDevName, err)
+ }
+ // When the Linux kernel receives a SYN-ACK for a SYN it didn't send, it
+ // will respond with an RST. In most packetimpact tests, the SYN is sent
+ // by the raw socket and the kernel knows nothing about the connection; this
+ // behavior will break lots of TCP related packetimpact tests. To prevent
+ // this, we can install the following iptables rules. The raw socket that
+ // packetimpact tests use will still be able to see everything.
+ for _, bin := range []string{"iptables", "ip6tables"} {
+ if logs, err := testbenchContainer.Exec(ctx, dockerutil.ExecOpts{}, bin, "-A", "INPUT", "-i", n.LocalDevName, "-p", "tcp", "-j", "DROP"); err != nil {
+ t.Fatalf("unable to Exec %s on container %s: %s, logs from testbench:\n%s", bin, testbenchContainer.Name, err, logs)
+ }
}
}
+ t.Cleanup(func() {
+ time.Sleep(1 * time.Second)
+ if logs, err := testbenchContainer.Exec(ctx, dockerutil.ExecOpts{}, "killall", snifferProg); err != nil {
+ t.Errorf("failed to kill all sniffers: %s, logs: %s", err, logs)
+ }
+ })
+
// FIXME(b/156449515): Some piece of the system has a race. The old
// bash script version had a sleep, so we have one too. The race should
// be fixed and this sleep removed.
@@ -248,31 +307,29 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co
testArgs := []string{containerTestbenchBinary}
testArgs = append(testArgs, extraTestArgs...)
testArgs = append(testArgs,
- "--posix_server_ip", AddressInSubnet(DutAddr, *ctrlNet.Subnet).String(),
- "--posix_server_port", CtrlPort,
- "--remote_ipv4", AddressInSubnet(DutAddr, *testNet.Subnet).String(),
- "--local_ipv4", AddressInSubnet(testbenchAddr, *testNet.Subnet).String(),
- "--remote_ipv6", remoteIPv6.String(),
- "--remote_mac", remoteMAC.String(),
- "--remote_interface_id", fmt.Sprintf("%d", dutDeviceID),
- "--local_device", snifferNetDev,
- "--remote_device", dutTestNetDev,
fmt.Sprintf("--native=%t", native),
+ "--dut_test_nets_json", string(dutTestNetsBytes),
)
- testbenchLogs, err := testbench.Exec(ctx, dockerutil.ExecOpts{}, testArgs...)
+ testbenchLogs, err := testbenchContainer.Exec(ctx, dockerutil.ExecOpts{}, testArgs...)
if (err != nil) != expectFailure {
var dutLogs string
- if logs, err := device.Logs(ctx); err != nil {
- dutLogs = fmt.Sprintf("failed to fetch DUT logs: %s", err)
- } else {
- dutLogs = logs
+ for i, dut := range duts {
+ logs, err := dut.Logs(ctx)
+ if err != nil {
+ logs = fmt.Sprintf("failed to fetch DUT logs: %s", err)
+ }
+ dutLogs = fmt.Sprintf(`%s====== Begin of DUT-%d Logs ======
+
+%s
+
+====== End of DUT-%d Logs ======
+
+`, dutLogs, i, logs, i)
}
t.Errorf(`test error: %v, expect failure: %t
-%s
-
-====== Begin of Testbench Logs ======
+%s====== Begin of Testbench Logs ======
%s
@@ -285,7 +342,9 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co
type DUT interface {
// Prepare prepares the dut, starts posix_server and returns the IPv6, MAC
// address, the interface ID, and the interface name for the testNet on DUT.
- Prepare(ctx context.Context, t *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network) (net.IP, net.HardwareAddr, uint32, string)
+ // The t parameter is supposed to be used for t.Cleanup. Don't use it for
+ // t.Fatal/FailNow functions.
+ Prepare(ctx context.Context, t *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network) (net.IP, net.HardwareAddr, uint32, string, error)
// Logs retrieves the logs from the dut.
Logs(ctx context.Context) (string, error)
}
@@ -303,7 +362,7 @@ func NewDockerDUT(c *dockerutil.Container) DUT {
}
// Prepare implements DUT.Prepare.
-func (dut *DockerDUT) Prepare(ctx context.Context, t *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network) (net.IP, net.HardwareAddr, uint32, string) {
+func (dut *DockerDUT) Prepare(ctx context.Context, t *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network) (net.IP, net.HardwareAddr, uint32, string, error) {
const containerPosixServerBinary = "/packetimpact/posix_server"
dut.c.CopyFiles(&runOpts, "/packetimpact", "test/packetimpact/dut/posix_server")
@@ -311,45 +370,31 @@ func (dut *DockerDUT) Prepare(ctx context.Context, t *testing.T, runOpts dockeru
ctx,
runOpts,
dut.c,
- DutAddr,
+ DUTAddr,
[]*dockerutil.Network{ctrlNet, testNet},
containerPosixServerBinary,
"--ip=0.0.0.0",
- "--port="+CtrlPort,
+ fmt.Sprintf("--port=%d", CtrlPort),
); err != nil {
- t.Fatalf("failed to start docker container for DUT: %s", err)
+ return nil, nil, 0, "", fmt.Errorf("failed to start docker container for DUT: %w", err)
}
if _, err := dut.c.WaitForOutput(ctx, "Server listening.*\n", 60*time.Second); err != nil {
- t.Fatalf("%s on container %s never listened: %s", containerPosixServerBinary, dut.c.Name, err)
+ return nil, nil, 0, "", fmt.Errorf("%s on container %s never listened: %s", containerPosixServerBinary, dut.c.Name, err)
}
- dutTestDevice, dutDeviceInfo, err := deviceByIP(ctx, dut.c, AddressInSubnet(DutAddr, *testNet.Subnet))
+ dutTestDevice, dutDeviceInfo, err := deviceByIP(ctx, dut.c, AddressInSubnet(DUTAddr, *testNet.Subnet))
if err != nil {
- t.Fatal(err)
+ return nil, nil, 0, "", err
}
- remoteMAC := dutDeviceInfo.MAC
- remoteIPv6 := dutDeviceInfo.IPv6Addr
- // Netstack as DUT doesn't assign IPv6 addresses automatically so do it if
- // needed.
- if remoteIPv6 == nil {
- if _, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "add", netdevs.MACToIP(remoteMAC).String(), "scope", "link", "dev", dutTestDevice); err != nil {
- t.Fatalf("unable to ip addr add on container %s: %s", dut.c.Name, err)
- }
- // Now try again, to make sure that it worked.
- _, dutDeviceInfo, err = deviceByIP(ctx, dut.c, AddressInSubnet(DutAddr, *testNet.Subnet))
- if err != nil {
- t.Fatal(err)
- }
- remoteIPv6 = dutDeviceInfo.IPv6Addr
- if remoteIPv6 == nil {
- t.Fatalf("unable to set IPv6 address on container %s", dut.c.Name)
- }
+ remoteIPv6, err := getOrAssignIPv6Addr(ctx, dut.c, dutTestDevice)
+ if err != nil {
+ return nil, nil, 0, "", fmt.Errorf("failed to get IPv6 address on %s: %s", dut.c.Name, err)
}
const testNetDev = "eth2"
- return remoteIPv6, dutDeviceInfo.MAC, dutDeviceInfo.ID, testNetDev
+ return remoteIPv6, dutDeviceInfo.MAC, dutDeviceInfo.ID, testNetDev, nil
}
// Logs implements DUT.Logs.
@@ -358,11 +403,7 @@ func (dut *DockerDUT) Logs(ctx context.Context) (string, error) {
if err != nil {
return "", err
}
- return fmt.Sprintf(`====== Begin of DUT Logs ======
-
-%s
-
-====== End of DUT Logs ======`, logs), nil
+ return logs, nil
}
// AddNetworks connects docker network with the container and assigns the specific IP.
@@ -378,8 +419,8 @@ func AddNetworks(ctx context.Context, d *dockerutil.Container, addr net.IP, netw
}
// AddressInSubnet combines the subnet provided with the address and returns a
-// new address. The return address bits come from the subnet where the mask is 1
-// and from the ip address where the mask is 0.
+// new address. The return address bits come from the subnet where the mask is
+// 1 and from the ip address where the mask is 0.
func AddressInSubnet(addr net.IP, subnet net.IPNet) net.IP {
var octets []byte
for i := 0; i < 4; i++ {
@@ -388,15 +429,25 @@ func AddressInSubnet(addr net.IP, subnet net.IPNet) net.IP {
return net.IP(octets)
}
-// deviceByIP finds a deviceInfo and device name from an IP address.
-func deviceByIP(ctx context.Context, d *dockerutil.Container, ip net.IP) (string, netdevs.DeviceInfo, error) {
+// devicesInfo will run "ip addr show" on the container and parse the output
+// into a map[string]netdevs.DeviceInfo.
+func devicesInfo(ctx context.Context, d *dockerutil.Container) (map[string]netdevs.DeviceInfo, error) {
out, err := d.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "show")
if err != nil {
- return "", netdevs.DeviceInfo{}, fmt.Errorf("listing devices on %s container: %w\n%s", d.Name, err, out)
+ return map[string]netdevs.DeviceInfo{}, fmt.Errorf("listing devices on %s container: %w\n%s", d.Name, err, out)
}
devs, err := netdevs.ParseDevices(out)
if err != nil {
- return "", netdevs.DeviceInfo{}, fmt.Errorf("parsing devices from %s container: %w\n%s", d.Name, err, out)
+ return map[string]netdevs.DeviceInfo{}, fmt.Errorf("parsing devices from %s container: %w\n%s", d.Name, err, out)
+ }
+ return devs, nil
+}
+
+// deviceByIP finds a deviceInfo and device name from an IP address.
+func deviceByIP(ctx context.Context, d *dockerutil.Container, ip net.IP) (string, netdevs.DeviceInfo, error) {
+ devs, err := devicesInfo(ctx, d)
+ if err != nil {
+ return "", netdevs.DeviceInfo{}, err
}
testDevice, deviceInfo, err := netdevs.FindDeviceByIP(ip, devs)
if err != nil {
@@ -405,6 +456,36 @@ func deviceByIP(ctx context.Context, d *dockerutil.Container, ip net.IP) (string
return testDevice, deviceInfo, nil
}
+// getOrAssignIPv6Addr will try to get the IPv6 address for the interface; if an
+// address was not assigned, a link-local address based on MAC will be assigned
+// to that interface.
+func getOrAssignIPv6Addr(ctx context.Context, d *dockerutil.Container, iface string) (net.IP, error) {
+ devs, err := devicesInfo(ctx, d)
+ if err != nil {
+ return net.IP{}, err
+ }
+ info := devs[iface]
+ if info.IPv6Addr != nil {
+ return info.IPv6Addr, nil
+ }
+ if info.MAC == nil {
+ return nil, fmt.Errorf("unable to find MAC address of %s", iface)
+ }
+ if logs, err := d.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "add", netdevs.MACToIP(info.MAC).String(), "scope", "link", "dev", iface); err != nil {
+ return net.IP{}, fmt.Errorf("unable to ip addr add on container %s: %w, logs: %s", d.Name, err, logs)
+ }
+ // Now try again, to make sure that it worked.
+ devs, err = devicesInfo(ctx, d)
+ if err != nil {
+ return net.IP{}, err
+ }
+ info = devs[iface]
+ if info.IPv6Addr == nil {
+ return net.IP{}, fmt.Errorf("unable to set IPv6 address on container %s", d.Name)
+ }
+ return info.IPv6Addr, nil
+}
+
// createDockerNetwork makes a randomly-named network that will start with the
// namePrefix. The network will be a random /24 subnet.
func createDockerNetwork(ctx context.Context, n *dockerutil.Network) error {
@@ -440,3 +521,51 @@ func StartContainer(ctx context.Context, runOpts dockerutil.RunOpts, c *dockerut
}
return nil
}
+
+// mountTempDirectory creates a temporary directory on the host from the given
+// template and then mounts it into the container at the provided path. The
+// temporary directory name is returned. Content in that directory will be
+// copied to TEST_UNDECLARED_OUTPUTS_DIR in the cleanup phase.
+func mountTempDirectory(t *testing.T, runOpts *dockerutil.RunOpts, hostDirTemplate, containerDir string) (string, error) {
+ t.Helper()
+ tmpDir, err := ioutil.TempDir("", hostDirTemplate)
+ if err != nil {
+ return "", fmt.Errorf("failed to create a temp dir: %w", err)
+ }
+ t.Cleanup(func() {
+ if err := exec.Command("/bin/cp", "-r", tmpDir, os.Getenv("TEST_UNDECLARED_OUTPUTS_DIR")).Run(); err != nil {
+ t.Errorf("unable to copy container output files: %s", err)
+ }
+ if err := os.RemoveAll(tmpDir); err != nil {
+ t.Errorf("failed to remove tmpDir %s: %s", tmpDir, err)
+ }
+ })
+ runOpts.Mounts = append(runOpts.Mounts, mount.Mount{
+ Type: mount.TypeBind,
+ Source: tmpDir,
+ Target: containerDir,
+ ReadOnly: false,
+ })
+ return tmpDir, nil
+}
+
+// snifferArgs returns the correct command line to run a sniffer on the testbench.
+func snifferArgs(devName string) []string {
+ if tshark {
+ // Run tshark in the test bench unbuffered, without DNS resolution, just
+ // on the interface with the test packets.
+ return []string{
+ "tshark", "-V", "-l", "-n", "-i", devName,
+ "-o", "tcp.check_checksum:TRUE",
+ "-o", "udp.check_checksum:TRUE",
+ }
+ }
+ // Run tcpdump in the test bench unbuffered, without DNS resolution, just
+ // on the interface with the test packets.
+ return []string{
+ "tcpdump",
+ "-S", "-vvv", "-U", "-n",
+ "-i", devName,
+ "-w", filepath.Join(testOutputDir, fmt.Sprintf("%s.pcap", devName)),
+ }
+}
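For a concrete picture of the behavior AddressInSubnet documents (network bits come from the subnet where the mask is 1, host bits from the address where the mask is 0), here is a minimal, self-contained sketch. The helper body and the 172.17.5.0/24 subnet are illustrative assumptions; the hunk above elides the function's actual implementation.

    package main

    import (
        "fmt"
        "net"
    )

    // addressInSubnet mirrors the documented behavior of AddressInSubnet: take
    // the bits from the subnet where the mask is 1 and from addr where the
    // mask is 0. (Sketch only, written from the doc comment.)
    func addressInSubnet(addr net.IP, subnet net.IPNet) net.IP {
        var octets []byte
        for i := 0; i < 4; i++ {
            octets = append(octets, (subnet.IP.To4()[i]&subnet.Mask[i])|(addr.To4()[i]&^subnet.Mask[i]))
        }
        return net.IP(octets)
    }

    func main() {
        // With a hypothetical test network 172.17.5.0/24 and DUTAddr 0.0.0.10,
        // the DUT ends up at 172.17.5.10 on that network.
        _, subnet, _ := net.ParseCIDR("172.17.5.0/24")
        fmt.Println(addressInSubnet(net.IPv4(0, 0, 0, 10), *subnet)) // 172.17.5.10
    }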
diff --git a/test/packetimpact/testbench/BUILD b/test/packetimpact/testbench/BUILD
index 5a0ee1367..983c2c030 100644
--- a/test/packetimpact/testbench/BUILD
+++ b/test/packetimpact/testbench/BUILD
@@ -21,7 +21,6 @@ go_library(
"//pkg/tcpip/header",
"//pkg/tcpip/seqnum",
"//pkg/usermem",
- "//test/packetimpact/netdevs",
"//test/packetimpact/proto:posix_server_go_proto",
"@com_github_google_go_cmp//cmp:go_default_library",
"@com_github_google_go_cmp//cmp/cmpopts:go_default_library",
diff --git a/test/packetimpact/testbench/connections.go b/test/packetimpact/testbench/connections.go
index 919b4fd25..50b9ccf68 100644
--- a/test/packetimpact/testbench/connections.go
+++ b/test/packetimpact/testbench/connections.go
@@ -17,7 +17,6 @@ package testbench
import (
"fmt"
"math/rand"
- "net"
"testing"
"time"
@@ -42,7 +41,7 @@ func portFromSockaddr(sa unix.Sockaddr) (uint16, error) {
// pickPort makes a new socket and returns the socket FD and port. The domain
// should be AF_INET or AF_INET6. The caller must close the FD when done with
// the port if there is no error.
-func pickPort(domain, typ int) (fd int, port uint16, err error) {
+func (n *DUTTestNet) pickPort(domain, typ int) (fd int, port uint16, err error) {
fd, err = unix.Socket(domain, typ, 0)
if err != nil {
return -1, 0, fmt.Errorf("creating socket: %w", err)
@@ -58,11 +57,11 @@ func pickPort(domain, typ int) (fd int, port uint16, err error) {
switch domain {
case unix.AF_INET:
var sa4 unix.SockaddrInet4
- copy(sa4.Addr[:], net.ParseIP(LocalIPv4).To4())
+ copy(sa4.Addr[:], n.LocalIPv4)
sa = &sa4
case unix.AF_INET6:
- sa6 := unix.SockaddrInet6{ZoneId: uint32(LocalInterfaceID)}
- copy(sa6.Addr[:], net.ParseIP(LocalIPv6).To16())
+ sa6 := unix.SockaddrInet6{ZoneId: n.LocalDevID}
+ copy(sa6.Addr[:], n.LocalIPv6)
sa = &sa6
default:
return -1, 0, fmt.Errorf("invalid domain %d, it should be one of unix.AF_INET or unix.AF_INET6", domain)
@@ -117,19 +116,12 @@ type etherState struct {
var _ layerState = (*etherState)(nil)
// newEtherState creates a new etherState.
-func newEtherState(out, in Ether) (*etherState, error) {
- lMAC, err := tcpip.ParseMACAddress(LocalMAC)
- if err != nil {
- return nil, fmt.Errorf("parsing local MAC: %q: %w", LocalMAC, err)
- }
-
- rMAC, err := tcpip.ParseMACAddress(RemoteMAC)
- if err != nil {
- return nil, fmt.Errorf("parsing remote MAC: %q: %w", RemoteMAC, err)
- }
+func (n *DUTTestNet) newEtherState(out, in Ether) (*etherState, error) {
+ lmac := tcpip.LinkAddress(n.LocalMAC)
+ rmac := tcpip.LinkAddress(n.RemoteMAC)
s := etherState{
- out: Ether{SrcAddr: &lMAC, DstAddr: &rMAC},
- in: Ether{SrcAddr: &rMAC, DstAddr: &lMAC},
+ out: Ether{SrcAddr: &lmac, DstAddr: &rmac},
+ in: Ether{SrcAddr: &rmac, DstAddr: &lmac},
}
if err := s.out.merge(&out); err != nil {
return nil, err
@@ -169,9 +161,9 @@ type ipv4State struct {
var _ layerState = (*ipv4State)(nil)
// newIPv4State creates a new ipv4State.
-func newIPv4State(out, in IPv4) (*ipv4State, error) {
- lIP := tcpip.Address(net.ParseIP(LocalIPv4).To4())
- rIP := tcpip.Address(net.ParseIP(RemoteIPv4).To4())
+func (n *DUTTestNet) newIPv4State(out, in IPv4) (*ipv4State, error) {
+ lIP := tcpip.Address(n.LocalIPv4)
+ rIP := tcpip.Address(n.RemoteIPv4)
s := ipv4State{
out: IPv4{SrcAddr: &lIP, DstAddr: &rIP},
in: IPv4{SrcAddr: &rIP, DstAddr: &lIP},
@@ -214,9 +206,9 @@ type ipv6State struct {
var _ layerState = (*ipv6State)(nil)
// newIPv6State creates a new ipv6State.
-func newIPv6State(out, in IPv6) (*ipv6State, error) {
- lIP := tcpip.Address(net.ParseIP(LocalIPv6).To16())
- rIP := tcpip.Address(net.ParseIP(RemoteIPv6).To16())
+func (n *DUTTestNet) newIPv6State(out, in IPv6) (*ipv6State, error) {
+ lIP := tcpip.Address(n.LocalIPv6)
+ rIP := tcpip.Address(n.RemoteIPv6)
s := ipv6State{
out: IPv6{SrcAddr: &lIP, DstAddr: &rIP},
in: IPv6{SrcAddr: &rIP, DstAddr: &lIP},
@@ -272,8 +264,8 @@ func SeqNumValue(v seqnum.Value) *seqnum.Value {
}
// newTCPState creates a new TCPState.
-func newTCPState(domain int, out, in TCP) (*tcpState, error) {
- portPickerFD, localPort, err := pickPort(domain, unix.SOCK_STREAM)
+func (n *DUTTestNet) newTCPState(domain int, out, in TCP) (*tcpState, error) {
+ portPickerFD, localPort, err := n.pickPort(domain, unix.SOCK_STREAM)
if err != nil {
return nil, err
}
@@ -376,8 +368,8 @@ type udpState struct {
var _ layerState = (*udpState)(nil)
// newUDPState creates a new udpState.
-func newUDPState(domain int, out, in UDP) (*udpState, error) {
- portPickerFD, localPort, err := pickPort(domain, unix.SOCK_DGRAM)
+func (n *DUTTestNet) newUDPState(domain int, out, in UDP) (*udpState, error) {
+ portPickerFD, localPort, err := n.pickPort(domain, unix.SOCK_DGRAM)
if err != nil {
return nil, fmt.Errorf("picking port: %w", err)
}
@@ -606,7 +598,7 @@ func (conn *Connection) ExpectFrame(t *testing.T, layers Layers, timeout time.Du
var errs error
for {
var gotLayers Layers
- if timeout = time.Until(deadline); timeout > 0 {
+ if timeout := time.Until(deadline); timeout > 0 {
gotLayers = conn.recvFrame(t, timeout)
}
if gotLayers == nil {
@@ -639,26 +631,26 @@ func (conn *Connection) Drain(t *testing.T) {
type TCPIPv4 Connection
// NewTCPIPv4 creates a new TCPIPv4 connection with reasonable defaults.
-func NewTCPIPv4(t *testing.T, outgoingTCP, incomingTCP TCP) TCPIPv4 {
+func (n *DUTTestNet) NewTCPIPv4(t *testing.T, outgoingTCP, incomingTCP TCP) TCPIPv4 {
t.Helper()
- etherState, err := newEtherState(Ether{}, Ether{})
+ etherState, err := n.newEtherState(Ether{}, Ether{})
if err != nil {
t.Fatalf("can't make etherState: %s", err)
}
- ipv4State, err := newIPv4State(IPv4{}, IPv4{})
+ ipv4State, err := n.newIPv4State(IPv4{}, IPv4{})
if err != nil {
t.Fatalf("can't make ipv4State: %s", err)
}
- tcpState, err := newTCPState(unix.AF_INET, outgoingTCP, incomingTCP)
+ tcpState, err := n.newTCPState(unix.AF_INET, outgoingTCP, incomingTCP)
if err != nil {
t.Fatalf("can't make tcpState: %s", err)
}
- injector, err := NewInjector(t)
+ injector, err := n.NewInjector(t)
if err != nil {
t.Fatalf("can't make injector: %s", err)
}
- sniffer, err := NewSniffer(t)
+ sniffer, err := n.NewSniffer(t)
if err != nil {
t.Fatalf("can't make sniffer: %s", err)
}
@@ -841,23 +833,23 @@ func (conn *TCPIPv4) Drain(t *testing.T) {
type IPv4Conn Connection
// NewIPv4Conn creates a new IPv4Conn connection with reasonable defaults.
-func NewIPv4Conn(t *testing.T, outgoingIPv4, incomingIPv4 IPv4) IPv4Conn {
+func (n *DUTTestNet) NewIPv4Conn(t *testing.T, outgoingIPv4, incomingIPv4 IPv4) IPv4Conn {
t.Helper()
- etherState, err := newEtherState(Ether{}, Ether{})
+ etherState, err := n.newEtherState(Ether{}, Ether{})
if err != nil {
t.Fatalf("can't make EtherState: %s", err)
}
- ipv4State, err := newIPv4State(outgoingIPv4, incomingIPv4)
+ ipv4State, err := n.newIPv4State(outgoingIPv4, incomingIPv4)
if err != nil {
t.Fatalf("can't make IPv4State: %s", err)
}
- injector, err := NewInjector(t)
+ injector, err := n.NewInjector(t)
if err != nil {
t.Fatalf("can't make injector: %s", err)
}
- sniffer, err := NewSniffer(t)
+ sniffer, err := n.NewSniffer(t)
if err != nil {
t.Fatalf("can't make sniffer: %s", err)
}
@@ -896,23 +888,23 @@ func (c *IPv4Conn) ExpectFrame(t *testing.T, frame Layers, timeout time.Duration
type IPv6Conn Connection
// NewIPv6Conn creates a new IPv6Conn connection with reasonable defaults.
-func NewIPv6Conn(t *testing.T, outgoingIPv6, incomingIPv6 IPv6) IPv6Conn {
+func (n *DUTTestNet) NewIPv6Conn(t *testing.T, outgoingIPv6, incomingIPv6 IPv6) IPv6Conn {
t.Helper()
- etherState, err := newEtherState(Ether{}, Ether{})
+ etherState, err := n.newEtherState(Ether{}, Ether{})
if err != nil {
t.Fatalf("can't make EtherState: %s", err)
}
- ipv6State, err := newIPv6State(outgoingIPv6, incomingIPv6)
+ ipv6State, err := n.newIPv6State(outgoingIPv6, incomingIPv6)
if err != nil {
t.Fatalf("can't make IPv6State: %s", err)
}
- injector, err := NewInjector(t)
+ injector, err := n.NewInjector(t)
if err != nil {
t.Fatalf("can't make injector: %s", err)
}
- sniffer, err := NewSniffer(t)
+ sniffer, err := n.NewSniffer(t)
if err != nil {
t.Fatalf("can't make sniffer: %s", err)
}
@@ -951,26 +943,26 @@ func (conn *IPv6Conn) ExpectFrame(t *testing.T, frame Layers, timeout time.Durat
type UDPIPv4 Connection
// NewUDPIPv4 creates a new UDPIPv4 connection with reasonable defaults.
-func NewUDPIPv4(t *testing.T, outgoingUDP, incomingUDP UDP) UDPIPv4 {
+func (n *DUTTestNet) NewUDPIPv4(t *testing.T, outgoingUDP, incomingUDP UDP) UDPIPv4 {
t.Helper()
- etherState, err := newEtherState(Ether{}, Ether{})
+ etherState, err := n.newEtherState(Ether{}, Ether{})
if err != nil {
t.Fatalf("can't make etherState: %s", err)
}
- ipv4State, err := newIPv4State(IPv4{}, IPv4{})
+ ipv4State, err := n.newIPv4State(IPv4{}, IPv4{})
if err != nil {
t.Fatalf("can't make ipv4State: %s", err)
}
- udpState, err := newUDPState(unix.AF_INET, outgoingUDP, incomingUDP)
+ udpState, err := n.newUDPState(unix.AF_INET, outgoingUDP, incomingUDP)
if err != nil {
t.Fatalf("can't make udpState: %s", err)
}
- injector, err := NewInjector(t)
+ injector, err := n.NewInjector(t)
if err != nil {
t.Fatalf("can't make injector: %s", err)
}
- sniffer, err := NewSniffer(t)
+ sniffer, err := n.NewSniffer(t)
if err != nil {
t.Fatalf("can't make sniffer: %s", err)
}
@@ -1075,26 +1067,26 @@ func (conn *UDPIPv4) Drain(t *testing.T) {
type UDPIPv6 Connection
// NewUDPIPv6 creates a new UDPIPv6 connection with reasonable defaults.
-func NewUDPIPv6(t *testing.T, outgoingUDP, incomingUDP UDP) UDPIPv6 {
+func (n *DUTTestNet) NewUDPIPv6(t *testing.T, outgoingUDP, incomingUDP UDP) UDPIPv6 {
t.Helper()
- etherState, err := newEtherState(Ether{}, Ether{})
+ etherState, err := n.newEtherState(Ether{}, Ether{})
if err != nil {
t.Fatalf("can't make etherState: %s", err)
}
- ipv6State, err := newIPv6State(IPv6{}, IPv6{})
+ ipv6State, err := n.newIPv6State(IPv6{}, IPv6{})
if err != nil {
t.Fatalf("can't make IPv6State: %s", err)
}
- udpState, err := newUDPState(unix.AF_INET6, outgoingUDP, incomingUDP)
+ udpState, err := n.newUDPState(unix.AF_INET6, outgoingUDP, incomingUDP)
if err != nil {
t.Fatalf("can't make udpState: %s", err)
}
- injector, err := NewInjector(t)
+ injector, err := n.NewInjector(t)
if err != nil {
t.Fatalf("can't make injector: %s", err)
}
- sniffer, err := NewSniffer(t)
+ sniffer, err := n.NewSniffer(t)
if err != nil {
t.Fatalf("can't make sniffer: %s", err)
}
@@ -1126,14 +1118,14 @@ func (conn *UDPIPv6) ipv6State(t *testing.T) *ipv6State {
}
// LocalAddr gets the local socket address of this connection.
-func (conn *UDPIPv6) LocalAddr(t *testing.T) *unix.SockaddrInet6 {
+func (conn *UDPIPv6) LocalAddr(t *testing.T, zoneID uint32) *unix.SockaddrInet6 {
t.Helper()
sa := &unix.SockaddrInet6{
Port: int(*conn.udpState(t).out.SrcPort),
// Local address is in perspective to the remote host, so it's scoped to the
// ID of the remote interface.
- ZoneId: uint32(RemoteInterfaceID),
+ ZoneId: zoneID,
}
copy(sa.Addr[:], *conn.ipv6State(t).out.SrcAddr)
return sa
@@ -1203,24 +1195,24 @@ func (conn *UDPIPv6) Drain(t *testing.T) {
type TCPIPv6 Connection
// NewTCPIPv6 creates a new TCPIPv6 connection with reasonable defaults.
-func NewTCPIPv6(t *testing.T, outgoingTCP, incomingTCP TCP) TCPIPv6 {
- etherState, err := newEtherState(Ether{}, Ether{})
+func (n *DUTTestNet) NewTCPIPv6(t *testing.T, outgoingTCP, incomingTCP TCP) TCPIPv6 {
+ etherState, err := n.newEtherState(Ether{}, Ether{})
if err != nil {
t.Fatalf("can't make etherState: %s", err)
}
- ipv6State, err := newIPv6State(IPv6{}, IPv6{})
+ ipv6State, err := n.newIPv6State(IPv6{}, IPv6{})
if err != nil {
t.Fatalf("can't make ipv6State: %s", err)
}
- tcpState, err := newTCPState(unix.AF_INET6, outgoingTCP, incomingTCP)
+ tcpState, err := n.newTCPState(unix.AF_INET6, outgoingTCP, incomingTCP)
if err != nil {
t.Fatalf("can't make tcpState: %s", err)
}
- injector, err := NewInjector(t)
+ injector, err := n.NewInjector(t)
if err != nil {
t.Fatalf("can't make injector: %s", err)
}
- sniffer, err := NewSniffer(t)
+ sniffer, err := n.NewSniffer(t)
if err != nil {
t.Fatalf("can't make sniffer: %s", err)
}
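Because the connection constructors are now methods on *DUTTestNet, a test builds its connection from the network owned by its DUT rather than from package-level globals. A minimal sketch of the new call pattern, mirroring the per-test updates later in this diff (the package and test names are hypothetical):

    package example_test

    import (
        "flag"
        "testing"

        "golang.org/x/sys/unix"
        "gvisor.dev/gvisor/test/packetimpact/testbench"
    )

    func init() {
        testbench.Initialize(flag.CommandLine)
    }

    func TestExample(t *testing.T) {
        // NewDUT takes a DUTTestNet from the pool and registers cleanup, so
        // there is no longer a "defer dut.TearDown()".
        dut := testbench.NewDUT(t)
        listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
        defer dut.Close(t, listenFd)

        // The TCP/IPv4 connection is created from this DUT's own test network.
        conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
        defer conn.Close(t)
        conn.Connect(t)
    }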
diff --git a/test/packetimpact/testbench/dut.go b/test/packetimpact/testbench/dut.go
index 6165ab293..66a0255b8 100644
--- a/test/packetimpact/testbench/dut.go
+++ b/test/packetimpact/testbench/dut.go
@@ -17,9 +17,8 @@ package testbench
import (
"context"
"encoding/binary"
- "flag"
+ "fmt"
"net"
- "strconv"
"syscall"
"testing"
"time"
@@ -35,18 +34,26 @@ import (
type DUT struct {
conn *grpc.ClientConn
posixServer POSIXClient
+ Net *DUTTestNet
}
// NewDUT creates a new connection with the DUT over gRPC.
func NewDUT(t *testing.T) DUT {
t.Helper()
+ n := GetDUTTestNet()
+ dut := n.ConnectToDUT(t)
+ t.Cleanup(func() {
+ dut.TearDownConnection()
+ dut.Net.Release()
+ })
+ return dut
+}
- flag.Parse()
- if err := genPseudoFlags(); err != nil {
- t.Fatal("generating psuedo flags:", err)
- }
+// ConnectToDUT connects to DUT through gRPC.
+func (n *DUTTestNet) ConnectToDUT(t *testing.T) DUT {
+ t.Helper()
- posixServerAddress := POSIXServerIP + ":" + strconv.Itoa(POSIXServerPort)
+ posixServerAddress := net.JoinHostPort(n.POSIXServerIP.String(), fmt.Sprintf("%d", n.POSIXServerPort))
conn, err := grpc.Dial(posixServerAddress, grpc.WithInsecure(), grpc.WithKeepaliveParams(keepalive.ClientParameters{Timeout: RPCKeepalive}))
if err != nil {
t.Fatalf("failed to grpc.Dial(%s): %s", posixServerAddress, err)
@@ -55,11 +62,12 @@ func NewDUT(t *testing.T) DUT {
return DUT{
conn: conn,
posixServer: posixServer,
+ Net: n,
}
}
-// TearDown closes the underlying connection.
-func (dut *DUT) TearDown() {
+// TearDownConnection closes the underlying connection.
+func (dut *DUT) TearDownConnection() {
dut.conn.Close()
}
@@ -132,7 +140,7 @@ func (dut *DUT) CreateBoundSocket(t *testing.T, typ, proto int32, addr net.IP) (
fd = dut.Socket(t, unix.AF_INET6, typ, proto)
sa := unix.SockaddrInet6{}
copy(sa.Addr[:], addr.To16())
- sa.ZoneId = uint32(RemoteInterfaceID)
+ sa.ZoneId = dut.Net.RemoteDevID
dut.Bind(t, fd, &sa)
} else {
t.Fatalf("invalid IP address: %s", addr)
@@ -154,7 +162,7 @@ func (dut *DUT) CreateBoundSocket(t *testing.T, typ, proto int32, addr net.IP) (
func (dut *DUT) CreateListener(t *testing.T, typ, proto, backlog int32) (int32, uint16) {
t.Helper()
- fd, remotePort := dut.CreateBoundSocket(t, typ, proto, net.ParseIP(RemoteIPv4))
+ fd, remotePort := dut.CreateBoundSocket(t, typ, proto, dut.Net.RemoteIPv4)
dut.Listen(t, fd, backlog)
return fd, remotePort
}
@@ -717,9 +725,9 @@ func (dut *DUT) SetSockLingerOption(t *testing.T, sockfd int32, timeout time.Dur
dut.SetSockOpt(t, sockfd, unix.SOL_SOCKET, unix.SO_LINGER, buf)
}
-// Shutdown calls shutdown on the DUT and causes a fatal test failure if it doesn't
-// succeed. If more control over the timeout or error handling is needed, use
-// ShutdownWithErrno.
+// Shutdown calls shutdown on the DUT and causes a fatal test failure if it
+// doesn't succeed. If more control over the timeout or error handling is
+// needed, use ShutdownWithErrno.
func (dut *DUT) Shutdown(t *testing.T, fd, how int32) error {
t.Helper()
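The switch to net.JoinHostPort when dialing the POSIX server is a small robustness point worth spelling out: it brackets IPv6 literals the way a dialer expects, which plain string concatenation does not. An illustrative sketch:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // IPv4: both forms happen to agree.
        fmt.Println(net.JoinHostPort("192.168.0.10", "40000")) // 192.168.0.10:40000
        // IPv6: JoinHostPort adds the brackets a dialer needs.
        fmt.Println(net.JoinHostPort("fe80::1", "40000")) // [fe80::1]:40000
    }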
diff --git a/test/packetimpact/testbench/layers.go b/test/packetimpact/testbench/layers.go
index 7401a1991..dcff4ab36 100644
--- a/test/packetimpact/testbench/layers.go
+++ b/test/packetimpact/testbench/layers.go
@@ -298,14 +298,12 @@ func (l *IPv4) ToBytes() ([]byte, error) {
// An IPv4 header is variable length depending on the size of the Options.
hdrLen := header.IPv4MinimumSize
if l.Options != nil {
- hdrLen += l.Options.SizeWithPadding()
+ if len(*l.Options)%4 != 0 {
+ return nil, fmt.Errorf("invalid header options '%x (len=%d)'; must be 32 bit aligned", *l.Options, len(*l.Options))
+ }
+ hdrLen += len(*l.Options)
if hdrLen > header.IPv4MaximumHeaderSize {
- // While ToBytes can be called on packets that were received as well
- // as packets locally generated, it is physically impossible for a
- // received packet to overflow this value so any such failure must
- // be the result of a local programming error and not remotely
- // triggered. A panic is therefore appropriate.
- panic(fmt.Sprintf("IPv4 Options %d bytes, Max %d", len(*l.Options), header.IPv4MaximumOptionsSize))
+ return nil, fmt.Errorf("IPv4 Options %d bytes, Max %d", len(*l.Options), header.IPv4MaximumOptionsSize)
}
}
b := make([]byte, hdrLen)
@@ -323,10 +321,6 @@ func (l *IPv4) ToBytes() ([]byte, error) {
DstAddr: tcpip.Address(""),
Options: nil,
}
- // Leave an empty options slice as nil.
- if hdrLen > header.IPv4MinimumSize {
- fields.Options = *l.Options
- }
if l.TOS != nil {
fields.TOS = *l.TOS
}
@@ -373,18 +367,31 @@ func (l *IPv4) ToBytes() ([]byte, error) {
if l.DstAddr != nil {
fields.DstAddr = *l.DstAddr
}
- if l.Checksum != nil {
- fields.Checksum = *l.Checksum
- }
+
h.Encode(fields)
- if l.Checksum == nil {
- h.SetChecksum(^h.CalculateChecksum())
+
+ // Put raw option bytes from test definition in header. Options as raw bytes
+ // allows us to serialize malformed options, which is not possible with
+ // the provided serialization functions.
+ if l.Options != nil {
+ h.SetHeaderLength(h.HeaderLength() + uint8(len(*l.Options)))
+ if got, want := copy(h.Options(), *l.Options), len(*l.Options); got != want {
+ return nil, fmt.Errorf("failed to copy option bytes into header, got %d want %d", got, want)
+ }
}
+
// Encode cannot set this incorrectly so we need to overwrite what it wrote
// in order to test handling of a bad IHL value.
if l.IHL != nil {
h.SetHeaderLength(*l.IHL)
}
+
+ if l.Checksum == nil {
+ h.SetChecksum(^h.CalculateChecksum())
+ } else {
+ h.SetChecksum(*l.Checksum)
+ }
+
return h, nil
}
@@ -830,7 +837,9 @@ func (l *ICMPv6) ToBytes() ([]byte, error) {
if l.Code != nil {
h.SetCode(*l.Code)
}
- copy(h.NDPPayload(), l.Payload)
+ if n := copy(h.MessageBody(), l.Payload); n != len(l.Payload) {
+ panic(fmt.Sprintf("copied %d bytes, expected to copy %d bytes", n, len(l.Payload)))
+ }
if l.Checksum != nil {
h.SetChecksum(*l.Checksum)
} else {
@@ -876,7 +885,7 @@ func parseICMPv6(b []byte) (Layer, layerParser) {
Type: ICMPv6Type(h.Type()),
Code: ICMPv6Code(h.Code()),
Checksum: Uint16(h.Checksum()),
- Payload: h.NDPPayload(),
+ Payload: h.MessageBody(),
}
return &icmpv6, nil
}
diff --git a/test/packetimpact/testbench/rawsockets.go b/test/packetimpact/testbench/rawsockets.go
index 193bb2dc8..1ac96626a 100644
--- a/test/packetimpact/testbench/rawsockets.go
+++ b/test/packetimpact/testbench/rawsockets.go
@@ -38,13 +38,27 @@ func htons(x uint16) uint16 {
}
// NewSniffer creates a Sniffer connected to *device.
-func NewSniffer(t *testing.T) (Sniffer, error) {
+func (n *DUTTestNet) NewSniffer(t *testing.T) (Sniffer, error) {
t.Helper()
+ ifInfo, err := net.InterfaceByName(n.LocalDevName)
+ if err != nil {
+ return Sniffer{}, err
+ }
+
+ var haddr [8]byte
+ copy(haddr[:], ifInfo.HardwareAddr)
+ sa := unix.SockaddrLinklayer{
+ Protocol: htons(unix.ETH_P_ALL),
+ Ifindex: ifInfo.Index,
+ }
snifferFd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_RAW, int(htons(unix.ETH_P_ALL)))
if err != nil {
return Sniffer{}, err
}
+ if err := unix.Bind(snifferFd, &sa); err != nil {
+ return Sniffer{}, err
+ }
if err := unix.SetsockoptInt(snifferFd, unix.SOL_SOCKET, unix.SO_RCVBUFFORCE, 1); err != nil {
t.Fatalf("can't set sockopt SO_RCVBUFFORCE to 1: %s", err)
}
@@ -60,7 +74,8 @@ func NewSniffer(t *testing.T) (Sniffer, error) {
// packet too large for the buffer arrives, the test will get a fatal error.
const maxReadSize int = 65536
-// Recv tries to read one frame until the timeout is up.
+// Recv tries to read one frame until the timeout is up. If the timeout given
+// is 0, then no read attempt will be made.
func (s *Sniffer) Recv(t *testing.T, timeout time.Duration) []byte {
t.Helper()
@@ -73,9 +88,13 @@ func (s *Sniffer) Recv(t *testing.T, timeout time.Duration) []byte {
whole, frac := math.Modf(timeout.Seconds())
tv := unix.Timeval{
Sec: int64(whole),
- Usec: int64(frac * float64(time.Microsecond/time.Second)),
+ Usec: int64(frac * float64(time.Second/time.Microsecond)),
+ }
+ // The following should never happen, but having this guard here is better
+ // than blocking indefinitely in the future.
+ if tv.Sec == 0 && tv.Usec == 0 {
+ t.Fatal("setting SO_RCVTIMEO to 0 means blocking indefinitely")
}
-
if err := unix.SetsockoptTimeval(s.fd, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &tv); err != nil {
t.Fatalf("can't setsockopt SO_RCVTIMEO: %s", err)
}
@@ -136,10 +155,10 @@ type Injector struct {
}
// NewInjector creates a new injector on *device.
-func NewInjector(t *testing.T) (Injector, error) {
+func (n *DUTTestNet) NewInjector(t *testing.T) (Injector, error) {
t.Helper()
- ifInfo, err := net.InterfaceByName(LocalDevice)
+ ifInfo, err := net.InterfaceByName(n.LocalDevName)
if err != nil {
return Injector{}, err
}
@@ -147,7 +166,7 @@ func NewInjector(t *testing.T) (Injector, error) {
var haddr [8]byte
copy(haddr[:], ifInfo.HardwareAddr)
sa := unix.SockaddrLinklayer{
- Protocol: unix.ETH_P_IP,
+ Protocol: htons(unix.ETH_P_IP),
Ifindex: ifInfo.Index,
Halen: uint8(len(ifInfo.HardwareAddr)),
Addr: haddr,
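The Recv timeout fix above is easy to check by hand: time.Second/time.Microsecond is one million (microseconds per second), so the fractional part of the timeout becomes a sensible Usec value, while the old time.Microsecond/time.Second truncates to zero under integer division. A minimal sketch:

    package main

    import (
        "fmt"
        "math"
        "time"
    )

    func main() {
        timeout := 2500 * time.Millisecond
        whole, frac := math.Modf(timeout.Seconds())
        // Correct conversion: 1e6 microseconds per second.
        fmt.Println(int64(whole), int64(frac*float64(time.Second/time.Microsecond))) // 2 500000
        // The old expression is integer division on Durations and evaluates to 0.
        fmt.Println(int64(time.Microsecond / time.Second)) // 0
    }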
diff --git a/test/packetimpact/testbench/testbench.go b/test/packetimpact/testbench/testbench.go
index c1db95d8c..891897d55 100644
--- a/test/packetimpact/testbench/testbench.go
+++ b/test/packetimpact/testbench/testbench.go
@@ -17,108 +17,105 @@
package testbench
import (
+ "encoding/json"
"flag"
"fmt"
"math/rand"
"net"
- "os/exec"
"testing"
"time"
-
- "gvisor.dev/gvisor/test/packetimpact/netdevs"
)
var (
// Native indicates that the test is being run natively.
Native = false
- // LocalDevice is the device that testbench uses to inject traffic.
- LocalDevice = ""
- // RemoteDevice is the device name on the DUT, individual tests can
- // use the name to construct tests.
- RemoteDevice = ""
+ // RPCKeepalive is the gRPC keepalive.
+ RPCKeepalive = 10 * time.Second
+ // RPCTimeout is the gRPC timeout.
+ RPCTimeout = 100 * time.Millisecond
+
+ // dutTestNetsJSON is the JSON string that describes all the test networks
+ // available for the DUTs to use.
+ dutTestNetsJSON string
+ // dutTestNets is the pool among which the testbench can choose a DUT to work
+ // with.
+ dutTestNets chan *DUTTestNet
+)
+// DUTTestNet describes the test network setup on the DUT and how the
+// testbench should connect to an existing DUT.
+type DUTTestNet struct {
+ // LocalMAC is the local MAC address on the test network.
+ LocalMAC net.HardwareAddr
+ // RemoteMAC is the DUT's MAC address on the test network.
+ RemoteMAC net.HardwareAddr
// LocalIPv4 is the local IPv4 address on the test network.
- LocalIPv4 = ""
+ LocalIPv4 net.IP
// RemoteIPv4 is the DUT's IPv4 address on the test network.
- RemoteIPv4 = ""
+ RemoteIPv4 net.IP
// IPv4PrefixLength is the network prefix length of the IPv4 test network.
- IPv4PrefixLength = 0
-
+ IPv4PrefixLength int
// LocalIPv6 is the local IPv6 address on the test network.
- LocalIPv6 = ""
+ LocalIPv6 net.IP
// RemoteIPv6 is the DUT's IPv6 address on the test network.
- RemoteIPv6 = ""
+ RemoteIPv6 net.IP
+ // LocalDevID is the ID of the local interface on the test network.
+ LocalDevID uint32
+ // RemoteDevID is the ID of the remote interface on the test network.
+ RemoteDevID uint32
+ // LocalDevName is the device that testbench uses to inject traffic.
+ LocalDevName string
+ // RemoteDevName is the device name on the DUT, individual tests can
+ // use the name to construct tests.
+ RemoteDevName string
- // LocalInterfaceID is the ID of the local interface on the test network.
- LocalInterfaceID uint32
- // RemoteInterfaceID is the ID of the remote interface on the test network.
- //
- // Not using uint32 because package flag does not support uint32.
- RemoteInterfaceID uint64
-
- // LocalMAC is the local MAC address on the test network.
- LocalMAC = ""
- // RemoteMAC is the DUT's MAC address on the test network.
- RemoteMAC = ""
+ // The following two fields are actually on the control network instead
+ // of the test network; they are included here for convenience.
// POSIXServerIP is the POSIX server's IP address on the control network.
- POSIXServerIP = ""
+ POSIXServerIP net.IP
// POSIXServerPort is the UDP port the POSIX server is bound to on the
// control network.
- POSIXServerPort = 40000
-
- // RPCKeepalive is the gRPC keepalive.
- RPCKeepalive = 10 * time.Second
- // RPCTimeout is the gRPC timeout.
- RPCTimeout = 100 * time.Millisecond
-)
+ POSIXServerPort uint16
+}
-// RegisterFlags defines flags and associates them with the package-level
+// registerFlags defines flags and associates them with the package-level
// exported variables above. It should be called by tests in their init
// functions.
-func RegisterFlags(fs *flag.FlagSet) {
- fs.StringVar(&POSIXServerIP, "posix_server_ip", POSIXServerIP, "ip address to listen to for UDP commands")
- fs.IntVar(&POSIXServerPort, "posix_server_port", POSIXServerPort, "port to listen to for UDP commands")
+func registerFlags(fs *flag.FlagSet) {
+ fs.BoolVar(&Native, "native", Native, "whether the test is running natively")
fs.DurationVar(&RPCTimeout, "rpc_timeout", RPCTimeout, "gRPC timeout")
fs.DurationVar(&RPCKeepalive, "rpc_keepalive", RPCKeepalive, "gRPC keepalive")
- fs.StringVar(&LocalIPv4, "local_ipv4", LocalIPv4, "local IPv4 address for test packets")
- fs.StringVar(&RemoteIPv4, "remote_ipv4", RemoteIPv4, "remote IPv4 address for test packets")
- fs.StringVar(&RemoteIPv6, "remote_ipv6", RemoteIPv6, "remote IPv6 address for test packets")
- fs.StringVar(&RemoteMAC, "remote_mac", RemoteMAC, "remote mac address for test packets")
- fs.StringVar(&LocalDevice, "local_device", LocalDevice, "local device to inject traffic")
- fs.StringVar(&RemoteDevice, "remote_device", RemoteDevice, "remote device on the DUT")
- fs.BoolVar(&Native, "native", Native, "whether the test is running natively")
- fs.Uint64Var(&RemoteInterfaceID, "remote_interface_id", RemoteInterfaceID, "remote interface ID for test packets")
+ fs.StringVar(&dutTestNetsJSON, "dut_test_nets_json", dutTestNetsJSON, "JSON that describes the DUT test networks")
}
-// genPseudoFlags populates flag-like global config based on real flags.
-//
-// genPseudoFlags must only be called after flag.Parse.
-func genPseudoFlags() error {
- out, err := exec.Command("ip", "addr", "show").CombinedOutput()
- if err != nil {
- return fmt.Errorf("listing devices: %q: %w", string(out), err)
- }
- devs, err := netdevs.ParseDevices(string(out))
- if err != nil {
- return fmt.Errorf("parsing devices: %w", err)
+// Initialize initializes the testbench; it parses the flags and sets up the
+// pool of test networks for the testbench's later use.
+func Initialize(fs *flag.FlagSet) {
+ registerFlags(fs)
+ flag.Parse()
+ if err := loadDUTTestNets(); err != nil {
+ panic(err)
}
+}
- _, deviceInfo, err := netdevs.FindDeviceByIP(net.ParseIP(LocalIPv4), devs)
- if err != nil {
- return fmt.Errorf("can't find deviceInfo: %w", err)
+// loadDUTTestNets loads the available DUT test networks from the JSON string;
+// it must be called after flag.Parse().
+func loadDUTTestNets() error {
+ var parsedTestNets []DUTTestNet
+ if err := json.Unmarshal([]byte(dutTestNetsJSON), &parsedTestNets); err != nil {
+ return fmt.Errorf("failed to unmarshal JSON: %w", err)
}
-
- LocalMAC = deviceInfo.MAC.String()
- LocalIPv6 = deviceInfo.IPv6Addr.String()
- LocalInterfaceID = deviceInfo.ID
-
- if deviceInfo.IPv4Net != nil {
- IPv4PrefixLength, _ = deviceInfo.IPv4Net.Mask.Size()
- } else {
- IPv4PrefixLength, _ = net.ParseIP(LocalIPv4).DefaultMask().Size()
+ if got, want := len(parsedTestNets), 1; got < want {
+ return fmt.Errorf("got %d DUTs, the test requires at least %d DUTs", got, want)
+ }
+ // Using a buffered channel as semaphore
+ dutTestNets = make(chan *DUTTestNet, len(parsedTestNets))
+ for i := range parsedTestNets {
+ parsedTestNets[i].LocalIPv4 = parsedTestNets[i].LocalIPv4.To4()
+ parsedTestNets[i].RemoteIPv4 = parsedTestNets[i].RemoteIPv4.To4()
+ dutTestNets <- &parsedTestNets[i]
}
-
return nil
}
@@ -132,3 +129,15 @@ func GenerateRandomPayload(t *testing.T, n int) []byte {
}
return buf
}
+
+// GetDUTTestNet gets a usable DUTTestNet; the function blocks until one
+// becomes available.
+func GetDUTTestNet() *DUTTestNet {
+ return <-dutTestNets
+}
+
+// Release releases the DUTTestNet back to the pool so that another test
+// can use it.
+func (n *DUTTestNet) Release() {
+ dutTestNets <- n
+}
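The following is a minimal, illustrative sketch (not part of the change) of how a test is expected to use the new testbench flow: Initialize replaces RegisterFlags, and per-connection addresses come from the DUT's test network (dut.Net) rather than package-level flags. The assumption that NewDUT draws a DUTTestNet from the pool (GetDUTTestNet) and returns it via Release on teardown is inferred from this diff, not verified against the rest of the tree; the package and test names below are hypothetical.

package example_test

import (
	"flag"
	"testing"

	"golang.org/x/sys/unix"
	"gvisor.dev/gvisor/test/packetimpact/testbench"
)

func init() {
	// Parse flags and load the pool of DUT test networks.
	testbench.Initialize(flag.CommandLine)
}

func TestExample(t *testing.T) {
	// NewDUT is assumed to take a DUTTestNet from the pool and expose it as
	// dut.Net; the network is assumed to be returned via Release when the DUT
	// is torn down at the end of the test.
	dut := testbench.NewDUT(t)
	listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
	defer dut.Close(t, listenFD)

	// Connections are built from the DUT's own test network rather than
	// package-level address flags.
	conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
	defer conn.Close(t)
	conn.Connect(t)
}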
diff --git a/test/packetimpact/tests/BUILD b/test/packetimpact/tests/BUILD
index 33bd070c1..373ab8d2f 100644
--- a/test/packetimpact/tests/BUILD
+++ b/test/packetimpact/tests/BUILD
@@ -366,9 +366,20 @@ packetimpact_testbench(
],
)
+packetimpact_testbench(
+ name = "tcp_zero_receive_window",
+ srcs = ["tcp_zero_receive_window_test.go"],
+ deps = [
+ "//pkg/tcpip/header",
+ "//test/packetimpact/testbench",
+ "@org_golang_x_sys//unix:go_default_library",
+ ],
+)
+
validate_all_tests()
[packetimpact_go_test(
name = t.name,
expect_netstack_failure = hasattr(t, "expect_netstack_failure"),
+ num_duts = t.num_duts if hasattr(t, "num_duts") else 1,
) for t in ALL_TESTS]
diff --git a/test/packetimpact/tests/fin_wait2_timeout_test.go b/test/packetimpact/tests/fin_wait2_timeout_test.go
index a61054c2c..11f0fcd1e 100644
--- a/test/packetimpact/tests/fin_wait2_timeout_test.go
+++ b/test/packetimpact/tests/fin_wait2_timeout_test.go
@@ -25,7 +25,7 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
func TestFinWait2Timeout(t *testing.T) {
@@ -38,10 +38,9 @@ func TestFinWait2Timeout(t *testing.T) {
} {
t.Run(tt.description, func(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFd)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
diff --git a/test/packetimpact/tests/icmpv6_param_problem_test.go b/test/packetimpact/tests/icmpv6_param_problem_test.go
index 2d59d552d..40d7a491d 100644
--- a/test/packetimpact/tests/icmpv6_param_problem_test.go
+++ b/test/packetimpact/tests/icmpv6_param_problem_test.go
@@ -25,15 +25,14 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestICMPv6ParamProblemTest sends a packet with a bad next header. The DUT
// should respond with an ICMPv6 Parameter Problem message.
func TestICMPv6ParamProblemTest(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- conn := testbench.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
+ conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
defer conn.Close(t)
ipv6 := testbench.IPv6{
// 254 is reserved and used for experimentation and testing. This should
diff --git a/test/packetimpact/tests/ipv4_fragment_reassembly_test.go b/test/packetimpact/tests/ipv4_fragment_reassembly_test.go
index 40f899065..d2203082d 100644
--- a/test/packetimpact/tests/ipv4_fragment_reassembly_test.go
+++ b/test/packetimpact/tests/ipv4_fragment_reassembly_test.go
@@ -27,17 +27,17 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
type fragmentInfo struct {
offset uint16
size uint16
more uint8
+ id uint16
}
func TestIPv4FragmentReassembly(t *testing.T) {
- const fragmentID = 42
icmpv4ProtoNum := uint8(header.ICMPv4ProtocolNumber)
tests := []struct {
@@ -45,32 +45,78 @@ func TestIPv4FragmentReassembly(t *testing.T) {
ipPayloadLen int
fragments []fragmentInfo
expectReply bool
+ skip bool
+ skipReason string
}{
{
description: "basic reassembly",
- ipPayloadLen: 2000,
+ ipPayloadLen: 3000,
fragments: []fragmentInfo{
- {offset: 0, size: 1000, more: header.IPv4FlagMoreFragments},
- {offset: 1000, size: 1000, more: 0},
+ {offset: 0, size: 1000, id: 5, more: header.IPv4FlagMoreFragments},
+ {offset: 1000, size: 1000, id: 5, more: header.IPv4FlagMoreFragments},
+ {offset: 2000, size: 1000, id: 5, more: 0},
},
expectReply: true,
},
{
description: "out of order fragments",
- ipPayloadLen: 2000,
+ ipPayloadLen: 3000,
fragments: []fragmentInfo{
- {offset: 1000, size: 1000, more: 0},
- {offset: 0, size: 1000, more: header.IPv4FlagMoreFragments},
+ {offset: 2000, size: 1000, id: 6, more: 0},
+ {offset: 0, size: 1000, id: 6, more: header.IPv4FlagMoreFragments},
+ {offset: 1000, size: 1000, id: 6, more: header.IPv4FlagMoreFragments},
},
expectReply: true,
},
+ {
+ description: "duplicated fragments",
+ ipPayloadLen: 3000,
+ fragments: []fragmentInfo{
+ {offset: 0, size: 1000, id: 7, more: header.IPv4FlagMoreFragments},
+ {offset: 1000, size: 1000, id: 7, more: header.IPv4FlagMoreFragments},
+ {offset: 1000, size: 1000, id: 7, more: header.IPv4FlagMoreFragments},
+ {offset: 2000, size: 1000, id: 7, more: 0},
+ },
+ expectReply: true,
+ skip: true,
+ skipReason: "gvisor.dev/issues/4971",
+ },
+ {
+ description: "fragment subset",
+ ipPayloadLen: 3000,
+ fragments: []fragmentInfo{
+ {offset: 0, size: 1000, id: 8, more: header.IPv4FlagMoreFragments},
+ {offset: 1000, size: 1000, id: 8, more: header.IPv4FlagMoreFragments},
+ {offset: 512, size: 256, id: 8, more: header.IPv4FlagMoreFragments},
+ {offset: 2000, size: 1000, id: 8, more: 0},
+ },
+ expectReply: true,
+ skip: true,
+ skipReason: "gvisor.dev/issues/4971",
+ },
+ {
+ description: "fragment overlap",
+ ipPayloadLen: 3000,
+ fragments: []fragmentInfo{
+ {offset: 0, size: 1000, id: 9, more: header.IPv4FlagMoreFragments},
+ {offset: 1512, size: 1000, id: 9, more: header.IPv4FlagMoreFragments},
+ {offset: 1000, size: 1000, id: 9, more: header.IPv4FlagMoreFragments},
+ {offset: 2000, size: 1000, id: 9, more: 0},
+ },
+ expectReply: false,
+ skip: true,
+ skipReason: "gvisor.dev/issues/4971",
+ },
}
for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
+			if test.skip {
+				t.Skipf("%s test skipped: %s", test.description, test.skipReason)
+			}
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- conn := testbench.NewIPv4Conn(t, testbench.IPv4{}, testbench.IPv4{})
+ conn := dut.Net.NewIPv4Conn(t, testbench.IPv4{}, testbench.IPv4{})
defer conn.Close(t)
data := make([]byte, test.ipPayloadLen)
@@ -96,7 +142,7 @@ func TestIPv4FragmentReassembly(t *testing.T) {
Protocol: &icmpv4ProtoNum,
FragmentOffset: testbench.Uint16(fragment.offset),
Flags: testbench.Uint8(fragment.more),
- ID: testbench.Uint16(fragmentID),
+ ID: testbench.Uint16(fragment.id),
},
&testbench.Payload{
Bytes: data[fragment.offset:][:fragment.size],
@@ -115,7 +161,7 @@ func TestIPv4FragmentReassembly(t *testing.T) {
}, time.Second)
if err != nil {
// Either an unexpected frame was received, or none at all.
- if bytesReceived < test.ipPayloadLen {
+ if test.expectReply && bytesReceived < test.ipPayloadLen {
t.Fatalf("received %d bytes out of %d, then conn.ExpectFrame(_, _, time.Second) failed with %s", bytesReceived, test.ipPayloadLen, err)
}
break
diff --git a/test/packetimpact/tests/ipv4_id_uniqueness_test.go b/test/packetimpact/tests/ipv4_id_uniqueness_test.go
index 7f7a768d3..a63b41366 100644
--- a/test/packetimpact/tests/ipv4_id_uniqueness_test.go
+++ b/test/packetimpact/tests/ipv4_id_uniqueness_test.go
@@ -28,7 +28,7 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
func recvTCPSegment(t *testing.T, conn *testbench.TCPIPv4, expect *testbench.TCP, expectPayload *testbench.Payload) (uint16, error) {
@@ -67,12 +67,10 @@ func TestIPv4RetransmitIdentificationUniqueness(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
-
listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFD)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
diff --git a/test/packetimpact/tests/ipv6_fragment_icmp_error_test.go b/test/packetimpact/tests/ipv6_fragment_icmp_error_test.go
index e058fb0d8..a37867e85 100644
--- a/test/packetimpact/tests/ipv6_fragment_icmp_error_test.go
+++ b/test/packetimpact/tests/ipv6_fragment_icmp_error_test.go
@@ -16,7 +16,6 @@ package ipv6_fragment_icmp_error_test
import (
"flag"
- "net"
"testing"
"time"
@@ -35,10 +34,10 @@ const (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
-func fragmentedICMPEchoRequest(t *testing.T, conn *testbench.Connection, firstPayloadLength uint16, payload []byte, secondFragmentOffset uint16) ([]testbench.Layers, [][]byte) {
+func fragmentedICMPEchoRequest(t *testing.T, n *testbench.DUTTestNet, conn *testbench.Connection, firstPayloadLength uint16, payload []byte, secondFragmentOffset uint16) ([]testbench.Layers, [][]byte) {
t.Helper()
icmpv6Header := header.ICMPv6(make([]byte, header.ICMPv6EchoMinimumSize))
@@ -48,8 +47,8 @@ func fragmentedICMPEchoRequest(t *testing.T, conn *testbench.Connection, firstPa
icmpv6Header.SetSequence(0)
cksum := header.ICMPv6Checksum(
icmpv6Header,
- tcpip.Address(net.ParseIP(testbench.LocalIPv6).To16()),
- tcpip.Address(net.ParseIP(testbench.RemoteIPv6).To16()),
+ tcpip.Address(n.LocalIPv6),
+ tcpip.Address(n.RemoteIPv6),
buffer.NewVectorisedView(len(payload), []buffer.View{payload}),
)
icmpv6Header.SetChecksum(cksum)
@@ -120,13 +119,13 @@ func TestIPv6ICMPEchoRequestFragmentReassembly(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- ipv6Conn := testbench.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
+ ipv6Conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
conn := (*testbench.Connection)(&ipv6Conn)
defer ipv6Conn.Close(t)
- fragments, _ := fragmentedICMPEchoRequest(t, conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)
+ fragments, _ := fragmentedICMPEchoRequest(t, dut.Net, conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)
for _, i := range test.sendFrameOrder {
conn.SendFrame(t, fragments[i-1])
@@ -222,13 +221,13 @@ func TestIPv6FragmentReassemblyTimeout(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- ipv6Conn := testbench.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
+ ipv6Conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
conn := (*testbench.Connection)(&ipv6Conn)
defer ipv6Conn.Close(t)
- fragments, ipv6Bytes := fragmentedICMPEchoRequest(t, conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)
+ fragments, ipv6Bytes := fragmentedICMPEchoRequest(t, dut.Net, conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)
for _, i := range test.sendFrameOrder {
conn.SendFrame(t, fragments[i-1])
@@ -318,13 +317,13 @@ func TestIPv6FragmentParamProblem(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- ipv6Conn := testbench.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
+ ipv6Conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
conn := (*testbench.Connection)(&ipv6Conn)
defer ipv6Conn.Close(t)
- fragments, ipv6Bytes := fragmentedICMPEchoRequest(t, conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)
+ fragments, ipv6Bytes := fragmentedICMPEchoRequest(t, dut.Net, conn, test.firstPayloadLength, test.payload, test.secondFragmentOffset)
for _, i := range test.sendFrameOrder {
conn.SendFrame(t, fragments[i-1])
diff --git a/test/packetimpact/tests/ipv6_fragment_reassembly_test.go b/test/packetimpact/tests/ipv6_fragment_reassembly_test.go
index eb56a53f7..dd98ee7a1 100644
--- a/test/packetimpact/tests/ipv6_fragment_reassembly_test.go
+++ b/test/packetimpact/tests/ipv6_fragment_reassembly_test.go
@@ -17,7 +17,6 @@ package ipv6_fragment_reassembly_test
import (
"flag"
"math/rand"
- "net"
"testing"
"time"
@@ -29,17 +28,17 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
type fragmentInfo struct {
offset uint16
size uint16
more bool
+ id uint32
}
func TestIPv6FragmentReassembly(t *testing.T) {
- const fragmentID = 42
icmpv6ProtoNum := header.IPv6ExtensionHeaderIdentifier(header.ICMPv6ProtocolNumber)
tests := []struct {
@@ -50,10 +49,11 @@ func TestIPv6FragmentReassembly(t *testing.T) {
}{
{
description: "basic reassembly",
- ipPayloadLen: 1500,
+ ipPayloadLen: 3000,
fragments: []fragmentInfo{
- {offset: 0, size: 760, more: true},
- {offset: 760, size: 740, more: false},
+ {offset: 0, size: 1000, id: 100, more: true},
+ {offset: 1000, size: 1000, id: 100, more: true},
+ {offset: 2000, size: 1000, id: 100, more: false},
},
expectReply: true,
},
@@ -61,23 +61,55 @@ func TestIPv6FragmentReassembly(t *testing.T) {
description: "out of order fragments",
ipPayloadLen: 3000,
fragments: []fragmentInfo{
- {offset: 0, size: 1024, more: true},
- {offset: 2048, size: 952, more: false},
- {offset: 1024, size: 1024, more: true},
+ {offset: 0, size: 1000, id: 101, more: true},
+ {offset: 2000, size: 1000, id: 101, more: false},
+ {offset: 1000, size: 1000, id: 101, more: true},
+ },
+ expectReply: true,
+ },
+ {
+ description: "duplicated fragments",
+ ipPayloadLen: 3000,
+ fragments: []fragmentInfo{
+ {offset: 0, size: 1000, id: 102, more: true},
+ {offset: 1000, size: 1000, id: 102, more: true},
+ {offset: 1000, size: 1000, id: 102, more: true},
+ {offset: 2000, size: 1000, id: 102, more: false},
+ },
+ expectReply: true,
+ },
+ {
+ description: "fragment subset",
+ ipPayloadLen: 3000,
+ fragments: []fragmentInfo{
+ {offset: 0, size: 1000, id: 103, more: true},
+ {offset: 1000, size: 1000, id: 103, more: true},
+ {offset: 512, size: 256, id: 103, more: true},
+ {offset: 2000, size: 1000, id: 103, more: false},
},
expectReply: true,
},
+ {
+ description: "fragment overlap",
+ ipPayloadLen: 3000,
+ fragments: []fragmentInfo{
+ {offset: 0, size: 1000, id: 104, more: true},
+ {offset: 1512, size: 1000, id: 104, more: true},
+ {offset: 1000, size: 1000, id: 104, more: true},
+ {offset: 2000, size: 1000, id: 104, more: false},
+ },
+ expectReply: false,
+ },
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- conn := testbench.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
+ conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
defer conn.Close(t)
- lIP := tcpip.Address(net.ParseIP(testbench.LocalIPv6).To16())
- rIP := tcpip.Address(net.ParseIP(testbench.RemoteIPv6).To16())
+ lIP := tcpip.Address(dut.Net.LocalIPv6)
+ rIP := tcpip.Address(dut.Net.RemoteIPv6)
data := make([]byte, test.ipPayloadLen)
icmp := header.ICMPv6(data[:header.ICMPv6HeaderSize])
@@ -103,7 +135,7 @@ func TestIPv6FragmentReassembly(t *testing.T) {
NextHeader: &icmpv6ProtoNum,
FragmentOffset: testbench.Uint16(fragment.offset / header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit),
MoreFragments: testbench.Bool(fragment.more),
- Identification: testbench.Uint32(fragmentID),
+ Identification: testbench.Uint32(fragment.id),
},
&testbench.Payload{
Bytes: data[fragment.offset:][:fragment.size],
@@ -120,7 +152,7 @@ func TestIPv6FragmentReassembly(t *testing.T) {
}, time.Second)
if err != nil {
// Either an unexpected frame was received, or none at all.
- if bytesReceived < test.ipPayloadLen {
+ if test.expectReply && bytesReceived < test.ipPayloadLen {
t.Fatalf("received %d bytes out of %d, then conn.ExpectFrame(_, _, time.Second) failed with %s", bytesReceived, test.ipPayloadLen, err)
}
break
diff --git a/test/packetimpact/tests/ipv6_unknown_options_action_test.go b/test/packetimpact/tests/ipv6_unknown_options_action_test.go
index e79d74476..cb5396417 100644
--- a/test/packetimpact/tests/ipv6_unknown_options_action_test.go
+++ b/test/packetimpact/tests/ipv6_unknown_options_action_test.go
@@ -27,7 +27,7 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
func mkHopByHopOptionsExtHdr(optType byte) testbench.Layer {
@@ -141,8 +141,7 @@ func TestIPv6UnknownOptionAction(t *testing.T) {
} {
t.Run(tt.description, func(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- ipv6Conn := testbench.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
+ ipv6Conn := dut.Net.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
conn := (*testbench.Connection)(&ipv6Conn)
defer ipv6Conn.Close(t)
diff --git a/test/packetimpact/tests/tcp_cork_mss_test.go b/test/packetimpact/tests/tcp_cork_mss_test.go
index 8feea4a82..a7ba5035e 100644
--- a/test/packetimpact/tests/tcp_cork_mss_test.go
+++ b/test/packetimpact/tests/tcp_cork_mss_test.go
@@ -25,16 +25,15 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestTCPCorkMSS tests for segment coalesce and split as per MSS.
func TestTCPCorkMSS(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFD)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
const mss = uint32(header.TCPDefaultMSS)
diff --git a/test/packetimpact/tests/tcp_handshake_window_size_test.go b/test/packetimpact/tests/tcp_handshake_window_size_test.go
index 22937d92f..5d1266f3c 100644
--- a/test/packetimpact/tests/tcp_handshake_window_size_test.go
+++ b/test/packetimpact/tests/tcp_handshake_window_size_test.go
@@ -25,17 +25,16 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestTCPHandshakeWindowSize tests if the stack is honoring the window size
// communicated during handshake.
func TestTCPHandshakeWindowSize(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFD)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
// Start handshake with zero window size.
diff --git a/test/packetimpact/tests/tcp_linger_test.go b/test/packetimpact/tests/tcp_linger_test.go
index b9a0409aa..bc4b64388 100644
--- a/test/packetimpact/tests/tcp_linger_test.go
+++ b/test/packetimpact/tests/tcp_linger_test.go
@@ -27,12 +27,12 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
func createSocket(t *testing.T, dut testbench.DUT) (int32, int32, testbench.TCPIPv4) {
listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
conn.Connect(t)
acceptFD, _ := dut.Accept(t, listenFD)
return acceptFD, listenFD, conn
@@ -41,7 +41,6 @@ func createSocket(t *testing.T, dut testbench.DUT) (int32, int32, testbench.TCPI
func closeAll(t *testing.T, dut testbench.DUT, listenFD int32, conn testbench.TCPIPv4) {
conn.Close(t)
dut.Close(t, listenFD)
- dut.TearDown()
}
// lingerDuration is the timeout value used with SO_LINGER socket option.
@@ -266,5 +265,4 @@ func TestTCPLingerNonEstablished(t *testing.T) {
if diff > lingerDuration {
t.Errorf("expected close to return within %s, but returned after %s", lingerDuration, diff)
}
- dut.TearDown()
}
diff --git a/test/packetimpact/tests/tcp_network_unreachable_test.go b/test/packetimpact/tests/tcp_network_unreachable_test.go
index 8a1fe1279..6cd6d2edf 100644
--- a/test/packetimpact/tests/tcp_network_unreachable_test.go
+++ b/test/packetimpact/tests/tcp_network_unreachable_test.go
@@ -17,7 +17,6 @@ package tcp_synsent_reset_test
import (
"context"
"flag"
- "net"
"syscall"
"testing"
"time"
@@ -28,7 +27,7 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestTCPSynSentUnreachable verifies that TCP connections fail immediately when
@@ -37,17 +36,16 @@ func init() {
func TestTCPSynSentUnreachable(t *testing.T) {
// Create the DUT and connection.
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- clientFD, clientPort := dut.CreateBoundSocket(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, net.ParseIP(testbench.RemoteIPv4))
+ clientFD, clientPort := dut.CreateBoundSocket(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, dut.Net.RemoteIPv4)
port := uint16(9001)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{SrcPort: &port, DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort, DstPort: &port})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{SrcPort: &port, DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort, DstPort: &port})
defer conn.Close(t)
// Bring the DUT to SYN-SENT state with a non-blocking connect.
ctx, cancel := context.WithTimeout(context.Background(), testbench.RPCTimeout)
defer cancel()
sa := unix.SockaddrInet4{Port: int(port)}
- copy(sa.Addr[:], net.IP(net.ParseIP(testbench.LocalIPv4)).To4())
+ copy(sa.Addr[:], dut.Net.LocalIPv4)
if _, err := dut.ConnectWithErrno(ctx, t, clientFD, &sa); err != syscall.Errno(unix.EINPROGRESS) {
t.Errorf("expected connect to fail with EINPROGRESS, but got %v", err)
}
@@ -91,9 +89,8 @@ func TestTCPSynSentUnreachable(t *testing.T) {
func TestTCPSynSentUnreachable6(t *testing.T) {
// Create the DUT and connection.
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- clientFD, clientPort := dut.CreateBoundSocket(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, net.ParseIP(testbench.RemoteIPv6))
- conn := testbench.NewTCPIPv6(t, testbench.TCP{DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort})
+ clientFD, clientPort := dut.CreateBoundSocket(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, dut.Net.RemoteIPv6)
+ conn := dut.Net.NewTCPIPv6(t, testbench.TCP{DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort})
defer conn.Close(t)
// Bring the DUT to SYN-SENT state with a non-blocking connect.
@@ -101,9 +98,9 @@ func TestTCPSynSentUnreachable6(t *testing.T) {
defer cancel()
sa := unix.SockaddrInet6{
Port: int(conn.SrcPort()),
- ZoneId: uint32(testbench.RemoteInterfaceID),
+ ZoneId: dut.Net.RemoteDevID,
}
- copy(sa.Addr[:], net.IP(net.ParseIP(testbench.LocalIPv6)).To16())
+ copy(sa.Addr[:], dut.Net.LocalIPv6)
if _, err := dut.ConnectWithErrno(ctx, t, clientFD, &sa); err != syscall.Errno(unix.EINPROGRESS) {
t.Errorf("expected connect to fail with EINPROGRESS, but got %v", err)
}
diff --git a/test/packetimpact/tests/tcp_noaccept_close_rst_test.go b/test/packetimpact/tests/tcp_noaccept_close_rst_test.go
index 82b7a85ff..f0af5352d 100644
--- a/test/packetimpact/tests/tcp_noaccept_close_rst_test.go
+++ b/test/packetimpact/tests/tcp_noaccept_close_rst_test.go
@@ -25,14 +25,13 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
func TestTcpNoAcceptCloseReset(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
conn.Connect(t)
defer conn.Close(t)
dut.Close(t, listenFd)
diff --git a/test/packetimpact/tests/tcp_outside_the_window_test.go b/test/packetimpact/tests/tcp_outside_the_window_test.go
index 08f759f7c..1b041932a 100644
--- a/test/packetimpact/tests/tcp_outside_the_window_test.go
+++ b/test/packetimpact/tests/tcp_outside_the_window_test.go
@@ -27,7 +27,7 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestTCPOutsideTheWindows tests the behavior of the DUT when packets arrive
@@ -62,10 +62,9 @@ func TestTCPOutsideTheWindow(t *testing.T) {
} {
t.Run(fmt.Sprintf("%s%d", tt.description, tt.seqNumOffset), func(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFD)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
acceptFD, _ := dut.Accept(t, listenFD)
diff --git a/test/packetimpact/tests/tcp_paws_mechanism_test.go b/test/packetimpact/tests/tcp_paws_mechanism_test.go
index 37f3b56dd..24d9ef4ec 100644
--- a/test/packetimpact/tests/tcp_paws_mechanism_test.go
+++ b/test/packetimpact/tests/tcp_paws_mechanism_test.go
@@ -26,15 +26,14 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
func TestPAWSMechanism(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFD)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
options := make([]byte, header.TCPOptionTSLength)
diff --git a/test/packetimpact/tests/tcp_queue_receive_in_syn_sent_test.go b/test/packetimpact/tests/tcp_queue_receive_in_syn_sent_test.go
index d9f3ea0f2..646c93216 100644
--- a/test/packetimpact/tests/tcp_queue_receive_in_syn_sent_test.go
+++ b/test/packetimpact/tests/tcp_queue_receive_in_syn_sent_test.go
@@ -20,7 +20,6 @@ import (
"encoding/hex"
"errors"
"flag"
- "net"
"sync"
"syscall"
"testing"
@@ -32,7 +31,7 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestQueueReceiveInSynSent tests receive behavior when the TCP state
@@ -50,10 +49,9 @@ func TestQueueReceiveInSynSent(t *testing.T) {
} {
t.Run(tt.description, func(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- socket, remotePort := dut.CreateBoundSocket(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, net.ParseIP(testbench.RemoteIPv4))
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ socket, remotePort := dut.CreateBoundSocket(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, dut.Net.RemoteIPv4)
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
sampleData := []byte("Sample Data")
diff --git a/test/packetimpact/tests/tcp_queue_send_in_syn_sent_test.go b/test/packetimpact/tests/tcp_queue_send_in_syn_sent_test.go
index 0ec8fd748..29e51cae3 100644
--- a/test/packetimpact/tests/tcp_queue_send_in_syn_sent_test.go
+++ b/test/packetimpact/tests/tcp_queue_send_in_syn_sent_test.go
@@ -18,7 +18,6 @@ import (
"context"
"errors"
"flag"
- "net"
"sync"
"syscall"
"testing"
@@ -30,7 +29,7 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestQueueSendInSynSent tests send behavior when the TCP state
@@ -48,10 +47,9 @@ func TestQueueSendInSynSent(t *testing.T) {
} {
t.Run(tt.description, func(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- socket, remotePort := dut.CreateBoundSocket(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, net.ParseIP(testbench.RemoteIPv4))
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ socket, remotePort := dut.CreateBoundSocket(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, dut.Net.RemoteIPv4)
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
sampleData := []byte("Sample Data")
diff --git a/test/packetimpact/tests/tcp_rcv_buf_space_test.go b/test/packetimpact/tests/tcp_rcv_buf_space_test.go
index cfbba1e8e..d6ad5cda6 100644
--- a/test/packetimpact/tests/tcp_rcv_buf_space_test.go
+++ b/test/packetimpact/tests/tcp_rcv_buf_space_test.go
@@ -26,7 +26,7 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestReduceRecvBuf tests that a packet within window is still dropped
@@ -34,10 +34,9 @@ func init() {
// segment.
func TestReduceRecvBuf(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFd)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
diff --git a/test/packetimpact/tests/tcp_reordering_test.go b/test/packetimpact/tests/tcp_reordering_test.go
index b4aeaab57..ca352dbc7 100644
--- a/test/packetimpact/tests/tcp_reordering_test.go
+++ b/test/packetimpact/tests/tcp_reordering_test.go
@@ -22,19 +22,18 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/seqnum"
- tb "gvisor.dev/gvisor/test/packetimpact/testbench"
+ "gvisor.dev/gvisor/test/packetimpact/testbench"
)
func init() {
- tb.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
func TestReorderingWindow(t *testing.T) {
- dut := tb.NewDUT(t)
- defer dut.TearDown()
+ dut := testbench.NewDUT(t)
listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFd)
- conn := tb.NewTCPIPv4(t, tb.TCP{DstPort: &remotePort}, tb.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
// Enable SACK.
@@ -54,13 +53,13 @@ func TestReorderingWindow(t *testing.T) {
acceptFd, _ := dut.Accept(t, listenFd)
defer dut.Close(t, acceptFd)
- if tb.Native {
+ if testbench.Native {
// Linux has changed its handling of reordering, force the old behavior.
dut.SetSockOpt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_CONGESTION, []byte("reno"))
}
pls := dut.GetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_MAXSEG)
- if !tb.Native {
+ if !testbench.Native {
		// netstack does not implement TCP_MAXSEG correctly. Fake it
// here. Netstack uses the max SACK size which is 32. The MSS
// option is 8 bytes, making the total 36 bytes.
@@ -75,14 +74,14 @@ func TestReorderingWindow(t *testing.T) {
for i, sn := 0, seqNum1; i < numPkts; i++ {
dut.Send(t, acceptFd, payload, 0)
- gotOne, err := conn.Expect(t, tb.TCP{SeqNum: tb.Uint32(uint32(sn))}, time.Second)
+ gotOne, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(sn))}, time.Second)
sn.UpdateForward(seqnum.Size(len(payload)))
if err != nil {
- t.Errorf("Expect #%d: %s", i+1, err)
+ t.Fatalf("Expect #%d: %s", i+1, err)
continue
}
if gotOne == nil {
- t.Errorf("#%d: expected a packet within a second but got none", i+1)
+ t.Fatalf("#%d: expected a packet within a second but got none", i+1)
}
}
@@ -97,13 +96,13 @@ func TestReorderingWindow(t *testing.T) {
seqNum1.Add(seqnum.Size(len(payload))),
seqNum1.Add(seqnum.Size(4 * len(payload))),
}}, sackBlock[sbOff:])
- conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagAck), AckNum: tb.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
// ACK first packet.
- conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagAck), AckNum: tb.Uint32(uint32(seqNum1) + uint32(len(payload)))})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1) + uint32(len(payload)))})
// Check for retransmit.
- gotOne, err := conn.Expect(t, tb.TCP{SeqNum: tb.Uint32(uint32(seqNum1))}, time.Second)
+ gotOne, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(seqNum1))}, time.Second)
if err != nil {
t.Error("Expect for retransmit:", err)
}
@@ -123,29 +122,29 @@ func TestReorderingWindow(t *testing.T) {
seqNum1.Add(seqnum.Size(4 * len(payload))),
}}, dsackBlock[dsbOff:])
- conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagAck), AckNum: tb.Uint32(uint32(seqNum2)), Options: dsackBlock[:dsbOff]})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum2)), Options: dsackBlock[:dsbOff]})
// Send half of the original window of packets, checking that we
// received each.
for i, sn := 0, seqNum2; i < numPkts/2; i++ {
dut.Send(t, acceptFd, payload, 0)
- gotOne, err := conn.Expect(t, tb.TCP{SeqNum: tb.Uint32(uint32(sn))}, time.Second)
+ gotOne, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(sn))}, time.Second)
sn.UpdateForward(seqnum.Size(len(payload)))
if err != nil {
- t.Errorf("Expect #%d: %s", i+1, err)
+ t.Fatalf("Expect #%d: %s", i+1, err)
continue
}
if gotOne == nil {
- t.Errorf("#%d: expected a packet within a second but got none", i+1)
+ t.Fatalf("#%d: expected a packet within a second but got none", i+1)
}
}
- if !tb.Native {
+ if !testbench.Native {
		// The window should now be halved, so we shouldn't receive any
		// more, even if we send them.
dut.Send(t, acceptFd, payload, 0)
- if got, err := conn.Expect(t, tb.TCP{}, 100*time.Millisecond); got != nil || err == nil {
+ if got, err := conn.Expect(t, testbench.TCP{}, 100*time.Millisecond); got != nil || err == nil {
t.Fatalf("expected no packets within 100 millisecond, but got one: %s", got)
}
return
@@ -155,20 +154,20 @@ func TestReorderingWindow(t *testing.T) {
for i, sn := 0, seqNum2.Add(seqnum.Size(numPkts/2*len(payload))); i < 2; i++ {
dut.Send(t, acceptFd, payload, 0)
- gotOne, err := conn.Expect(t, tb.TCP{SeqNum: tb.Uint32(uint32(sn))}, time.Second)
+ gotOne, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(sn))}, time.Second)
sn.UpdateForward(seqnum.Size(len(payload)))
if err != nil {
- t.Errorf("Expect #%d: %s", i+1, err)
+ t.Fatalf("Expect #%d: %s", i+1, err)
continue
}
if gotOne == nil {
- t.Errorf("#%d: expected a packet within a second but got none", i+1)
+ t.Fatalf("#%d: expected a packet within a second but got none", i+1)
}
}
// The window should now be full.
dut.Send(t, acceptFd, payload, 0)
- if got, err := conn.Expect(t, tb.TCP{}, 100*time.Millisecond); got != nil || err == nil {
+ if got, err := conn.Expect(t, testbench.TCP{}, 100*time.Millisecond); got != nil || err == nil {
t.Fatalf("expected no packets within 100 millisecond, but got one: %s", got)
}
}
diff --git a/test/packetimpact/tests/tcp_retransmits_test.go b/test/packetimpact/tests/tcp_retransmits_test.go
index 072014ff8..27e9641b1 100644
--- a/test/packetimpact/tests/tcp_retransmits_test.go
+++ b/test/packetimpact/tests/tcp_retransmits_test.go
@@ -25,17 +25,16 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestRetransmits tests retransmits occur at exponentially increasing
// time intervals.
func TestRetransmits(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFd)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
diff --git a/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go b/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go
index f91b06ba1..418393796 100644
--- a/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go
+++ b/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go
@@ -16,7 +16,6 @@ package tcp_send_window_sizes_piggyback_test
import (
"flag"
- "fmt"
"testing"
"time"
@@ -26,7 +25,7 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestSendWindowSizesPiggyback tests cases where segment sizes are close to
@@ -58,13 +57,12 @@ func TestSendWindowSizesPiggyback(t *testing.T) {
// greater than available sender window.
{"WindowGreaterThanSegment", segmentSize + 1, sampleData, sampleData, true /* enqueue */},
} {
- t.Run(fmt.Sprintf("%s%d", tt.description, tt.windowSize), func(t *testing.T) {
+ t.Run(tt.description, func(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFd)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort, WindowSize: testbench.Uint16(tt.windowSize)}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort, WindowSize: testbench.Uint16(tt.windowSize)}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
diff --git a/test/packetimpact/tests/tcp_synrcvd_reset_test.go b/test/packetimpact/tests/tcp_synrcvd_reset_test.go
index 57d034dd1..c5bbd29ee 100644
--- a/test/packetimpact/tests/tcp_synrcvd_reset_test.go
+++ b/test/packetimpact/tests/tcp_synrcvd_reset_test.go
@@ -25,16 +25,15 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestTCPSynRcvdReset tests transition from SYN-RCVD to CLOSED.
func TestTCPSynRcvdReset(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFD)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
// Expect dut connection to have transitioned to SYN-RCVD state.
diff --git a/test/packetimpact/tests/tcp_synsent_reset_test.go b/test/packetimpact/tests/tcp_synsent_reset_test.go
index eac8eb19d..2c8bb101b 100644
--- a/test/packetimpact/tests/tcp_synsent_reset_test.go
+++ b/test/packetimpact/tests/tcp_synsent_reset_test.go
@@ -16,34 +16,33 @@ package tcp_synsent_reset_test
import (
"flag"
- "net"
"testing"
"time"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/tcpip/header"
- tb "gvisor.dev/gvisor/test/packetimpact/testbench"
+ "gvisor.dev/gvisor/test/packetimpact/testbench"
)
func init() {
- tb.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// dutSynSentState sets up the dut connection in SYN-SENT state.
-func dutSynSentState(t *testing.T) (*tb.DUT, *tb.TCPIPv4, uint16, uint16) {
+func dutSynSentState(t *testing.T) (*testbench.DUT, *testbench.TCPIPv4, uint16, uint16) {
t.Helper()
- dut := tb.NewDUT(t)
+ dut := testbench.NewDUT(t)
- clientFD, clientPort := dut.CreateBoundSocket(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, net.ParseIP(tb.RemoteIPv4))
+ clientFD, clientPort := dut.CreateBoundSocket(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, dut.Net.RemoteIPv4)
port := uint16(9001)
- conn := tb.NewTCPIPv4(t, tb.TCP{SrcPort: &port, DstPort: &clientPort}, tb.TCP{SrcPort: &clientPort, DstPort: &port})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{SrcPort: &port, DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort, DstPort: &port})
sa := unix.SockaddrInet4{Port: int(port)}
- copy(sa.Addr[:], net.IP(net.ParseIP(tb.LocalIPv4)).To4())
+ copy(sa.Addr[:], dut.Net.LocalIPv4)
// Bring the dut to SYN-SENT state with a non-blocking connect.
dut.Connect(t, clientFD, &sa)
- if _, err := conn.ExpectData(t, &tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn)}, nil, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, nil, time.Second); err != nil {
t.Fatalf("expected SYN\n")
}
@@ -52,14 +51,13 @@ func dutSynSentState(t *testing.T) (*tb.DUT, *tb.TCPIPv4, uint16, uint16) {
// TestTCPSynSentReset tests RFC793, p67: SYN-SENT to CLOSED transition.
func TestTCPSynSentReset(t *testing.T) {
- dut, conn, _, _ := dutSynSentState(t)
+ _, conn, _, _ := dutSynSentState(t)
defer conn.Close(t)
- defer dut.TearDown()
- conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagRst | header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)})
// Expect the connection to have closed.
// TODO(gvisor.dev/issue/478): Check for TCP_INFO on the dut side.
- conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagAck)})
- if _, err := conn.ExpectData(t, &tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
t.Fatalf("expected a TCP RST")
}
}
@@ -68,23 +66,22 @@ func TestTCPSynSentReset(t *testing.T) {
// transitions.
func TestTCPSynSentRcvdReset(t *testing.T) {
dut, c, remotePort, clientPort := dutSynSentState(t)
- defer dut.TearDown()
defer c.Close(t)
- conn := tb.NewTCPIPv4(t, tb.TCP{SrcPort: &remotePort, DstPort: &clientPort}, tb.TCP{SrcPort: &clientPort, DstPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{SrcPort: &remotePort, DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort, DstPort: &remotePort})
defer conn.Close(t)
// Initiate new SYN connection with the same port pair
// (simultaneous open case), expect the dut connection to move to
// SYN-RCVD state
- conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn)})
- if _, err := conn.ExpectData(t, &tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
t.Fatalf("expected SYN-ACK %s\n", err)
}
- conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)})
// Expect the connection to have transitioned SYN-RCVD to CLOSED.
// TODO(gvisor.dev/issue/478): Check for TCP_INFO on the dut side.
- conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagAck)})
- if _, err := conn.ExpectData(t, &tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
t.Fatalf("expected a TCP RST")
}
}
diff --git a/test/packetimpact/tests/tcp_timewait_reset_test.go b/test/packetimpact/tests/tcp_timewait_reset_test.go
index 2f76a6531..d1d2fb83d 100644
--- a/test/packetimpact/tests/tcp_timewait_reset_test.go
+++ b/test/packetimpact/tests/tcp_timewait_reset_test.go
@@ -25,16 +25,15 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestTimeWaitReset tests handling of RST when in TIME_WAIT state.
func TestTimeWaitReset(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)
defer dut.Close(t, listenFD)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
diff --git a/test/packetimpact/tests/tcp_unacc_seq_ack_test.go b/test/packetimpact/tests/tcp_unacc_seq_ack_test.go
index d078bbf15..ea962c818 100644
--- a/test/packetimpact/tests/tcp_unacc_seq_ack_test.go
+++ b/test/packetimpact/tests/tcp_unacc_seq_ack_test.go
@@ -28,7 +28,7 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
func TestEstablishedUnaccSeqAck(t *testing.T) {
@@ -48,10 +48,9 @@ func TestEstablishedUnaccSeqAck(t *testing.T) {
} {
t.Run(fmt.Sprintf("%s:offset=%d", tt.description, tt.seqNumOffset), func(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)
defer dut.Close(t, listenFD)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
@@ -102,10 +101,9 @@ func TestPassiveCloseUnaccSeqAck(t *testing.T) {
} {
t.Run(fmt.Sprintf("%s:offset=%d", tt.description, tt.seqNumOffset), func(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)
defer dut.Close(t, listenFD)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
@@ -164,10 +162,9 @@ func TestActiveCloseUnaccpSeqAck(t *testing.T) {
} {
t.Run(fmt.Sprintf("%s:offset=%d", tt.description, tt.seqNumOffset), func(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1 /*backlog*/)
defer dut.Close(t, listenFD)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
diff --git a/test/packetimpact/tests/tcp_user_timeout_test.go b/test/packetimpact/tests/tcp_user_timeout_test.go
index 551dc78e7..b16e65366 100644
--- a/test/packetimpact/tests/tcp_user_timeout_test.go
+++ b/test/packetimpact/tests/tcp_user_timeout_test.go
@@ -25,7 +25,7 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
func sendPayload(t *testing.T, conn *testbench.TCPIPv4, dut *testbench.DUT, fd int32) {
@@ -64,10 +64,9 @@ func TestTCPUserTimeout(t *testing.T) {
t.Run(tt.description+ttf.description, func(t *testing.T) {
// Create a socket, listen, TCP handshake, and accept.
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFD)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
acceptFD, _ := dut.Accept(t, listenFD)
diff --git a/test/packetimpact/tests/tcp_window_shrink_test.go b/test/packetimpact/tests/tcp_window_shrink_test.go
index 5b001fbec..093484721 100644
--- a/test/packetimpact/tests/tcp_window_shrink_test.go
+++ b/test/packetimpact/tests/tcp_window_shrink_test.go
@@ -25,15 +25,14 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
func TestWindowShrink(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFd)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
diff --git a/test/packetimpact/tests/tcp_zero_receive_window_test.go b/test/packetimpact/tests/tcp_zero_receive_window_test.go
new file mode 100644
index 000000000..cf0431c57
--- /dev/null
+++ b/test/packetimpact/tests/tcp_zero_receive_window_test.go
@@ -0,0 +1,113 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tcp_zero_receive_window_test
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/tcpip/header"
+ "gvisor.dev/gvisor/test/packetimpact/testbench"
+)
+
+func init() {
+ testbench.Initialize(flag.CommandLine)
+}
+
+// TestZeroReceiveWindow tests that the DUT eventually advertises a zero
+// receive window when its receive buffer is not drained.
+func TestZeroReceiveWindow(t *testing.T) {
+ for _, payloadLen := range []int{64, 512, 1024} {
+ t.Run(fmt.Sprintf("TestZeroReceiveWindow_with_%dbytes_payload", payloadLen), func(t *testing.T) {
+ dut := testbench.NewDUT(t)
+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFd)
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ defer conn.Close(t)
+
+ conn.Connect(t)
+ acceptFd, _ := dut.Accept(t, listenFd)
+ defer dut.Close(t, acceptFd)
+
+ dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
+
+			samplePayload := &testbench.Payload{Bytes: make([]byte, payloadLen)}
+			// Expect the DUT to eventually advertise a zero receive window.
+			// The test would time out otherwise.
+ for {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
+ gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ if err != nil {
+ t.Fatalf("expected packet was not received: %s", err)
+ }
+ if *gotTCP.WindowSize == 0 {
+ break
+ }
+ }
+ })
+ }
+}
+
+// TestNonZeroReceiveWindow tests that the DUT never sends a zero receive
+// window while data is being read from the socket buffer.
+func TestNonZeroReceiveWindow(t *testing.T) {
+ for _, payloadLen := range []int{64, 512, 1024} {
+		t.Run(fmt.Sprintf("TestNonZeroReceiveWindow_with_%dbytes_payload", payloadLen), func(t *testing.T) {
+ dut := testbench.NewDUT(t)
+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFd)
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ defer conn.Close(t)
+
+ conn.Connect(t)
+ acceptFd, _ := dut.Accept(t, listenFd)
+ defer dut.Close(t, acceptFd)
+
+ dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
+
+ samplePayload := &testbench.Payload{Bytes: testbench.GenerateRandomPayload(t, payloadLen)}
+ var rcvWindow uint16
+ initRcv := false
+			// This loop tracks a running receive-window value, seeded from the ACK for
+			// the first payload we sent and decremented by the payload length on each
+			// iteration. Every ACK must advertise a non-zero window; the loop exits once
+			// the tracked window has been consumed.
+ for {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
+ gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ if err != nil {
+ t.Fatalf("expected packet was not received: %s", err)
+ }
+ if ret, _, err := dut.RecvWithErrno(context.Background(), t, acceptFd, int32(payloadLen), 0); ret == -1 {
+ t.Fatalf("dut.RecvWithErrno(ctx, t, %d, %d, 0) = %d,_, %s", acceptFd, payloadLen, ret, err)
+ }
+
+ if *gotTCP.WindowSize == 0 {
+ t.Fatalf("expected non-zero receive window.")
+ }
+ if !initRcv {
+ rcvWindow = uint16(*gotTCP.WindowSize)
+ initRcv = true
+ }
+ if rcvWindow <= uint16(payloadLen) {
+ break
+ }
+ rcvWindow -= uint16(payloadLen)
+ }
+ })
+ }
+}
diff --git a/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go b/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go
index da93267d6..1ab9ee1b2 100644
--- a/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go
+++ b/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go
@@ -25,17 +25,16 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestZeroWindowProbeRetransmit tests retransmits of zero window probes
// to be sent at exponentially increasing time intervals.
func TestZeroWindowProbeRetransmit(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFd)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
diff --git a/test/packetimpact/tests/tcp_zero_window_probe_test.go b/test/packetimpact/tests/tcp_zero_window_probe_test.go
index 44cac42f8..650a569cc 100644
--- a/test/packetimpact/tests/tcp_zero_window_probe_test.go
+++ b/test/packetimpact/tests/tcp_zero_window_probe_test.go
@@ -25,17 +25,16 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestZeroWindowProbe tests few cases of zero window probing over the
// same connection.
func TestZeroWindowProbe(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFd)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
diff --git a/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go b/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go
index 09a1c653f..079fea68c 100644
--- a/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go
+++ b/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go
@@ -25,17 +25,16 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
// TestZeroWindowProbeUserTimeout sanity tests user timeout when we are
// retransmitting zero window probes.
func TestZeroWindowProbeUserTimeout(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
defer dut.Close(t, listenFd)
- conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
+ conn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
defer conn.Close(t)
conn.Connect(t)
diff --git a/test/packetimpact/tests/udp_any_addr_recv_unicast_test.go b/test/packetimpact/tests/udp_any_addr_recv_unicast_test.go
index 17f32ef65..f4ae00a81 100644
--- a/test/packetimpact/tests/udp_any_addr_recv_unicast_test.go
+++ b/test/packetimpact/tests/udp_any_addr_recv_unicast_test.go
@@ -26,21 +26,20 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
func TestAnyRecvUnicastUDP(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
boundFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.IPv4zero)
defer dut.Close(t, boundFD)
- conn := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
+ conn := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
defer conn.Close(t)
payload := testbench.GenerateRandomPayload(t, 1<<10 /* 1 KiB */)
conn.SendIP(
t,
- testbench.IPv4{DstAddr: testbench.Address(tcpip.Address(net.ParseIP(testbench.RemoteIPv4).To4()))},
+ testbench.IPv4{DstAddr: testbench.Address(tcpip.Address(dut.Net.RemoteIPv4))},
testbench.UDP{},
&testbench.Payload{Bytes: payload},
)
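Dropping the net.ParseIP(...).To4() round trip above assumes dut.Net.RemoteIPv4 is already held in 4-byte form; if it were the 16-byte representation, tcpip.Address(dut.Net.RemoteIPv4) would yield a 16-byte address and the IPv4 header would be wrong. A defensive caller could normalize first. The helper below is hypothetical, not part of the testbench, and assumes the usual net and gvisor.dev/gvisor/pkg/tcpip imports:

// ipv4AsAddress converts a net.IP into a 4-byte tcpip.Address, tolerating
// either the 4-byte or the 16-byte net.IP representation.
func ipv4AsAddress(ip net.IP) tcpip.Address {
	if ip4 := ip.To4(); ip4 != nil {
		return tcpip.Address(ip4) // tcpip.Address is a byte string at this revision
	}
	return tcpip.Address(ip)
}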
diff --git a/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go b/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
index 3d2791a6e..52c6f9d91 100644
--- a/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
+++ b/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
@@ -30,16 +30,15 @@ import (
var oneSecond = unix.Timeval{Sec: 1, Usec: 0}
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
func TestDiscardsUDPPacketsWithMcastSourceAddressV4(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP(testbench.RemoteIPv4))
+ remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, dut.Net.RemoteIPv4)
defer dut.Close(t, remoteFD)
dut.SetSockOptTimeval(t, remoteFD, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &oneSecond)
- conn := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
+ conn := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
defer conn.Close(t)
for _, mcastAddr := range []net.IP{
@@ -66,11 +65,10 @@ func TestDiscardsUDPPacketsWithMcastSourceAddressV4(t *testing.T) {
func TestDiscardsUDPPacketsWithMcastSourceAddressV6(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP(testbench.RemoteIPv6))
+ remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, dut.Net.RemoteIPv6)
defer dut.Close(t, remoteFD)
dut.SetSockOptTimeval(t, remoteFD, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &oneSecond)
- conn := testbench.NewUDPIPv6(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
+ conn := dut.Net.NewUDPIPv6(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
defer conn.Close(t)
for _, mcastAddr := range []net.IP{
diff --git a/test/packetimpact/tests/udp_icmp_error_propagation_test.go b/test/packetimpact/tests/udp_icmp_error_propagation_test.go
index df35d16c8..cd4523e88 100644
--- a/test/packetimpact/tests/udp_icmp_error_propagation_test.go
+++ b/test/packetimpact/tests/udp_icmp_error_propagation_test.go
@@ -30,7 +30,7 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
type connectionMode bool
@@ -229,7 +229,6 @@ func TestUDPICMPErrorPropagation(t *testing.T) {
} {
t.Run(fmt.Sprintf("%s/%s/%s", connect, icmpErr, errDetect.name), func(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.IPv4zero)
defer dut.Close(t, remoteFD)
@@ -239,7 +238,7 @@ func TestUDPICMPErrorPropagation(t *testing.T) {
cleanFD, cleanPort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.IPv4zero)
defer dut.Close(t, cleanFD)
- conn := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
+ conn := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
defer conn.Close(t)
if connect {
@@ -261,7 +260,7 @@ func TestUDPICMPErrorPropagation(t *testing.T) {
// involved in the generation of the ICMP error. As such,
// interactions between it and the DUT should be independent of
// the ICMP error at least at the port level.
- connClean := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
+ connClean := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
defer connClean.Close(t)
errDetectConn = &connClean
@@ -283,7 +282,6 @@ func TestICMPErrorDuringUDPRecv(t *testing.T) {
t.Run(fmt.Sprintf("%s/%s", connect, icmpErr), func(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.IPv4zero)
defer dut.Close(t, remoteFD)
@@ -293,7 +291,7 @@ func TestICMPErrorDuringUDPRecv(t *testing.T) {
cleanFD, cleanPort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.IPv4zero)
defer dut.Close(t, cleanFD)
- conn := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
+ conn := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
defer conn.Close(t)
if connect {
diff --git a/test/packetimpact/tests/udp_recv_mcast_bcast_test.go b/test/packetimpact/tests/udp_recv_mcast_bcast_test.go
index 526173969..b29c07825 100644
--- a/test/packetimpact/tests/udp_recv_mcast_bcast_test.go
+++ b/test/packetimpact/tests/udp_recv_mcast_bcast_test.go
@@ -29,12 +29,12 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
func TestUDPRecvMcastBcast(t *testing.T) {
- subnetBcastAddr := broadcastAddr(net.ParseIP(testbench.RemoteIPv4), net.CIDRMask(testbench.IPv4PrefixLength, 32))
-
+ dut := testbench.NewDUT(t)
+ subnetBcastAddr := broadcastAddr(dut.Net.RemoteIPv4, net.CIDRMask(dut.Net.IPv4PrefixLength, 32))
for _, v := range []struct {
bound, to net.IP
}{
@@ -43,17 +43,22 @@ func TestUDPRecvMcastBcast(t *testing.T) {
{bound: net.IPv4zero, to: net.IPv4allsys},
{bound: subnetBcastAddr, to: subnetBcastAddr},
- {bound: subnetBcastAddr, to: net.IPv4bcast},
+
+ // FIXME(gvisor.dev/issue/4896): Previously, by the time subnetBcastAddr was
+ // created, IPv4PrefixLength was still 0 because genPseudoFlags had not been
+ // called yet (it was only called from NewDUT), so the test did not do what
+ // the author originally intended. Now that all flags are processed at the
+ // very beginning, this case fails and is disabled.
+ //
+ // {bound: subnetBcastAddr, to: net.IPv4bcast},
{bound: net.IPv4bcast, to: net.IPv4bcast},
{bound: net.IPv4allsys, to: net.IPv4allsys},
} {
t.Run(fmt.Sprintf("bound=%s,to=%s", v.bound, v.to), func(t *testing.T) {
- dut := testbench.NewDUT(t)
- defer dut.TearDown()
boundFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, v.bound)
defer dut.Close(t, boundFD)
- conn := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
+ conn := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
defer conn.Close(t)
payload := testbench.GenerateRandomPayload(t, 1<<10 /* 1 KiB */)
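For concreteness on the FIXME above: before flags were processed up front, IPv4PrefixLength was still 0 when subnetBcastAddr was computed, and net.CIDRMask(0, 32) is an all-zero mask, so the file's broadcastAddr helper (fixed further down in this diff) turned the "subnet broadcast" into the limited broadcast address, presumably making the disabled case indistinguishable from the {bound: net.IPv4bcast, to: net.IPv4bcast} row. A standalone illustration with a placeholder address:

// Illustration only: an unparsed prefix length degenerates to 255.255.255.255.
func TestBroadcastAddrWithZeroPrefix(t *testing.T) {
	mask := net.CIDRMask(0, 32) // what IPv4PrefixLength == 0 used to produce
	if got := broadcastAddr(net.ParseIP("192.0.2.1"), mask); !got.Equal(net.IPv4bcast) {
		t.Fatalf("broadcastAddr(...) = %s, want %s", got, net.IPv4bcast)
	}
}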
@@ -73,15 +78,14 @@ func TestUDPRecvMcastBcast(t *testing.T) {
func TestUDPDoesntRecvMcastBcastOnUnicastAddr(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
- boundFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP(testbench.RemoteIPv4))
+ boundFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, dut.Net.RemoteIPv4)
dut.SetSockOptTimeval(t, boundFD, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &unix.Timeval{Sec: 1, Usec: 0})
defer dut.Close(t, boundFD)
- conn := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
+ conn := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
defer conn.Close(t)
for _, to := range []net.IP{
- broadcastAddr(net.ParseIP(testbench.RemoteIPv4), net.CIDRMask(testbench.IPv4PrefixLength, 32)),
+ broadcastAddr(dut.Net.RemoteIPv4, net.CIDRMask(dut.Net.IPv4PrefixLength, 32)),
net.IPv4(255, 255, 255, 255),
net.IPv4(224, 0, 0, 1),
} {
@@ -102,9 +106,10 @@ func TestUDPDoesntRecvMcastBcastOnUnicastAddr(t *testing.T) {
}
func broadcastAddr(ip net.IP, mask net.IPMask) net.IP {
+ result := make(net.IP, net.IPv4len)
ip4 := ip.To4()
for i := range ip4 {
- ip4[i] |= ^mask[i]
+ result[i] = ip4[i] | ^mask[i]
}
- return ip4
+ return result
}
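The broadcastAddr rewrite is not cosmetic. The old version OR'd ^mask into ip.To4() in place, and To4() returns a slice that aliases the caller's backing array, so once the helper is handed dut.Net.RemoteIPv4 directly (as it now is), the DUT's recorded address itself would have been rewritten to the broadcast address. A standalone demonstration of the aliasing, using a placeholder address:

package main

import (
	"fmt"
	"net"
)

// buggyBroadcastAddr reproduces the old in-place behaviour for comparison.
func buggyBroadcastAddr(ip net.IP, mask net.IPMask) net.IP {
	ip4 := ip.To4()
	for i := range ip4 {
		ip4[i] |= ^mask[i]
	}
	return ip4
}

func main() {
	remote := net.IPv4(192, 0, 2, 10).To4() // stands in for dut.Net.RemoteIPv4
	_ = buggyBroadcastAddr(remote, net.CIDRMask(24, 32))
	fmt.Println(remote) // prints 192.0.2.255: the caller's IP was mutated
}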
diff --git a/test/packetimpact/tests/udp_send_recv_dgram_test.go b/test/packetimpact/tests/udp_send_recv_dgram_test.go
index 91b967400..7ee2c8014 100644
--- a/test/packetimpact/tests/udp_send_recv_dgram_test.go
+++ b/test/packetimpact/tests/udp_send_recv_dgram_test.go
@@ -26,7 +26,7 @@ import (
)
func init() {
- testbench.RegisterFlags(flag.CommandLine)
+ testbench.Initialize(flag.CommandLine)
}
type udpConn interface {
@@ -38,7 +38,6 @@ type udpConn interface {
func TestUDP(t *testing.T) {
dut := testbench.NewDUT(t)
- defer dut.TearDown()
for _, isIPv4 := range []bool{true, false} {
ipVersionName := "IPv6"
@@ -46,24 +45,24 @@ func TestUDP(t *testing.T) {
ipVersionName = "IPv4"
}
t.Run(ipVersionName, func(t *testing.T) {
- var addr string
+ var addr net.IP
if isIPv4 {
- addr = testbench.RemoteIPv4
+ addr = dut.Net.RemoteIPv4
} else {
- addr = testbench.RemoteIPv6
+ addr = dut.Net.RemoteIPv6
}
- boundFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP(addr))
+ boundFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, addr)
defer dut.Close(t, boundFD)
var conn udpConn
var localAddr unix.Sockaddr
if isIPv4 {
- v4Conn := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
+ v4Conn := dut.Net.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
localAddr = v4Conn.LocalAddr(t)
conn = &v4Conn
} else {
- v6Conn := testbench.NewUDPIPv6(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- localAddr = v6Conn.LocalAddr(t)
+ v6Conn := dut.Net.NewUDPIPv6(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
+ localAddr = v6Conn.LocalAddr(t, dut.Net.RemoteDevID)
conn = &v6Conn
}
defer conn.Close(t)
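The second argument added to the IPv6 LocalAddr call reflects that the testbench's local IPv6 address is link-local, so the sockaddr handed to the DUT must carry a scope: dut.Net.RemoteDevID, the interface index on the DUT side, becomes the ZoneId. A sketch of what such a scoped sockaddr plausibly looks like (helper name and wiring are illustrative, not the testbench API):

// scopedSockaddr builds a unix.SockaddrInet6 for a link-local local address,
// scoped to the DUT-side interface, since a link-local address is only
// meaningful per interface.
func scopedSockaddr(localIPv6 net.IP, localPort uint16, remoteDevID uint32) *unix.SockaddrInet6 {
	sa := &unix.SockaddrInet6{
		Port:   int(localPort),
		ZoneId: remoteDevID, // required for fe80::/10 destinations
	}
	copy(sa.Addr[:], localIPv6.To16())
	return sa
}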