author    Jay Zhuang <jayzhuang@google.com>    2020-07-27 06:37:11 -0700
committer gVisor bot <gvisor-bot@google.com>  2020-07-27 06:39:06 -0700
commit    cf7141fb43d9b6757c6838baa71e2edfee339d44 (patch)
tree      b77879f6310231831ebb7d37f07bfea1d1d59b0b /test
parent    2ecf66903ed3da46fa021feeeeccad81cd82eaa6 (diff)
Ask for *testing.T instead of storing it
Storing *testing.T on test helper structs is problematic when subtests are
used, because it is possible for nested tests to call Fatal on the parent
test, which incorrectly terminates the parent test. For example:

func TestOuter(t *testing.T) {
  dut := NewDUT(t)
  t.Run("first test", func(t *testing.T) {
    dut.FallibleCall()
  })
  t.Run("second test", func(t *testing.T) {
    dut.FallibleCall()
  })
}

In the example above, assuming `FallibleCall` calls `t.Fatal` on the `t` it
holds, if `dut.FallibleCall` fails in "first test", it will call `Fatal` on
the parent `t`, quitting `TestOuter` before "second test" runs. This is not
the behavior we want.

PiperOrigin-RevId: 323350241
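For reference, a minimal sketch of the pattern this change adopts; the DUT
type and FallibleCall below are illustrative stand-ins, not the real
testbench types. Each helper takes the caller's *testing.T on every call and
marks itself with t.Helper(), so a fatal failure is attributed to, and stops
only, the subtest that made the call.

package example

import "testing"

// DUT stands in for a test helper that no longer stores *testing.T.
type DUT struct{}

// FallibleCall receives the caller's *testing.T instead of using one saved
// at construction time.
func (d *DUT) FallibleCall(t *testing.T) {
  t.Helper()
  // ... do the work; on error, t.Fatal ends only the calling (sub)test.
}

func TestOuter(t *testing.T) {
  dut := &DUT{}
  t.Run("first test", func(t *testing.T) {
    dut.FallibleCall(t) // a failure here ends only "first test"
  })
  t.Run("second test", func(t *testing.T) {
    dut.FallibleCall(t) // still runs even if "first test" failed
  })
}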
Diffstat (limited to 'test')
-rw-r--r--  test/packetimpact/testbench/connections.go | 330
-rw-r--r--  test/packetimpact/testbench/dut.go | 358
-rw-r--r--  test/packetimpact/testbench/rawsockets.go | 44
-rw-r--r--  test/packetimpact/tests/fin_wait2_timeout_test.go | 26
-rw-r--r--  test/packetimpact/tests/icmpv6_param_problem_test.go | 8
-rw-r--r--  test/packetimpact/tests/ipv4_id_uniqueness_test.go | 34
-rw-r--r--  test/packetimpact/tests/ipv6_fragment_reassembly_test.go | 10
-rw-r--r--  test/packetimpact/tests/ipv6_unknown_options_action_test.go | 44
-rw-r--r--  test/packetimpact/tests/tcp_close_wait_ack_test.go | 63
-rw-r--r--  test/packetimpact/tests/tcp_cork_mss_test.go | 36
-rw-r--r--  test/packetimpact/tests/tcp_handshake_window_size_test.go | 20
-rw-r--r--  test/packetimpact/tests/tcp_network_unreachable_test.go | 28
-rw-r--r--  test/packetimpact/tests/tcp_noaccept_close_rst_test.go | 10
-rw-r--r--  test/packetimpact/tests/tcp_outside_the_window_test.go | 24
-rw-r--r--  test/packetimpact/tests/tcp_paws_mechanism_test.go | 24
-rw-r--r--  test/packetimpact/tests/tcp_queue_receive_in_syn_sent_test.go | 26
-rw-r--r--  test/packetimpact/tests/tcp_reordering_test.go | 48
-rw-r--r--  test/packetimpact/tests/tcp_retransmits_test.go | 28
-rw-r--r--  test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go | 24
-rw-r--r--  test/packetimpact/tests/tcp_synrcvd_reset_test.go | 16
-rw-r--r--  test/packetimpact/tests/tcp_synsent_reset_test.go | 30
-rw-r--r--  test/packetimpact/tests/tcp_user_timeout_test.go | 41
-rw-r--r--  test/packetimpact/tests/tcp_window_shrink_test.go | 36
-rw-r--r--  test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go | 39
-rw-r--r--  test/packetimpact/tests/tcp_zero_window_probe_test.go | 44
-rw-r--r--  test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go | 40
-rw-r--r--  test/packetimpact/tests/udp_discard_mcast_source_addr_test.go | 22
-rw-r--r--  test/packetimpact/tests/udp_icmp_error_propagation_test.go | 130
-rw-r--r--  test/packetimpact/tests/udp_recv_mcast_bcast_test.go | 9
-rw-r--r--  test/packetimpact/tests/udp_send_recv_dgram_test.go | 28
30 files changed, 876 insertions, 744 deletions
diff --git a/test/packetimpact/testbench/connections.go b/test/packetimpact/testbench/connections.go
index 87ce58c24..3af5f83fd 100644
--- a/test/packetimpact/testbench/connections.go
+++ b/test/packetimpact/testbench/connections.go
@@ -429,7 +429,6 @@ type Connection struct {
layerStates []layerState
injector Injector
sniffer Sniffer
- t *testing.T
}
// Returns the default incoming frame against which to match. If received is
@@ -462,7 +461,9 @@ func (conn *Connection) match(override, received Layers) bool {
}
// Close frees associated resources held by the Connection.
-func (conn *Connection) Close() {
+func (conn *Connection) Close(t *testing.T) {
+ t.Helper()
+
errs := multierr.Combine(conn.sniffer.close(), conn.injector.close())
for _, s := range conn.layerStates {
if err := s.close(); err != nil {
@@ -470,7 +471,7 @@ func (conn *Connection) Close() {
}
}
if errs != nil {
- conn.t.Fatalf("unable to close %+v: %s", conn, errs)
+ t.Fatalf("unable to close %+v: %s", conn, errs)
}
}
@@ -482,7 +483,9 @@ func (conn *Connection) Close() {
// overriden first. As an example, valid values of overrideLayers for a TCP-
// over-IPv4-over-Ethernet connection are: nil, [TCP], [IPv4, TCP], and
// [Ethernet, IPv4, TCP].
-func (conn *Connection) CreateFrame(overrideLayers Layers, additionalLayers ...Layer) Layers {
+func (conn *Connection) CreateFrame(t *testing.T, overrideLayers Layers, additionalLayers ...Layer) Layers {
+ t.Helper()
+
var layersToSend Layers
for i, s := range conn.layerStates {
layer := s.outgoing()
@@ -491,7 +494,7 @@ func (conn *Connection) CreateFrame(overrideLayers Layers, additionalLayers ...L
// end.
if j := len(overrideLayers) - (len(conn.layerStates) - i); j >= 0 {
if err := layer.merge(overrideLayers[j]); err != nil {
- conn.t.Fatalf("can't merge %+v into %+v: %s", layer, overrideLayers[j], err)
+ t.Fatalf("can't merge %+v into %+v: %s", layer, overrideLayers[j], err)
}
}
layersToSend = append(layersToSend, layer)
@@ -505,21 +508,25 @@ func (conn *Connection) CreateFrame(overrideLayers Layers, additionalLayers ...L
// This method is useful for sending out-of-band control messages such as
// ICMP packets, where it would not make sense to update the transport layer's
// state using the ICMP header.
-func (conn *Connection) SendFrameStateless(frame Layers) {
+func (conn *Connection) SendFrameStateless(t *testing.T, frame Layers) {
+ t.Helper()
+
outBytes, err := frame.ToBytes()
if err != nil {
- conn.t.Fatalf("can't build outgoing packet: %s", err)
+ t.Fatalf("can't build outgoing packet: %s", err)
}
- conn.injector.Send(outBytes)
+ conn.injector.Send(t, outBytes)
}
// SendFrame sends a frame on the wire and updates the state of all layers.
-func (conn *Connection) SendFrame(frame Layers) {
+func (conn *Connection) SendFrame(t *testing.T, frame Layers) {
+ t.Helper()
+
outBytes, err := frame.ToBytes()
if err != nil {
- conn.t.Fatalf("can't build outgoing packet: %s", err)
+ t.Fatalf("can't build outgoing packet: %s", err)
}
- conn.injector.Send(outBytes)
+ conn.injector.Send(t, outBytes)
// frame might have nil values where the caller wanted to use default values.
// sentFrame will have no nil values in it because it comes from parsing the
@@ -528,7 +535,7 @@ func (conn *Connection) SendFrame(frame Layers) {
// Update the state of each layer based on what was sent.
for i, s := range conn.layerStates {
if err := s.sent(sentFrame[i]); err != nil {
- conn.t.Fatalf("Unable to update the state of %+v with %s: %s", s, sentFrame[i], err)
+ t.Fatalf("Unable to update the state of %+v with %s: %s", s, sentFrame[i], err)
}
}
}
@@ -538,18 +545,22 @@ func (conn *Connection) SendFrame(frame Layers) {
//
// Types defined with Connection as the underlying type should expose
// type-safe versions of this method.
-func (conn *Connection) send(overrideLayers Layers, additionalLayers ...Layer) {
- conn.SendFrame(conn.CreateFrame(overrideLayers, additionalLayers...))
+func (conn *Connection) send(t *testing.T, overrideLayers Layers, additionalLayers ...Layer) {
+ t.Helper()
+
+ conn.SendFrame(t, conn.CreateFrame(t, overrideLayers, additionalLayers...))
}
// recvFrame gets the next successfully parsed frame (of type Layers) within the
// timeout provided. If no parsable frame arrives before the timeout, it returns
// nil.
-func (conn *Connection) recvFrame(timeout time.Duration) Layers {
+func (conn *Connection) recvFrame(t *testing.T, timeout time.Duration) Layers {
+ t.Helper()
+
if timeout <= 0 {
return nil
}
- b := conn.sniffer.Recv(timeout)
+ b := conn.sniffer.Recv(t, timeout)
if b == nil {
return nil
}
@@ -569,32 +580,36 @@ func (e *layersError) Error() string {
// Expect expects a frame with the final layerStates layer matching the
// provided Layer within the timeout specified. If it doesn't arrive in time,
// an error is returned.
-func (conn *Connection) Expect(layer Layer, timeout time.Duration) (Layer, error) {
+func (conn *Connection) Expect(t *testing.T, layer Layer, timeout time.Duration) (Layer, error) {
+ t.Helper()
+
// Make a frame that will ignore all but the final layer.
layers := make([]Layer, len(conn.layerStates))
layers[len(layers)-1] = layer
- gotFrame, err := conn.ExpectFrame(layers, timeout)
+ gotFrame, err := conn.ExpectFrame(t, layers, timeout)
if err != nil {
return nil, err
}
if len(conn.layerStates)-1 < len(gotFrame) {
return gotFrame[len(conn.layerStates)-1], nil
}
- conn.t.Fatal("the received frame should be at least as long as the expected layers")
+ t.Fatalf("the received frame should be at least as long as the expected layers, got %d layers, want at least %d layers, got frame: %#v", len(gotFrame), len(conn.layerStates), gotFrame)
panic("unreachable")
}
// ExpectFrame expects a frame that matches the provided Layers within the
// timeout specified. If one arrives in time, the Layers is returned without an
// error. If it doesn't arrive in time, it returns nil and error is non-nil.
-func (conn *Connection) ExpectFrame(layers Layers, timeout time.Duration) (Layers, error) {
+func (conn *Connection) ExpectFrame(t *testing.T, layers Layers, timeout time.Duration) (Layers, error) {
+ t.Helper()
+
deadline := time.Now().Add(timeout)
var errs error
for {
var gotLayers Layers
if timeout = time.Until(deadline); timeout > 0 {
- gotLayers = conn.recvFrame(timeout)
+ gotLayers = conn.recvFrame(t, timeout)
}
if gotLayers == nil {
if errs == nil {
@@ -605,7 +620,7 @@ func (conn *Connection) ExpectFrame(layers Layers, timeout time.Duration) (Layer
if conn.match(layers, gotLayers) {
for i, s := range conn.layerStates {
if err := s.received(gotLayers[i]); err != nil {
- conn.t.Fatal(err)
+ t.Fatalf("failed to update test connection's layer states based on received frame: %s", err)
}
}
return gotLayers, nil
@@ -616,8 +631,10 @@ func (conn *Connection) ExpectFrame(layers Layers, timeout time.Duration) (Layer
// Drain drains the sniffer's receive buffer by receiving packets until there's
// nothing else to receive.
-func (conn *Connection) Drain() {
- conn.sniffer.Drain()
+func (conn *Connection) Drain(t *testing.T) {
+ t.Helper()
+
+ conn.sniffer.Drain(t)
}
// TCPIPv4 maintains the state for all the layers in a TCP/IPv4 connection.
@@ -625,6 +642,8 @@ type TCPIPv4 Connection
// NewTCPIPv4 creates a new TCPIPv4 connection with reasonable defaults.
func NewTCPIPv4(t *testing.T, outgoingTCP, incomingTCP TCP) TCPIPv4 {
+ t.Helper()
+
etherState, err := newEtherState(Ether{}, Ether{})
if err != nil {
t.Fatalf("can't make etherState: %s", err)
@@ -650,57 +669,58 @@ func NewTCPIPv4(t *testing.T, outgoingTCP, incomingTCP TCP) TCPIPv4 {
layerStates: []layerState{etherState, ipv4State, tcpState},
injector: injector,
sniffer: sniffer,
- t: t,
}
}
// Connect performs a TCP 3-way handshake. The input Connection should have a
// final TCP Layer.
-func (conn *TCPIPv4) Connect() {
- conn.t.Helper()
+func (conn *TCPIPv4) Connect(t *testing.T) {
+ t.Helper()
// Send the SYN.
- conn.Send(TCP{Flags: Uint8(header.TCPFlagSyn)})
+ conn.Send(t, TCP{Flags: Uint8(header.TCPFlagSyn)})
// Wait for the SYN-ACK.
- synAck, err := conn.Expect(TCP{Flags: Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
+ synAck, err := conn.Expect(t, TCP{Flags: Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
if err != nil {
- conn.t.Fatalf("didn't get synack during handshake: %s", err)
+ t.Fatalf("didn't get synack during handshake: %s", err)
}
conn.layerStates[len(conn.layerStates)-1].(*tcpState).synAck = synAck
// Send an ACK.
- conn.Send(TCP{Flags: Uint8(header.TCPFlagAck)})
+ conn.Send(t, TCP{Flags: Uint8(header.TCPFlagAck)})
}
// ConnectWithOptions performs a TCP 3-way handshake with given TCP options.
// The input Connection should have a final TCP Layer.
-func (conn *TCPIPv4) ConnectWithOptions(options []byte) {
- conn.t.Helper()
+func (conn *TCPIPv4) ConnectWithOptions(t *testing.T, options []byte) {
+ t.Helper()
// Send the SYN.
- conn.Send(TCP{Flags: Uint8(header.TCPFlagSyn), Options: options})
+ conn.Send(t, TCP{Flags: Uint8(header.TCPFlagSyn), Options: options})
// Wait for the SYN-ACK.
- synAck, err := conn.Expect(TCP{Flags: Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
+ synAck, err := conn.Expect(t, TCP{Flags: Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
if err != nil {
- conn.t.Fatalf("didn't get synack during handshake: %s", err)
+ t.Fatalf("didn't get synack during handshake: %s", err)
}
conn.layerStates[len(conn.layerStates)-1].(*tcpState).synAck = synAck
// Send an ACK.
- conn.Send(TCP{Flags: Uint8(header.TCPFlagAck)})
+ conn.Send(t, TCP{Flags: Uint8(header.TCPFlagAck)})
}
// ExpectData is a convenient method that expects a Layer and the Layer after
// it. If it doens't arrive in time, it returns nil.
-func (conn *TCPIPv4) ExpectData(tcp *TCP, payload *Payload, timeout time.Duration) (Layers, error) {
+func (conn *TCPIPv4) ExpectData(t *testing.T, tcp *TCP, payload *Payload, timeout time.Duration) (Layers, error) {
+ t.Helper()
+
expected := make([]Layer, len(conn.layerStates))
expected[len(expected)-1] = tcp
if payload != nil {
expected = append(expected, payload)
}
- return (*Connection)(conn).ExpectFrame(expected, timeout)
+ return (*Connection)(conn).ExpectFrame(t, expected, timeout)
}
// ExpectNextData attempts to receive the next incoming segment for the
@@ -709,9 +729,11 @@ func (conn *TCPIPv4) ExpectData(tcp *TCP, payload *Payload, timeout time.Duratio
// It differs from ExpectData() in that here we are only interested in the next
// received segment, while ExpectData() can receive multiple segments for the
// connection until there is a match with given layers or a timeout.
-func (conn *TCPIPv4) ExpectNextData(tcp *TCP, payload *Payload, timeout time.Duration) (Layers, error) {
+func (conn *TCPIPv4) ExpectNextData(t *testing.T, tcp *TCP, payload *Payload, timeout time.Duration) (Layers, error) {
+ t.Helper()
+
// Receive the first incoming TCP segment for this connection.
- got, err := conn.ExpectData(&TCP{}, nil, timeout)
+ got, err := conn.ExpectData(t, &TCP{}, nil, timeout)
if err != nil {
return nil, err
}
@@ -720,7 +742,7 @@ func (conn *TCPIPv4) ExpectNextData(tcp *TCP, payload *Payload, timeout time.Dur
expected[len(expected)-1] = tcp
if payload != nil {
expected = append(expected, payload)
- tcp.SeqNum = Uint32(uint32(*conn.RemoteSeqNum()) - uint32(payload.Length()))
+ tcp.SeqNum = Uint32(uint32(*conn.RemoteSeqNum(t)) - uint32(payload.Length()))
}
if !(*Connection)(conn).match(expected, got) {
return nil, fmt.Errorf("next frame is not matching %s during %s: got %s", expected, timeout, got)
@@ -730,71 +752,91 @@ func (conn *TCPIPv4) ExpectNextData(tcp *TCP, payload *Payload, timeout time.Dur
// Send a packet with reasonable defaults. Potentially override the TCP layer in
// the connection with the provided layer and add additionLayers.
-func (conn *TCPIPv4) Send(tcp TCP, additionalLayers ...Layer) {
- (*Connection)(conn).send(Layers{&tcp}, additionalLayers...)
+func (conn *TCPIPv4) Send(t *testing.T, tcp TCP, additionalLayers ...Layer) {
+ t.Helper()
+
+ (*Connection)(conn).send(t, Layers{&tcp}, additionalLayers...)
}
// Close frees associated resources held by the TCPIPv4 connection.
-func (conn *TCPIPv4) Close() {
- (*Connection)(conn).Close()
+func (conn *TCPIPv4) Close(t *testing.T) {
+ t.Helper()
+
+ (*Connection)(conn).Close(t)
}
// Expect expects a frame with the TCP layer matching the provided TCP within
// the timeout specified. If it doesn't arrive in time, an error is returned.
-func (conn *TCPIPv4) Expect(tcp TCP, timeout time.Duration) (*TCP, error) {
- layer, err := (*Connection)(conn).Expect(&tcp, timeout)
+func (conn *TCPIPv4) Expect(t *testing.T, tcp TCP, timeout time.Duration) (*TCP, error) {
+ t.Helper()
+
+ layer, err := (*Connection)(conn).Expect(t, &tcp, timeout)
if layer == nil {
return nil, err
}
gotTCP, ok := layer.(*TCP)
if !ok {
- conn.t.Fatalf("expected %s to be TCP", layer)
+ t.Fatalf("expected %s to be TCP", layer)
}
return gotTCP, err
}
-func (conn *TCPIPv4) tcpState() *tcpState {
+func (conn *TCPIPv4) tcpState(t *testing.T) *tcpState {
+ t.Helper()
+
state, ok := conn.layerStates[2].(*tcpState)
if !ok {
- conn.t.Fatalf("got transport-layer state type=%T, expected tcpState", conn.layerStates[2])
+ t.Fatalf("got transport-layer state type=%T, expected tcpState", conn.layerStates[2])
}
return state
}
-func (conn *TCPIPv4) ipv4State() *ipv4State {
+func (conn *TCPIPv4) ipv4State(t *testing.T) *ipv4State {
+ t.Helper()
+
state, ok := conn.layerStates[1].(*ipv4State)
if !ok {
- conn.t.Fatalf("expected network-layer state type=%T, expected ipv4State", conn.layerStates[1])
+ t.Fatalf("expected network-layer state type=%T, expected ipv4State", conn.layerStates[1])
}
return state
}
// RemoteSeqNum returns the next expected sequence number from the DUT.
-func (conn *TCPIPv4) RemoteSeqNum() *seqnum.Value {
- return conn.tcpState().remoteSeqNum
+func (conn *TCPIPv4) RemoteSeqNum(t *testing.T) *seqnum.Value {
+ t.Helper()
+
+ return conn.tcpState(t).remoteSeqNum
}
// LocalSeqNum returns the next sequence number to send from the testbench.
-func (conn *TCPIPv4) LocalSeqNum() *seqnum.Value {
- return conn.tcpState().localSeqNum
+func (conn *TCPIPv4) LocalSeqNum(t *testing.T) *seqnum.Value {
+ t.Helper()
+
+ return conn.tcpState(t).localSeqNum
}
// SynAck returns the SynAck that was part of the handshake.
-func (conn *TCPIPv4) SynAck() *TCP {
- return conn.tcpState().synAck
+func (conn *TCPIPv4) SynAck(t *testing.T) *TCP {
+ t.Helper()
+
+ return conn.tcpState(t).synAck
}
// LocalAddr gets the local socket address of this connection.
-func (conn *TCPIPv4) LocalAddr() *unix.SockaddrInet4 {
- sa := &unix.SockaddrInet4{Port: int(*conn.tcpState().out.SrcPort)}
- copy(sa.Addr[:], *conn.ipv4State().out.SrcAddr)
+func (conn *TCPIPv4) LocalAddr(t *testing.T) *unix.SockaddrInet4 {
+ t.Helper()
+
+ sa := &unix.SockaddrInet4{Port: int(*conn.tcpState(t).out.SrcPort)}
+ copy(sa.Addr[:], *conn.ipv4State(t).out.SrcAddr)
return sa
}
// Drain drains the sniffer's receive buffer by receiving packets until there's
// nothing else to receive.
-func (conn *TCPIPv4) Drain() {
- conn.sniffer.Drain()
+func (conn *TCPIPv4) Drain(t *testing.T) {
+ t.Helper()
+
+ conn.sniffer.Drain(t)
}
// IPv6Conn maintains the state for all the layers in a IPv6 connection.
@@ -802,6 +844,8 @@ type IPv6Conn Connection
// NewIPv6Conn creates a new IPv6Conn connection with reasonable defaults.
func NewIPv6Conn(t *testing.T, outgoingIPv6, incomingIPv6 IPv6) IPv6Conn {
+ t.Helper()
+
etherState, err := newEtherState(Ether{}, Ether{})
if err != nil {
t.Fatalf("can't make EtherState: %s", err)
@@ -824,25 +868,30 @@ func NewIPv6Conn(t *testing.T, outgoingIPv6, incomingIPv6 IPv6) IPv6Conn {
layerStates: []layerState{etherState, ipv6State},
injector: injector,
sniffer: sniffer,
- t: t,
}
}
// Send sends a frame with ipv6 overriding the IPv6 layer defaults and
// additionalLayers added after it.
-func (conn *IPv6Conn) Send(ipv6 IPv6, additionalLayers ...Layer) {
- (*Connection)(conn).send(Layers{&ipv6}, additionalLayers...)
+func (conn *IPv6Conn) Send(t *testing.T, ipv6 IPv6, additionalLayers ...Layer) {
+ t.Helper()
+
+ (*Connection)(conn).send(t, Layers{&ipv6}, additionalLayers...)
}
// Close to clean up any resources held.
-func (conn *IPv6Conn) Close() {
- (*Connection)(conn).Close()
+func (conn *IPv6Conn) Close(t *testing.T) {
+ t.Helper()
+
+ (*Connection)(conn).Close(t)
}
// ExpectFrame expects a frame that matches the provided Layers within the
// timeout specified. If it doesn't arrive in time, an error is returned.
-func (conn *IPv6Conn) ExpectFrame(frame Layers, timeout time.Duration) (Layers, error) {
- return (*Connection)(conn).ExpectFrame(frame, timeout)
+func (conn *IPv6Conn) ExpectFrame(t *testing.T, frame Layers, timeout time.Duration) (Layers, error) {
+ t.Helper()
+
+ return (*Connection)(conn).ExpectFrame(t, frame, timeout)
}
// UDPIPv4 maintains the state for all the layers in a UDP/IPv4 connection.
@@ -850,6 +899,8 @@ type UDPIPv4 Connection
// NewUDPIPv4 creates a new UDPIPv4 connection with reasonable defaults.
func NewUDPIPv4(t *testing.T, outgoingUDP, incomingUDP UDP) UDPIPv4 {
+ t.Helper()
+
etherState, err := newEtherState(Ether{}, Ether{})
if err != nil {
t.Fatalf("can't make etherState: %s", err)
@@ -875,81 +926,96 @@ func NewUDPIPv4(t *testing.T, outgoingUDP, incomingUDP UDP) UDPIPv4 {
layerStates: []layerState{etherState, ipv4State, udpState},
injector: injector,
sniffer: sniffer,
- t: t,
}
}
-func (conn *UDPIPv4) udpState() *udpState {
+func (conn *UDPIPv4) udpState(t *testing.T) *udpState {
+ t.Helper()
+
state, ok := conn.layerStates[2].(*udpState)
if !ok {
- conn.t.Fatalf("got transport-layer state type=%T, expected udpState", conn.layerStates[2])
+ t.Fatalf("got transport-layer state type=%T, expected udpState", conn.layerStates[2])
}
return state
}
-func (conn *UDPIPv4) ipv4State() *ipv4State {
+func (conn *UDPIPv4) ipv4State(t *testing.T) *ipv4State {
+ t.Helper()
+
state, ok := conn.layerStates[1].(*ipv4State)
if !ok {
- conn.t.Fatalf("got network-layer state type=%T, expected ipv4State", conn.layerStates[1])
+ t.Fatalf("got network-layer state type=%T, expected ipv4State", conn.layerStates[1])
}
return state
}
// LocalAddr gets the local socket address of this connection.
-func (conn *UDPIPv4) LocalAddr() *unix.SockaddrInet4 {
- sa := &unix.SockaddrInet4{Port: int(*conn.udpState().out.SrcPort)}
- copy(sa.Addr[:], *conn.ipv4State().out.SrcAddr)
+func (conn *UDPIPv4) LocalAddr(t *testing.T) *unix.SockaddrInet4 {
+ t.Helper()
+
+ sa := &unix.SockaddrInet4{Port: int(*conn.udpState(t).out.SrcPort)}
+ copy(sa.Addr[:], *conn.ipv4State(t).out.SrcAddr)
return sa
}
// Send sends a packet with reasonable defaults, potentially overriding the UDP
// layer and adding additionLayers.
-func (conn *UDPIPv4) Send(udp UDP, additionalLayers ...Layer) {
- (*Connection)(conn).send(Layers{&udp}, additionalLayers...)
+func (conn *UDPIPv4) Send(t *testing.T, udp UDP, additionalLayers ...Layer) {
+ t.Helper()
+
+ (*Connection)(conn).send(t, Layers{&udp}, additionalLayers...)
}
// SendIP sends a packet with reasonable defaults, potentially overriding the
// UDP and IPv4 headers and adding additionLayers.
-func (conn *UDPIPv4) SendIP(ip IPv4, udp UDP, additionalLayers ...Layer) {
- (*Connection)(conn).send(Layers{&ip, &udp}, additionalLayers...)
+func (conn *UDPIPv4) SendIP(t *testing.T, ip IPv4, udp UDP, additionalLayers ...Layer) {
+ t.Helper()
+
+ (*Connection)(conn).send(t, Layers{&ip, &udp}, additionalLayers...)
}
// Expect expects a frame with the UDP layer matching the provided UDP within
// the timeout specified. If it doesn't arrive in time, an error is returned.
-func (conn *UDPIPv4) Expect(udp UDP, timeout time.Duration) (*UDP, error) {
- conn.t.Helper()
- layer, err := (*Connection)(conn).Expect(&udp, timeout)
+func (conn *UDPIPv4) Expect(t *testing.T, udp UDP, timeout time.Duration) (*UDP, error) {
+ t.Helper()
+
+ layer, err := (*Connection)(conn).Expect(t, &udp, timeout)
if err != nil {
return nil, err
}
gotUDP, ok := layer.(*UDP)
if !ok {
- conn.t.Fatalf("expected %s to be UDP", layer)
+ t.Fatalf("expected %s to be UDP", layer)
}
return gotUDP, nil
}
// ExpectData is a convenient method that expects a Layer and the Layer after
// it. If it doens't arrive in time, it returns nil.
-func (conn *UDPIPv4) ExpectData(udp UDP, payload Payload, timeout time.Duration) (Layers, error) {
- conn.t.Helper()
+func (conn *UDPIPv4) ExpectData(t *testing.T, udp UDP, payload Payload, timeout time.Duration) (Layers, error) {
+ t.Helper()
+
expected := make([]Layer, len(conn.layerStates))
expected[len(expected)-1] = &udp
if payload.length() != 0 {
expected = append(expected, &payload)
}
- return (*Connection)(conn).ExpectFrame(expected, timeout)
+ return (*Connection)(conn).ExpectFrame(t, expected, timeout)
}
// Close frees associated resources held by the UDPIPv4 connection.
-func (conn *UDPIPv4) Close() {
- (*Connection)(conn).Close()
+func (conn *UDPIPv4) Close(t *testing.T) {
+ t.Helper()
+
+ (*Connection)(conn).Close(t)
}
// Drain drains the sniffer's receive buffer by receiving packets until there's
// nothing else to receive.
-func (conn *UDPIPv4) Drain() {
- conn.sniffer.Drain()
+func (conn *UDPIPv4) Drain(t *testing.T) {
+ t.Helper()
+
+ conn.sniffer.Drain(t)
}
// UDPIPv6 maintains the state for all the layers in a UDP/IPv6 connection.
@@ -957,6 +1023,8 @@ type UDPIPv6 Connection
// NewUDPIPv6 creates a new UDPIPv6 connection with reasonable defaults.
func NewUDPIPv6(t *testing.T, outgoingUDP, incomingUDP UDP) UDPIPv6 {
+ t.Helper()
+
etherState, err := newEtherState(Ether{}, Ether{})
if err != nil {
t.Fatalf("can't make etherState: %s", err)
@@ -981,86 +1049,101 @@ func NewUDPIPv6(t *testing.T, outgoingUDP, incomingUDP UDP) UDPIPv6 {
layerStates: []layerState{etherState, ipv6State, udpState},
injector: injector,
sniffer: sniffer,
- t: t,
}
}
-func (conn *UDPIPv6) udpState() *udpState {
+func (conn *UDPIPv6) udpState(t *testing.T) *udpState {
+ t.Helper()
+
state, ok := conn.layerStates[2].(*udpState)
if !ok {
- conn.t.Fatalf("got transport-layer state type=%T, expected udpState", conn.layerStates[2])
+ t.Fatalf("got transport-layer state type=%T, expected udpState", conn.layerStates[2])
}
return state
}
-func (conn *UDPIPv6) ipv6State() *ipv6State {
+func (conn *UDPIPv6) ipv6State(t *testing.T) *ipv6State {
+ t.Helper()
+
state, ok := conn.layerStates[1].(*ipv6State)
if !ok {
- conn.t.Fatalf("got network-layer state type=%T, expected ipv6State", conn.layerStates[1])
+ t.Fatalf("got network-layer state type=%T, expected ipv6State", conn.layerStates[1])
}
return state
}
// LocalAddr gets the local socket address of this connection.
-func (conn *UDPIPv6) LocalAddr() *unix.SockaddrInet6 {
+func (conn *UDPIPv6) LocalAddr(t *testing.T) *unix.SockaddrInet6 {
+ t.Helper()
+
sa := &unix.SockaddrInet6{
- Port: int(*conn.udpState().out.SrcPort),
+ Port: int(*conn.udpState(t).out.SrcPort),
// Local address is in perspective to the remote host, so it's scoped to the
// ID of the remote interface.
ZoneId: uint32(RemoteInterfaceID),
}
- copy(sa.Addr[:], *conn.ipv6State().out.SrcAddr)
+ copy(sa.Addr[:], *conn.ipv6State(t).out.SrcAddr)
return sa
}
// Send sends a packet with reasonable defaults, potentially overriding the UDP
// layer and adding additionLayers.
-func (conn *UDPIPv6) Send(udp UDP, additionalLayers ...Layer) {
- (*Connection)(conn).send(Layers{&udp}, additionalLayers...)
+func (conn *UDPIPv6) Send(t *testing.T, udp UDP, additionalLayers ...Layer) {
+ t.Helper()
+
+ (*Connection)(conn).send(t, Layers{&udp}, additionalLayers...)
}
// SendIPv6 sends a packet with reasonable defaults, potentially overriding the
// UDP and IPv6 headers and adding additionLayers.
-func (conn *UDPIPv6) SendIPv6(ip IPv6, udp UDP, additionalLayers ...Layer) {
- (*Connection)(conn).send(Layers{&ip, &udp}, additionalLayers...)
+func (conn *UDPIPv6) SendIPv6(t *testing.T, ip IPv6, udp UDP, additionalLayers ...Layer) {
+ t.Helper()
+
+ (*Connection)(conn).send(t, Layers{&ip, &udp}, additionalLayers...)
}
// Expect expects a frame with the UDP layer matching the provided UDP within
// the timeout specified. If it doesn't arrive in time, an error is returned.
-func (conn *UDPIPv6) Expect(udp UDP, timeout time.Duration) (*UDP, error) {
- conn.t.Helper()
- layer, err := (*Connection)(conn).Expect(&udp, timeout)
+func (conn *UDPIPv6) Expect(t *testing.T, udp UDP, timeout time.Duration) (*UDP, error) {
+ t.Helper()
+
+ layer, err := (*Connection)(conn).Expect(t, &udp, timeout)
if err != nil {
return nil, err
}
gotUDP, ok := layer.(*UDP)
if !ok {
- conn.t.Fatalf("expected %s to be UDP", layer)
+ t.Fatalf("expected %s to be UDP", layer)
}
return gotUDP, nil
}
// ExpectData is a convenient method that expects a Layer and the Layer after
// it. If it doens't arrive in time, it returns nil.
-func (conn *UDPIPv6) ExpectData(udp UDP, payload Payload, timeout time.Duration) (Layers, error) {
- conn.t.Helper()
+func (conn *UDPIPv6) ExpectData(t *testing.T, udp UDP, payload Payload, timeout time.Duration) (Layers, error) {
+ t.Helper()
+
expected := make([]Layer, len(conn.layerStates))
expected[len(expected)-1] = &udp
if payload.length() != 0 {
expected = append(expected, &payload)
}
- return (*Connection)(conn).ExpectFrame(expected, timeout)
+ return (*Connection)(conn).ExpectFrame(t, expected, timeout)
}
// Close frees associated resources held by the UDPIPv6 connection.
-func (conn *UDPIPv6) Close() {
- (*Connection)(conn).Close()
+func (conn *UDPIPv6) Close(t *testing.T) {
+ t.Helper()
+
+ (*Connection)(conn).Close(t)
}
// Drain drains the sniffer's receive buffer by receiving packets until there's
// nothing else to receive.
-func (conn *UDPIPv6) Drain() {
- conn.sniffer.Drain()
+func (conn *UDPIPv6) Drain(t *testing.T) {
+ t.Helper()
+
+ conn.sniffer.Drain(t)
}
// TCPIPv6 maintains the state for all the layers in a TCP/IPv6 connection.
@@ -1093,7 +1176,6 @@ func NewTCPIPv6(t *testing.T, outgoingTCP, incomingTCP TCP) TCPIPv6 {
layerStates: []layerState{etherState, ipv6State, tcpState},
injector: injector,
sniffer: sniffer,
- t: t,
}
}
@@ -1104,16 +1186,20 @@ func (conn *TCPIPv6) SrcPort() uint16 {
// ExpectData is a convenient method that expects a Layer and the Layer after
// it. If it doens't arrive in time, it returns nil.
-func (conn *TCPIPv6) ExpectData(tcp *TCP, payload *Payload, timeout time.Duration) (Layers, error) {
+func (conn *TCPIPv6) ExpectData(t *testing.T, tcp *TCP, payload *Payload, timeout time.Duration) (Layers, error) {
+ t.Helper()
+
expected := make([]Layer, len(conn.layerStates))
expected[len(expected)-1] = tcp
if payload != nil {
expected = append(expected, payload)
}
- return (*Connection)(conn).ExpectFrame(expected, timeout)
+ return (*Connection)(conn).ExpectFrame(t, expected, timeout)
}
// Close frees associated resources held by the TCPIPv6 connection.
-func (conn *TCPIPv6) Close() {
- (*Connection)(conn).Close()
+func (conn *TCPIPv6) Close(t *testing.T) {
+ t.Helper()
+
+ (*Connection)(conn).Close(t)
}
diff --git a/test/packetimpact/testbench/dut.go b/test/packetimpact/testbench/dut.go
index 51be13759..73c532e75 100644
--- a/test/packetimpact/testbench/dut.go
+++ b/test/packetimpact/testbench/dut.go
@@ -31,13 +31,14 @@ import (
// DUT communicates with the DUT to force it to make POSIX calls.
type DUT struct {
- t *testing.T
conn *grpc.ClientConn
posixServer POSIXClient
}
// NewDUT creates a new connection with the DUT over gRPC.
func NewDUT(t *testing.T) DUT {
+ t.Helper()
+
flag.Parse()
if err := genPseudoFlags(); err != nil {
t.Fatal("generating psuedo flags:", err)
@@ -50,7 +51,6 @@ func NewDUT(t *testing.T) DUT {
}
posixServer := NewPOSIXClient(conn)
return DUT{
- t: t,
conn: conn,
posixServer: posixServer,
}
@@ -61,8 +61,9 @@ func (dut *DUT) TearDown() {
dut.conn.Close()
}
-func (dut *DUT) sockaddrToProto(sa unix.Sockaddr) *pb.Sockaddr {
- dut.t.Helper()
+func (dut *DUT) sockaddrToProto(t *testing.T, sa unix.Sockaddr) *pb.Sockaddr {
+ t.Helper()
+
switch s := sa.(type) {
case *unix.SockaddrInet4:
return &pb.Sockaddr{
@@ -87,12 +88,13 @@ func (dut *DUT) sockaddrToProto(sa unix.Sockaddr) *pb.Sockaddr {
},
}
}
- dut.t.Fatalf("can't parse Sockaddr struct: %+v", sa)
+ t.Fatalf("can't parse Sockaddr struct: %+v", sa)
return nil
}
-func (dut *DUT) protoToSockaddr(sa *pb.Sockaddr) unix.Sockaddr {
- dut.t.Helper()
+func (dut *DUT) protoToSockaddr(t *testing.T, sa *pb.Sockaddr) unix.Sockaddr {
+ t.Helper()
+
switch s := sa.Sockaddr.(type) {
case *pb.Sockaddr_In:
ret := unix.SockaddrInet4{
@@ -108,31 +110,32 @@ func (dut *DUT) protoToSockaddr(sa *pb.Sockaddr) unix.Sockaddr {
copy(ret.Addr[:], s.In6.GetAddr())
return &ret
}
- dut.t.Fatalf("can't parse Sockaddr proto: %+v", sa)
+ t.Fatalf("can't parse Sockaddr proto: %#v", sa)
return nil
}
// CreateBoundSocket makes a new socket on the DUT, with type typ and protocol
// proto, and bound to the IP address addr. Returns the new file descriptor and
// the port that was selected on the DUT.
-func (dut *DUT) CreateBoundSocket(typ, proto int32, addr net.IP) (int32, uint16) {
- dut.t.Helper()
+func (dut *DUT) CreateBoundSocket(t *testing.T, typ, proto int32, addr net.IP) (int32, uint16) {
+ t.Helper()
+
var fd int32
if addr.To4() != nil {
- fd = dut.Socket(unix.AF_INET, typ, proto)
+ fd = dut.Socket(t, unix.AF_INET, typ, proto)
sa := unix.SockaddrInet4{}
copy(sa.Addr[:], addr.To4())
- dut.Bind(fd, &sa)
+ dut.Bind(t, fd, &sa)
} else if addr.To16() != nil {
- fd = dut.Socket(unix.AF_INET6, typ, proto)
+ fd = dut.Socket(t, unix.AF_INET6, typ, proto)
sa := unix.SockaddrInet6{}
copy(sa.Addr[:], addr.To16())
sa.ZoneId = uint32(RemoteInterfaceID)
- dut.Bind(fd, &sa)
+ dut.Bind(t, fd, &sa)
} else {
- dut.t.Fatalf("unknown ip addr type for remoteIP")
+ t.Fatalf("invalid IP address: %s", addr)
}
- sa := dut.GetSockName(fd)
+ sa := dut.GetSockName(t, fd)
var port int
switch s := sa.(type) {
case *unix.SockaddrInet4:
@@ -140,15 +143,17 @@ func (dut *DUT) CreateBoundSocket(typ, proto int32, addr net.IP) (int32, uint16)
case *unix.SockaddrInet6:
port = s.Port
default:
- dut.t.Fatalf("unknown sockaddr type from getsockname: %t", sa)
+ t.Fatalf("unknown sockaddr type from getsockname: %T", sa)
}
return fd, uint16(port)
}
// CreateListener makes a new TCP connection. If it fails, the test ends.
-func (dut *DUT) CreateListener(typ, proto, backlog int32) (int32, uint16) {
- fd, remotePort := dut.CreateBoundSocket(typ, proto, net.ParseIP(RemoteIPv4))
- dut.Listen(fd, backlog)
+func (dut *DUT) CreateListener(t *testing.T, typ, proto, backlog int32) (int32, uint16) {
+ t.Helper()
+
+ fd, remotePort := dut.CreateBoundSocket(t, typ, proto, net.ParseIP(RemoteIPv4))
+ dut.Listen(t, fd, backlog)
return fd, remotePort
}
@@ -158,53 +163,57 @@ func (dut *DUT) CreateListener(typ, proto, backlog int32) (int32, uint16) {
// Accept calls accept on the DUT and causes a fatal test failure if it doesn't
// succeed. If more control over the timeout or error handling is needed, use
// AcceptWithErrno.
-func (dut *DUT) Accept(sockfd int32) (int32, unix.Sockaddr) {
- dut.t.Helper()
+func (dut *DUT) Accept(t *testing.T, sockfd int32) (int32, unix.Sockaddr) {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- fd, sa, err := dut.AcceptWithErrno(ctx, sockfd)
+ fd, sa, err := dut.AcceptWithErrno(ctx, t, sockfd)
if fd < 0 {
- dut.t.Fatalf("failed to accept: %s", err)
+ t.Fatalf("failed to accept: %s", err)
}
return fd, sa
}
// AcceptWithErrno calls accept on the DUT.
-func (dut *DUT) AcceptWithErrno(ctx context.Context, sockfd int32) (int32, unix.Sockaddr, error) {
- dut.t.Helper()
+func (dut *DUT) AcceptWithErrno(ctx context.Context, t *testing.T, sockfd int32) (int32, unix.Sockaddr, error) {
+ t.Helper()
+
req := pb.AcceptRequest{
Sockfd: sockfd,
}
resp, err := dut.posixServer.Accept(ctx, &req)
if err != nil {
- dut.t.Fatalf("failed to call Accept: %s", err)
+ t.Fatalf("failed to call Accept: %s", err)
}
- return resp.GetFd(), dut.protoToSockaddr(resp.GetAddr()), syscall.Errno(resp.GetErrno_())
+ return resp.GetFd(), dut.protoToSockaddr(t, resp.GetAddr()), syscall.Errno(resp.GetErrno_())
}
// Bind calls bind on the DUT and causes a fatal test failure if it doesn't
// succeed. If more control over the timeout or error handling is
// needed, use BindWithErrno.
-func (dut *DUT) Bind(fd int32, sa unix.Sockaddr) {
- dut.t.Helper()
+func (dut *DUT) Bind(t *testing.T, fd int32, sa unix.Sockaddr) {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, err := dut.BindWithErrno(ctx, fd, sa)
+ ret, err := dut.BindWithErrno(ctx, t, fd, sa)
if ret != 0 {
- dut.t.Fatalf("failed to bind socket: %s", err)
+ t.Fatalf("failed to bind socket: %s", err)
}
}
// BindWithErrno calls bind on the DUT.
-func (dut *DUT) BindWithErrno(ctx context.Context, fd int32, sa unix.Sockaddr) (int32, error) {
- dut.t.Helper()
+func (dut *DUT) BindWithErrno(ctx context.Context, t *testing.T, fd int32, sa unix.Sockaddr) (int32, error) {
+ t.Helper()
+
req := pb.BindRequest{
Sockfd: fd,
- Addr: dut.sockaddrToProto(sa),
+ Addr: dut.sockaddrToProto(t, sa),
}
resp, err := dut.posixServer.Bind(ctx, &req)
if err != nil {
- dut.t.Fatalf("failed to call Bind: %s", err)
+ t.Fatalf("failed to call Bind: %s", err)
}
return resp.GetRet(), syscall.Errno(resp.GetErrno_())
}
@@ -212,25 +221,27 @@ func (dut *DUT) BindWithErrno(ctx context.Context, fd int32, sa unix.Sockaddr) (
// Close calls close on the DUT and causes a fatal test failure if it doesn't
// succeed. If more control over the timeout or error handling is needed, use
// CloseWithErrno.
-func (dut *DUT) Close(fd int32) {
- dut.t.Helper()
+func (dut *DUT) Close(t *testing.T, fd int32) {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, err := dut.CloseWithErrno(ctx, fd)
+ ret, err := dut.CloseWithErrno(ctx, t, fd)
if ret != 0 {
- dut.t.Fatalf("failed to close: %s", err)
+ t.Fatalf("failed to close: %s", err)
}
}
// CloseWithErrno calls close on the DUT.
-func (dut *DUT) CloseWithErrno(ctx context.Context, fd int32) (int32, error) {
- dut.t.Helper()
+func (dut *DUT) CloseWithErrno(ctx context.Context, t *testing.T, fd int32) (int32, error) {
+ t.Helper()
+
req := pb.CloseRequest{
Fd: fd,
}
resp, err := dut.posixServer.Close(ctx, &req)
if err != nil {
- dut.t.Fatalf("failed to call Close: %s", err)
+ t.Fatalf("failed to call Close: %s", err)
}
return resp.GetRet(), syscall.Errno(resp.GetErrno_())
}
@@ -238,28 +249,30 @@ func (dut *DUT) CloseWithErrno(ctx context.Context, fd int32) (int32, error) {
// Connect calls connect on the DUT and causes a fatal test failure if it
// doesn't succeed. If more control over the timeout or error handling is
// needed, use ConnectWithErrno.
-func (dut *DUT) Connect(fd int32, sa unix.Sockaddr) {
- dut.t.Helper()
+func (dut *DUT) Connect(t *testing.T, fd int32, sa unix.Sockaddr) {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, err := dut.ConnectWithErrno(ctx, fd, sa)
+ ret, err := dut.ConnectWithErrno(ctx, t, fd, sa)
// Ignore 'operation in progress' error that can be returned when the socket
// is non-blocking.
if err != syscall.Errno(unix.EINPROGRESS) && ret != 0 {
- dut.t.Fatalf("failed to connect socket: %s", err)
+ t.Fatalf("failed to connect socket: %s", err)
}
}
// ConnectWithErrno calls bind on the DUT.
-func (dut *DUT) ConnectWithErrno(ctx context.Context, fd int32, sa unix.Sockaddr) (int32, error) {
- dut.t.Helper()
+func (dut *DUT) ConnectWithErrno(ctx context.Context, t *testing.T, fd int32, sa unix.Sockaddr) (int32, error) {
+ t.Helper()
+
req := pb.ConnectRequest{
Sockfd: fd,
- Addr: dut.sockaddrToProto(sa),
+ Addr: dut.sockaddrToProto(t, sa),
}
resp, err := dut.posixServer.Connect(ctx, &req)
if err != nil {
- dut.t.Fatalf("failed to call Connect: %s", err)
+ t.Fatalf("failed to call Connect: %s", err)
}
return resp.GetRet(), syscall.Errno(resp.GetErrno_())
}
@@ -267,20 +280,22 @@ func (dut *DUT) ConnectWithErrno(ctx context.Context, fd int32, sa unix.Sockaddr
// Fcntl calls fcntl on the DUT and causes a fatal test failure if it
// doesn't succeed. If more control over the timeout or error handling is
// needed, use FcntlWithErrno.
-func (dut *DUT) Fcntl(fd, cmd, arg int32) int32 {
- dut.t.Helper()
+func (dut *DUT) Fcntl(t *testing.T, fd, cmd, arg int32) int32 {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, err := dut.FcntlWithErrno(ctx, fd, cmd, arg)
+ ret, err := dut.FcntlWithErrno(ctx, t, fd, cmd, arg)
if ret == -1 {
- dut.t.Fatalf("failed to Fcntl: ret=%d, errno=%s", ret, err)
+ t.Fatalf("failed to Fcntl: ret=%d, errno=%s", ret, err)
}
return ret
}
// FcntlWithErrno calls fcntl on the DUT.
-func (dut *DUT) FcntlWithErrno(ctx context.Context, fd, cmd, arg int32) (int32, error) {
- dut.t.Helper()
+func (dut *DUT) FcntlWithErrno(ctx context.Context, t *testing.T, fd, cmd, arg int32) (int32, error) {
+ t.Helper()
+
req := pb.FcntlRequest{
Fd: fd,
Cmd: cmd,
@@ -288,7 +303,7 @@ func (dut *DUT) FcntlWithErrno(ctx context.Context, fd, cmd, arg int32) (int32,
}
resp, err := dut.posixServer.Fcntl(ctx, &req)
if err != nil {
- dut.t.Fatalf("failed to call Fcntl: %s", err)
+ t.Fatalf("failed to call Fcntl: %s", err)
}
return resp.GetRet(), syscall.Errno(resp.GetErrno_())
}
@@ -296,32 +311,35 @@ func (dut *DUT) FcntlWithErrno(ctx context.Context, fd, cmd, arg int32) (int32,
// GetSockName calls getsockname on the DUT and causes a fatal test failure if
// it doesn't succeed. If more control over the timeout or error handling is
// needed, use GetSockNameWithErrno.
-func (dut *DUT) GetSockName(sockfd int32) unix.Sockaddr {
- dut.t.Helper()
+func (dut *DUT) GetSockName(t *testing.T, sockfd int32) unix.Sockaddr {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, sa, err := dut.GetSockNameWithErrno(ctx, sockfd)
+ ret, sa, err := dut.GetSockNameWithErrno(ctx, t, sockfd)
if ret != 0 {
- dut.t.Fatalf("failed to getsockname: %s", err)
+ t.Fatalf("failed to getsockname: %s", err)
}
return sa
}
// GetSockNameWithErrno calls getsockname on the DUT.
-func (dut *DUT) GetSockNameWithErrno(ctx context.Context, sockfd int32) (int32, unix.Sockaddr, error) {
- dut.t.Helper()
+func (dut *DUT) GetSockNameWithErrno(ctx context.Context, t *testing.T, sockfd int32) (int32, unix.Sockaddr, error) {
+ t.Helper()
+
req := pb.GetSockNameRequest{
Sockfd: sockfd,
}
resp, err := dut.posixServer.GetSockName(ctx, &req)
if err != nil {
- dut.t.Fatalf("failed to call Bind: %s", err)
+ t.Fatalf("failed to call Bind: %s", err)
}
- return resp.GetRet(), dut.protoToSockaddr(resp.GetAddr()), syscall.Errno(resp.GetErrno_())
+ return resp.GetRet(), dut.protoToSockaddr(t, resp.GetAddr()), syscall.Errno(resp.GetErrno_())
}
-func (dut *DUT) getSockOpt(ctx context.Context, sockfd, level, optname, optlen int32, typ pb.GetSockOptRequest_SockOptType) (int32, *pb.SockOptVal, error) {
- dut.t.Helper()
+func (dut *DUT) getSockOpt(ctx context.Context, t *testing.T, sockfd, level, optname, optlen int32, typ pb.GetSockOptRequest_SockOptType) (int32, *pb.SockOptVal, error) {
+ t.Helper()
+
req := pb.GetSockOptRequest{
Sockfd: sockfd,
Level: level,
@@ -331,11 +349,11 @@ func (dut *DUT) getSockOpt(ctx context.Context, sockfd, level, optname, optlen i
}
resp, err := dut.posixServer.GetSockOpt(ctx, &req)
if err != nil {
- dut.t.Fatalf("failed to call GetSockOpt: %s", err)
+ t.Fatalf("failed to call GetSockOpt: %s", err)
}
optval := resp.GetOptval()
if optval == nil {
- dut.t.Fatalf("GetSockOpt response does not contain a value")
+ t.Fatalf("GetSockOpt response does not contain a value")
}
return resp.GetRet(), optval, syscall.Errno(resp.GetErrno_())
}
@@ -345,13 +363,14 @@ func (dut *DUT) getSockOpt(ctx context.Context, sockfd, level, optname, optlen i
// needed, use GetSockOptWithErrno. Because endianess and the width of values
// might differ between the testbench and DUT architectures, prefer to use a
// more specific GetSockOptXxx function.
-func (dut *DUT) GetSockOpt(sockfd, level, optname, optlen int32) []byte {
- dut.t.Helper()
+func (dut *DUT) GetSockOpt(t *testing.T, sockfd, level, optname, optlen int32) []byte {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, optval, err := dut.GetSockOptWithErrno(ctx, sockfd, level, optname, optlen)
+ ret, optval, err := dut.GetSockOptWithErrno(ctx, t, sockfd, level, optname, optlen)
if ret != 0 {
- dut.t.Fatalf("failed to GetSockOpt: %s", err)
+ t.Fatalf("failed to GetSockOpt: %s", err)
}
return optval
}
@@ -359,12 +378,13 @@ func (dut *DUT) GetSockOpt(sockfd, level, optname, optlen int32) []byte {
// GetSockOptWithErrno calls getsockopt on the DUT. Because endianess and the
// width of values might differ between the testbench and DUT architectures,
// prefer to use a more specific GetSockOptXxxWithErrno function.
-func (dut *DUT) GetSockOptWithErrno(ctx context.Context, sockfd, level, optname, optlen int32) (int32, []byte, error) {
- dut.t.Helper()
- ret, optval, errno := dut.getSockOpt(ctx, sockfd, level, optname, optlen, pb.GetSockOptRequest_BYTES)
+func (dut *DUT) GetSockOptWithErrno(ctx context.Context, t *testing.T, sockfd, level, optname, optlen int32) (int32, []byte, error) {
+ t.Helper()
+
+ ret, optval, errno := dut.getSockOpt(ctx, t, sockfd, level, optname, optlen, pb.GetSockOptRequest_BYTES)
bytesval, ok := optval.Val.(*pb.SockOptVal_Bytesval)
if !ok {
- dut.t.Fatalf("GetSockOpt got value type: %T, want bytes", optval)
+ t.Fatalf("GetSockOpt got value type: %T, want bytes", optval.Val)
}
return ret, bytesval.Bytesval, errno
}
@@ -372,24 +392,26 @@ func (dut *DUT) GetSockOptWithErrno(ctx context.Context, sockfd, level, optname,
// GetSockOptInt calls getsockopt on the DUT and causes a fatal test failure
// if it doesn't succeed. If more control over the int optval or error handling
// is needed, use GetSockOptIntWithErrno.
-func (dut *DUT) GetSockOptInt(sockfd, level, optname int32) int32 {
- dut.t.Helper()
+func (dut *DUT) GetSockOptInt(t *testing.T, sockfd, level, optname int32) int32 {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, intval, err := dut.GetSockOptIntWithErrno(ctx, sockfd, level, optname)
+ ret, intval, err := dut.GetSockOptIntWithErrno(ctx, t, sockfd, level, optname)
if ret != 0 {
- dut.t.Fatalf("failed to GetSockOptInt: %s", err)
+ t.Fatalf("failed to GetSockOptInt: %s", err)
}
return intval
}
// GetSockOptIntWithErrno calls getsockopt with an integer optval.
-func (dut *DUT) GetSockOptIntWithErrno(ctx context.Context, sockfd, level, optname int32) (int32, int32, error) {
- dut.t.Helper()
- ret, optval, errno := dut.getSockOpt(ctx, sockfd, level, optname, 0, pb.GetSockOptRequest_INT)
+func (dut *DUT) GetSockOptIntWithErrno(ctx context.Context, t *testing.T, sockfd, level, optname int32) (int32, int32, error) {
+ t.Helper()
+
+ ret, optval, errno := dut.getSockOpt(ctx, t, sockfd, level, optname, 0, pb.GetSockOptRequest_INT)
intval, ok := optval.Val.(*pb.SockOptVal_Intval)
if !ok {
- dut.t.Fatalf("GetSockOpt got value type: %T, want int", optval)
+ t.Fatalf("GetSockOpt got value type: %T, want int", optval.Val)
}
return ret, intval.Intval, errno
}
@@ -397,24 +419,26 @@ func (dut *DUT) GetSockOptIntWithErrno(ctx context.Context, sockfd, level, optna
// GetSockOptTimeval calls getsockopt on the DUT and causes a fatal test failure
// if it doesn't succeed. If more control over the timeout or error handling is
// needed, use GetSockOptTimevalWithErrno.
-func (dut *DUT) GetSockOptTimeval(sockfd, level, optname int32) unix.Timeval {
- dut.t.Helper()
+func (dut *DUT) GetSockOptTimeval(t *testing.T, sockfd, level, optname int32) unix.Timeval {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, timeval, err := dut.GetSockOptTimevalWithErrno(ctx, sockfd, level, optname)
+ ret, timeval, err := dut.GetSockOptTimevalWithErrno(ctx, t, sockfd, level, optname)
if ret != 0 {
- dut.t.Fatalf("failed to GetSockOptTimeval: %s", err)
+ t.Fatalf("failed to GetSockOptTimeval: %s", err)
}
return timeval
}
// GetSockOptTimevalWithErrno calls getsockopt and returns a timeval.
-func (dut *DUT) GetSockOptTimevalWithErrno(ctx context.Context, sockfd, level, optname int32) (int32, unix.Timeval, error) {
- dut.t.Helper()
- ret, optval, errno := dut.getSockOpt(ctx, sockfd, level, optname, 0, pb.GetSockOptRequest_TIME)
+func (dut *DUT) GetSockOptTimevalWithErrno(ctx context.Context, t *testing.T, sockfd, level, optname int32) (int32, unix.Timeval, error) {
+ t.Helper()
+
+ ret, optval, errno := dut.getSockOpt(ctx, t, sockfd, level, optname, 0, pb.GetSockOptRequest_TIME)
tv, ok := optval.Val.(*pb.SockOptVal_Timeval)
if !ok {
- dut.t.Fatalf("GetSockOpt got value type: %T, want timeval", optval)
+ t.Fatalf("GetSockOpt got value type: %T, want timeval", optval.Val)
}
timeval := unix.Timeval{
Sec: tv.Timeval.Seconds,
@@ -426,26 +450,28 @@ func (dut *DUT) GetSockOptTimevalWithErrno(ctx context.Context, sockfd, level, o
// Listen calls listen on the DUT and causes a fatal test failure if it doesn't
// succeed. If more control over the timeout or error handling is needed, use
// ListenWithErrno.
-func (dut *DUT) Listen(sockfd, backlog int32) {
- dut.t.Helper()
+func (dut *DUT) Listen(t *testing.T, sockfd, backlog int32) {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, err := dut.ListenWithErrno(ctx, sockfd, backlog)
+ ret, err := dut.ListenWithErrno(ctx, t, sockfd, backlog)
if ret != 0 {
- dut.t.Fatalf("failed to listen: %s", err)
+ t.Fatalf("failed to listen: %s", err)
}
}
// ListenWithErrno calls listen on the DUT.
-func (dut *DUT) ListenWithErrno(ctx context.Context, sockfd, backlog int32) (int32, error) {
- dut.t.Helper()
+func (dut *DUT) ListenWithErrno(ctx context.Context, t *testing.T, sockfd, backlog int32) (int32, error) {
+ t.Helper()
+
req := pb.ListenRequest{
Sockfd: sockfd,
Backlog: backlog,
}
resp, err := dut.posixServer.Listen(ctx, &req)
if err != nil {
- dut.t.Fatalf("failed to call Listen: %s", err)
+ t.Fatalf("failed to call Listen: %s", err)
}
return resp.GetRet(), syscall.Errno(resp.GetErrno_())
}
@@ -453,20 +479,22 @@ func (dut *DUT) ListenWithErrno(ctx context.Context, sockfd, backlog int32) (int
// Send calls send on the DUT and causes a fatal test failure if it doesn't
// succeed. If more control over the timeout or error handling is needed, use
// SendWithErrno.
-func (dut *DUT) Send(sockfd int32, buf []byte, flags int32) int32 {
- dut.t.Helper()
+func (dut *DUT) Send(t *testing.T, sockfd int32, buf []byte, flags int32) int32 {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, err := dut.SendWithErrno(ctx, sockfd, buf, flags)
+ ret, err := dut.SendWithErrno(ctx, t, sockfd, buf, flags)
if ret == -1 {
- dut.t.Fatalf("failed to send: %s", err)
+ t.Fatalf("failed to send: %s", err)
}
return ret
}
// SendWithErrno calls send on the DUT.
-func (dut *DUT) SendWithErrno(ctx context.Context, sockfd int32, buf []byte, flags int32) (int32, error) {
- dut.t.Helper()
+func (dut *DUT) SendWithErrno(ctx context.Context, t *testing.T, sockfd int32, buf []byte, flags int32) (int32, error) {
+ t.Helper()
+
req := pb.SendRequest{
Sockfd: sockfd,
Buf: buf,
@@ -474,7 +502,7 @@ func (dut *DUT) SendWithErrno(ctx context.Context, sockfd int32, buf []byte, fla
}
resp, err := dut.posixServer.Send(ctx, &req)
if err != nil {
- dut.t.Fatalf("failed to call Send: %s", err)
+ t.Fatalf("failed to call Send: %s", err)
}
return resp.GetRet(), syscall.Errno(resp.GetErrno_())
}
@@ -482,48 +510,52 @@ func (dut *DUT) SendWithErrno(ctx context.Context, sockfd int32, buf []byte, fla
// SendTo calls sendto on the DUT and causes a fatal test failure if it doesn't
// succeed. If more control over the timeout or error handling is needed, use
// SendToWithErrno.
-func (dut *DUT) SendTo(sockfd int32, buf []byte, flags int32, destAddr unix.Sockaddr) int32 {
- dut.t.Helper()
+func (dut *DUT) SendTo(t *testing.T, sockfd int32, buf []byte, flags int32, destAddr unix.Sockaddr) int32 {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, err := dut.SendToWithErrno(ctx, sockfd, buf, flags, destAddr)
+ ret, err := dut.SendToWithErrno(ctx, t, sockfd, buf, flags, destAddr)
if ret == -1 {
- dut.t.Fatalf("failed to sendto: %s", err)
+ t.Fatalf("failed to sendto: %s", err)
}
return ret
}
// SendToWithErrno calls sendto on the DUT.
-func (dut *DUT) SendToWithErrno(ctx context.Context, sockfd int32, buf []byte, flags int32, destAddr unix.Sockaddr) (int32, error) {
- dut.t.Helper()
+func (dut *DUT) SendToWithErrno(ctx context.Context, t *testing.T, sockfd int32, buf []byte, flags int32, destAddr unix.Sockaddr) (int32, error) {
+ t.Helper()
+
req := pb.SendToRequest{
Sockfd: sockfd,
Buf: buf,
Flags: flags,
- DestAddr: dut.sockaddrToProto(destAddr),
+ DestAddr: dut.sockaddrToProto(t, destAddr),
}
resp, err := dut.posixServer.SendTo(ctx, &req)
if err != nil {
- dut.t.Fatalf("faled to call SendTo: %s", err)
+ t.Fatalf("faled to call SendTo: %s", err)
}
return resp.GetRet(), syscall.Errno(resp.GetErrno_())
}
// SetNonBlocking will set O_NONBLOCK flag for fd if nonblocking
// is true, otherwise it will clear the flag.
-func (dut *DUT) SetNonBlocking(fd int32, nonblocking bool) {
- dut.t.Helper()
- flags := dut.Fcntl(fd, unix.F_GETFL, 0)
+func (dut *DUT) SetNonBlocking(t *testing.T, fd int32, nonblocking bool) {
+ t.Helper()
+
+ flags := dut.Fcntl(t, fd, unix.F_GETFL, 0)
if nonblocking {
flags |= unix.O_NONBLOCK
} else {
flags &= ^unix.O_NONBLOCK
}
- dut.Fcntl(fd, unix.F_SETFL, flags)
+ dut.Fcntl(t, fd, unix.F_SETFL, flags)
}
-func (dut *DUT) setSockOpt(ctx context.Context, sockfd, level, optname int32, optval *pb.SockOptVal) (int32, error) {
- dut.t.Helper()
+func (dut *DUT) setSockOpt(ctx context.Context, t *testing.T, sockfd, level, optname int32, optval *pb.SockOptVal) (int32, error) {
+ t.Helper()
+
req := pb.SetSockOptRequest{
Sockfd: sockfd,
Level: level,
@@ -532,7 +564,7 @@ func (dut *DUT) setSockOpt(ctx context.Context, sockfd, level, optname int32, op
}
resp, err := dut.posixServer.SetSockOpt(ctx, &req)
if err != nil {
- dut.t.Fatalf("failed to call SetSockOpt: %s", err)
+ t.Fatalf("failed to call SetSockOpt: %s", err)
}
return resp.GetRet(), syscall.Errno(resp.GetErrno_())
}
@@ -542,81 +574,89 @@ func (dut *DUT) setSockOpt(ctx context.Context, sockfd, level, optname int32, op
// needed, use SetSockOptWithErrno. Because endianess and the width of values
// might differ between the testbench and DUT architectures, prefer to use a
// more specific SetSockOptXxx function.
-func (dut *DUT) SetSockOpt(sockfd, level, optname int32, optval []byte) {
- dut.t.Helper()
+func (dut *DUT) SetSockOpt(t *testing.T, sockfd, level, optname int32, optval []byte) {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, err := dut.SetSockOptWithErrno(ctx, sockfd, level, optname, optval)
+ ret, err := dut.SetSockOptWithErrno(ctx, t, sockfd, level, optname, optval)
if ret != 0 {
- dut.t.Fatalf("failed to SetSockOpt: %s", err)
+ t.Fatalf("failed to SetSockOpt: %s", err)
}
}
// SetSockOptWithErrno calls setsockopt on the DUT. Because endianess and the
// width of values might differ between the testbench and DUT architectures,
// prefer to use a more specific SetSockOptXxxWithErrno function.
-func (dut *DUT) SetSockOptWithErrno(ctx context.Context, sockfd, level, optname int32, optval []byte) (int32, error) {
- dut.t.Helper()
- return dut.setSockOpt(ctx, sockfd, level, optname, &pb.SockOptVal{Val: &pb.SockOptVal_Bytesval{optval}})
+func (dut *DUT) SetSockOptWithErrno(ctx context.Context, t *testing.T, sockfd, level, optname int32, optval []byte) (int32, error) {
+ t.Helper()
+
+ return dut.setSockOpt(ctx, t, sockfd, level, optname, &pb.SockOptVal{Val: &pb.SockOptVal_Bytesval{optval}})
}
// SetSockOptInt calls setsockopt on the DUT and causes a fatal test failure
// if it doesn't succeed. If more control over the int optval or error handling
// is needed, use SetSockOptIntWithErrno.
-func (dut *DUT) SetSockOptInt(sockfd, level, optname, optval int32) {
- dut.t.Helper()
+func (dut *DUT) SetSockOptInt(t *testing.T, sockfd, level, optname, optval int32) {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, err := dut.SetSockOptIntWithErrno(ctx, sockfd, level, optname, optval)
+ ret, err := dut.SetSockOptIntWithErrno(ctx, t, sockfd, level, optname, optval)
if ret != 0 {
- dut.t.Fatalf("failed to SetSockOptInt: %s", err)
+ t.Fatalf("failed to SetSockOptInt: %s", err)
}
}
// SetSockOptIntWithErrno calls setsockopt with an integer optval.
-func (dut *DUT) SetSockOptIntWithErrno(ctx context.Context, sockfd, level, optname, optval int32) (int32, error) {
- dut.t.Helper()
- return dut.setSockOpt(ctx, sockfd, level, optname, &pb.SockOptVal{Val: &pb.SockOptVal_Intval{optval}})
+func (dut *DUT) SetSockOptIntWithErrno(ctx context.Context, t *testing.T, sockfd, level, optname, optval int32) (int32, error) {
+ t.Helper()
+
+ return dut.setSockOpt(ctx, t, sockfd, level, optname, &pb.SockOptVal{Val: &pb.SockOptVal_Intval{optval}})
}
// SetSockOptTimeval calls setsockopt on the DUT and causes a fatal test failure
// if it doesn't succeed. If more control over the timeout or error handling is
// needed, use SetSockOptTimevalWithErrno.
-func (dut *DUT) SetSockOptTimeval(sockfd, level, optname int32, tv *unix.Timeval) {
- dut.t.Helper()
+func (dut *DUT) SetSockOptTimeval(t *testing.T, sockfd, level, optname int32, tv *unix.Timeval) {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, err := dut.SetSockOptTimevalWithErrno(ctx, sockfd, level, optname, tv)
+ ret, err := dut.SetSockOptTimevalWithErrno(ctx, t, sockfd, level, optname, tv)
if ret != 0 {
- dut.t.Fatalf("failed to SetSockOptTimeval: %s", err)
+ t.Fatalf("failed to SetSockOptTimeval: %s", err)
}
}
// SetSockOptTimevalWithErrno calls setsockopt with the timeval converted to
// bytes.
-func (dut *DUT) SetSockOptTimevalWithErrno(ctx context.Context, sockfd, level, optname int32, tv *unix.Timeval) (int32, error) {
- dut.t.Helper()
+func (dut *DUT) SetSockOptTimevalWithErrno(ctx context.Context, t *testing.T, sockfd, level, optname int32, tv *unix.Timeval) (int32, error) {
+ t.Helper()
+
timeval := pb.Timeval{
Seconds: int64(tv.Sec),
Microseconds: int64(tv.Usec),
}
- return dut.setSockOpt(ctx, sockfd, level, optname, &pb.SockOptVal{Val: &pb.SockOptVal_Timeval{&timeval}})
+ return dut.setSockOpt(ctx, t, sockfd, level, optname, &pb.SockOptVal{Val: &pb.SockOptVal_Timeval{&timeval}})
}
// Socket calls socket on the DUT and returns the file descriptor. If socket
// fails on the DUT, the test ends.
-func (dut *DUT) Socket(domain, typ, proto int32) int32 {
- dut.t.Helper()
- fd, err := dut.SocketWithErrno(domain, typ, proto)
+func (dut *DUT) Socket(t *testing.T, domain, typ, proto int32) int32 {
+ t.Helper()
+
+ fd, err := dut.SocketWithErrno(t, domain, typ, proto)
if fd < 0 {
- dut.t.Fatalf("failed to create socket: %s", err)
+ t.Fatalf("failed to create socket: %s", err)
}
return fd
}
// SocketWithErrno calls socket on the DUT and returns the fd and errno.
-func (dut *DUT) SocketWithErrno(domain, typ, proto int32) (int32, error) {
- dut.t.Helper()
+func (dut *DUT) SocketWithErrno(t *testing.T, domain, typ, proto int32) (int32, error) {
+ t.Helper()
+
req := pb.SocketRequest{
Domain: domain,
Type: typ,
@@ -625,7 +665,7 @@ func (dut *DUT) SocketWithErrno(domain, typ, proto int32) (int32, error) {
ctx := context.Background()
resp, err := dut.posixServer.Socket(ctx, &req)
if err != nil {
- dut.t.Fatalf("failed to call Socket: %s", err)
+ t.Fatalf("failed to call Socket: %s", err)
}
return resp.GetFd(), syscall.Errno(resp.GetErrno_())
}
@@ -633,20 +673,22 @@ func (dut *DUT) SocketWithErrno(domain, typ, proto int32) (int32, error) {
// Recv calls recv on the DUT and causes a fatal test failure if it doesn't
// succeed. If more control over the timeout or error handling is needed, use
// RecvWithErrno.
-func (dut *DUT) Recv(sockfd, len, flags int32) []byte {
- dut.t.Helper()
+func (dut *DUT) Recv(t *testing.T, sockfd, len, flags int32) []byte {
+ t.Helper()
+
ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)
defer cancel()
- ret, buf, err := dut.RecvWithErrno(ctx, sockfd, len, flags)
+ ret, buf, err := dut.RecvWithErrno(ctx, t, sockfd, len, flags)
if ret == -1 {
- dut.t.Fatalf("failed to recv: %s", err)
+ t.Fatalf("failed to recv: %s", err)
}
return buf
}
// RecvWithErrno calls recv on the DUT.
-func (dut *DUT) RecvWithErrno(ctx context.Context, sockfd, len, flags int32) (int32, []byte, error) {
- dut.t.Helper()
+func (dut *DUT) RecvWithErrno(ctx context.Context, t *testing.T, sockfd, len, flags int32) (int32, []byte, error) {
+ t.Helper()
+
req := pb.RecvRequest{
Sockfd: sockfd,
Len: len,
@@ -654,7 +696,7 @@ func (dut *DUT) RecvWithErrno(ctx context.Context, sockfd, len, flags int32) (in
}
resp, err := dut.posixServer.Recv(ctx, &req)
if err != nil {
- dut.t.Fatalf("failed to call Recv: %s", err)
+ t.Fatalf("failed to call Recv: %s", err)
}
return resp.GetRet(), resp.GetBuf(), syscall.Errno(resp.GetErrno_())
}
diff --git a/test/packetimpact/testbench/rawsockets.go b/test/packetimpact/testbench/rawsockets.go
index 278229b7e..57e822725 100644
--- a/test/packetimpact/testbench/rawsockets.go
+++ b/test/packetimpact/testbench/rawsockets.go
@@ -28,7 +28,6 @@ import (
// Sniffer can sniff raw packets on the wire.
type Sniffer struct {
- t *testing.T
fd int
}
@@ -40,6 +39,8 @@ func htons(x uint16) uint16 {
// NewSniffer creates a Sniffer connected to *device.
func NewSniffer(t *testing.T) (Sniffer, error) {
+ t.Helper()
+
snifferFd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_RAW, int(htons(unix.ETH_P_ALL)))
if err != nil {
return Sniffer{}, err
@@ -51,7 +52,6 @@ func NewSniffer(t *testing.T) (Sniffer, error) {
t.Fatalf("can't setsockopt SO_RCVBUF to 10M: %s", err)
}
return Sniffer{
- t: t,
fd: snifferFd,
}, nil
}
@@ -61,7 +61,9 @@ func NewSniffer(t *testing.T) (Sniffer, error) {
const maxReadSize int = 65536
// Recv tries to read one frame until the timeout is up.
-func (s *Sniffer) Recv(timeout time.Duration) []byte {
+func (s *Sniffer) Recv(t *testing.T, timeout time.Duration) []byte {
+ t.Helper()
+
deadline := time.Now().Add(timeout)
for {
timeout = deadline.Sub(time.Now())
@@ -75,7 +77,7 @@ func (s *Sniffer) Recv(timeout time.Duration) []byte {
}
if err := unix.SetsockoptTimeval(s.fd, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &tv); err != nil {
- s.t.Fatalf("can't setsockopt SO_RCVTIMEO: %s", err)
+ t.Fatalf("can't setsockopt SO_RCVTIMEO: %s", err)
}
buf := make([]byte, maxReadSize)
@@ -85,10 +87,10 @@ func (s *Sniffer) Recv(timeout time.Duration) []byte {
continue
}
if err != nil {
- s.t.Fatalf("can't read: %s", err)
+ t.Fatalf("can't read: %s", err)
}
if nread > maxReadSize {
- s.t.Fatalf("received a truncated frame of %d bytes", nread)
+ t.Fatalf("received a truncated frame of %d bytes, want at most %d bytes", nread, maxReadSize)
}
return buf[:nread]
}
@@ -96,14 +98,16 @@ func (s *Sniffer) Recv(timeout time.Duration) []byte {
// Drain drains the Sniffer's socket receive buffer by receiving until there's
// nothing else to receive.
-func (s *Sniffer) Drain() {
- s.t.Helper()
+func (s *Sniffer) Drain(t *testing.T) {
+ t.Helper()
+
flags, err := unix.FcntlInt(uintptr(s.fd), unix.F_GETFL, 0)
if err != nil {
- s.t.Fatalf("failed to get sniffer socket fd flags: %s", err)
+ t.Fatalf("failed to get sniffer socket fd flags: %s", err)
}
- if _, err := unix.FcntlInt(uintptr(s.fd), unix.F_SETFL, flags|unix.O_NONBLOCK); err != nil {
- s.t.Fatalf("failed to make sniffer socket non-blocking: %s", err)
+ nonBlockingFlags := flags | unix.O_NONBLOCK
+ if _, err := unix.FcntlInt(uintptr(s.fd), unix.F_SETFL, nonBlockingFlags); err != nil {
+ t.Fatalf("failed to make sniffer socket non-blocking with flags %b: %s", nonBlockingFlags, err)
}
for {
buf := make([]byte, maxReadSize)
@@ -113,7 +117,7 @@ func (s *Sniffer) Drain() {
}
}
if _, err := unix.FcntlInt(uintptr(s.fd), unix.F_SETFL, flags); err != nil {
- s.t.Fatalf("failed to restore sniffer socket fd flags: %s", err)
+ t.Fatalf("failed to restore sniffer socket fd flags to %b: %s", flags, err)
}
}
@@ -128,12 +132,13 @@ func (s *Sniffer) close() error {
// Injector can inject raw frames.
type Injector struct {
- t *testing.T
fd int
}
// NewInjector creates a new injector on *device.
func NewInjector(t *testing.T) (Injector, error) {
+ t.Helper()
+
ifInfo, err := net.InterfaceByName(Device)
if err != nil {
return Injector{}, err
@@ -156,15 +161,20 @@ func NewInjector(t *testing.T) (Injector, error) {
return Injector{}, err
}
return Injector{
- t: t,
fd: injectFd,
}, nil
}
// Send a raw frame.
-func (i *Injector) Send(b []byte) {
- if _, err := unix.Write(i.fd, b); err != nil {
- i.t.Fatalf("can't write: %s of len %d", err, len(b))
+func (i *Injector) Send(t *testing.T, b []byte) {
+ t.Helper()
+
+ n, err := unix.Write(i.fd, b)
+ if err != nil {
+ t.Fatalf("can't write bytes of len %d: %s", len(b), err)
+ }
+ if n != len(b) {
+ t.Fatalf("got %d bytes written, want %d", n, len(b))
}
}
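For illustration, a minimal sketch (assuming the raw-socket helpers above; the helper name is hypothetical) of a test utility that threads the caller's *testing.T through every fallible Sniffer and Injector call instead of relying on a *testing.T captured at construction:

package example_test

import (
	"testing"
	"time"

	"gvisor.dev/gvisor/test/packetimpact/testbench"
)

// sendAndRecvRaw injects one raw frame and waits up to one second for a reply frame.
func sendAndRecvRaw(t *testing.T, frame []byte) []byte {
	t.Helper()
	injector, err := testbench.NewInjector(t)
	if err != nil {
		t.Fatalf("can't create injector: %s", err)
	}
	sniffer, err := testbench.NewSniffer(t)
	if err != nil {
		t.Fatalf("can't create sniffer: %s", err)
	}
	// Drop any frames already queued so Recv only sees frames arriving after the injection.
	sniffer.Drain(t)
	injector.Send(t, frame)
	return sniffer.Recv(t, time.Second)
}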
diff --git a/test/packetimpact/tests/fin_wait2_timeout_test.go b/test/packetimpact/tests/fin_wait2_timeout_test.go
index 407565078..a61054c2c 100644
--- a/test/packetimpact/tests/fin_wait2_timeout_test.go
+++ b/test/packetimpact/tests/fin_wait2_timeout_test.go
@@ -39,34 +39,34 @@ func TestFinWait2Timeout(t *testing.T) {
t.Run(tt.description, func(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFd, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFd)
+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFd)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
- conn.Connect()
+ defer conn.Close(t)
+ conn.Connect(t)
- acceptFd, _ := dut.Accept(listenFd)
+ acceptFd, _ := dut.Accept(t, listenFd)
if tt.linger2 {
tv := unix.Timeval{Sec: 1, Usec: 0}
- dut.SetSockOptTimeval(acceptFd, unix.SOL_TCP, unix.TCP_LINGER2, &tv)
+ dut.SetSockOptTimeval(t, acceptFd, unix.SOL_TCP, unix.TCP_LINGER2, &tv)
}
- dut.Close(acceptFd)
+ dut.Close(t, acceptFd)
- if _, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
t.Fatalf("expected a FIN-ACK within 1 second but got none: %s", err)
}
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
time.Sleep(5 * time.Second)
- conn.Drain()
+ conn.Drain(t)
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
if tt.linger2 {
- if _, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, time.Second); err != nil {
t.Fatalf("expected a RST packet within a second but got none: %s", err)
}
} else {
- if got, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, 10*time.Second); got != nil || err == nil {
+ if got, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, 10*time.Second); got != nil || err == nil {
t.Fatalf("expected no RST packets within ten seconds but got one: %s", got)
}
}
diff --git a/test/packetimpact/tests/icmpv6_param_problem_test.go b/test/packetimpact/tests/icmpv6_param_problem_test.go
index 8dfd26ee8..2d59d552d 100644
--- a/test/packetimpact/tests/icmpv6_param_problem_test.go
+++ b/test/packetimpact/tests/icmpv6_param_problem_test.go
@@ -34,7 +34,7 @@ func TestICMPv6ParamProblemTest(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
conn := testbench.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
- defer conn.Close()
+ defer conn.Close(t)
ipv6 := testbench.IPv6{
// 254 is reserved and used for experimentation and testing. This should
// cause an error.
@@ -45,8 +45,8 @@ func TestICMPv6ParamProblemTest(t *testing.T) {
Payload: []byte("hello world"),
}
- toSend := (*testbench.Connection)(&conn).CreateFrame(testbench.Layers{&ipv6}, &icmpv6)
- (*testbench.Connection)(&conn).SendFrame(toSend)
+ toSend := (*testbench.Connection)(&conn).CreateFrame(t, testbench.Layers{&ipv6}, &icmpv6)
+ (*testbench.Connection)(&conn).SendFrame(t, toSend)
// Build the expected ICMPv6 payload, which includes an index to the
// problematic byte and also the problematic packet as described in
@@ -72,7 +72,7 @@ func TestICMPv6ParamProblemTest(t *testing.T) {
&expectedICMPv6,
}
timeout := time.Second
- if _, err := conn.ExpectFrame(paramProblem, timeout); err != nil {
+ if _, err := conn.ExpectFrame(t, paramProblem, timeout); err != nil {
t.Errorf("expected %s within %s but got none: %s", paramProblem, timeout, err)
}
}
diff --git a/test/packetimpact/tests/ipv4_id_uniqueness_test.go b/test/packetimpact/tests/ipv4_id_uniqueness_test.go
index 70f6df5e0..cf881418c 100644
--- a/test/packetimpact/tests/ipv4_id_uniqueness_test.go
+++ b/test/packetimpact/tests/ipv4_id_uniqueness_test.go
@@ -31,8 +31,8 @@ func init() {
testbench.RegisterFlags(flag.CommandLine)
}
-func recvTCPSegment(conn *testbench.TCPIPv4, expect *testbench.TCP, expectPayload *testbench.Payload) (uint16, error) {
- layers, err := conn.ExpectData(expect, expectPayload, time.Second)
+func recvTCPSegment(t *testing.T, conn *testbench.TCPIPv4, expect *testbench.TCP, expectPayload *testbench.Payload) (uint16, error) {
+ layers, err := conn.ExpectData(t, expect, expectPayload, time.Second)
if err != nil {
return 0, fmt.Errorf("failed to receive TCP segment: %s", err)
}
@@ -69,17 +69,17 @@ func TestIPv4RetransmitIdentificationUniqueness(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFD, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFD)
+ listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFD)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
- conn.Connect()
- remoteFD, _ := dut.Accept(listenFD)
- defer dut.Close(remoteFD)
+ conn.Connect(t)
+ remoteFD, _ := dut.Accept(t, listenFD)
+ defer dut.Close(t, remoteFD)
- dut.SetSockOptInt(remoteFD, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
+ dut.SetSockOptInt(t, remoteFD, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
// TODO(b/129291778) The following socket option clears the DF bit on
// IP packets sent over the socket, and is currently not supported by
@@ -87,30 +87,30 @@ func TestIPv4RetransmitIdentificationUniqueness(t *testing.T) {
// socket option not being supported does not affect the operation of
// this test. Once the socket option is supported, the following call
// can be changed to simply assert success.
- ret, errno := dut.SetSockOptIntWithErrno(context.Background(), remoteFD, unix.IPPROTO_IP, linux.IP_MTU_DISCOVER, linux.IP_PMTUDISC_DONT)
+ ret, errno := dut.SetSockOptIntWithErrno(context.Background(), t, remoteFD, unix.IPPROTO_IP, linux.IP_MTU_DISCOVER, linux.IP_PMTUDISC_DONT)
if ret == -1 && errno != unix.ENOTSUP {
t.Fatalf("failed to set IP_MTU_DISCOVER socket option to IP_PMTUDISC_DONT: %s", errno)
}
samplePayload := &testbench.Payload{Bytes: tc.payload}
- dut.Send(remoteFD, tc.payload, 0)
- if _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {
+ dut.Send(t, remoteFD, tc.payload, 0)
+ if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
t.Fatalf("failed to receive TCP segment sent for RTT calculation: %s", err)
}
// Let the DUT estimate RTO with RTT from the DATA-ACK.
// TODO(gvisor.dev/issue/2685) Estimate RTO during handshake, after which
// we can skip sending this ACK.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
- dut.Send(remoteFD, tc.payload, 0)
- expectTCP := &testbench.TCP{SeqNum: testbench.Uint32(uint32(*conn.RemoteSeqNum()))}
- originalID, err := recvTCPSegment(&conn, expectTCP, samplePayload)
+ dut.Send(t, remoteFD, tc.payload, 0)
+ expectTCP := &testbench.TCP{SeqNum: testbench.Uint32(uint32(*conn.RemoteSeqNum(t)))}
+ originalID, err := recvTCPSegment(t, &conn, expectTCP, samplePayload)
if err != nil {
t.Fatalf("failed to receive TCP segment: %s", err)
}
- retransmitID, err := recvTCPSegment(&conn, expectTCP, samplePayload)
+ retransmitID, err := recvTCPSegment(t, &conn, expectTCP, samplePayload)
if err != nil {
t.Fatalf("failed to receive retransmitted TCP segment: %s", err)
}
diff --git a/test/packetimpact/tests/ipv6_fragment_reassembly_test.go b/test/packetimpact/tests/ipv6_fragment_reassembly_test.go
index 7b462c8e2..b5f94ad4b 100644
--- a/test/packetimpact/tests/ipv6_fragment_reassembly_test.go
+++ b/test/packetimpact/tests/ipv6_fragment_reassembly_test.go
@@ -48,7 +48,7 @@ func TestIPv6FragmentReassembly(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
conn := testbench.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
- defer conn.Close()
+ defer conn.Close(t)
firstPayloadToSend := make([]byte, firstPayloadLength)
for i := range firstPayloadToSend {
@@ -81,7 +81,7 @@ func TestIPv6FragmentReassembly(t *testing.T) {
buffer.NewVectorisedView(len(secondPayloadToSend), []buffer.View{secondPayloadToSend}),
)
- conn.Send(testbench.IPv6{},
+ conn.Send(t, testbench.IPv6{},
&testbench.IPv6FragmentExtHdr{
FragmentOffset: testbench.Uint16(0),
MoreFragments: testbench.Bool(true),
@@ -96,7 +96,7 @@ func TestIPv6FragmentReassembly(t *testing.T) {
icmpv6ProtoNum := header.IPv6ExtensionHeaderIdentifier(header.ICMPv6ProtocolNumber)
- conn.Send(testbench.IPv6{},
+ conn.Send(t, testbench.IPv6{},
&testbench.IPv6FragmentExtHdr{
NextHeader: &icmpv6ProtoNum,
FragmentOffset: testbench.Uint16((firstPayloadLength + header.ICMPv6EchoMinimumSize) / 8),
@@ -107,7 +107,7 @@ func TestIPv6FragmentReassembly(t *testing.T) {
Bytes: secondPayloadToSend,
})
- gotEchoReplyFirstPart, err := conn.ExpectFrame(testbench.Layers{
+ gotEchoReplyFirstPart, err := conn.ExpectFrame(t, testbench.Layers{
&testbench.Ether{},
&testbench.IPv6{},
&testbench.IPv6FragmentExtHdr{
@@ -142,7 +142,7 @@ func TestIPv6FragmentReassembly(t *testing.T) {
hex.Dump(wantFirstPayload))
}
- gotEchoReplySecondPart, err := conn.ExpectFrame(testbench.Layers{
+ gotEchoReplySecondPart, err := conn.ExpectFrame(t, testbench.Layers{
&testbench.Ether{},
&testbench.IPv6{},
&testbench.IPv6FragmentExtHdr{
diff --git a/test/packetimpact/tests/ipv6_unknown_options_action_test.go b/test/packetimpact/tests/ipv6_unknown_options_action_test.go
index 100b30ad7..d7d63cbd2 100644
--- a/test/packetimpact/tests/ipv6_unknown_options_action_test.go
+++ b/test/packetimpact/tests/ipv6_unknown_options_action_test.go
@@ -23,21 +23,21 @@ import (
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
- tb "gvisor.dev/gvisor/test/packetimpact/testbench"
+ "gvisor.dev/gvisor/test/packetimpact/testbench"
)
func init() {
- tb.RegisterFlags(flag.CommandLine)
+ testbench.RegisterFlags(flag.CommandLine)
}
-func mkHopByHopOptionsExtHdr(optType byte) tb.Layer {
- return &tb.IPv6HopByHopOptionsExtHdr{
+func mkHopByHopOptionsExtHdr(optType byte) testbench.Layer {
+ return &testbench.IPv6HopByHopOptionsExtHdr{
Options: []byte{optType, 0x04, 0x00, 0x00, 0x00, 0x00},
}
}
-func mkDestinationOptionsExtHdr(optType byte) tb.Layer {
- return &tb.IPv6DestinationOptionsExtHdr{
+func mkDestinationOptionsExtHdr(optType byte) testbench.Layer {
+ return &testbench.IPv6DestinationOptionsExtHdr{
Options: []byte{optType, 0x04, 0x00, 0x00, 0x00, 0x00},
}
}
@@ -49,7 +49,7 @@ func optionTypeFromAction(action header.IPv6OptionUnknownAction) byte {
func TestIPv6UnknownOptionAction(t *testing.T) {
for _, tt := range []struct {
description string
- mkExtHdr func(optType byte) tb.Layer
+ mkExtHdr func(optType byte) testbench.Layer
action header.IPv6OptionUnknownAction
multicastDst bool
wantICMPv6 bool
@@ -140,21 +140,21 @@ func TestIPv6UnknownOptionAction(t *testing.T) {
},
} {
t.Run(tt.description, func(t *testing.T) {
- dut := tb.NewDUT(t)
+ dut := testbench.NewDUT(t)
defer dut.TearDown()
- ipv6Conn := tb.NewIPv6Conn(t, tb.IPv6{}, tb.IPv6{})
- conn := (*tb.Connection)(&ipv6Conn)
- defer ipv6Conn.Close()
+ ipv6Conn := testbench.NewIPv6Conn(t, testbench.IPv6{}, testbench.IPv6{})
+ conn := (*testbench.Connection)(&ipv6Conn)
+ defer ipv6Conn.Close(t)
- outgoingOverride := tb.Layers{}
+ outgoingOverride := testbench.Layers{}
if tt.multicastDst {
- outgoingOverride = tb.Layers{&tb.IPv6{
- DstAddr: tb.Address(tcpip.Address(net.ParseIP("ff02::1"))),
+ outgoingOverride = testbench.Layers{&testbench.IPv6{
+ DstAddr: testbench.Address(tcpip.Address(net.ParseIP("ff02::1"))),
}}
}
- outgoing := conn.CreateFrame(outgoingOverride, tt.mkExtHdr(optionTypeFromAction(tt.action)))
- conn.SendFrame(outgoing)
+ outgoing := conn.CreateFrame(t, outgoingOverride, tt.mkExtHdr(optionTypeFromAction(tt.action)))
+ conn.SendFrame(t, outgoing)
ipv6Sent := outgoing[1:]
invokingPacket, err := ipv6Sent.ToBytes()
if err != nil {
@@ -167,12 +167,12 @@ func TestIPv6UnknownOptionAction(t *testing.T) {
// after the IPv6 header (after NextHeader and ExtHdrLen).
binary.BigEndian.PutUint32(icmpv6Payload, header.IPv6MinimumSize+2)
icmpv6Payload = append(icmpv6Payload, invokingPacket...)
- gotICMPv6, err := ipv6Conn.ExpectFrame(tb.Layers{
- &tb.Ether{},
- &tb.IPv6{},
- &tb.ICMPv6{
- Type: tb.ICMPv6Type(header.ICMPv6ParamProblem),
- Code: tb.Byte(2),
+ gotICMPv6, err := ipv6Conn.ExpectFrame(t, testbench.Layers{
+ &testbench.Ether{},
+ &testbench.IPv6{},
+ &testbench.ICMPv6{
+ Type: testbench.ICMPv6Type(header.ICMPv6ParamProblem),
+ Code: testbench.Byte(2),
Payload: icmpv6Payload,
},
}, time.Second)
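For illustration, a minimal sketch (assuming only the gvisor header package; the function name is hypothetical) of how the Parameter Problem pointer used above is computed, pointing at the unknown option type that sits two bytes into the extension header following the fixed 40-byte IPv6 header:

package example

import (
	"encoding/binary"

	"gvisor.dev/gvisor/pkg/tcpip/header"
)

// paramProblemPointer returns the 4-byte pointer identifying the offending byte.
func paramProblemPointer() []byte {
	p := make([]byte, 4)
	// Fixed IPv6 header (40 bytes), then NextHeader and ExtHdrLen precede the option type.
	binary.BigEndian.PutUint32(p, header.IPv6MinimumSize+2)
	return p
}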
diff --git a/test/packetimpact/tests/tcp_close_wait_ack_test.go b/test/packetimpact/tests/tcp_close_wait_ack_test.go
index 6e7ff41d7..e6a96f214 100644
--- a/test/packetimpact/tests/tcp_close_wait_ack_test.go
+++ b/test/packetimpact/tests/tcp_close_wait_ack_test.go
@@ -33,39 +33,39 @@ func init() {
func TestCloseWaitAck(t *testing.T) {
for _, tt := range []struct {
description string
- makeTestingTCP func(conn *testbench.TCPIPv4, seqNumOffset seqnum.Size, windowSize seqnum.Size) testbench.TCP
+ makeTestingTCP func(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset, windowSize seqnum.Size) testbench.TCP
seqNumOffset seqnum.Size
expectAck bool
}{
- {"OTW", GenerateOTWSeqSegment, 0, false},
- {"OTW", GenerateOTWSeqSegment, 1, true},
- {"OTW", GenerateOTWSeqSegment, 2, true},
- {"ACK", GenerateUnaccACKSegment, 0, false},
- {"ACK", GenerateUnaccACKSegment, 1, true},
- {"ACK", GenerateUnaccACKSegment, 2, true},
+ {"OTW", generateOTWSeqSegment, 0, false},
+ {"OTW", generateOTWSeqSegment, 1, true},
+ {"OTW", generateOTWSeqSegment, 2, true},
+ {"ACK", generateUnaccACKSegment, 0, false},
+ {"ACK", generateUnaccACKSegment, 1, true},
+ {"ACK", generateUnaccACKSegment, 2, true},
} {
t.Run(fmt.Sprintf("%s%d", tt.description, tt.seqNumOffset), func(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFd, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFd)
+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFd)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
- conn.Connect()
- acceptFd, _ := dut.Accept(listenFd)
+ conn.Connect(t)
+ acceptFd, _ := dut.Accept(t, listenFd)
// Send a FIN to DUT to initiate the active close
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagFin)})
- gotTCP, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagFin)})
+ gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("expected an ACK for our fin and DUT should enter CLOSE_WAIT: %s", err)
}
windowSize := seqnum.Size(*gotTCP.WindowSize)
// Send a segment with OTW Seq / unacc ACK and expect an ACK back
- conn.Send(tt.makeTestingTCP(&conn, tt.seqNumOffset, windowSize), &testbench.Payload{Bytes: []byte("Sample Data")})
- gotAck, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ conn.Send(t, tt.makeTestingTCP(t, &conn, tt.seqNumOffset, windowSize), &testbench.Payload{Bytes: []byte("Sample Data")})
+ gotAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
if tt.expectAck && err != nil {
t.Fatalf("expected an ack but got none: %s", err)
}
@@ -74,35 +74,36 @@ func TestCloseWaitAck(t *testing.T) {
}
// Now let's verify DUT is indeed in CLOSE_WAIT
- dut.Close(acceptFd)
- if _, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagFin)}, time.Second); err != nil {
+ dut.Close(t, acceptFd)
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagFin)}, time.Second); err != nil {
t.Fatalf("expected DUT to send a FIN: %s", err)
}
// Ack the FIN from DUT
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
// Send some extra data to DUT
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, &testbench.Payload{Bytes: []byte("Sample Data")})
- if _, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, &testbench.Payload{Bytes: []byte("Sample Data")})
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, time.Second); err != nil {
t.Fatalf("expected DUT to send an RST: %s", err)
}
})
}
}
-// This generates an segment with seqnum = RCV.NXT + RCV.WND + seqNumOffset, the
-// generated segment is only acceptable when seqNumOffset is 0, otherwise an ACK
-// is expected from the receiver.
-func GenerateOTWSeqSegment(conn *testbench.TCPIPv4, seqNumOffset seqnum.Size, windowSize seqnum.Size) testbench.TCP {
- lastAcceptable := conn.LocalSeqNum().Add(windowSize)
+// generateOTWSeqSegment generates a segment with
+// seqnum = RCV.NXT + RCV.WND + seqNumOffset. The generated segment is only
+// acceptable when seqNumOffset is 0; otherwise an ACK is expected from the
+// receiver.
+func generateOTWSeqSegment(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset seqnum.Size, windowSize seqnum.Size) testbench.TCP {
+ lastAcceptable := conn.LocalSeqNum(t).Add(windowSize)
otwSeq := uint32(lastAcceptable.Add(seqNumOffset))
return testbench.TCP{SeqNum: testbench.Uint32(otwSeq), Flags: testbench.Uint8(header.TCPFlagAck)}
}
-// This generates an segment with acknum = SND.NXT + seqNumOffset, the generated
-// segment is only acceptable when seqNumOffset is 0, otherwise an ACK is
-// expected from the receiver.
-func GenerateUnaccACKSegment(conn *testbench.TCPIPv4, seqNumOffset seqnum.Size, windowSize seqnum.Size) testbench.TCP {
- lastAcceptable := conn.RemoteSeqNum()
+// generateUnaccACKSegment generates a segment with
+// acknum = SND.NXT + seqNumOffset. The generated segment is only acceptable
+// when seqNumOffset is 0; otherwise an ACK is expected from the receiver.
+func generateUnaccACKSegment(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset seqnum.Size, windowSize seqnum.Size) testbench.TCP {
+ lastAcceptable := conn.RemoteSeqNum(t)
unaccAck := uint32(lastAcceptable.Add(seqNumOffset))
return testbench.TCP{AckNum: testbench.Uint32(unaccAck), Flags: testbench.Uint8(header.TCPFlagAck)}
}
diff --git a/test/packetimpact/tests/tcp_cork_mss_test.go b/test/packetimpact/tests/tcp_cork_mss_test.go
index fb8f48629..8feea4a82 100644
--- a/test/packetimpact/tests/tcp_cork_mss_test.go
+++ b/test/packetimpact/tests/tcp_cork_mss_test.go
@@ -32,53 +32,53 @@ func init() {
func TestTCPCorkMSS(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFD, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFD)
+ listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFD)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
const mss = uint32(header.TCPDefaultMSS)
options := make([]byte, header.TCPOptionMSSLength)
header.EncodeMSSOption(mss, options)
- conn.ConnectWithOptions(options)
+ conn.ConnectWithOptions(t, options)
- acceptFD, _ := dut.Accept(listenFD)
- defer dut.Close(acceptFD)
+ acceptFD, _ := dut.Accept(t, listenFD)
+ defer dut.Close(t, acceptFD)
- dut.SetSockOptInt(acceptFD, unix.IPPROTO_TCP, unix.TCP_CORK, 1)
+ dut.SetSockOptInt(t, acceptFD, unix.IPPROTO_TCP, unix.TCP_CORK, 1)
// Let the dut application send 2 small segments to be held up and coalesced
// until the application sends a larger segment to fill up to > MSS.
sampleData := []byte("Sample Data")
- dut.Send(acceptFD, sampleData, 0)
- dut.Send(acceptFD, sampleData, 0)
+ dut.Send(t, acceptFD, sampleData, 0)
+ dut.Send(t, acceptFD, sampleData, 0)
expectedData := sampleData
expectedData = append(expectedData, sampleData...)
largeData := make([]byte, mss+1)
expectedData = append(expectedData, largeData...)
- dut.Send(acceptFD, largeData, 0)
+ dut.Send(t, acceptFD, largeData, 0)
// Expect the segments to be coalesced and sent and capped to MSS.
expectedPayload := testbench.Payload{Bytes: expectedData[:mss]}
- if _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, &expectedPayload, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, &expectedPayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
// Expect the coalesced segment to be split and transmitted.
expectedPayload = testbench.Payload{Bytes: expectedData[mss:]}
- if _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, &expectedPayload, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, &expectedPayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
// Check for segments to *not* be held up because of TCP_CORK when
// the current send window is less than MSS.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(uint16(2 * len(sampleData)))})
- dut.Send(acceptFD, sampleData, 0)
- dut.Send(acceptFD, sampleData, 0)
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(uint16(2 * len(sampleData)))})
+ dut.Send(t, acceptFD, sampleData, 0)
+ dut.Send(t, acceptFD, sampleData, 0)
expectedPayload = testbench.Payload{Bytes: append(sampleData, sampleData...)}
- if _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, &expectedPayload, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, &expectedPayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
}
diff --git a/test/packetimpact/tests/tcp_handshake_window_size_test.go b/test/packetimpact/tests/tcp_handshake_window_size_test.go
index 652b530d0..22937d92f 100644
--- a/test/packetimpact/tests/tcp_handshake_window_size_test.go
+++ b/test/packetimpact/tests/tcp_handshake_window_size_test.go
@@ -33,14 +33,14 @@ func init() {
func TestTCPHandshakeWindowSize(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFD, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFD)
+ listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFD)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
// Start handshake with zero window size.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn), WindowSize: testbench.Uint16(uint16(0))})
- if _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn), WindowSize: testbench.Uint16(uint16(0))})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
t.Fatalf("expected SYN-ACK: %s", err)
}
// Update the advertised window size to a non-zero value with the ACK that
@@ -48,10 +48,10 @@ func TestTCPHandshakeWindowSize(t *testing.T) {
//
// Set the window size with MSB set and expect the dut to treat it as
// an unsigned value.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(uint16(1 << 15))})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(uint16(1 << 15))})
- acceptFd, _ := dut.Accept(listenFD)
- defer dut.Close(acceptFd)
+ acceptFd, _ := dut.Accept(t, listenFD)
+ defer dut.Close(t, acceptFd)
sampleData := []byte("Sample Data")
samplePayload := &testbench.Payload{Bytes: sampleData}
@@ -59,8 +59,8 @@ func TestTCPHandshakeWindowSize(t *testing.T) {
// Since we advertised a zero window followed by a non-zero window,
// expect the dut to honor the recently advertised non-zero window
// and actually send out the data instead of probing for zero window.
- dut.Send(acceptFd, sampleData, 0)
- if _, err := conn.ExpectNextData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload, time.Second); err != nil {
+ dut.Send(t, acceptFd, sampleData, 0)
+ if _, err := conn.ExpectNextData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
}
diff --git a/test/packetimpact/tests/tcp_network_unreachable_test.go b/test/packetimpact/tests/tcp_network_unreachable_test.go
index 868a08da8..900352fa1 100644
--- a/test/packetimpact/tests/tcp_network_unreachable_test.go
+++ b/test/packetimpact/tests/tcp_network_unreachable_test.go
@@ -38,29 +38,29 @@ func TestTCPSynSentUnreachable(t *testing.T) {
// Create the DUT and connection.
dut := testbench.NewDUT(t)
defer dut.TearDown()
- clientFD, clientPort := dut.CreateBoundSocket(unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, net.ParseIP(testbench.RemoteIPv4))
+ clientFD, clientPort := dut.CreateBoundSocket(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, net.ParseIP(testbench.RemoteIPv4))
port := uint16(9001)
conn := testbench.NewTCPIPv4(t, testbench.TCP{SrcPort: &port, DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort, DstPort: &port})
- defer conn.Close()
+ defer conn.Close(t)
// Bring the DUT to SYN-SENT state with a non-blocking connect.
ctx, cancel := context.WithTimeout(context.Background(), testbench.RPCTimeout)
defer cancel()
sa := unix.SockaddrInet4{Port: int(port)}
copy(sa.Addr[:], net.IP(net.ParseIP(testbench.LocalIPv4)).To4())
- if _, err := dut.ConnectWithErrno(ctx, clientFD, &sa); err != syscall.Errno(unix.EINPROGRESS) {
+ if _, err := dut.ConnectWithErrno(ctx, t, clientFD, &sa); err != syscall.Errno(unix.EINPROGRESS) {
t.Errorf("expected connect to fail with EINPROGRESS, but got %v", err)
}
// Get the SYN.
- tcpLayers, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, nil, time.Second)
+ tcpLayers, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, nil, time.Second)
if err != nil {
t.Fatalf("expected SYN: %s", err)
}
// Send a host unreachable message.
rawConn := (*testbench.Connection)(&conn)
- layers := rawConn.CreateFrame(nil)
+ layers := rawConn.CreateFrame(t, nil)
layers = layers[:len(layers)-1]
const ipLayer = 1
const tcpLayer = ipLayer + 1
@@ -74,9 +74,9 @@ func TestTCPSynSentUnreachable(t *testing.T) {
}
var icmpv4 testbench.ICMPv4 = testbench.ICMPv4{Type: testbench.ICMPv4Type(header.ICMPv4DstUnreachable), Code: testbench.Uint8(header.ICMPv4HostUnreachable)}
layers = append(layers, &icmpv4, ip, tcp)
- rawConn.SendFrameStateless(layers)
+ rawConn.SendFrameStateless(t, layers)
- if _, err = dut.ConnectWithErrno(ctx, clientFD, &sa); err != syscall.Errno(unix.EHOSTUNREACH) {
+ if _, err = dut.ConnectWithErrno(ctx, t, clientFD, &sa); err != syscall.Errno(unix.EHOSTUNREACH) {
t.Errorf("expected connect to fail with EHOSTUNREACH, but got %v", err)
}
}
@@ -88,9 +88,9 @@ func TestTCPSynSentUnreachable6(t *testing.T) {
// Create the DUT and connection.
dut := testbench.NewDUT(t)
defer dut.TearDown()
- clientFD, clientPort := dut.CreateBoundSocket(unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, net.ParseIP(testbench.RemoteIPv6))
+ clientFD, clientPort := dut.CreateBoundSocket(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, net.ParseIP(testbench.RemoteIPv6))
conn := testbench.NewTCPIPv6(t, testbench.TCP{DstPort: &clientPort}, testbench.TCP{SrcPort: &clientPort})
- defer conn.Close()
+ defer conn.Close(t)
// Bring the DUT to SYN-SENT state with a non-blocking connect.
ctx, cancel := context.WithTimeout(context.Background(), testbench.RPCTimeout)
@@ -100,19 +100,19 @@ func TestTCPSynSentUnreachable6(t *testing.T) {
ZoneId: uint32(testbench.RemoteInterfaceID),
}
copy(sa.Addr[:], net.IP(net.ParseIP(testbench.LocalIPv6)).To16())
- if _, err := dut.ConnectWithErrno(ctx, clientFD, &sa); err != syscall.Errno(unix.EINPROGRESS) {
+ if _, err := dut.ConnectWithErrno(ctx, t, clientFD, &sa); err != syscall.Errno(unix.EINPROGRESS) {
t.Errorf("expected connect to fail with EINPROGRESS, but got %v", err)
}
// Get the SYN.
- tcpLayers, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, nil, time.Second)
+ tcpLayers, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, nil, time.Second)
if err != nil {
t.Fatalf("expected SYN: %s", err)
}
// Send a host unreachable message.
rawConn := (*testbench.Connection)(&conn)
- layers := rawConn.CreateFrame(nil)
+ layers := rawConn.CreateFrame(t, nil)
layers = layers[:len(layers)-1]
const ipLayer = 1
const tcpLayer = ipLayer + 1
@@ -131,9 +131,9 @@ func TestTCPSynSentUnreachable6(t *testing.T) {
Payload: []byte{0, 0, 0, 0},
}
layers = append(layers, &icmpv6, ip, tcp)
- rawConn.SendFrameStateless(layers)
+ rawConn.SendFrameStateless(t, layers)
- if _, err = dut.ConnectWithErrno(ctx, clientFD, &sa); err != syscall.Errno(unix.ENETUNREACH) {
+ if _, err = dut.ConnectWithErrno(ctx, t, clientFD, &sa); err != syscall.Errno(unix.ENETUNREACH) {
t.Errorf("expected connect to fail with ENETUNREACH, but got %v", err)
}
}
diff --git a/test/packetimpact/tests/tcp_noaccept_close_rst_test.go b/test/packetimpact/tests/tcp_noaccept_close_rst_test.go
index b9b3e91d3..82b7a85ff 100644
--- a/test/packetimpact/tests/tcp_noaccept_close_rst_test.go
+++ b/test/packetimpact/tests/tcp_noaccept_close_rst_test.go
@@ -31,12 +31,12 @@ func init() {
func TestTcpNoAcceptCloseReset(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFd, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- conn.Connect()
- defer conn.Close()
- dut.Close(listenFd)
- if _, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)}, 1*time.Second); err != nil {
+ conn.Connect(t)
+ defer conn.Close(t)
+ dut.Close(t, listenFd)
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)}, 1*time.Second); err != nil {
t.Fatalf("expected a RST-ACK packet but got none: %s", err)
}
}
diff --git a/test/packetimpact/tests/tcp_outside_the_window_test.go b/test/packetimpact/tests/tcp_outside_the_window_test.go
index ad8c74234..08f759f7c 100644
--- a/test/packetimpact/tests/tcp_outside_the_window_test.go
+++ b/test/packetimpact/tests/tcp_outside_the_window_test.go
@@ -63,25 +63,25 @@ func TestTCPOutsideTheWindow(t *testing.T) {
t.Run(fmt.Sprintf("%s%d", tt.description, tt.seqNumOffset), func(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFD, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFD)
+ listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFD)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
- conn.Connect()
- acceptFD, _ := dut.Accept(listenFD)
- defer dut.Close(acceptFD)
+ defer conn.Close(t)
+ conn.Connect(t)
+ acceptFD, _ := dut.Accept(t, listenFD)
+ defer dut.Close(t, acceptFD)
- windowSize := seqnum.Size(*conn.SynAck().WindowSize) + tt.seqNumOffset
- conn.Drain()
+ windowSize := seqnum.Size(*conn.SynAck(t).WindowSize) + tt.seqNumOffset
+ conn.Drain(t)
// Ignore whatever increment this out-of-order packet might cause
// to the AckNum.
- localSeqNum := testbench.Uint32(uint32(*conn.LocalSeqNum()))
- conn.Send(testbench.TCP{
+ localSeqNum := testbench.Uint32(uint32(*conn.LocalSeqNum(t)))
+ conn.Send(t, testbench.TCP{
Flags: testbench.Uint8(tt.tcpFlags),
- SeqNum: testbench.Uint32(uint32(conn.LocalSeqNum().Add(windowSize))),
+ SeqNum: testbench.Uint32(uint32(conn.LocalSeqNum(t).Add(windowSize))),
}, tt.payload...)
timeout := 3 * time.Second
- gotACK, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: localSeqNum}, timeout)
+ gotACK, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: localSeqNum}, timeout)
if tt.expectACK && err != nil {
t.Fatalf("expected an ACK packet within %s but got none: %s", timeout, err)
}
diff --git a/test/packetimpact/tests/tcp_paws_mechanism_test.go b/test/packetimpact/tests/tcp_paws_mechanism_test.go
index 55db4ece6..37f3b56dd 100644
--- a/test/packetimpact/tests/tcp_paws_mechanism_test.go
+++ b/test/packetimpact/tests/tcp_paws_mechanism_test.go
@@ -32,15 +32,15 @@ func init() {
func TestPAWSMechanism(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFD, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFD)
+ listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFD)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
options := make([]byte, header.TCPOptionTSLength)
header.EncodeTSOption(currentTS(), 0, options)
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn), Options: options})
- synAck, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn), Options: options})
+ synAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("didn't get synack during handshake: %s", err)
}
@@ -50,9 +50,9 @@ func TestPAWSMechanism(t *testing.T) {
}
tsecr := parsedSynOpts.TSVal
header.EncodeTSOption(currentTS(), tsecr, options)
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), Options: options})
- acceptFD, _ := dut.Accept(listenFD)
- defer dut.Close(acceptFD)
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), Options: options})
+ acceptFD, _ := dut.Accept(t, listenFD)
+ defer dut.Close(t, acceptFD)
sampleData := []byte("Sample Data")
sentTSVal := currentTS()
@@ -61,9 +61,9 @@ func TestPAWSMechanism(t *testing.T) {
// every time we send one, it should not cause any flakiness because timestamps
// only need to be non-decreasing.
time.Sleep(3 * time.Millisecond)
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), Options: options}, &testbench.Payload{Bytes: sampleData})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), Options: options}, &testbench.Payload{Bytes: sampleData})
- gotTCP, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("expected an ACK but got none: %s", err)
}
@@ -86,9 +86,9 @@ func TestPAWSMechanism(t *testing.T) {
// 3ms here is chosen arbitrarily and this time.Sleep() should not cause flakiness
// due to the exact same reasoning discussed above.
time.Sleep(3 * time.Millisecond)
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), Options: options}, &testbench.Payload{Bytes: sampleData})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), Options: options}, &testbench.Payload{Bytes: sampleData})
- gotTCP, err = conn.Expect(testbench.TCP{AckNum: lastAckNum, Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ gotTCP, err = conn.Expect(t, testbench.TCP{AckNum: lastAckNum, Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("expected segment with AckNum %d but got none: %s", lastAckNum, err)
}
diff --git a/test/packetimpact/tests/tcp_queue_receive_in_syn_sent_test.go b/test/packetimpact/tests/tcp_queue_receive_in_syn_sent_test.go
index 8fbec893b..d9f3ea0f2 100644
--- a/test/packetimpact/tests/tcp_queue_receive_in_syn_sent_test.go
+++ b/test/packetimpact/tests/tcp_queue_receive_in_syn_sent_test.go
@@ -52,26 +52,26 @@ func TestQueueReceiveInSynSent(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- socket, remotePort := dut.CreateBoundSocket(unix.SOCK_STREAM, unix.IPPROTO_TCP, net.ParseIP(testbench.RemoteIPv4))
+ socket, remotePort := dut.CreateBoundSocket(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, net.ParseIP(testbench.RemoteIPv4))
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
sampleData := []byte("Sample Data")
- dut.SetNonBlocking(socket, true)
- if _, err := dut.ConnectWithErrno(context.Background(), socket, conn.LocalAddr()); !errors.Is(err, syscall.EINPROGRESS) {
+ dut.SetNonBlocking(t, socket, true)
+ if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, syscall.EINPROGRESS) {
t.Fatalf("failed to bring DUT to SYN-SENT, got: %s, want EINPROGRESS", err)
}
- if _, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, time.Second); err != nil {
t.Fatalf("expected a SYN from DUT, but got none: %s", err)
}
- if _, _, err := dut.RecvWithErrno(context.Background(), socket, int32(len(sampleData)), 0); err != syscall.Errno(unix.EWOULDBLOCK) {
+ if _, _, err := dut.RecvWithErrno(context.Background(), t, socket, int32(len(sampleData)), 0); err != syscall.Errno(unix.EWOULDBLOCK) {
t.Fatalf("expected error %s, got %s", syscall.Errno(unix.EWOULDBLOCK), err)
}
// Test blocking read.
- dut.SetNonBlocking(socket, false)
+ dut.SetNonBlocking(t, socket, false)
var wg sync.WaitGroup
defer wg.Wait()
@@ -86,7 +86,7 @@ func TestQueueReceiveInSynSent(t *testing.T) {
block.Done()
// Issue RECEIVE call in SYN-SENT; this should be queued for
// processing until the connection is established.
- n, buff, err := dut.RecvWithErrno(ctx, socket, int32(len(sampleData)), 0)
+ n, buff, err := dut.RecvWithErrno(ctx, t, socket, int32(len(sampleData)), 0)
if tt.reset {
if err != syscall.Errno(unix.ECONNREFUSED) {
t.Errorf("expected error %s, got %s", syscall.Errno(unix.ECONNREFUSED), err)
@@ -112,19 +112,19 @@ func TestQueueReceiveInSynSent(t *testing.T) {
time.Sleep(100 * time.Millisecond)
if tt.reset {
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)})
return
}
// Bring the connection to Established.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)})
- if _, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)})
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil {
t.Fatalf("expected an ACK from DUT, but got none: %s", err)
}
// Send sample payload and expect an ACK.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, &testbench.Payload{Bytes: sampleData})
- if _, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, &testbench.Payload{Bytes: sampleData})
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil {
t.Fatalf("expected an ACK from DUT, but got none: %s", err)
}
})
diff --git a/test/packetimpact/tests/tcp_reordering_test.go b/test/packetimpact/tests/tcp_reordering_test.go
index a5378a9dd..8742819ca 100644
--- a/test/packetimpact/tests/tcp_reordering_test.go
+++ b/test/packetimpact/tests/tcp_reordering_test.go
@@ -32,10 +32,10 @@ func init() {
func TestReorderingWindow(t *testing.T) {
dut := tb.NewDUT(t)
defer dut.TearDown()
- listenFd, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFd)
+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFd)
conn := tb.NewTCPIPv4(t, tb.TCP{DstPort: &remotePort}, tb.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
// Enable SACK.
opts := make([]byte, 40)
@@ -49,17 +49,17 @@ func TestReorderingWindow(t *testing.T) {
const mss = minMTU - header.IPv4MinimumSize - header.TCPMinimumSize
optsOff += header.EncodeMSSOption(mss, opts[optsOff:])
- conn.ConnectWithOptions(opts[:optsOff])
+ conn.ConnectWithOptions(t, opts[:optsOff])
- acceptFd, _ := dut.Accept(listenFd)
- defer dut.Close(acceptFd)
+ acceptFd, _ := dut.Accept(t, listenFd)
+ defer dut.Close(t, acceptFd)
if tb.DUTType == "linux" {
// Linux has changed its handling of reordering, force the old behavior.
- dut.SetSockOpt(acceptFd, unix.IPPROTO_TCP, unix.TCP_CONGESTION, []byte("reno"))
+ dut.SetSockOpt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_CONGESTION, []byte("reno"))
}
- pls := dut.GetSockOptInt(acceptFd, unix.IPPROTO_TCP, unix.TCP_MAXSEG)
+ pls := dut.GetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_MAXSEG)
if tb.DUTType == "netstack" {
// netstack does not implement TCP_MAXSEG correctly. Fake it
// here. Netstack uses the max SACK size which is 32. The MSS
@@ -69,13 +69,13 @@ func TestReorderingWindow(t *testing.T) {
payload := make([]byte, pls)
- seqNum1 := *conn.RemoteSeqNum()
+ seqNum1 := *conn.RemoteSeqNum(t)
const numPkts = 10
// Send some packets, checking that we receive each.
for i, sn := 0, seqNum1; i < numPkts; i++ {
- dut.Send(acceptFd, payload, 0)
+ dut.Send(t, acceptFd, payload, 0)
- gotOne, err := conn.Expect(tb.TCP{SeqNum: tb.Uint32(uint32(sn))}, time.Second)
+ gotOne, err := conn.Expect(t, tb.TCP{SeqNum: tb.Uint32(uint32(sn))}, time.Second)
sn.UpdateForward(seqnum.Size(len(payload)))
if err != nil {
t.Errorf("Expect #%d: %s", i+1, err)
@@ -86,7 +86,7 @@ func TestReorderingWindow(t *testing.T) {
}
}
- seqNum2 := *conn.RemoteSeqNum()
+ seqNum2 := *conn.RemoteSeqNum(t)
// SACK packets #2-4.
sackBlock := make([]byte, 40)
@@ -97,13 +97,13 @@ func TestReorderingWindow(t *testing.T) {
seqNum1.Add(seqnum.Size(len(payload))),
seqNum1.Add(seqnum.Size(4 * len(payload))),
}}, sackBlock[sbOff:])
- conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck), AckNum: tb.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
+ conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagAck), AckNum: tb.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
// ACK first packet.
- conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck), AckNum: tb.Uint32(uint32(seqNum1) + uint32(len(payload)))})
+ conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagAck), AckNum: tb.Uint32(uint32(seqNum1) + uint32(len(payload)))})
// Check for retransmit.
- gotOne, err := conn.Expect(tb.TCP{SeqNum: tb.Uint32(uint32(seqNum1))}, time.Second)
+ gotOne, err := conn.Expect(t, tb.TCP{SeqNum: tb.Uint32(uint32(seqNum1))}, time.Second)
if err != nil {
t.Error("Expect for retransmit:", err)
}
@@ -123,14 +123,14 @@ func TestReorderingWindow(t *testing.T) {
seqNum1.Add(seqnum.Size(4 * len(payload))),
}}, dsackBlock[dsbOff:])
- conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck), AckNum: tb.Uint32(uint32(seqNum2)), Options: dsackBlock[:dsbOff]})
+ conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagAck), AckNum: tb.Uint32(uint32(seqNum2)), Options: dsackBlock[:dsbOff]})
// Send half of the original window of packets, checking that we
// received each.
for i, sn := 0, seqNum2; i < numPkts/2; i++ {
- dut.Send(acceptFd, payload, 0)
+ dut.Send(t, acceptFd, payload, 0)
- gotOne, err := conn.Expect(tb.TCP{SeqNum: tb.Uint32(uint32(sn))}, time.Second)
+ gotOne, err := conn.Expect(t, tb.TCP{SeqNum: tb.Uint32(uint32(sn))}, time.Second)
sn.UpdateForward(seqnum.Size(len(payload)))
if err != nil {
t.Errorf("Expect #%d: %s", i+1, err)
@@ -144,8 +144,8 @@ func TestReorderingWindow(t *testing.T) {
if tb.DUTType == "netstack" {
// The window should now be halved, so we shouldn't receive any
// more, even if we send them.
- dut.Send(acceptFd, payload, 0)
- if got, err := conn.Expect(tb.TCP{}, 100*time.Millisecond); got != nil || err == nil {
+ dut.Send(t, acceptFd, payload, 0)
+ if got, err := conn.Expect(t, tb.TCP{}, 100*time.Millisecond); got != nil || err == nil {
t.Fatalf("expected no packets within 100 millisecond, but got one: %s", got)
}
return
@@ -153,9 +153,9 @@ func TestReorderingWindow(t *testing.T) {
// Linux reduces the window by three. Check that we can receive the rest.
for i, sn := 0, seqNum2.Add(seqnum.Size(numPkts/2*len(payload))); i < 2; i++ {
- dut.Send(acceptFd, payload, 0)
+ dut.Send(t, acceptFd, payload, 0)
- gotOne, err := conn.Expect(tb.TCP{SeqNum: tb.Uint32(uint32(sn))}, time.Second)
+ gotOne, err := conn.Expect(t, tb.TCP{SeqNum: tb.Uint32(uint32(sn))}, time.Second)
sn.UpdateForward(seqnum.Size(len(payload)))
if err != nil {
t.Errorf("Expect #%d: %s", i+1, err)
@@ -167,8 +167,8 @@ func TestReorderingWindow(t *testing.T) {
}
// The window should now be full.
- dut.Send(acceptFd, payload, 0)
- if got, err := conn.Expect(tb.TCP{}, 100*time.Millisecond); got != nil || err == nil {
+ dut.Send(t, acceptFd, payload, 0)
+ if got, err := conn.Expect(t, tb.TCP{}, 100*time.Millisecond); got != nil || err == nil {
t.Fatalf("expected no packets within 100 millisecond, but got one: %s", got)
}
}
diff --git a/test/packetimpact/tests/tcp_retransmits_test.go b/test/packetimpact/tests/tcp_retransmits_test.go
index 6940eb7fb..072014ff8 100644
--- a/test/packetimpact/tests/tcp_retransmits_test.go
+++ b/test/packetimpact/tests/tcp_retransmits_test.go
@@ -33,41 +33,41 @@ func init() {
func TestRetransmits(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFd, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFd)
+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFd)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
- conn.Connect()
- acceptFd, _ := dut.Accept(listenFd)
- defer dut.Close(acceptFd)
+ conn.Connect(t)
+ acceptFd, _ := dut.Accept(t, listenFd)
+ defer dut.Close(t, acceptFd)
- dut.SetSockOptInt(acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
+ dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
sampleData := []byte("Sample Data")
samplePayload := &testbench.Payload{Bytes: sampleData}
- dut.Send(acceptFd, sampleData, 0)
- if _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {
+ dut.Send(t, acceptFd, sampleData, 0)
+ if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
// Give a chance for the dut to estimate RTO with RTT from the DATA-ACK.
// TODO(gvisor.dev/issue/2685) Estimate RTO during handshake, after which
// we can skip sending this ACK.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
startRTO := time.Second
current := startRTO
first := time.Now()
- dut.Send(acceptFd, sampleData, 0)
- seq := testbench.Uint32(uint32(*conn.RemoteSeqNum()))
- if _, err := conn.ExpectData(&testbench.TCP{SeqNum: seq}, samplePayload, startRTO); err != nil {
+ dut.Send(t, acceptFd, sampleData, 0)
+ seq := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)))
+ if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: seq}, samplePayload, startRTO); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
// Expect retransmits of the same segment.
for i := 0; i < 5; i++ {
start := time.Now()
- if _, err := conn.ExpectData(&testbench.TCP{SeqNum: seq}, samplePayload, 2*current); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: seq}, samplePayload, 2*current); err != nil {
t.Fatalf("expected payload was not received: %s loop %d", err, i)
}
if i == 0 {
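For context on the retransmit expectations above: each retransmission is awaited with a bound of 2*current, and the bound is assumed to double every round (the `current *= 2` step itself falls outside this hunk, so treat the doubling as an assumption here). A minimal standalone Go sketch of that schedule, not part of this patch; startRTO mirrors the one-second value in the test, everything else is illustrative.

package main

import (
	"fmt"
	"time"
)

// backoffBounds returns the wait bound applied to each of the first n
// retransmissions when the bound starts at 2*startRTO and doubles each round.
func backoffBounds(startRTO time.Duration, n int) []time.Duration {
	bounds := make([]time.Duration, 0, n)
	current := startRTO
	for i := 0; i < n; i++ {
		bounds = append(bounds, 2*current)
		current *= 2
	}
	return bounds
}

func main() {
	for i, b := range backoffBounds(time.Second, 5) {
		fmt.Printf("retransmit #%d expected within %v\n", i+1, b)
	}
}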
diff --git a/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go b/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go
index 90ab85419..f91b06ba1 100644
--- a/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go
+++ b/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go
@@ -61,23 +61,23 @@ func TestSendWindowSizesPiggyback(t *testing.T) {
t.Run(fmt.Sprintf("%s%d", tt.description, tt.windowSize), func(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFd, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFd)
+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFd)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort, WindowSize: testbench.Uint16(tt.windowSize)}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
- conn.Connect()
- acceptFd, _ := dut.Accept(listenFd)
- defer dut.Close(acceptFd)
+ conn.Connect(t)
+ acceptFd, _ := dut.Accept(t, listenFd)
+ defer dut.Close(t, acceptFd)
- dut.SetSockOptInt(acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
+ dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
expectedTCP := testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}
- dut.Send(acceptFd, sampleData, 0)
+ dut.Send(t, acceptFd, sampleData, 0)
expectedPayload := testbench.Payload{Bytes: tt.expectedPayload1}
- if _, err := conn.ExpectData(&expectedTCP, &expectedPayload, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &expectedTCP, &expectedPayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
@@ -86,18 +86,18 @@ func TestSendWindowSizesPiggyback(t *testing.T) {
if tt.enqueue {
// Enqueue a segment for the dut to transmit.
- dut.Send(acceptFd, sampleData, 0)
+ dut.Send(t, acceptFd, sampleData, 0)
}
// Send ACK for the previous segment along with data for the dut to
// receive and ACK back. Sending this ACK would make room for the dut
// to transmit any enqueued segment.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh), WindowSize: testbench.Uint16(tt.windowSize)}, &testbench.Payload{Bytes: sampleData})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh), WindowSize: testbench.Uint16(tt.windowSize)}, &testbench.Payload{Bytes: sampleData})
// Expect the dut to piggyback the ACK for received data along with
// the segment enqueued for transmit.
expectedPayload = testbench.Payload{Bytes: tt.expectedPayload2}
- if _, err := conn.ExpectData(&expectedTCP, &expectedPayload, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &expectedTCP, &expectedPayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
})
diff --git a/test/packetimpact/tests/tcp_synrcvd_reset_test.go b/test/packetimpact/tests/tcp_synrcvd_reset_test.go
index 7d5deab01..57d034dd1 100644
--- a/test/packetimpact/tests/tcp_synrcvd_reset_test.go
+++ b/test/packetimpact/tests/tcp_synrcvd_reset_test.go
@@ -32,21 +32,21 @@ func init() {
func TestTCPSynRcvdReset(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFD, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFD)
+ listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFD)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
// Expect dut connection to have transitioned to SYN-RCVD state.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)})
- if _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
t.Fatalf("expected SYN-ACK %s", err)
}
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)})
// Expect the connection to have transitioned SYN-RCVD to CLOSED.
// TODO(gvisor.dev/issue/478): Check for TCP_INFO on the dut side.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
- if _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
t.Fatalf("expected a TCP RST %s", err)
}
}
diff --git a/test/packetimpact/tests/tcp_synsent_reset_test.go b/test/packetimpact/tests/tcp_synsent_reset_test.go
index 6898a2239..eac8eb19d 100644
--- a/test/packetimpact/tests/tcp_synsent_reset_test.go
+++ b/test/packetimpact/tests/tcp_synsent_reset_test.go
@@ -31,17 +31,19 @@ func init() {
// dutSynSentState sets up the dut connection in SYN-SENT state.
func dutSynSentState(t *testing.T) (*tb.DUT, *tb.TCPIPv4, uint16, uint16) {
+ t.Helper()
+
dut := tb.NewDUT(t)
- clientFD, clientPort := dut.CreateBoundSocket(unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, net.ParseIP(tb.RemoteIPv4))
+ clientFD, clientPort := dut.CreateBoundSocket(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, net.ParseIP(tb.RemoteIPv4))
port := uint16(9001)
conn := tb.NewTCPIPv4(t, tb.TCP{SrcPort: &port, DstPort: &clientPort}, tb.TCP{SrcPort: &clientPort, DstPort: &port})
sa := unix.SockaddrInet4{Port: int(port)}
copy(sa.Addr[:], net.IP(net.ParseIP(tb.LocalIPv4)).To4())
// Bring the dut to SYN-SENT state with a non-blocking connect.
- dut.Connect(clientFD, &sa)
- if _, err := conn.ExpectData(&tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn)}, nil, time.Second); err != nil {
+ dut.Connect(t, clientFD, &sa)
+ if _, err := conn.ExpectData(t, &tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn)}, nil, time.Second); err != nil {
t.Fatalf("expected SYN\n")
}
@@ -51,13 +53,13 @@ func dutSynSentState(t *testing.T) (*tb.DUT, *tb.TCPIPv4, uint16, uint16) {
// TestTCPSynSentReset tests RFC793, p67: SYN-SENT to CLOSED transition.
func TestTCPSynSentReset(t *testing.T) {
dut, conn, _, _ := dutSynSentState(t)
- defer conn.Close()
+ defer conn.Close(t)
defer dut.TearDown()
- conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagRst | header.TCPFlagAck)})
+ conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagRst | header.TCPFlagAck)})
// Expect the connection to have closed.
// TODO(gvisor.dev/issue/478): Check for TCP_INFO on the dut side.
- conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck)})
- if _, err := conn.ExpectData(&tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
+ conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagAck)})
+ if _, err := conn.ExpectData(t, &tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
t.Fatalf("expected a TCP RST")
}
}
@@ -67,22 +69,22 @@ func TestTCPSynSentReset(t *testing.T) {
func TestTCPSynSentRcvdReset(t *testing.T) {
dut, c, remotePort, clientPort := dutSynSentState(t)
defer dut.TearDown()
- defer c.Close()
+ defer c.Close(t)
conn := tb.NewTCPIPv4(t, tb.TCP{SrcPort: &remotePort, DstPort: &clientPort}, tb.TCP{SrcPort: &clientPort, DstPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
// Initiate new SYN connection with the same port pair
// (simultaneous open case), expect the dut connection to move to
// SYN-RCVD state
- conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn)})
- if _, err := conn.ExpectData(&tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
+ conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn)})
+ if _, err := conn.ExpectData(t, &tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
t.Fatalf("expected SYN-ACK %s\n", err)
}
- conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)})
+ conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)})
// Expect the connection to have transitioned SYN-RCVD to CLOSED.
// TODO(gvisor.dev/issue/478): Check for TCP_INFO on the dut side.
- conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck)})
- if _, err := conn.ExpectData(&tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
+ conn.Send(t, tb.TCP{Flags: tb.Uint8(header.TCPFlagAck)})
+ if _, err := conn.ExpectData(t, &tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
t.Fatalf("expected a TCP RST")
}
}
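The two tests above show the pattern this change applies throughout: a setup helper receives the caller's *testing.T, marks itself with t.Helper(), and so fails only the (sub)test it was called from. A minimal standalone sketch of that pattern with made-up names (newSession and dial are not the testbench API):

package pattern_test

import "testing"

// newSession is a stand-in for a fallible setup helper like dutSynSentState
// above: it takes the caller's t rather than storing one, so a failure aborts
// only the (sub)test that called it.
func newSession(t *testing.T) string {
	t.Helper()
	s, err := dial()
	if err != nil {
		t.Fatalf("setup failed: %v", err)
	}
	return s
}

// dial is a hypothetical placeholder for the real, possibly failing, work.
func dial() (string, error) { return "session", nil }

func TestHelperPattern(t *testing.T) {
	t.Run("synsent", func(t *testing.T) {
		_ = newSession(t) // a failure here ends only the "synsent" subtest
	})
	t.Run("synrcvd", func(t *testing.T) {
		_ = newSession(t)
	})
}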
diff --git a/test/packetimpact/tests/tcp_user_timeout_test.go b/test/packetimpact/tests/tcp_user_timeout_test.go
index 87e45d765..551dc78e7 100644
--- a/test/packetimpact/tests/tcp_user_timeout_test.go
+++ b/test/packetimpact/tests/tcp_user_timeout_test.go
@@ -16,7 +16,6 @@ package tcp_user_timeout_test
import (
"flag"
- "fmt"
"testing"
"time"
@@ -29,22 +28,20 @@ func init() {
testbench.RegisterFlags(flag.CommandLine)
}
-func sendPayload(conn *testbench.TCPIPv4, dut *testbench.DUT, fd int32) error {
+func sendPayload(t *testing.T, conn *testbench.TCPIPv4, dut *testbench.DUT, fd int32) {
sampleData := make([]byte, 100)
for i := range sampleData {
sampleData[i] = uint8(i)
}
- conn.Drain()
- dut.Send(fd, sampleData, 0)
- if _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, &testbench.Payload{Bytes: sampleData}, time.Second); err != nil {
- return fmt.Errorf("expected data but got none: %w", err)
+ conn.Drain(t)
+ dut.Send(t, fd, sampleData, 0)
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, &testbench.Payload{Bytes: sampleData}, time.Second); err != nil {
+ t.Fatalf("expected data but got none: %w", err)
}
- return nil
}
-func sendFIN(conn *testbench.TCPIPv4, dut *testbench.DUT, fd int32) error {
- dut.Close(fd)
- return nil
+func sendFIN(t *testing.T, conn *testbench.TCPIPv4, dut *testbench.DUT, fd int32) {
+ dut.Close(t, fd)
}
func TestTCPUserTimeout(t *testing.T) {
@@ -59,7 +56,7 @@ func TestTCPUserTimeout(t *testing.T) {
} {
for _, ttf := range []struct {
description string
- f func(conn *testbench.TCPIPv4, dut *testbench.DUT, fd int32) error
+ f func(_ *testing.T, _ *testbench.TCPIPv4, _ *testbench.DUT, fd int32)
}{
{"AfterPayload", sendPayload},
{"AfterFIN", sendFIN},
@@ -68,31 +65,29 @@ func TestTCPUserTimeout(t *testing.T) {
// Create a socket, listen, TCP handshake, and accept.
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFD, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFD)
+ listenFD, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFD)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
- conn.Connect()
- acceptFD, _ := dut.Accept(listenFD)
+ defer conn.Close(t)
+ conn.Connect(t)
+ acceptFD, _ := dut.Accept(t, listenFD)
if tt.userTimeout != 0 {
- dut.SetSockOptInt(acceptFD, unix.SOL_TCP, unix.TCP_USER_TIMEOUT, int32(tt.userTimeout.Milliseconds()))
+ dut.SetSockOptInt(t, acceptFD, unix.SOL_TCP, unix.TCP_USER_TIMEOUT, int32(tt.userTimeout.Milliseconds()))
}
- if err := ttf.f(&conn, &dut, acceptFD); err != nil {
- t.Fatal(err)
- }
+ ttf.f(t, &conn, &dut, acceptFD)
time.Sleep(tt.sendDelay)
- conn.Drain()
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Drain(t)
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
// If TCP_USER_TIMEOUT was set and the above delay was longer than the
// TCP_USER_TIMEOUT then the DUT should send a RST in response to the
// testbench's packet.
expectRST := tt.userTimeout != 0 && tt.sendDelay > tt.userTimeout
expectTimeout := 5 * time.Second
- got, err := conn.Expect(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, expectTimeout)
+ got, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, expectTimeout)
if expectRST && err != nil {
t.Errorf("expected RST packet within %s but got none: %s", expectTimeout, err)
}
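The TCP_USER_TIMEOUT case above configures the option through the DUT's SetSockOptInt wrapper. On a plain Linux socket the same knob is set directly with golang.org/x/sys/unix; a minimal sketch under that assumption (the 3000 ms value and the bare socket are purely illustrative, error handling abbreviated):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

// setUserTimeout arms TCP_USER_TIMEOUT (in milliseconds) on fd. Once sent data
// stays unacknowledged longer than this, the kernel aborts the connection,
// which is why the test above expects a RST after the long delay.
func setUserTimeout(fd int, ms int) error {
	return unix.SetsockoptInt(fd, unix.SOL_TCP, unix.TCP_USER_TIMEOUT, ms)
}

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, unix.IPPROTO_TCP)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)
	if err := setUserTimeout(fd, 3000); err != nil {
		log.Fatal(err)
	}
	log.Println("TCP_USER_TIMEOUT set to 3s")
}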
diff --git a/test/packetimpact/tests/tcp_window_shrink_test.go b/test/packetimpact/tests/tcp_window_shrink_test.go
index e78d04756..5b001fbec 100644
--- a/test/packetimpact/tests/tcp_window_shrink_test.go
+++ b/test/packetimpact/tests/tcp_window_shrink_test.go
@@ -31,43 +31,43 @@ func init() {
func TestWindowShrink(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFd, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFd)
+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFd)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
- conn.Connect()
- acceptFd, _ := dut.Accept(listenFd)
- defer dut.Close(acceptFd)
+ conn.Connect(t)
+ acceptFd, _ := dut.Accept(t, listenFd)
+ defer dut.Close(t, acceptFd)
- dut.SetSockOptInt(acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
+ dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
sampleData := []byte("Sample Data")
samplePayload := &testbench.Payload{Bytes: sampleData}
- dut.Send(acceptFd, sampleData, 0)
- if _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {
+ dut.Send(t, acceptFd, sampleData, 0)
+ if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
- dut.Send(acceptFd, sampleData, 0)
- dut.Send(acceptFd, sampleData, 0)
- if _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {
+ dut.Send(t, acceptFd, sampleData, 0)
+ dut.Send(t, acceptFd, sampleData, 0)
+ if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
- if _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
// We close our receiving window here
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
- dut.Send(acceptFd, []byte("Sample Data"), 0)
+ dut.Send(t, acceptFd, []byte("Sample Data"), 0)
// Note: There is another kind of zero-window probing which Windows uses (by sending one
// new byte at `RemoteSeqNum`), if netstack wants to go that way, we may want to change
// the following lines.
- expectedRemoteSeqNum := *conn.RemoteSeqNum() - 1
- if _, err := conn.ExpectData(&testbench.TCP{SeqNum: testbench.Uint32(uint32(expectedRemoteSeqNum))}, nil, time.Second); err != nil {
+ expectedRemoteSeqNum := *conn.RemoteSeqNum(t) - 1
+ if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: testbench.Uint32(uint32(expectedRemoteSeqNum))}, nil, time.Second); err != nil {
t.Fatalf("expected a packet with sequence number %d: %s", expectedRemoteSeqNum, err)
}
}
diff --git a/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go b/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go
index 8c89d57c9..da93267d6 100644
--- a/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go
+++ b/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go
@@ -33,27 +33,27 @@ func init() {
func TestZeroWindowProbeRetransmit(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFd, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFd)
+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFd)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
- conn.Connect()
- acceptFd, _ := dut.Accept(listenFd)
- defer dut.Close(acceptFd)
+ conn.Connect(t)
+ acceptFd, _ := dut.Accept(t, listenFd)
+ defer dut.Close(t, acceptFd)
- dut.SetSockOptInt(acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
+ dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
sampleData := []byte("Sample Data")
samplePayload := &testbench.Payload{Bytes: sampleData}
// Send and receive sample data to the dut.
- dut.Send(acceptFd, sampleData, 0)
- if _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {
+ dut.Send(t, acceptFd, sampleData, 0)
+ if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- if _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, nil, time.Second); err != nil {
t.Fatalf("expected packet was not received: %s", err)
}
@@ -63,15 +63,15 @@ func TestZeroWindowProbeRetransmit(t *testing.T) {
// of the recorded first zero probe transmission duration.
//
// Advertize zero receive window again.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
- probeSeq := testbench.Uint32(uint32(*conn.RemoteSeqNum() - 1))
- ackProbe := testbench.Uint32(uint32(*conn.RemoteSeqNum()))
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
+ probeSeq := testbench.Uint32(uint32(*conn.RemoteSeqNum(t) - 1))
+ ackProbe := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)))
startProbeDuration := time.Second
current := startProbeDuration
first := time.Now()
// Ask the dut to send out data.
- dut.Send(acceptFd, sampleData, 0)
+ dut.Send(t, acceptFd, sampleData, 0)
// Expect the dut to keep the connection alive as long as the remote is
// acknowledging the zero-window probes.
for i := 0; i < 5; i++ {
@@ -79,7 +79,7 @@ func TestZeroWindowProbeRetransmit(t *testing.T) {
// Expect zero-window probe with a timeout which is a function of the typical
// first retransmission time. The retransmission times are supposed to
// increase exponentially.
- if _, err := conn.ExpectData(&testbench.TCP{SeqNum: probeSeq}, nil, 2*current); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: probeSeq}, nil, 2*current); err != nil {
t.Fatalf("expected a probe with sequence number %d: loop %d", probeSeq, i)
}
if i == 0 {
@@ -92,14 +92,13 @@ func TestZeroWindowProbeRetransmit(t *testing.T) {
t.Errorf("got zero probe %d after %s, want >= %s", i, got, want)
}
// Acknowledge the zero-window probes from the dut.
- conn.Send(testbench.TCP{AckNum: ackProbe, Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
+ conn.Send(t, testbench.TCP{AckNum: ackProbe, Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
current *= 2
}
// Advertize non-zero window.
- conn.Send(testbench.TCP{AckNum: ackProbe, Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{AckNum: ackProbe, Flags: testbench.Uint8(header.TCPFlagAck)})
// Expect the dut to recover and transmit data.
- if _, err := conn.ExpectData(&testbench.
- TCP{SeqNum: ackProbe}, samplePayload, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: ackProbe}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
}
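As the code above shows, the zero-window probe carries a sequence number one byte behind the peer's next sequence number, it is acknowledged at that next sequence number, and the expected probe interval doubles after each acknowledged probe. A tiny standalone sketch of the sequence-number bookkeeping (the value 1000 is an arbitrary example, not from the test):

package main

import "fmt"

// probeNumbers mirrors the probeSeq/ackProbe computation above: the zero-window
// probe's sequence number is one byte behind the peer's next sequence number,
// and the ACK for the probe carries that next sequence number.
func probeNumbers(nextSeq uint32) (probeSeq, ackProbe uint32) {
	return nextSeq - 1, nextSeq
}

func main() {
	probeSeq, ackProbe := probeNumbers(1000)
	fmt.Printf("probe seq=%d ack=%d\n", probeSeq, ackProbe)
}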
diff --git a/test/packetimpact/tests/tcp_zero_window_probe_test.go b/test/packetimpact/tests/tcp_zero_window_probe_test.go
index 649fd5699..44cac42f8 100644
--- a/test/packetimpact/tests/tcp_zero_window_probe_test.go
+++ b/test/packetimpact/tests/tcp_zero_window_probe_test.go
@@ -33,29 +33,29 @@ func init() {
func TestZeroWindowProbe(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFd, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFd)
+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFd)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
- conn.Connect()
- acceptFd, _ := dut.Accept(listenFd)
- defer dut.Close(acceptFd)
+ conn.Connect(t)
+ acceptFd, _ := dut.Accept(t, listenFd)
+ defer dut.Close(t, acceptFd)
- dut.SetSockOptInt(acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
+ dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
sampleData := []byte("Sample Data")
samplePayload := &testbench.Payload{Bytes: sampleData}
start := time.Now()
// Send and receive sample data to the dut.
- dut.Send(acceptFd, sampleData, 0)
- if _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {
+ dut.Send(t, acceptFd, sampleData, 0)
+ if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
sendTime := time.Now().Sub(start)
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- if _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, nil, time.Second); err != nil {
t.Fatalf("expected packet was not received: %s", err)
}
@@ -63,24 +63,24 @@ func TestZeroWindowProbe(t *testing.T) {
// probe to be sent.
//
// Advertize zero window to the dut.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
// Expected sequence number of the zero window probe.
- probeSeq := testbench.Uint32(uint32(*conn.RemoteSeqNum() - 1))
+ probeSeq := testbench.Uint32(uint32(*conn.RemoteSeqNum(t) - 1))
// Expected ack number of the ACK for the probe.
- ackProbe := testbench.Uint32(uint32(*conn.RemoteSeqNum()))
+ ackProbe := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)))
// Expect there are no zero-window probes sent until there is data to be sent out
// from the dut.
- if _, err := conn.ExpectData(&testbench.TCP{SeqNum: probeSeq}, nil, 2*time.Second); err == nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: probeSeq}, nil, 2*time.Second); err == nil {
t.Fatalf("unexpected packet with sequence number %d: %s", probeSeq, err)
}
start = time.Now()
// Ask the dut to send out data.
- dut.Send(acceptFd, sampleData, 0)
+ dut.Send(t, acceptFd, sampleData, 0)
// Expect zero-window probe from the dut.
- if _, err := conn.ExpectData(&testbench.TCP{SeqNum: probeSeq}, nil, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: probeSeq}, nil, time.Second); err != nil {
t.Fatalf("expected a packet with sequence number %d: %s", probeSeq, err)
}
// Expect the probe to be sent after some time. Compare against the previous
@@ -94,9 +94,9 @@ func TestZeroWindowProbe(t *testing.T) {
// and sends out the sample payload after the send window opens.
//
// Advertize non-zero window to the dut and ack the zero window probe.
- conn.Send(testbench.TCP{AckNum: ackProbe, Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{AckNum: ackProbe, Flags: testbench.Uint8(header.TCPFlagAck)})
// Expect the dut to recover and transmit data.
- if _, err := conn.ExpectData(&testbench.TCP{SeqNum: ackProbe}, samplePayload, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: ackProbe}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
@@ -104,9 +104,9 @@ func TestZeroWindowProbe(t *testing.T) {
// Check if the dut responds as we do for a similar probe sent to it.
// Basically with sequence number to one byte behind the unacknowledged
// sequence number.
- p := testbench.Uint32(uint32(*conn.LocalSeqNum()))
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), SeqNum: testbench.Uint32(uint32(*conn.LocalSeqNum() - 1))})
- if _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: p}, nil, time.Second); err != nil {
+ p := testbench.Uint32(uint32(*conn.LocalSeqNum(t)))
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), SeqNum: testbench.Uint32(uint32(*conn.LocalSeqNum(t) - 1))})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: p}, nil, time.Second); err != nil {
t.Fatalf("expected a packet with ack number: %d: %s", p, err)
}
}
diff --git a/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go b/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go
index 3c467b14f..09a1c653f 100644
--- a/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go
+++ b/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go
@@ -33,27 +33,27 @@ func init() {
func TestZeroWindowProbeUserTimeout(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- listenFd, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
- defer dut.Close(listenFd)
+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)
+ defer dut.Close(t, listenFd)
conn := testbench.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
- conn.Connect()
- acceptFd, _ := dut.Accept(listenFd)
- defer dut.Close(acceptFd)
+ conn.Connect(t)
+ acceptFd, _ := dut.Accept(t, listenFd)
+ defer dut.Close(t, acceptFd)
- dut.SetSockOptInt(acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
+ dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
sampleData := []byte("Sample Data")
samplePayload := &testbench.Payload{Bytes: sampleData}
// Send and receive sample data to the dut.
- dut.Send(acceptFd, sampleData, 0)
- if _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {
+ dut.Send(t, acceptFd, sampleData, 0)
+ if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- if _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, nil, time.Second); err != nil {
t.Fatalf("expected packet was not received: %s", err)
}
@@ -61,15 +61,15 @@ func TestZeroWindowProbeUserTimeout(t *testing.T) {
// probe to be sent.
//
// Advertize zero window to the dut.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
// Expected sequence number of the zero window probe.
- probeSeq := testbench.Uint32(uint32(*conn.RemoteSeqNum() - 1))
+ probeSeq := testbench.Uint32(uint32(*conn.RemoteSeqNum(t) - 1))
start := time.Now()
// Ask the dut to send out data.
- dut.Send(acceptFd, sampleData, 0)
+ dut.Send(t, acceptFd, sampleData, 0)
// Expect zero-window probe from the dut.
- if _, err := conn.ExpectData(&testbench.TCP{SeqNum: probeSeq}, nil, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: probeSeq}, nil, time.Second); err != nil {
t.Fatalf("expected a packet with sequence number %d: %s", probeSeq, err)
}
// Record the duration for first probe, the dut sends the zero window probe after
@@ -80,19 +80,19 @@ func TestZeroWindowProbeUserTimeout(t *testing.T) {
// when the dut is sending zero-window probes.
//
// Reduce the retransmit timeout.
- dut.SetSockOptInt(acceptFd, unix.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int32(startProbeDuration.Milliseconds()))
+ dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int32(startProbeDuration.Milliseconds()))
// Advertize zero window again.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
// Ask the dut to send out data that would trigger zero window probe retransmissions.
- dut.Send(acceptFd, sampleData, 0)
+ dut.Send(t, acceptFd, sampleData, 0)
// Wait for the connection to timeout after multiple zero-window probe retransmissions.
time.Sleep(8 * startProbeDuration)
// Expect the connection to have timed out and closed which would cause the dut
// to reply with a RST to the ACK we send.
- conn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
- if _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
t.Fatalf("expected a TCP RST")
}
}
diff --git a/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go b/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
index b0315e67c..d30177e64 100644
--- a/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
+++ b/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
@@ -36,11 +36,11 @@ func init() {
func TestDiscardsUDPPacketsWithMcastSourceAddressV4(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- remoteFD, remotePort := dut.CreateBoundSocket(unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP(testbench.RemoteIPv4))
- defer dut.Close(remoteFD)
- dut.SetSockOptTimeval(remoteFD, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &oneSecond)
+ remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP(testbench.RemoteIPv4))
+ defer dut.Close(t, remoteFD)
+ dut.SetSockOptTimeval(t, remoteFD, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &oneSecond)
conn := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
for _, mcastAddr := range []net.IP{
net.IPv4allsys,
@@ -50,11 +50,12 @@ func TestDiscardsUDPPacketsWithMcastSourceAddressV4(t *testing.T) {
} {
t.Run(fmt.Sprintf("srcaddr=%s", mcastAddr), func(t *testing.T) {
conn.SendIP(
+ t,
testbench.IPv4{SrcAddr: testbench.Address(tcpip.Address(mcastAddr.To4()))},
testbench.UDP{},
)
- ret, payload, errno := dut.RecvWithErrno(context.Background(), remoteFD, 100, 0)
+ ret, payload, errno := dut.RecvWithErrno(context.Background(), t, remoteFD, 100, 0)
if errno != syscall.EAGAIN || errno != syscall.EWOULDBLOCK {
t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, payload, errno)
}
@@ -65,11 +66,11 @@ func TestDiscardsUDPPacketsWithMcastSourceAddressV4(t *testing.T) {
func TestDiscardsUDPPacketsWithMcastSourceAddressV6(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- remoteFD, remotePort := dut.CreateBoundSocket(unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP(testbench.RemoteIPv6))
- defer dut.Close(remoteFD)
- dut.SetSockOptTimeval(remoteFD, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &oneSecond)
+ remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP(testbench.RemoteIPv6))
+ defer dut.Close(t, remoteFD)
+ dut.SetSockOptTimeval(t, remoteFD, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &oneSecond)
conn := testbench.NewUDPIPv6(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
for _, mcastAddr := range []net.IP{
net.IPv6interfacelocalallnodes,
@@ -80,10 +81,11 @@ func TestDiscardsUDPPacketsWithMcastSourceAddressV6(t *testing.T) {
} {
t.Run(fmt.Sprintf("srcaddr=%s", mcastAddr), func(t *testing.T) {
conn.SendIPv6(
+ t,
testbench.IPv6{SrcAddr: testbench.Address(tcpip.Address(mcastAddr.To16()))},
testbench.UDP{},
)
- ret, payload, errno := dut.RecvWithErrno(context.Background(), remoteFD, 100, 0)
+ ret, payload, errno := dut.RecvWithErrno(context.Background(), t, remoteFD, 100, 0)
if errno != syscall.EAGAIN || errno != syscall.EWOULDBLOCK {
t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, payload, errno)
}
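Both tests above expect the DUT's non-blocking receive to fail with EAGAIN/EWOULDBLOCK because a datagram with a multicast source address must be discarded. For a local Linux socket, a roughly equivalent check looks like the sketch below (golang.org/x/sys/unix; on Linux the two errno constants share the same value, and the bare unbound socket here is only for illustration):

package main

import (
	"errors"
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// A non-blocking UDP socket with nothing to read returns EAGAIN, which
	// stands in for "the packet was (correctly) discarded".
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM|unix.SOCK_NONBLOCK, unix.IPPROTO_UDP)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	buf := make([]byte, 100)
	n, _, err := unix.Recvfrom(fd, buf, 0)
	switch {
	case err == nil:
		fmt.Printf("unexpectedly received %d bytes\n", n)
	case errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EWOULDBLOCK):
		fmt.Println("no datagram received, as expected")
	default:
		log.Fatal(err)
	}
}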
diff --git a/test/packetimpact/tests/udp_icmp_error_propagation_test.go b/test/packetimpact/tests/udp_icmp_error_propagation_test.go
index b754918f6..715e8f5b5 100644
--- a/test/packetimpact/tests/udp_icmp_error_propagation_test.go
+++ b/test/packetimpact/tests/udp_icmp_error_propagation_test.go
@@ -72,7 +72,7 @@ func (e icmpError) ToICMPv4() *testbench.ICMPv4 {
type errorDetection struct {
name string
useValidConn bool
- f func(context.Context, testData) error
+ f func(context.Context, *testing.T, testData)
}
type testData struct {
@@ -95,12 +95,14 @@ func wantErrno(c connectionMode, icmpErr icmpError) syscall.Errno {
}
// sendICMPError sends an ICMP error message in response to a UDP datagram.
-func sendICMPError(conn *testbench.UDPIPv4, icmpErr icmpError, udp *testbench.UDP) error {
- layers := (*testbench.Connection)(conn).CreateFrame(nil)
+func sendICMPError(t *testing.T, conn *testbench.UDPIPv4, icmpErr icmpError, udp *testbench.UDP) {
+ t.Helper()
+
+ layers := (*testbench.Connection)(conn).CreateFrame(t, nil)
layers = layers[:len(layers)-1]
ip, ok := udp.Prev().(*testbench.IPv4)
if !ok {
- return fmt.Errorf("expected %s to be IPv4", udp.Prev())
+ t.Fatalf("expected %s to be IPv4", udp.Prev())
}
if icmpErr == timeToLiveExceeded {
*ip.TTL = 1
@@ -114,84 +116,82 @@ func sendICMPError(conn *testbench.UDPIPv4, icmpErr icmpError, udp *testbench.UD
// resulting in a mal-formed packet.
layers = append(layers, icmpErr.ToICMPv4(), ip, udp)
- (*testbench.Connection)(conn).SendFrameStateless(layers)
- return nil
+ (*testbench.Connection)(conn).SendFrameStateless(t, layers)
}
// testRecv tests observing the ICMP error through the recv syscall. A packet
// is sent to the DUT, and if wantErrno is non-zero, then the first recv should
// fail and the second should succeed. Otherwise if wantErrno is zero then the
// first recv should succeed immediately.
-func testRecv(ctx context.Context, d testData) error {
+func testRecv(ctx context.Context, t *testing.T, d testData) {
+ t.Helper()
+
// Check that receiving on the clean socket works.
- d.conn.Send(testbench.UDP{DstPort: &d.cleanPort})
- d.dut.Recv(d.cleanFD, 100, 0)
+ d.conn.Send(t, testbench.UDP{DstPort: &d.cleanPort})
+ d.dut.Recv(t, d.cleanFD, 100, 0)
- d.conn.Send(testbench.UDP{})
+ d.conn.Send(t, testbench.UDP{})
if d.wantErrno != syscall.Errno(0) {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
- ret, _, err := d.dut.RecvWithErrno(ctx, d.remoteFD, 100, 0)
+ ret, _, err := d.dut.RecvWithErrno(ctx, t, d.remoteFD, 100, 0)
if ret != -1 {
- return fmt.Errorf("recv after ICMP error succeeded unexpectedly, expected (%[1]d) %[1]v", d.wantErrno)
+ t.Fatalf("recv after ICMP error succeeded unexpectedly, expected (%[1]d) %[1]v", d.wantErrno)
}
if err != d.wantErrno {
- return fmt.Errorf("recv after ICMP error resulted in error (%[1]d) %[1]v, expected (%[2]d) %[2]v", err, d.wantErrno)
+ t.Fatalf("recv after ICMP error resulted in error (%[1]d) %[1]v, expected (%[2]d) %[2]v", err, d.wantErrno)
}
}
- d.dut.Recv(d.remoteFD, 100, 0)
- return nil
+ d.dut.Recv(t, d.remoteFD, 100, 0)
}
// testSendTo tests observing the ICMP error through the send syscall. If
// wantErrno is non-zero, the first send should fail and a subsequent send
// should succeed; if wantErrno is zero then the first send should just
// succeed.
-func testSendTo(ctx context.Context, d testData) error {
+func testSendTo(ctx context.Context, t *testing.T, d testData) {
// Check that sending on the clean socket works.
- d.dut.SendTo(d.cleanFD, nil, 0, d.conn.LocalAddr())
- if _, err := d.conn.Expect(testbench.UDP{SrcPort: &d.cleanPort}, time.Second); err != nil {
- return fmt.Errorf("did not receive UDP packet from clean socket on DUT: %s", err)
+ d.dut.SendTo(t, d.cleanFD, nil, 0, d.conn.LocalAddr(t))
+ if _, err := d.conn.Expect(t, testbench.UDP{SrcPort: &d.cleanPort}, time.Second); err != nil {
+ t.Fatalf("did not receive UDP packet from clean socket on DUT: %s", err)
}
if d.wantErrno != syscall.Errno(0) {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
- ret, err := d.dut.SendToWithErrno(ctx, d.remoteFD, nil, 0, d.conn.LocalAddr())
+ ret, err := d.dut.SendToWithErrno(ctx, t, d.remoteFD, nil, 0, d.conn.LocalAddr(t))
if ret != -1 {
- return fmt.Errorf("sendto after ICMP error succeeded unexpectedly, expected (%[1]d) %[1]v", d.wantErrno)
+ t.Fatalf("sendto after ICMP error succeeded unexpectedly, expected (%[1]d) %[1]v", d.wantErrno)
}
if err != d.wantErrno {
- return fmt.Errorf("sendto after ICMP error resulted in error (%[1]d) %[1]v, expected (%[2]d) %[2]v", err, d.wantErrno)
+ t.Fatalf("sendto after ICMP error resulted in error (%[1]d) %[1]v, expected (%[2]d) %[2]v", err, d.wantErrno)
}
}
- d.dut.SendTo(d.remoteFD, nil, 0, d.conn.LocalAddr())
- if _, err := d.conn.Expect(testbench.UDP{}, time.Second); err != nil {
- return fmt.Errorf("did not receive UDP packet as expected: %s", err)
+ d.dut.SendTo(t, d.remoteFD, nil, 0, d.conn.LocalAddr(t))
+ if _, err := d.conn.Expect(t, testbench.UDP{}, time.Second); err != nil {
+ t.Fatalf("did not receive UDP packet as expected: %s", err)
}
- return nil
}
-func testSockOpt(_ context.Context, d testData) error {
+func testSockOpt(_ context.Context, t *testing.T, d testData) {
// Check that there's no pending error on the clean socket.
- if errno := syscall.Errno(d.dut.GetSockOptInt(d.cleanFD, unix.SOL_SOCKET, unix.SO_ERROR)); errno != syscall.Errno(0) {
- return fmt.Errorf("unexpected error (%[1]d) %[1]v on clean socket", errno)
+ if errno := syscall.Errno(d.dut.GetSockOptInt(t, d.cleanFD, unix.SOL_SOCKET, unix.SO_ERROR)); errno != syscall.Errno(0) {
+ t.Fatalf("unexpected error (%[1]d) %[1]v on clean socket", errno)
}
- if errno := syscall.Errno(d.dut.GetSockOptInt(d.remoteFD, unix.SOL_SOCKET, unix.SO_ERROR)); errno != d.wantErrno {
- return fmt.Errorf("SO_ERROR sockopt after ICMP error is (%[1]d) %[1]v, expected (%[2]d) %[2]v", errno, d.wantErrno)
+ if errno := syscall.Errno(d.dut.GetSockOptInt(t, d.remoteFD, unix.SOL_SOCKET, unix.SO_ERROR)); errno != d.wantErrno {
+ t.Fatalf("SO_ERROR sockopt after ICMP error is (%[1]d) %[1]v, expected (%[2]d) %[2]v", errno, d.wantErrno)
}
// Check that after clearing socket error, sending doesn't fail.
- d.dut.SendTo(d.remoteFD, nil, 0, d.conn.LocalAddr())
- if _, err := d.conn.Expect(testbench.UDP{}, time.Second); err != nil {
- return fmt.Errorf("did not receive UDP packet as expected: %s", err)
+ d.dut.SendTo(t, d.remoteFD, nil, 0, d.conn.LocalAddr(t))
+ if _, err := d.conn.Expect(t, testbench.UDP{}, time.Second); err != nil {
+ t.Fatalf("did not receive UDP packet as expected: %s", err)
}
- return nil
}
// TestUDPICMPErrorPropagation tests that ICMP error messages in response to
@@ -227,31 +227,29 @@ func TestUDPICMPErrorPropagation(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- remoteFD, remotePort := dut.CreateBoundSocket(unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP("0.0.0.0"))
- defer dut.Close(remoteFD)
+ remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP("0.0.0.0"))
+ defer dut.Close(t, remoteFD)
// Create a second, clean socket on the DUT to ensure that the ICMP
// error messages only affect the sockets they are intended for.
- cleanFD, cleanPort := dut.CreateBoundSocket(unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP("0.0.0.0"))
- defer dut.Close(cleanFD)
+ cleanFD, cleanPort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP("0.0.0.0"))
+ defer dut.Close(t, cleanFD)
conn := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
if connect {
- dut.Connect(remoteFD, conn.LocalAddr())
- dut.Connect(cleanFD, conn.LocalAddr())
+ dut.Connect(t, remoteFD, conn.LocalAddr(t))
+ dut.Connect(t, cleanFD, conn.LocalAddr(t))
}
- dut.SendTo(remoteFD, nil, 0, conn.LocalAddr())
- udp, err := conn.Expect(testbench.UDP{}, time.Second)
+ dut.SendTo(t, remoteFD, nil, 0, conn.LocalAddr(t))
+ udp, err := conn.Expect(t, testbench.UDP{}, time.Second)
if err != nil {
t.Fatalf("did not receive message from DUT: %s", err)
}
- if err := sendICMPError(&conn, icmpErr, udp); err != nil {
- t.Fatal(err)
- }
+ sendICMPError(t, &conn, icmpErr, udp)
errDetectConn := &conn
if errDetect.useValidConn {
@@ -260,14 +258,12 @@ func TestUDPICMPErrorPropagation(t *testing.T) {
// interactions between it and the DUT should be independent of
// the ICMP error at least at the port level.
connClean := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- defer connClean.Close()
+ defer connClean.Close(t)
errDetectConn = &connClean
}
- if err := errDetect.f(context.Background(), testData{&dut, errDetectConn, remoteFD, remotePort, cleanFD, cleanPort, wantErrno}); err != nil {
- t.Fatal(err)
- }
+ errDetect.f(context.Background(), t, testData{&dut, errDetectConn, remoteFD, remotePort, cleanFD, cleanPort, wantErrno})
})
}
}
@@ -285,24 +281,24 @@ func TestICMPErrorDuringUDPRecv(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- remoteFD, remotePort := dut.CreateBoundSocket(unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP("0.0.0.0"))
- defer dut.Close(remoteFD)
+ remoteFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP("0.0.0.0"))
+ defer dut.Close(t, remoteFD)
// Create a second, clean socket on the DUT to ensure that the ICMP
// error messages only affect the sockets they are intended for.
- cleanFD, cleanPort := dut.CreateBoundSocket(unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP("0.0.0.0"))
- defer dut.Close(cleanFD)
+ cleanFD, cleanPort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP("0.0.0.0"))
+ defer dut.Close(t, cleanFD)
conn := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
if connect {
- dut.Connect(remoteFD, conn.LocalAddr())
- dut.Connect(cleanFD, conn.LocalAddr())
+ dut.Connect(t, remoteFD, conn.LocalAddr(t))
+ dut.Connect(t, cleanFD, conn.LocalAddr(t))
}
- dut.SendTo(remoteFD, nil, 0, conn.LocalAddr())
- udp, err := conn.Expect(testbench.UDP{}, time.Second)
+ dut.SendTo(t, remoteFD, nil, 0, conn.LocalAddr(t))
+ udp, err := conn.Expect(t, testbench.UDP{}, time.Second)
if err != nil {
t.Fatalf("did not receive message from DUT: %s", err)
}
@@ -316,7 +312,7 @@ func TestICMPErrorDuringUDPRecv(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
- ret, _, err := dut.RecvWithErrno(ctx, remoteFD, 100, 0)
+ ret, _, err := dut.RecvWithErrno(ctx, t, remoteFD, 100, 0)
if ret != -1 {
t.Errorf("recv during ICMP error succeeded unexpectedly, expected (%[1]d) %[1]v", wantErrno)
return
@@ -330,7 +326,7 @@ func TestICMPErrorDuringUDPRecv(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
- if ret, _, err := dut.RecvWithErrno(ctx, remoteFD, 100, 0); ret == -1 {
+ if ret, _, err := dut.RecvWithErrno(ctx, t, remoteFD, 100, 0); ret == -1 {
t.Errorf("recv after ICMP error failed with (%[1]d) %[1]", err)
}
}()
@@ -341,7 +337,7 @@ func TestICMPErrorDuringUDPRecv(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
- if ret, _, err := dut.RecvWithErrno(ctx, cleanFD, 100, 0); ret == -1 {
+ if ret, _, err := dut.RecvWithErrno(ctx, t, cleanFD, 100, 0); ret == -1 {
t.Errorf("recv on clean socket failed with (%[1]d) %[1]", err)
}
}()
@@ -352,12 +348,10 @@ func TestICMPErrorDuringUDPRecv(t *testing.T) {
// alternative is available.
time.Sleep(2 * time.Second)
- if err := sendICMPError(&conn, icmpErr, udp); err != nil {
- t.Fatal(err)
- }
+ sendICMPError(t, &conn, icmpErr, udp)
- conn.Send(testbench.UDP{DstPort: &cleanPort})
- conn.Send(testbench.UDP{})
+ conn.Send(t, testbench.UDP{DstPort: &cleanPort})
+ conn.Send(t, testbench.UDP{})
wg.Wait()
})
}
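testSockOpt above reads the pending socket error through the DUT's GetSockOptInt wrapper; on a local Linux socket the same check is a SO_ERROR getsockopt, and reading the option also clears it, which is why the subsequent send in the test is expected to succeed. A minimal sketch under that assumption (golang.org/x/sys/unix, error handling abbreviated, bare UDP socket only for illustration):

package main

import (
	"fmt"
	"log"
	"syscall"

	"golang.org/x/sys/unix"
)

// pendingError reports the socket's pending error, if any. Reading SO_ERROR
// also clears it, mirroring the "check that after clearing socket error,
// sending doesn't fail" step in the test above.
func pendingError(fd int) (syscall.Errno, error) {
	v, err := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_ERROR)
	if err != nil {
		return 0, err
	}
	return syscall.Errno(v), nil
}

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_UDP)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)
	errno, err := pendingError(fd)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("pending errno: %d (%v)\n", int(errno), errno)
}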
diff --git a/test/packetimpact/tests/udp_recv_mcast_bcast_test.go b/test/packetimpact/tests/udp_recv_mcast_bcast_test.go
index 263a54291..fcd202643 100644
--- a/test/packetimpact/tests/udp_recv_mcast_bcast_test.go
+++ b/test/packetimpact/tests/udp_recv_mcast_bcast_test.go
@@ -31,10 +31,10 @@ func init() {
func TestUDPRecvMulticastBroadcast(t *testing.T) {
dut := testbench.NewDUT(t)
defer dut.TearDown()
- boundFD, remotePort := dut.CreateBoundSocket(unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.IPv4(0, 0, 0, 0))
- defer dut.Close(boundFD)
+ boundFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.IPv4(0, 0, 0, 0))
+ defer dut.Close(t, boundFD)
conn := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- defer conn.Close()
+ defer conn.Close(t)
for _, bcastAddr := range []net.IP{
broadcastAddr(net.ParseIP(testbench.RemoteIPv4), net.CIDRMask(testbench.IPv4PrefixLength, 32)),
@@ -43,12 +43,13 @@ func TestUDPRecvMulticastBroadcast(t *testing.T) {
} {
payload := testbench.GenerateRandomPayload(t, 1<<10)
conn.SendIP(
+ t,
testbench.IPv4{DstAddr: testbench.Address(tcpip.Address(bcastAddr.To4()))},
testbench.UDP{},
&testbench.Payload{Bytes: payload},
)
t.Logf("Receiving packet sent to address: %s", bcastAddr)
- if got, want := string(dut.Recv(boundFD, int32(len(payload)), 0)), string(payload); got != want {
+ if got, want := string(dut.Recv(t, boundFD, int32(len(payload)), 0)), string(payload); got != want {
t.Errorf("received payload does not match sent payload got: %s, want: %s", got, want)
}
}
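The loop above relies on a broadcastAddr helper that is defined elsewhere in this test file and not visible in the diff; its job is to set every host bit of the subnet. A hedged sketch of what such a helper can look like, not necessarily the file's actual implementation:

package main

import (
	"fmt"
	"net"
)

// broadcastAddr returns the directed broadcast address for the subnet that
// contains ip, i.e. the IPv4 address with every host bit set to one.
func broadcastAddr(ip net.IP, mask net.IPMask) net.IP {
	ip4 := ip.To4()
	out := make(net.IP, len(ip4))
	for i := range ip4 {
		out[i] = ip4[i] | ^mask[i]
	}
	return out
}

func main() {
	ip := net.ParseIP("192.0.2.17")
	mask := net.CIDRMask(24, 32)
	fmt.Println(broadcastAddr(ip, mask)) // 192.0.2.255
}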
diff --git a/test/packetimpact/tests/udp_send_recv_dgram_test.go b/test/packetimpact/tests/udp_send_recv_dgram_test.go
index bd53ad90b..dc20275d6 100644
--- a/test/packetimpact/tests/udp_send_recv_dgram_test.go
+++ b/test/packetimpact/tests/udp_send_recv_dgram_test.go
@@ -29,10 +29,10 @@ func init() {
}
type udpConn interface {
- Send(testbench.UDP, ...testbench.Layer)
- ExpectData(testbench.UDP, testbench.Payload, time.Duration) (testbench.Layers, error)
- Drain()
- Close()
+ Send(*testing.T, testbench.UDP, ...testbench.Layer)
+ ExpectData(*testing.T, testbench.UDP, testbench.Payload, time.Duration) (testbench.Layers, error)
+ Drain(*testing.T)
+ Close(*testing.T)
}
func TestUDP(t *testing.T) {
@@ -51,21 +51,21 @@ func TestUDP(t *testing.T) {
} else {
addr = testbench.RemoteIPv6
}
- boundFD, remotePort := dut.CreateBoundSocket(unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP(addr))
- defer dut.Close(boundFD)
+ boundFD, remotePort := dut.CreateBoundSocket(t, unix.SOCK_DGRAM, unix.IPPROTO_UDP, net.ParseIP(addr))
+ defer dut.Close(t, boundFD)
var conn udpConn
var localAddr unix.Sockaddr
if isIPv4 {
v4Conn := testbench.NewUDPIPv4(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- localAddr = v4Conn.LocalAddr()
+ localAddr = v4Conn.LocalAddr(t)
conn = &v4Conn
} else {
v6Conn := testbench.NewUDPIPv6(t, testbench.UDP{DstPort: &remotePort}, testbench.UDP{SrcPort: &remotePort})
- localAddr = v6Conn.LocalAddr()
+ localAddr = v6Conn.LocalAddr(t)
conn = &v6Conn
}
- defer conn.Close()
+ defer conn.Close(t)
testCases := []struct {
name string
@@ -81,17 +81,17 @@ func TestUDP(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Run("Send", func(t *testing.T) {
- conn.Send(testbench.UDP{}, &testbench.Payload{Bytes: tc.payload})
- if got, want := string(dut.Recv(boundFD, int32(len(tc.payload)), 0)), string(tc.payload); got != want {
+ conn.Send(t, testbench.UDP{}, &testbench.Payload{Bytes: tc.payload})
+ if got, want := string(dut.Recv(t, boundFD, int32(len(tc.payload)), 0)), string(tc.payload); got != want {
t.Fatalf("received payload does not match sent payload got: %s, want: %s", got, want)
}
})
t.Run("Recv", func(t *testing.T) {
- conn.Drain()
- if got, want := int(dut.SendTo(boundFD, tc.payload, 0, localAddr)), len(tc.payload); got != want {
+ conn.Drain(t)
+ if got, want := int(dut.SendTo(t, boundFD, tc.payload, 0, localAddr)), len(tc.payload); got != want {
t.Fatalf("short write got: %d, want: %d", got, want)
}
- if _, err := conn.ExpectData(testbench.UDP{SrcPort: &remotePort}, testbench.Payload{Bytes: tc.payload}, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, testbench.UDP{SrcPort: &remotePort}, testbench.Payload{Bytes: tc.payload}, time.Second); err != nil {
t.Fatal(err)
}
})