-rw-r--r--  pkg/abi/linux/linux_abi_autogen_unsafe.go         |  26
-rw-r--r--  pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go   |   6
-rw-r--r--  pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go   |   6
-rw-r--r--  pkg/sentry/fsimpl/host/connected_endpoint_refs.go |   2
-rw-r--r--  pkg/sentry/fsimpl/host/inode_refs.go               |   2
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/inode_refs.go              |   2
-rw-r--r--  pkg/sentry/platform/ring0/defs_impl_arm64.go       |   3
-rw-r--r--  pkg/sentry/socket/netstack/netstack.go             | 137
-rw-r--r--  pkg/sentry/socket/netstack/netstack_vfs2.go        |  68
-rw-r--r--  pkg/sentry/socket/unix/socket_refs.go              |   2
-rw-r--r--  pkg/sentry/socket/unix/unix.go                     |   2
-rw-r--r--  pkg/sentry/socket/unix/unix_vfs2.go                |   2
12 files changed, 95 insertions(+), 163 deletions(-)
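
The bulk of this change moves the iptables socket options (IPT_SO_GET_INFO, IPT_SO_GET_ENTRIES, IPT_SO_SET_REPLACE, IPT_SO_SET_ADD_COUNTERS) out of the per-socket GetSockOpt/SetSockOpt wrappers in netstack.go and netstack_vfs2.go and into the generic getSockOptIP/setSockOptIP switches; to make that possible, the userspace destination address (outPtr) is now threaded through GetSockOpt. The sketch below models the resulting dispatch shape with simplified stand-in types, constants, and error values; it is not gVisor's actual API.

    package main

    import (
    	"errors"
    	"fmt"
    )

    // Simplified stand-ins; only the gating logic from the diff is modeled.
    const (
    	afInet           = 2  // AF_INET
    	sockRaw          = 3  // SOCK_RAW
    	iptSoGetInfo     = 64 // IPT_SO_GET_INFO (assumed value from ip_tables.h)
    	sizeOfIPTGetinfo = 84 // assumed sizeof(struct ipt_getinfo)
    )

    var (
    	errInvalidArgument      = errors.New("EINVAL")
    	errProtocolNotAvailable = errors.New("ENOPROTOOPT")
    )

    type sock struct{ family, skType int }

    // Type mirrors socket.SocketOps.Type(): (family, type, protocol).
    func (s *sock) Type() (int, int, int) { return s.family, s.skType, 0 }

    // getSockOptIP models the new SOL_IP handler: the option length is
    // validated first, then the option is gated on the socket being a raw
    // AF_INET socket; anything else is rejected with ENOPROTOOPT.
    func getSockOptIP(s *sock, name, outLen int) error {
    	switch name {
    	case iptSoGetInfo:
    		if outLen < sizeOfIPTGetinfo {
    			return errInvalidArgument
    		}
    		if family, skType, _ := s.Type(); family != afInet || skType != sockRaw {
    			return errProtocolNotAvailable
    		}
    		// The real handler would now call netfilter.GetInfo with outPtr.
    		return nil
    	default:
    		return errProtocolNotAvailable
    	}
    }

    func main() {
    	fmt.Println(getSockOptIP(&sock{afInet, 1 /* SOCK_STREAM */}, iptSoGetInfo, 84)) // ENOPROTOOPT
    	fmt.Println(getSockOptIP(&sock{afInet, sockRaw}, iptSoGetInfo, 84))             // <nil>
    }
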
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 0f1500e96..b918fabf2 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -152,12 +152,12 @@ func (s *Statx) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (s *Statx) Packed() bool {
- return s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed()
+ return s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (s *Statx) MarshalUnsafe(dst []byte) {
- if s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
+ if s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(s))
} else {
// Type Statx doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -234,7 +234,7 @@ func (s *Statx) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
// WriteTo implements io.WriterTo.WriteTo.
func (s *Statx) WriteTo(writer io.Writer) (int64, error) {
- if !s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
+ if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, s.SizeBytes())
s.MarshalBytes(buf)
@@ -709,7 +709,7 @@ func (f *FUSEHeaderIn) CopyIn(task marshal.Task, addr usermem.Addr) (int, error)
// WriteTo implements io.WriterTo.WriteTo.
func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
- if !f.Unique.Packed() && f.Opcode.Packed() {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
// Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, f.SizeBytes())
f.MarshalBytes(buf)
@@ -2035,7 +2035,7 @@ func (i *IPTEntry) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (i *IPTEntry) UnmarshalUnsafe(src []byte) {
- if i.IP.Packed() && i.Counters.Packed() {
+ if i.Counters.Packed() && i.IP.Packed() {
safecopy.CopyOut(unsafe.Pointer(i), src)
} else {
// Type IPTEntry doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -2223,7 +2223,7 @@ func (i *IPTIP) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (i *IPTIP) UnmarshalUnsafe(src []byte) {
- if i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
safecopy.CopyOut(unsafe.Pointer(i), src)
} else {
// Type IPTIP doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -2234,7 +2234,7 @@ func (i *IPTIP) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (i *IPTIP) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
- if !i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
+ if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
// Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
i.MarshalBytes(buf) // escapes: fallback.
@@ -2264,7 +2264,7 @@ func (i *IPTIP) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (i *IPTIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
- if !i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
+ if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
// Type IPTIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
@@ -3055,7 +3055,7 @@ func (i *IP6TEntry) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (i *IP6TEntry) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
- if !i.IPv6.Packed() && i.Counters.Packed() {
+ if !i.Counters.Packed() && i.IPv6.Packed() {
// Type IP6TEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
@@ -3081,7 +3081,7 @@ func (i *IP6TEntry) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
// WriteTo implements io.WriterTo.WriteTo.
func (i *IP6TEntry) WriteTo(writer io.Writer) (int64, error) {
- if !i.IPv6.Packed() && i.Counters.Packed() {
+ if !i.Counters.Packed() && i.IPv6.Packed() {
// Type IP6TEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, i.SizeBytes())
i.MarshalBytes(buf)
@@ -3196,7 +3196,7 @@ func (i *IP6TIP) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (i *IP6TIP) Packed() bool {
- return i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed()
+ return i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
@@ -3222,7 +3222,7 @@ func (i *IP6TIP) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (i *IP6TIP) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
- if !i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
+ if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
// Type IP6TIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
i.MarshalBytes(buf) // escapes: fallback.
@@ -3278,7 +3278,7 @@ func (i *IP6TIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
// WriteTo implements io.WriterTo.WriteTo.
func (i *IP6TIP) WriteTo(writer io.Writer) (int64, error) {
- if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if !i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
// Type IP6TIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, i.SizeBytes())
i.MarshalBytes(buf)
diff --git a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
index 5e888bdd7..6890564f1 100644
--- a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
@@ -288,7 +288,7 @@ func (s *Stat) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (s *Stat) Packed() bool {
- return s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed()
+ return s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
@@ -314,7 +314,7 @@ func (s *Stat) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (s *Stat) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
- if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ if !s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
s.MarshalBytes(buf) // escapes: fallback.
@@ -370,7 +370,7 @@ func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
// WriteTo implements io.WriterTo.WriteTo.
func (s *Stat) WriteTo(writer io.Writer) (int64, error) {
- if !s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
+ if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, s.SizeBytes())
s.MarshalBytes(buf)
diff --git a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
index 7c4fe4ac4..f630c7148 100644
--- a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
@@ -300,7 +300,7 @@ func (s *Stat) Packed() bool {
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (s *Stat) MarshalUnsafe(dst []byte) {
- if s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
+ if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(s))
} else {
// Type Stat doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -310,7 +310,7 @@ func (s *Stat) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *Stat) UnmarshalUnsafe(src []byte) {
- if s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
+ if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
safecopy.CopyOut(unsafe.Pointer(s), src)
} else {
// Type Stat doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -351,7 +351,7 @@ func (s *Stat) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
- if !s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
+ if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
diff --git a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
index 1f13a2608..3b7bf599e 100644
--- a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
+++ b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
@@ -1,11 +1,11 @@
package host
import (
+ "runtime"
"sync/atomic"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
- "runtime"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/host/inode_refs.go b/pkg/sentry/fsimpl/host/inode_refs.go
index 2c67758a0..55c0fb3a9 100644
--- a/pkg/sentry/fsimpl/host/inode_refs.go
+++ b/pkg/sentry/fsimpl/host/inode_refs.go
@@ -1,11 +1,11 @@
package host
import (
+ "runtime"
"sync/atomic"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
- "runtime"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/tmpfs/inode_refs.go b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
index 3245ede1f..8b7ff185f 100644
--- a/pkg/sentry/fsimpl/tmpfs/inode_refs.go
+++ b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
@@ -1,11 +1,11 @@
package tmpfs
import (
+ "runtime"
"sync/atomic"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
- "runtime"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/platform/ring0/defs_impl_arm64.go b/pkg/sentry/platform/ring0/defs_impl_arm64.go
index 9fb351879..8ebfbfdb6 100644
--- a/pkg/sentry/platform/ring0/defs_impl_arm64.go
+++ b/pkg/sentry/platform/ring0/defs_impl_arm64.go
@@ -1,9 +1,10 @@
package ring0
import (
+ "gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables"
+
"fmt"
"gvisor.dev/gvisor/pkg/sentry/arch"
- "gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables"
"gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
diff --git a/pkg/sentry/socket/netstack/netstack.go b/pkg/sentry/socket/netstack/netstack.go
index 626195be2..9e2ebc7d4 100644
--- a/pkg/sentry/socket/netstack/netstack.go
+++ b/pkg/sentry/socket/netstack/netstack.go
@@ -983,53 +983,12 @@ func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr us
return &val, nil
}
- if s.skType == linux.SOCK_RAW && level == linux.IPPROTO_IP {
- switch name {
- case linux.IPT_SO_GET_INFO:
- if outLen < linux.SizeOfIPTGetinfo {
- return nil, syserr.ErrInvalidArgument
- }
- if s.family != linux.AF_INET {
- return nil, syserr.ErrInvalidArgument
- }
-
- stack := inet.StackFromContext(t)
- if stack == nil {
- return nil, syserr.ErrNoDevice
- }
- info, err := netfilter.GetInfo(t, stack.(*Stack).Stack, outPtr)
- if err != nil {
- return nil, err
- }
- return &info, nil
-
- case linux.IPT_SO_GET_ENTRIES:
- if outLen < linux.SizeOfIPTGetEntries {
- return nil, syserr.ErrInvalidArgument
- }
- if s.family != linux.AF_INET {
- return nil, syserr.ErrInvalidArgument
- }
-
- stack := inet.StackFromContext(t)
- if stack == nil {
- return nil, syserr.ErrNoDevice
- }
- entries, err := netfilter.GetEntries4(t, stack.(*Stack).Stack, outPtr, outLen)
- if err != nil {
- return nil, err
- }
- return &entries, nil
-
- }
- }
-
- return GetSockOpt(t, s, s.Endpoint, s.family, s.skType, level, name, outLen)
+ return GetSockOpt(t, s, s.Endpoint, s.family, s.skType, level, name, outPtr, outLen)
}
// GetSockOpt can be used to implement the linux syscall getsockopt(2) for
// sockets backed by a commonEndpoint.
-func GetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, family int, skType linux.SockType, level, name, outLen int) (marshal.Marshallable, *syserr.Error) {
+func GetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, family int, skType linux.SockType, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
switch level {
case linux.SOL_SOCKET:
return getSockOptSocket(t, s, ep, family, skType, name, outLen)
@@ -1041,7 +1000,7 @@ func GetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, family in
return getSockOptIPv6(t, ep, name, outLen)
case linux.SOL_IP:
- return getSockOptIP(t, ep, name, outLen, family)
+ return getSockOptIP(t, s, ep, name, outPtr, outLen, family)
case linux.SOL_UDP,
linux.SOL_ICMPV6,
@@ -1560,7 +1519,7 @@ func getSockOptIPv6(t *kernel.Task, ep commonEndpoint, name, outLen int) (marsha
}
// getSockOptIP implements GetSockOpt when level is SOL_IP.
-func getSockOptIP(t *kernel.Task, ep commonEndpoint, name, outLen int, family int) (marshal.Marshallable, *syserr.Error) {
+func getSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, outPtr usermem.Addr, outLen int, family int) (marshal.Marshallable, *syserr.Error) {
switch name {
case linux.IP_TTL:
if outLen < sizeOfInt32 {
@@ -1676,6 +1635,46 @@ func getSockOptIP(t *kernel.Task, ep commonEndpoint, name, outLen int, family in
a, _ := ConvertAddress(linux.AF_INET, tcpip.FullAddress(v))
return a.(*linux.SockAddrInet), nil
+ case linux.IPT_SO_GET_INFO:
+ if outLen < linux.SizeOfIPTGetinfo {
+ return nil, syserr.ErrInvalidArgument
+ }
+
+ // Only valid for raw IPv4 sockets.
+ if family, skType, _ := s.Type(); family != linux.AF_INET || skType != linux.SOCK_RAW {
+ return nil, syserr.ErrProtocolNotAvailable
+ }
+
+ stack := inet.StackFromContext(t)
+ if stack == nil {
+ return nil, syserr.ErrNoDevice
+ }
+ info, err := netfilter.GetInfo(t, stack.(*Stack).Stack, outPtr)
+ if err != nil {
+ return nil, err
+ }
+ return &info, nil
+
+ case linux.IPT_SO_GET_ENTRIES:
+ if outLen < linux.SizeOfIPTGetEntries {
+ return nil, syserr.ErrInvalidArgument
+ }
+
+ // Only valid for raw IPv4 sockets.
+ if family, skType, _ := s.Type(); family != linux.AF_INET || skType != linux.SOCK_RAW {
+ return nil, syserr.ErrProtocolNotAvailable
+ }
+
+ stack := inet.StackFromContext(t)
+ if stack == nil {
+ return nil, syserr.ErrNoDevice
+ }
+ entries, err := netfilter.GetEntries4(t, stack.(*Stack).Stack, outPtr, outLen)
+ if err != nil {
+ return nil, err
+ }
+ return &entries, nil
+
default:
emitUnimplementedEventIP(t, name)
}
@@ -1709,29 +1708,6 @@ func (s *SocketOperations) SetSockOpt(t *kernel.Task, level int, name int, optVa
return nil
}
- if s.skType == linux.SOCK_RAW && level == linux.SOL_IP {
- switch name {
- case linux.IPT_SO_SET_REPLACE:
- if len(optVal) < linux.SizeOfIPTReplace {
- return syserr.ErrInvalidArgument
- }
- if s.family != linux.AF_INET {
- return syserr.ErrInvalidArgument
- }
-
- stack := inet.StackFromContext(t)
- if stack == nil {
- return syserr.ErrNoDevice
- }
- // Stack must be a netstack stack.
- return netfilter.SetEntries(stack.(*Stack).Stack, optVal)
-
- case linux.IPT_SO_SET_ADD_COUNTERS:
- // TODO(gvisor.dev/issue/170): Counter support.
- return nil
- }
- }
-
return SetSockOpt(t, s, s.Endpoint, level, name, optVal)
}
@@ -1749,7 +1725,7 @@ func SetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, level int
return setSockOptIPv6(t, ep, name, optVal)
case linux.SOL_IP:
- return setSockOptIP(t, ep, name, optVal)
+ return setSockOptIP(t, s, ep, name, optVal)
case linux.SOL_UDP,
linux.SOL_ICMPV6,
@@ -2160,7 +2136,7 @@ func parseIntOrChar(buf []byte) (int32, *syserr.Error) {
}
// setSockOptIP implements SetSockOpt when level is SOL_IP.
-func setSockOptIP(t *kernel.Task, ep commonEndpoint, name int, optVal []byte) *syserr.Error {
+func setSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, optVal []byte) *syserr.Error {
switch name {
case linux.IP_MULTICAST_TTL:
v, err := parseIntOrChar(optVal)
@@ -2280,6 +2256,27 @@ func setSockOptIP(t *kernel.Task, ep commonEndpoint, name int, optVal []byte) *s
}
return syserr.TranslateNetstackError(ep.SetSockOptBool(tcpip.IPHdrIncludedOption, v != 0))
+ case linux.IPT_SO_SET_REPLACE:
+ if len(optVal) < linux.SizeOfIPTReplace {
+ return syserr.ErrInvalidArgument
+ }
+
+ // Only valid for raw IPv4 sockets.
+ if family, skType, _ := s.Type(); family != linux.AF_INET || skType != linux.SOCK_RAW {
+ return syserr.ErrProtocolNotAvailable
+ }
+
+ stack := inet.StackFromContext(t)
+ if stack == nil {
+ return syserr.ErrNoDevice
+ }
+ // Stack must be a netstack stack.
+ return netfilter.SetEntries(stack.(*Stack).Stack, optVal)
+
+ case linux.IPT_SO_SET_ADD_COUNTERS:
+ // TODO(gvisor.dev/issue/170): Counter support.
+ return nil
+
case linux.IP_ADD_SOURCE_MEMBERSHIP,
linux.IP_BIND_ADDRESS_NO_PORT,
linux.IP_BLOCK_SOURCE,
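
With the relocation above, the raw-IPv4 check now sits next to the option handling itself, and a socket that is not a raw AF_INET socket gets ENOPROTOOPT rather than EINVAL for these options. For reference, here is a minimal userspace sketch of the IPT_SO_GET_INFO query that this path serves (Linux/amd64, needs CAP_NET_RAW; the numeric constant and the 84-byte ipt_getinfo layout are assumptions taken from linux/netfilter_ipv4/ip_tables.h, not from this diff).

    package main

    import (
    	"encoding/binary"
    	"fmt"
    	"unsafe"

    	"golang.org/x/sys/unix"
    )

    const iptSoGetInfo = 64 // IPT_SO_GET_INFO, assumed value from ip_tables.h

    func main() {
    	// iptables options are only honored on raw AF_INET sockets, which is
    	// exactly the gate the relocated getSockOptIP code enforces.
    	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_RAW, unix.IPPROTO_ICMP)
    	if err != nil {
    		panic(err) // typically EPERM without CAP_NET_RAW
    	}
    	defer unix.Close(fd)

    	// struct ipt_getinfo: name[32], valid_hooks, hook_entry[5],
    	// underflow[5], num_entries, size => 84 bytes. The table name must
    	// be filled in before the call.
    	var info [84]byte
    	copy(info[:], "filter")
    	optLen := uint32(len(info))

    	_, _, errno := unix.Syscall6(unix.SYS_GETSOCKOPT,
    		uintptr(fd), unix.SOL_IP, iptSoGetInfo,
    		uintptr(unsafe.Pointer(&info[0])),
    		uintptr(unsafe.Pointer(&optLen)), 0)
    	if errno != 0 {
    		panic(errno)
    	}

    	numEntries := binary.LittleEndian.Uint32(info[76:80]) // offset of num_entries
    	size := binary.LittleEndian.Uint32(info[80:84])       // offset of size
    	fmt.Printf("filter table: %d entries, %d bytes\n", numEntries, size)
    }
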
diff --git a/pkg/sentry/socket/netstack/netstack_vfs2.go b/pkg/sentry/socket/netstack/netstack_vfs2.go
index 1db8ae491..59fa4c58f 100644
--- a/pkg/sentry/socket/netstack/netstack_vfs2.go
+++ b/pkg/sentry/socket/netstack/netstack_vfs2.go
@@ -21,10 +21,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/arch"
fslock "gvisor.dev/gvisor/pkg/sentry/fs/lock"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/sockfs"
- "gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/socket"
- "gvisor.dev/gvisor/pkg/sentry/socket/netfilter"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/syserror"
@@ -233,48 +231,7 @@ func (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.
return &val, nil
}
- if s.skType == linux.SOCK_RAW && level == linux.IPPROTO_IP {
- switch name {
- case linux.IPT_SO_GET_INFO:
- if outLen < linux.SizeOfIPTGetinfo {
- return nil, syserr.ErrInvalidArgument
- }
- if s.family != linux.AF_INET {
- return nil, syserr.ErrInvalidArgument
- }
-
- stack := inet.StackFromContext(t)
- if stack == nil {
- return nil, syserr.ErrNoDevice
- }
- info, err := netfilter.GetInfo(t, stack.(*Stack).Stack, outPtr)
- if err != nil {
- return nil, err
- }
- return &info, nil
-
- case linux.IPT_SO_GET_ENTRIES:
- if outLen < linux.SizeOfIPTGetEntries {
- return nil, syserr.ErrInvalidArgument
- }
- if s.family != linux.AF_INET {
- return nil, syserr.ErrInvalidArgument
- }
-
- stack := inet.StackFromContext(t)
- if stack == nil {
- return nil, syserr.ErrNoDevice
- }
- entries, err := netfilter.GetEntries4(t, stack.(*Stack).Stack, outPtr, outLen)
- if err != nil {
- return nil, err
- }
- return &entries, nil
-
- }
- }
-
- return GetSockOpt(t, s, s.Endpoint, s.family, s.skType, level, name, outLen)
+ return GetSockOpt(t, s, s.Endpoint, s.family, s.skType, level, name, outPtr, outLen)
}
// SetSockOpt implements the linux syscall setsockopt(2) for sockets backed by
@@ -304,29 +261,6 @@ func (s *SocketVFS2) SetSockOpt(t *kernel.Task, level int, name int, optVal []by
return nil
}
- if s.skType == linux.SOCK_RAW && level == linux.SOL_IP {
- switch name {
- case linux.IPT_SO_SET_REPLACE:
- if len(optVal) < linux.SizeOfIPTReplace {
- return syserr.ErrInvalidArgument
- }
- if s.family != linux.AF_INET {
- return syserr.ErrInvalidArgument
- }
-
- stack := inet.StackFromContext(t)
- if stack == nil {
- return syserr.ErrNoDevice
- }
- // Stack must be a netstack stack.
- return netfilter.SetEntries(stack.(*Stack).Stack, optVal)
-
- case linux.IPT_SO_SET_ADD_COUNTERS:
- // TODO(gvisor.dev/issue/170): Counter support.
- return nil
- }
- }
-
return SetSockOpt(t, s, s.Endpoint, level, name, optVal)
}
diff --git a/pkg/sentry/socket/unix/socket_refs.go b/pkg/sentry/socket/unix/socket_refs.go
index 6ed7b1151..4c6ec186b 100644
--- a/pkg/sentry/socket/unix/socket_refs.go
+++ b/pkg/sentry/socket/unix/socket_refs.go
@@ -1,11 +1,11 @@
package unix
import (
+ "runtime"
"sync/atomic"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
- "runtime"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/socket/unix/unix.go b/pkg/sentry/socket/unix/unix.go
index b7e8e4325..0a7a26495 100644
--- a/pkg/sentry/socket/unix/unix.go
+++ b/pkg/sentry/socket/unix/unix.go
@@ -194,7 +194,7 @@ func (s *SocketOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO,
// GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by
// a transport.Endpoint.
func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
- return netstack.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outLen)
+ return netstack.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outPtr, outLen)
}
// Listen implements the linux syscall listen(2) for sockets backed by
diff --git a/pkg/sentry/socket/unix/unix_vfs2.go b/pkg/sentry/socket/unix/unix_vfs2.go
index d066ef8ab..65a285b8f 100644
--- a/pkg/sentry/socket/unix/unix_vfs2.go
+++ b/pkg/sentry/socket/unix/unix_vfs2.go
@@ -91,7 +91,7 @@ func NewFileDescription(ep transport.Endpoint, stype linux.SockType, flags uint3
// GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by
// a transport.Endpoint.
func (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
- return netstack.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outLen)
+ return netstack.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outPtr, outLen)
}
// blockingAccept implements a blocking version of accept(2), that is, if no