author     gVisor bot <gvisor-bot@google.com>  2021-03-29 20:35:44 +0000
committer  gVisor bot <gvisor-bot@google.com>  2021-03-29 20:35:44 +0000
commit     08cc017c088017546ed712cce700bf4374c864c0 (patch)
tree       af024e69d8855f4f867ef435ced35532b368a981 /pkg/sentry/syscalls
parent     6a422755602daeaef4be60969c1acddc8b7b3041 (diff)
parent     8a2f7e716dcc62f04d2808e8ade34941c94fc956 (diff)
Merge release-20210322.0-29-g8a2f7e716 (automated)
Diffstat (limited to 'pkg/sentry/syscalls')
-rw-r--r--  pkg/sentry/syscalls/linux/linux64.go  6
-rw-r--r--  pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go  108
-rw-r--r--  pkg/sentry/syscalls/linux/sigset.go  16
-rw-r--r--  pkg/sentry/syscalls/linux/sys_aio.go  17
-rw-r--r--  pkg/sentry/syscalls/linux/sys_file.go  34
-rw-r--r--  pkg/sentry/syscalls/linux/sys_futex.go  12
-rw-r--r--  pkg/sentry/syscalls/linux/sys_getdents.go  3
-rw-r--r--  pkg/sentry/syscalls/linux/sys_mempolicy.go  15
-rw-r--r--  pkg/sentry/syscalls/linux/sys_mmap.go  15
-rw-r--r--  pkg/sentry/syscalls/linux/sys_mount.go  7
-rw-r--r--  pkg/sentry/syscalls/linux/sys_pipe.go  4
-rw-r--r--  pkg/sentry/syscalls/linux/sys_poll.go  18
-rw-r--r--  pkg/sentry/syscalls/linux/sys_random.go  4
-rw-r--r--  pkg/sentry/syscalls/linux/sys_rlimit.go  6
-rw-r--r--  pkg/sentry/syscalls/linux/sys_seccomp.go  10
-rw-r--r--  pkg/sentry/syscalls/linux/sys_sem.go  6
-rw-r--r--  pkg/sentry/syscalls/linux/sys_signal.go  4
-rw-r--r--  pkg/sentry/syscalls/linux/sys_socket.go  29
-rw-r--r--  pkg/sentry/syscalls/linux/sys_stat.go  10
-rw-r--r--  pkg/sentry/syscalls/linux/sys_thread.go  9
-rw-r--r--  pkg/sentry/syscalls/linux/sys_time.go  12
-rw-r--r--  pkg/sentry/syscalls/linux/sys_xattr.go  12
-rw-r--r--  pkg/sentry/syscalls/linux/timespec.go  28
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/aio.go  16
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/execve.go  5
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/filesystem.go  19
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/getdents.go  21
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/mmap.go  7
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/mount.go  9
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/path.go  5
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/pipe.go  5
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/poll.go  25
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/setstat.go  11
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/signal.go  5
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/socket.go  30
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/stat.go  9
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go  60
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/xattr.go  11
38 files changed, 324 insertions, 299 deletions
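
This merge pulls in a mechanical refactor: the syscall handlers and the generated ABI marshalling code stop importing pkg/usermem for address and byte-order primitives and use pkg/hostarch instead (Addr, ByteOrder, PageSize, AccessType, AnyAccess); pkg/usermem remains only where I/O sequences and IOOpts are needed. As rough orientation for the hunks below, here is a minimal standalone sketch — not part of the diff, assuming the gvisor.dev/gvisor module is importable — that exercises only identifiers visible in this change:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	// hostarch.Addr replaces usermem.Addr as the guest (user) address type;
	// arithmetic and page rounding are unchanged.
	addr := hostarch.Addr(0x7f0000001234)
	rounded, ok := addr.RoundUp()
	fmt.Println(rounded, ok, hostarch.PageSize)

	// hostarch.ByteOrder replaces usermem.ByteOrder in the generated
	// MarshalBytes/UnmarshalBytes code.
	buf := make([]byte, 8)
	hostarch.ByteOrder.PutUint64(buf, uint64(addr))
	fmt.Println(hostarch.ByteOrder.Uint64(buf))

	// hostarch.AccessType/AnyAccess replace their usermem counterparts in
	// the mmap/mprotect argument handling (see sys_mmap.go below).
	perms := hostarch.AccessType{Read: true, Write: true}
	fmt.Println(perms, hostarch.AnyAccess)
}
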
diff --git a/pkg/sentry/syscalls/linux/linux64.go b/pkg/sentry/syscalls/linux/linux64.go
index ac53a0c0e..2d2212605 100644
--- a/pkg/sentry/syscalls/linux/linux64.go
+++ b/pkg/sentry/syscalls/linux/linux64.go
@@ -18,11 +18,11 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/syscalls"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
const (
@@ -405,7 +405,7 @@ var AMD64 = &kernel.SyscallTable{
434: syscalls.ErrorWithEvent("pidfd_open", syserror.ENOSYS, "", nil),
435: syscalls.ErrorWithEvent("clone3", syserror.ENOSYS, "", nil),
},
- Emulate: map[usermem.Addr]uintptr{
+ Emulate: map[hostarch.Addr]uintptr{
0xffffffffff600000: 96, // vsyscall gettimeofday(2)
0xffffffffff600400: 201, // vsyscall time(2)
0xffffffffff600800: 309, // vsyscall getcpu(2)
@@ -723,7 +723,7 @@ var ARM64 = &kernel.SyscallTable{
434: syscalls.ErrorWithEvent("pidfd_open", syserror.ENOSYS, "", nil),
435: syscalls.ErrorWithEvent("clone3", syserror.ENOSYS, "", nil),
},
- Emulate: map[usermem.Addr]uintptr{},
+ Emulate: map[hostarch.Addr]uintptr{},
Missing: func(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, error) {
t.Kernel().EmitUnimplementedEvent(t)
return 0, syserror.ENOSYS
diff --git a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
index bd13e39f2..0eb2e184b 100644
--- a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
@@ -10,9 +10,9 @@ package linux
import (
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -70,7 +70,7 @@ func (d *direntHdr) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (d *direntHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (d *direntHdr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
d.MarshalBytes(buf) // escapes: fallback.
@@ -79,13 +79,13 @@ func (d *direntHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (d *direntHdr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (d *direntHdr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return d.CopyOutN(cc, addr, d.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (d *direntHdr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (d *direntHdr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Type direntHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -111,21 +111,21 @@ func (o *oldDirentHdr) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (o *oldDirentHdr) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(o.Ino))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(o.Ino))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(o.Off))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(o.Off))
dst = dst[8:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(o.Reclen))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(o.Reclen))
dst = dst[2:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (o *oldDirentHdr) UnmarshalBytes(src []byte) {
- o.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ o.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- o.Off = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ o.Off = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- o.Reclen = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ o.Reclen = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
}
@@ -149,7 +149,7 @@ func (o *oldDirentHdr) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (o *oldDirentHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (o *oldDirentHdr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
o.MarshalBytes(buf) // escapes: fallback.
@@ -158,13 +158,13 @@ func (o *oldDirentHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (o *oldDirentHdr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (o *oldDirentHdr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return o.CopyOutN(cc, addr, o.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (o *oldDirentHdr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (o *oldDirentHdr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Type oldDirentHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -190,17 +190,17 @@ func (r *rlimit64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (r *rlimit64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.Cur))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.Cur))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.Max))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.Max))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (r *rlimit64) UnmarshalBytes(src []byte) {
- r.Cur = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ r.Cur = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.Max = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ r.Max = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -222,7 +222,7 @@ func (r *rlimit64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (r *rlimit64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (r *rlimit64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -239,13 +239,13 @@ func (r *rlimit64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (r *rlimit64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (r *rlimit64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return r.CopyOutN(cc, addr, r.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (r *rlimit64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (r *rlimit64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -283,13 +283,13 @@ func (s *SchedParam) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SchedParam) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.schedPriority))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.schedPriority))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SchedParam) UnmarshalBytes(src []byte) {
- s.schedPriority = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.schedPriority = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -311,7 +311,7 @@ func (s *SchedParam) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SchedParam) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SchedParam) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -328,13 +328,13 @@ func (s *SchedParam) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SchedParam) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SchedParam) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SchedParam) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SchedParam) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -373,21 +373,21 @@ func (u *userSockFprog) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (u *userSockFprog) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(u.Len))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(u.Len))
dst = dst[2:]
// Padding: dst[:sizeof(byte)*6] ~= [6]byte{0}
dst = dst[1*(6):]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Filter))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Filter))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (u *userSockFprog) UnmarshalBytes(src []byte) {
- u.Len = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ u.Len = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
// Padding: ~ copy([6]byte(u._), src[:sizeof(byte)*6])
src = src[1*(6):]
- u.Filter = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ u.Filter = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -409,7 +409,7 @@ func (u *userSockFprog) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *userSockFprog) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *userSockFprog) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -426,13 +426,13 @@ func (u *userSockFprog) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *userSockFprog) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *userSockFprog) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *userSockFprog) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *userSockFprog) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -470,21 +470,21 @@ func (m *MessageHeader64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (m *MessageHeader64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
@@ -492,21 +492,21 @@ func (m *MessageHeader64) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (m *MessageHeader64) UnmarshalBytes(src []byte) {
- m.Name = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.Name = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ m.NameLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.Iov = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.IovLen = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.Control = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.Control = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.ControlLen = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.Flags = int32(usermem.ByteOrder.Uint32(src[:4]))
+ m.Flags = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -530,7 +530,7 @@ func (m *MessageHeader64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -547,13 +547,13 @@ func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -594,7 +594,7 @@ func (m *multipleMessageHeader64) SizeBytes() int {
func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()])
dst = dst[m.msgHdr.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
@@ -604,7 +604,7 @@ func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()])
src = src[m.msgHdr.SizeBytes():]
- m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ m.msgLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -638,7 +638,7 @@ func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !m.msgHdr.Packed() {
// Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
@@ -662,13 +662,13 @@ func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !m.msgHdr.Packed() {
// Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
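
The regenerated CopyIn/CopyOut/CopyOutN methods above all follow the same shape: for a non-packed type, marshal into a task scratch buffer and then move raw bytes to or from the guest address, which is now typed hostarch.Addr. A minimal hand-written equivalent for a hypothetical fooHeader type is sketched below; the marshal.CopyContext methods it uses (CopyScratchBuffer, CopyInBytes, CopyOutBytes) are the ones the generated code calls, and everything else is illustrative rather than part of this change:

package linux

import (
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/marshal"
)

// fooHeader is a hypothetical, non-packed ABI struct used only to
// illustrate the generated marshalling pattern.
type fooHeader struct {
	Ino uint64
	Off uint64
}

// SizeBytes returns the size of the wire format in bytes.
func (f *fooHeader) SizeBytes() int { return 16 }

// MarshalBytes serializes f into dst using the guest byte order.
func (f *fooHeader) MarshalBytes(dst []byte) {
	hostarch.ByteOrder.PutUint64(dst[:8], f.Ino)
	hostarch.ByteOrder.PutUint64(dst[8:16], f.Off)
}

// UnmarshalBytes deserializes f from src using the guest byte order.
func (f *fooHeader) UnmarshalBytes(src []byte) {
	f.Ino = hostarch.ByteOrder.Uint64(src[:8])
	f.Off = hostarch.ByteOrder.Uint64(src[8:16])
}

// CopyOut mirrors the generated fallback path: marshal into a scratch
// buffer, then copy the bytes out to the guest address addr.
func (f *fooHeader) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
	buf := cc.CopyScratchBuffer(f.SizeBytes())
	f.MarshalBytes(buf)
	return cc.CopyOutBytes(addr, buf)
}

// CopyIn is the inverse: copy the bytes in, then unmarshal them. As in
// the generated code, a short copy-in leaves f partially populated.
func (f *fooHeader) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
	buf := cc.CopyScratchBuffer(f.SizeBytes())
	n, err := cc.CopyInBytes(addr, buf)
	f.UnmarshalBytes(buf)
	return n, err
}
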
diff --git a/pkg/sentry/syscalls/linux/sigset.go b/pkg/sentry/syscalls/linux/sigset.go
index 434559b80..e8c2d8f9e 100644
--- a/pkg/sentry/syscalls/linux/sigset.go
+++ b/pkg/sentry/syscalls/linux/sigset.go
@@ -16,9 +16,9 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// CopyInSigSet copies in a sigset_t, checks its size, and ensures that KILL and
@@ -27,7 +27,7 @@ import (
// TODO(gvisor.dev/issue/1624): This is only exported because
// syscalls/vfs2/signal.go depends on it. Once vfs1 is deleted and the vfs2
// syscalls are moved into this package, then they can be unexported.
-func CopyInSigSet(t *kernel.Task, sigSetAddr usermem.Addr, size uint) (linux.SignalSet, error) {
+func CopyInSigSet(t *kernel.Task, sigSetAddr hostarch.Addr, size uint) (linux.SignalSet, error) {
if size != linux.SignalSetSize {
return 0, syserror.EINVAL
}
@@ -35,14 +35,14 @@ func CopyInSigSet(t *kernel.Task, sigSetAddr usermem.Addr, size uint) (linux.Sig
if _, err := t.CopyInBytes(sigSetAddr, b); err != nil {
return 0, err
}
- mask := usermem.ByteOrder.Uint64(b[:])
+ mask := hostarch.ByteOrder.Uint64(b[:])
return linux.SignalSet(mask) &^ kernel.UnblockableSignals, nil
}
// copyOutSigSet copies out a sigset_t.
-func copyOutSigSet(t *kernel.Task, sigSetAddr usermem.Addr, mask linux.SignalSet) error {
+func copyOutSigSet(t *kernel.Task, sigSetAddr hostarch.Addr, mask linux.SignalSet) error {
b := t.CopyScratchBuffer(8)
- usermem.ByteOrder.PutUint64(b, uint64(mask))
+ hostarch.ByteOrder.PutUint64(b, uint64(mask))
_, err := t.CopyOutBytes(sigSetAddr, b)
return err
}
@@ -55,15 +55,15 @@ func copyOutSigSet(t *kernel.Task, sigSetAddr usermem.Addr, mask linux.SignalSet
// };
//
// and returns sigset_addr and size.
-func copyInSigSetWithSize(t *kernel.Task, addr usermem.Addr) (usermem.Addr, uint, error) {
+func copyInSigSetWithSize(t *kernel.Task, addr hostarch.Addr) (hostarch.Addr, uint, error) {
switch t.Arch().Width() {
case 8:
in := t.CopyScratchBuffer(16)
if _, err := t.CopyInBytes(addr, in); err != nil {
return 0, 0, err
}
- maskAddr := usermem.Addr(usermem.ByteOrder.Uint64(in[0:]))
- maskSize := uint(usermem.ByteOrder.Uint64(in[8:]))
+ maskAddr := hostarch.Addr(hostarch.ByteOrder.Uint64(in[0:]))
+ maskSize := uint(hostarch.ByteOrder.Uint64(in[8:]))
return maskAddr, maskSize, nil
default:
return 0, 0, syserror.ENOSYS
diff --git a/pkg/sentry/syscalls/linux/sys_aio.go b/pkg/sentry/syscalls/linux/sys_aio.go
index c2285f796..70e8569a8 100644
--- a/pkg/sentry/syscalls/linux/sys_aio.go
+++ b/pkg/sentry/syscalls/linux/sys_aio.go
@@ -17,6 +17,7 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -152,7 +153,7 @@ func IoGetevents(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
}
// Keep rolling.
- eventsAddr += usermem.Addr(linux.IOEventSize)
+ eventsAddr += hostarch.Addr(linux.IOEventSize)
}
// Everything finished.
@@ -191,12 +192,12 @@ func memoryFor(t *kernel.Task, cb *linux.IOCallback) (usermem.IOSequence, error)
// I/O.
switch cb.OpCode {
case linux.IOCB_CMD_PREAD, linux.IOCB_CMD_PWRITE:
- return t.SingleIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{
+ return t.SingleIOSequence(hostarch.Addr(cb.Buf), bytes, usermem.IOOpts{
AddressSpaceActive: false,
})
case linux.IOCB_CMD_PREADV, linux.IOCB_CMD_PWRITEV:
- return t.IovecsIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{
+ return t.IovecsIOSequence(hostarch.Addr(cb.Buf), bytes, usermem.IOOpts{
AddressSpaceActive: false,
})
@@ -219,7 +220,7 @@ func IoCancel(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// LINT.IfChange
-func getAIOCallback(t *kernel.Task, file *fs.File, cbAddr usermem.Addr, cb *linux.IOCallback, ioseq usermem.IOSequence, actx *mm.AIOContext, eventFile *fs.File) kernel.AIOCallback {
+func getAIOCallback(t *kernel.Task, file *fs.File, cbAddr hostarch.Addr, cb *linux.IOCallback, ioseq usermem.IOSequence, actx *mm.AIOContext, eventFile *fs.File) kernel.AIOCallback {
return func(ctx context.Context) {
if actx.Dead() {
actx.CancelPendingRequest()
@@ -264,7 +265,7 @@ func getAIOCallback(t *kernel.Task, file *fs.File, cbAddr usermem.Addr, cb *linu
}
// submitCallback processes a single callback.
-func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr usermem.Addr) error {
+func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr hostarch.Addr) error {
file := t.GetFile(cb.FD)
if file == nil {
// File not found.
@@ -339,7 +340,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
for i := int32(0); i < nrEvents; i++ {
// Copy in the callback address.
- var cbAddr usermem.Addr
+ var cbAddr hostarch.Addr
switch t.Arch().Width() {
case 8:
var cbAddrP primitive.Uint64
@@ -351,7 +352,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Nothing done.
return 0, nil, err
}
- cbAddr = usermem.Addr(cbAddrP)
+ cbAddr = hostarch.Addr(cbAddrP)
default:
return 0, nil, syserror.ENOSYS
}
@@ -379,7 +380,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
}
// Advance to the next one.
- addr += usermem.Addr(t.Arch().Width())
+ addr += hostarch.Addr(t.Arch().Width())
}
return uintptr(nrEvents), nil, nil
diff --git a/pkg/sentry/syscalls/linux/sys_file.go b/pkg/sentry/syscalls/linux/sys_file.go
index fd9649340..9cd238efd 100644
--- a/pkg/sentry/syscalls/linux/sys_file.go
+++ b/pkg/sentry/syscalls/linux/sys_file.go
@@ -18,6 +18,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -29,7 +30,6 @@ import (
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// fileOpAt performs an operation on the second last component in the path.
@@ -115,7 +115,7 @@ func fileOpOn(t *kernel.Task, dirFD int32, path string, resolve bool, fn func(ro
}
// copyInPath copies a path in.
-func copyInPath(t *kernel.Task, addr usermem.Addr, allowEmpty bool) (path string, dirPath bool, err error) {
+func copyInPath(t *kernel.Task, addr hostarch.Addr, allowEmpty bool) (path string, dirPath bool, err error) {
path, err = t.CopyInString(addr, linux.PATH_MAX)
if err != nil {
return "", false, err
@@ -133,7 +133,7 @@ func copyInPath(t *kernel.Task, addr usermem.Addr, allowEmpty bool) (path string
// LINT.IfChange
-func openAt(t *kernel.Task, dirFD int32, addr usermem.Addr, flags uint) (fd uintptr, err error) {
+func openAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint) (fd uintptr, err error) {
path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return 0, err
@@ -208,7 +208,7 @@ func openAt(t *kernel.Task, dirFD int32, addr usermem.Addr, flags uint) (fd uint
return fd, err // Use result in frame.
}
-func mknodAt(t *kernel.Task, dirFD int32, addr usermem.Addr, mode linux.FileMode) error {
+func mknodAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode linux.FileMode) error {
path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return err
@@ -301,7 +301,7 @@ func Mknodat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
return 0, nil, mknodAt(t, dirFD, path, mode)
}
-func createAt(t *kernel.Task, dirFD int32, addr usermem.Addr, flags uint, mode linux.FileMode) (fd uintptr, err error) {
+func createAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint, mode linux.FileMode) (fd uintptr, err error) {
path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return 0, err
@@ -515,7 +515,7 @@ func (ac accessContext) Value(key interface{}) interface{} {
}
}
-func accessAt(t *kernel.Task, dirFD int32, addr usermem.Addr, mode uint) error {
+func accessAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode uint) error {
const rOK = 4
const wOK = 2
const xOK = 1
@@ -694,7 +694,7 @@ func Getcwd(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
}
// Top it off with a terminator.
- _, err = t.CopyOutBytes(addr+usermem.Addr(bytes), []byte("\x00"))
+ _, err = t.CopyOutBytes(addr+hostarch.Addr(bytes), []byte("\x00"))
return uintptr(bytes + 1), nil, err
}
@@ -1164,7 +1164,7 @@ func Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
// LINT.IfChange
-func mkdirAt(t *kernel.Task, dirFD int32, addr usermem.Addr, mode linux.FileMode) error {
+func mkdirAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode linux.FileMode) error {
path, _, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return err
@@ -1216,7 +1216,7 @@ func Mkdirat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
return 0, nil, mkdirAt(t, dirFD, addr, mode)
}
-func rmdirAt(t *kernel.Task, dirFD int32, addr usermem.Addr) error {
+func rmdirAt(t *kernel.Task, dirFD int32, addr hostarch.Addr) error {
path, _, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return err
@@ -1256,7 +1256,7 @@ func Rmdir(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, rmdirAt(t, linux.AT_FDCWD, addr)
}
-func symlinkAt(t *kernel.Task, dirFD int32, newAddr usermem.Addr, oldAddr usermem.Addr) error {
+func symlinkAt(t *kernel.Task, dirFD int32, newAddr hostarch.Addr, oldAddr hostarch.Addr) error {
newPath, dirPath, err := copyInPath(t, newAddr, false /* allowEmpty */)
if err != nil {
return err
@@ -1341,7 +1341,7 @@ func mayLinkAt(t *kernel.Task, target *fs.Inode) error {
// linkAt creates a hard link to the target specified by oldDirFD and oldAddr,
// specified by newDirFD and newAddr. If resolve is true, then the symlinks
// will be followed when evaluating the target.
-func linkAt(t *kernel.Task, oldDirFD int32, oldAddr usermem.Addr, newDirFD int32, newAddr usermem.Addr, resolve, allowEmpty bool) error {
+func linkAt(t *kernel.Task, oldDirFD int32, oldAddr hostarch.Addr, newDirFD int32, newAddr hostarch.Addr, resolve, allowEmpty bool) error {
oldPath, _, err := copyInPath(t, oldAddr, allowEmpty)
if err != nil {
return err
@@ -1448,7 +1448,7 @@ func Linkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
// LINT.IfChange
-func readlinkAt(t *kernel.Task, dirFD int32, addr usermem.Addr, bufAddr usermem.Addr, size uint) (copied uintptr, err error) {
+func readlinkAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, bufAddr hostarch.Addr, size uint) (copied uintptr, err error) {
path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return 0, err
@@ -1511,7 +1511,7 @@ func Readlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// LINT.IfChange
-func unlinkAt(t *kernel.Task, dirFD int32, addr usermem.Addr) error {
+func unlinkAt(t *kernel.Task, dirFD int32, addr hostarch.Addr) error {
path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return err
@@ -1728,7 +1728,7 @@ func chown(t *kernel.Task, d *fs.Dirent, uid auth.UID, gid auth.GID) error {
return nil
}
-func chownAt(t *kernel.Task, fd int32, addr usermem.Addr, resolve, allowEmpty bool, uid auth.UID, gid auth.GID) error {
+func chownAt(t *kernel.Task, fd int32, addr hostarch.Addr, resolve, allowEmpty bool, uid auth.UID, gid auth.GID) error {
path, _, err := copyInPath(t, addr, allowEmpty)
if err != nil {
return err
@@ -1815,7 +1815,7 @@ func chmod(t *kernel.Task, d *fs.Dirent, mode linux.FileMode) error {
return nil
}
-func chmodAt(t *kernel.Task, fd int32, addr usermem.Addr, mode linux.FileMode) error {
+func chmodAt(t *kernel.Task, fd int32, addr hostarch.Addr, mode linux.FileMode) error {
path, _, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return err
@@ -1866,7 +1866,7 @@ func defaultSetToSystemTimeSpec() fs.TimeSpec {
}
}
-func utimes(t *kernel.Task, dirFD int32, addr usermem.Addr, ts fs.TimeSpec, resolve bool) error {
+func utimes(t *kernel.Task, dirFD int32, addr hostarch.Addr, ts fs.TimeSpec, resolve bool) error {
setTimestamp := func(root *fs.Dirent, d *fs.Dirent, _ uint) error {
// Does the task own the file?
if !d.Inode.CheckOwnership(t) {
@@ -2030,7 +2030,7 @@ func Futimesat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
// LINT.IfChange
-func renameAt(t *kernel.Task, oldDirFD int32, oldAddr usermem.Addr, newDirFD int32, newAddr usermem.Addr) error {
+func renameAt(t *kernel.Task, oldDirFD int32, oldAddr hostarch.Addr, newDirFD int32, newAddr hostarch.Addr) error {
newPath, _, err := copyInPath(t, newAddr, false /* allowEmpty */)
if err != nil {
return err
diff --git a/pkg/sentry/syscalls/linux/sys_futex.go b/pkg/sentry/syscalls/linux/sys_futex.go
index f39ce0639..eeea1613b 100644
--- a/pkg/sentry/syscalls/linux/sys_futex.go
+++ b/pkg/sentry/syscalls/linux/sys_futex.go
@@ -18,11 +18,11 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// futexWaitRestartBlock encapsulates the state required to restart futex(2)
@@ -41,7 +41,7 @@ type futexWaitRestartBlock struct {
// Restart implements kernel.SyscallRestartBlock.Restart.
func (f *futexWaitRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
- return futexWaitDuration(t, f.duration, false, usermem.Addr(f.addr), f.private, f.val, f.mask)
+ return futexWaitDuration(t, f.duration, false, hostarch.Addr(f.addr), f.private, f.val, f.mask)
}
// futexWaitAbsolute performs a FUTEX_WAIT_BITSET, blocking until the wait is
@@ -51,7 +51,7 @@ func (f *futexWaitRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
//
// If blocking is interrupted, the syscall is restarted with the original
// arguments.
-func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts linux.Timespec, forever bool, addr usermem.Addr, private bool, val, mask uint32) (uintptr, error) {
+func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts linux.Timespec, forever bool, addr hostarch.Addr, private bool, val, mask uint32) (uintptr, error) {
w := t.FutexWaiter()
err := t.Futex().WaitPrepare(w, t, addr, private, val, mask)
if err != nil {
@@ -87,7 +87,7 @@ func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts linux.Timespec, fo
// syscall. If forever is true, the syscall is restarted with the original
// arguments. If forever is false, duration is a relative timeout and the
// syscall is restarted with the remaining timeout.
-func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, addr usermem.Addr, private bool, val, mask uint32) (uintptr, error) {
+func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, addr hostarch.Addr, private bool, val, mask uint32) (uintptr, error) {
w := t.FutexWaiter()
err := t.Futex().WaitPrepare(w, t, addr, private, val, mask)
if err != nil {
@@ -124,7 +124,7 @@ func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, add
return 0, syserror.ERESTART_RESTARTBLOCK
}
-func futexLockPI(t *kernel.Task, ts linux.Timespec, forever bool, addr usermem.Addr, private bool) error {
+func futexLockPI(t *kernel.Task, ts linux.Timespec, forever bool, addr hostarch.Addr, private bool) error {
w := t.FutexWaiter()
locked, err := t.Futex().LockPI(w, t, addr, uint32(t.ThreadID()), private, false)
if err != nil {
@@ -152,7 +152,7 @@ func futexLockPI(t *kernel.Task, ts linux.Timespec, forever bool, addr usermem.A
return syserror.ConvertIntr(err, syserror.ERESTARTSYS)
}
-func tryLockPI(t *kernel.Task, addr usermem.Addr, private bool) error {
+func tryLockPI(t *kernel.Task, addr hostarch.Addr, private bool) error {
w := t.FutexWaiter()
locked, err := t.Futex().LockPI(w, t, addr, uint32(t.ThreadID()), private, true)
if err != nil {
diff --git a/pkg/sentry/syscalls/linux/sys_getdents.go b/pkg/sentry/syscalls/linux/sys_getdents.go
index b25f7d881..bbba71d8f 100644
--- a/pkg/sentry/syscalls/linux/sys_getdents.go
+++ b/pkg/sentry/syscalls/linux/sys_getdents.go
@@ -19,6 +19,7 @@ import (
"io"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
@@ -62,7 +63,7 @@ func Getdents64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// getdents implements the core of getdents(2)/getdents64(2).
// f is the syscall implementation dirent serialization function.
-func getdents(t *kernel.Task, fd int32, addr usermem.Addr, size int, f func(*dirent, io.Writer) (int, error)) (uintptr, error) {
+func getdents(t *kernel.Task, fd int32, addr hostarch.Addr, size int, f func(*dirent, io.Writer) (int, error)) (uintptr, error) {
dir := t.GetFile(fd)
if dir == nil {
return 0, syserror.EBADF
diff --git a/pkg/sentry/syscalls/linux/sys_mempolicy.go b/pkg/sentry/syscalls/linux/sys_mempolicy.go
index 9b4a5c3f1..6d27f4292 100644
--- a/pkg/sentry/syscalls/linux/sys_mempolicy.go
+++ b/pkg/sentry/syscalls/linux/sys_mempolicy.go
@@ -18,6 +18,7 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
@@ -31,7 +32,7 @@ const (
allowedNodemask = (1 << maxNodes) - 1
)
-func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64, error) {
+func copyInNodemask(t *kernel.Task, addr hostarch.Addr, maxnode uint32) (uint64, error) {
// "nodemask points to a bit mask of node IDs that contains up to maxnode
// bits. The bit mask size is rounded to the next multiple of
// sizeof(unsigned long), but the kernel will use bits only up to maxnode.
@@ -41,7 +42,7 @@ func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64,
// because of what appears to be a bug: mm/mempolicy.c:get_nodes() uses
// maxnode-1, not maxnode, as the number of bits.
bits := maxnode - 1
- if bits > usermem.PageSize*8 { // also handles overflow from maxnode == 0
+ if bits > hostarch.PageSize*8 { // also handles overflow from maxnode == 0
return 0, syserror.EINVAL
}
if bits == 0 {
@@ -53,7 +54,7 @@ func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64,
if _, err := t.CopyInBytes(addr, buf); err != nil {
return 0, err
}
- val := usermem.ByteOrder.Uint64(buf)
+ val := hostarch.ByteOrder.Uint64(buf)
// Check that only allowed bits in the first unsigned long in the nodemask
// are set.
if val&^allowedNodemask != 0 {
@@ -68,11 +69,11 @@ func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64,
return val, nil
}
-func copyOutNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32, val uint64) error {
+func copyOutNodemask(t *kernel.Task, addr hostarch.Addr, maxnode uint32, val uint64) error {
// mm/mempolicy.c:copy_nodes_to_user() also uses maxnode-1 as the number of
// bits.
bits := maxnode - 1
- if bits > usermem.PageSize*8 { // also handles overflow from maxnode == 0
+ if bits > hostarch.PageSize*8 { // also handles overflow from maxnode == 0
return syserror.EINVAL
}
if bits == 0 {
@@ -80,7 +81,7 @@ func copyOutNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32, val uint
}
// Copy out the first unsigned long in the nodemask.
buf := t.CopyScratchBuffer(8)
- usermem.ByteOrder.PutUint64(buf, val)
+ hostarch.ByteOrder.PutUint64(buf, val)
if _, err := t.CopyOutBytes(addr, buf); err != nil {
return err
}
@@ -258,7 +259,7 @@ func Mbind(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, err
}
-func copyInMempolicyNodemask(t *kernel.Task, modeWithFlags linux.NumaPolicy, nodemask usermem.Addr, maxnode uint32) (linux.NumaPolicy, uint64, error) {
+func copyInMempolicyNodemask(t *kernel.Task, modeWithFlags linux.NumaPolicy, nodemask hostarch.Addr, maxnode uint32) (linux.NumaPolicy, uint64, error) {
flags := linux.NumaPolicy(modeWithFlags & linux.MPOL_MODE_FLAGS)
mode := linux.NumaPolicy(modeWithFlags &^ linux.MPOL_MODE_FLAGS)
if flags == linux.MPOL_MODE_FLAGS {
diff --git a/pkg/sentry/syscalls/linux/sys_mmap.go b/pkg/sentry/syscalls/linux/sys_mmap.go
index cd8dfdfa4..70da0707d 100644
--- a/pkg/sentry/syscalls/linux/sys_mmap.go
+++ b/pkg/sentry/syscalls/linux/sys_mmap.go
@@ -23,7 +23,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/mm"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Brk implements linux syscall brk(2).
@@ -61,12 +62,12 @@ func Mmap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
Unmap: fixed,
Map32Bit: map32bit,
Private: private,
- Perms: usermem.AccessType{
+ Perms: hostarch.AccessType{
Read: linux.PROT_READ&prot != 0,
Write: linux.PROT_WRITE&prot != 0,
Execute: linux.PROT_EXEC&prot != 0,
},
- MaxPerms: usermem.AnyAccess,
+ MaxPerms: hostarch.AnyAccess,
GrowsDown: linux.MAP_GROWSDOWN&flags != 0,
Precommit: linux.MAP_POPULATE&flags != 0,
}
@@ -160,7 +161,7 @@ func Mremap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
func Mprotect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
length := args[1].Uint64()
prot := args[2].Int()
- err := t.MemoryManager().MProtect(args[0].Pointer(), length, usermem.AccessType{
+ err := t.MemoryManager().MProtect(args[0].Pointer(), length, hostarch.AccessType{
Read: linux.PROT_READ&prot != 0,
Write: linux.PROT_WRITE&prot != 0,
Execute: linux.PROT_EXEC&prot != 0,
@@ -183,7 +184,7 @@ func Madvise(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
return 0, nil, nil
}
// Not explicitly stated: length need not be page-aligned.
- lenAddr, ok := usermem.Addr(length).RoundUp()
+ lenAddr, ok := hostarch.Addr(length).RoundUp()
if !ok {
return 0, nil, syserror.EINVAL
}
@@ -232,7 +233,7 @@ func Mincore(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// "The length argument need not be a multiple of the page size, but since
// residency information is returned for whole pages, length is effectively
// rounded up to the next multiple of the page size." - mincore(2)
- la, ok := usermem.Addr(length).RoundUp()
+ la, ok := hostarch.Addr(length).RoundUp()
if !ok {
return 0, nil, syserror.ENOMEM
}
@@ -247,7 +248,7 @@ func Mincore(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
if mapped != uint64(la) {
return 0, nil, syserror.ENOMEM
}
- resident := bytes.Repeat([]byte{1}, int(mapped/usermem.PageSize))
+ resident := bytes.Repeat([]byte{1}, int(mapped/hostarch.PageSize))
_, err := t.CopyOutBytes(vec, resident)
return 0, nil, err
}
diff --git a/pkg/sentry/syscalls/linux/sys_mount.go b/pkg/sentry/syscalls/linux/sys_mount.go
index bd0633564..864d2138c 100644
--- a/pkg/sentry/syscalls/linux/sys_mount.go
+++ b/pkg/sentry/syscalls/linux/sys_mount.go
@@ -20,7 +20,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Mount implements Linux syscall mount(2).
@@ -31,7 +32,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
flags := args[3].Uint64()
dataAddr := args[4].Pointer()
- fsType, err := t.CopyInString(typeAddr, usermem.PageSize)
+ fsType, err := t.CopyInString(typeAddr, hostarch.PageSize)
if err != nil {
return 0, nil, err
}
@@ -52,7 +53,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// character placement, and the address is passed to each file system.
// Most file systems always treat this data as a string, though, and so
// do all of the ones we implement.
- data, err = t.CopyInString(dataAddr, usermem.PageSize)
+ data, err = t.CopyInString(dataAddr, hostarch.PageSize)
if err != nil {
return 0, nil, err
}
diff --git a/pkg/sentry/syscalls/linux/sys_pipe.go b/pkg/sentry/syscalls/linux/sys_pipe.go
index f7135ea46..d95034347 100644
--- a/pkg/sentry/syscalls/linux/sys_pipe.go
+++ b/pkg/sentry/syscalls/linux/sys_pipe.go
@@ -16,19 +16,19 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/pipe"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// LINT.IfChange
// pipe2 implements the actual system call with flags.
-func pipe2(t *kernel.Task, addr usermem.Addr, flags uint) (uintptr, error) {
+func pipe2(t *kernel.Task, addr hostarch.Addr, flags uint) (uintptr, error) {
if flags&^(linux.O_NONBLOCK|linux.O_CLOEXEC) != 0 {
return 0, syserror.EINVAL
}
diff --git a/pkg/sentry/syscalls/linux/sys_poll.go b/pkg/sentry/syscalls/linux/sys_poll.go
index 254f4c9f9..da548a14a 100644
--- a/pkg/sentry/syscalls/linux/sys_poll.go
+++ b/pkg/sentry/syscalls/linux/sys_poll.go
@@ -18,13 +18,13 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -155,7 +155,7 @@ func pollBlock(t *kernel.Task, pfd []linux.PollFD, timeout time.Duration) (time.
}
// CopyInPollFDs copies an array of struct pollfd unless nfds exceeds the max.
-func CopyInPollFDs(t *kernel.Task, addr usermem.Addr, nfds uint) ([]linux.PollFD, error) {
+func CopyInPollFDs(t *kernel.Task, addr hostarch.Addr, nfds uint) ([]linux.PollFD, error) {
if uint64(nfds) > t.ThreadGroup().Limits().GetCapped(limits.NumberOfFiles, fileCap) {
return nil, syserror.EINVAL
}
@@ -170,7 +170,7 @@ func CopyInPollFDs(t *kernel.Task, addr usermem.Addr, nfds uint) ([]linux.PollFD
return pfd, nil
}
-func doPoll(t *kernel.Task, addr usermem.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) {
+func doPoll(t *kernel.Task, addr hostarch.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) {
pfd, err := CopyInPollFDs(t, addr, nfds)
if err != nil {
return timeout, 0, err
@@ -198,7 +198,7 @@ func doPoll(t *kernel.Task, addr usermem.Addr, nfds uint, timeout time.Duration)
}
// CopyInFDSet copies an fd set from select(2)/pselect(2).
-func CopyInFDSet(t *kernel.Task, addr usermem.Addr, nBytes, nBitsInLastPartialByte int) ([]byte, error) {
+func CopyInFDSet(t *kernel.Task, addr hostarch.Addr, nBytes, nBitsInLastPartialByte int) ([]byte, error) {
set := make([]byte, nBytes)
if addr != 0 {
@@ -215,7 +215,7 @@ func CopyInFDSet(t *kernel.Task, addr usermem.Addr, nBytes, nBitsInLastPartialBy
return set, nil
}
-func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs usermem.Addr, timeout time.Duration) (uintptr, error) {
+func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs hostarch.Addr, timeout time.Duration) (uintptr, error) {
if nfds < 0 || nfds > fileCap {
return 0, syserror.EINVAL
}
@@ -365,7 +365,7 @@ func timeoutRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration)
// copyOutTimespecRemaining copies the time remaining in timeout to timespecAddr.
//
// startNs must be from CLOCK_MONOTONIC.
-func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timespecAddr usermem.Addr) error {
+func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timespecAddr hostarch.Addr) error {
if timeout <= 0 {
return nil
}
@@ -377,7 +377,7 @@ func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.D
// copyOutTimevalRemaining copies the time remaining in timeout to timevalAddr.
//
// startNs must be from CLOCK_MONOTONIC.
-func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timevalAddr usermem.Addr) error {
+func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timevalAddr hostarch.Addr) error {
if timeout <= 0 {
return nil
}
@@ -391,7 +391,7 @@ func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Du
//
// +stateify savable
type pollRestartBlock struct {
- pfdAddr usermem.Addr
+ pfdAddr hostarch.Addr
nfds uint
timeout time.Duration
}
@@ -401,7 +401,7 @@ func (p *pollRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
return poll(t, p.pfdAddr, p.nfds, p.timeout)
}
-func poll(t *kernel.Task, pfdAddr usermem.Addr, nfds uint, timeout time.Duration) (uintptr, error) {
+func poll(t *kernel.Task, pfdAddr hostarch.Addr, nfds uint, timeout time.Duration) (uintptr, error) {
remainingTimeout, n, err := doPoll(t, pfdAddr, nfds, timeout)
// On an interrupt poll(2) is restarted with the remaining timeout.
if err == syserror.EINTR {
diff --git a/pkg/sentry/syscalls/linux/sys_random.go b/pkg/sentry/syscalls/linux/sys_random.go
index c0aa0fd60..ae545f80f 100644
--- a/pkg/sentry/syscalls/linux/sys_random.go
+++ b/pkg/sentry/syscalls/linux/sys_random.go
@@ -24,6 +24,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
const (
@@ -64,7 +66,7 @@ func GetRandom(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
if min > 256 {
min = 256
}
- n, err := t.MemoryManager().CopyOutFrom(t, usermem.AddrRangeSeqOf(ar), safemem.FromIOReader{&randReader{-1, min}}, usermem.IOOpts{
+ n, err := t.MemoryManager().CopyOutFrom(t, hostarch.AddrRangeSeqOf(ar), safemem.FromIOReader{&randReader{-1, min}}, usermem.IOOpts{
AddressSpaceActive: true,
})
if n >= int64(min) {
diff --git a/pkg/sentry/syscalls/linux/sys_rlimit.go b/pkg/sentry/syscalls/linux/sys_rlimit.go
index 88cd234d1..e64246d57 100644
--- a/pkg/sentry/syscalls/linux/sys_rlimit.go
+++ b/pkg/sentry/syscalls/linux/sys_rlimit.go
@@ -16,12 +16,12 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// rlimit describes an implementation of 'struct rlimit', which may vary from
@@ -67,12 +67,12 @@ func (r *rlimit64) fromLimit(lim limits.Limit) {
}
}
-func (r *rlimit64) copyIn(t *kernel.Task, addr usermem.Addr) error {
+func (r *rlimit64) copyIn(t *kernel.Task, addr hostarch.Addr) error {
_, err := r.CopyIn(t, addr)
return err
}
-func (r *rlimit64) copyOut(t *kernel.Task, addr usermem.Addr) error {
+func (r *rlimit64) copyOut(t *kernel.Task, addr hostarch.Addr) error {
_, err := r.CopyOut(t, addr)
return err
}
diff --git a/pkg/sentry/syscalls/linux/sys_seccomp.go b/pkg/sentry/syscalls/linux/sys_seccomp.go
index 4fdb4463c..e16d6ff3f 100644
--- a/pkg/sentry/syscalls/linux/sys_seccomp.go
+++ b/pkg/sentry/syscalls/linux/sys_seccomp.go
@@ -17,10 +17,10 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bpf"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// userSockFprog is equivalent to Linux's struct sock_fprog on amd64.
@@ -33,14 +33,14 @@ type userSockFprog struct {
_ [6]byte // padding for alignment
// Filter is a user pointer to the struct sock_filter array that makes up
- // the filter program. Filter is a uint64 rather than a usermem.Addr
- // because usermem.Addr is actually uintptr, which is not a fixed-size
+ // the filter program. Filter is a uint64 rather than a hostarch.Addr
+ // because hostarch.Addr is actually uintptr, which is not a fixed-size
// type.
Filter uint64
}
// seccomp applies a seccomp policy to the current task.
-func seccomp(t *kernel.Task, mode, flags uint64, addr usermem.Addr) error {
+func seccomp(t *kernel.Task, mode, flags uint64, addr hostarch.Addr) error {
// We only support SECCOMP_SET_MODE_FILTER at the moment.
if mode != linux.SECCOMP_SET_MODE_FILTER {
// Unsupported mode.
@@ -60,7 +60,7 @@ func seccomp(t *kernel.Task, mode, flags uint64, addr usermem.Addr) error {
return err
}
filter := make([]linux.BPFInstruction, int(fprog.Len))
- if _, err := linux.CopyBPFInstructionSliceIn(t, usermem.Addr(fprog.Filter), filter); err != nil {
+ if _, err := linux.CopyBPFInstructionSliceIn(t, hostarch.Addr(fprog.Filter), filter); err != nil {
return err
}
compiledFilter, err := bpf.Compile(filter)
diff --git a/pkg/sentry/syscalls/linux/sys_sem.go b/pkg/sentry/syscalls/linux/sys_sem.go
index f0570d927..c84260080 100644
--- a/pkg/sentry/syscalls/linux/sys_sem.go
+++ b/pkg/sentry/syscalls/linux/sys_sem.go
@@ -19,13 +19,13 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
const opsMax = 500 // SEMOPM
@@ -310,7 +310,7 @@ func setVal(t *kernel.Task, id int32, num int32, val int16) error {
return set.SetVal(t, num, val, creds, int32(pid))
}
-func setValAll(t *kernel.Task, id int32, array usermem.Addr) error {
+func setValAll(t *kernel.Task, id int32, array hostarch.Addr) error {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByID(id)
if set == nil {
@@ -335,7 +335,7 @@ func getVal(t *kernel.Task, id int32, num int32) (int16, error) {
return set.GetVal(num, creds)
}
-func getValAll(t *kernel.Task, id int32, array usermem.Addr) error {
+func getValAll(t *kernel.Task, id int32, array hostarch.Addr) error {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByID(id)
if set == nil {
diff --git a/pkg/sentry/syscalls/linux/sys_signal.go b/pkg/sentry/syscalls/linux/sys_signal.go
index d639c9bf7..53b12dc41 100644
--- a/pkg/sentry/syscalls/linux/sys_signal.go
+++ b/pkg/sentry/syscalls/linux/sys_signal.go
@@ -19,12 +19,12 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/signalfd"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// "For a process to have permission to send a signal it must
@@ -516,7 +516,7 @@ func RestartSyscall(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne
}
// sharedSignalfd is shared between the two calls.
-func sharedSignalfd(t *kernel.Task, fd int32, sigset usermem.Addr, sigsetsize uint, flags int32) (uintptr, *kernel.SyscallControl, error) {
+func sharedSignalfd(t *kernel.Task, fd int32, sigset hostarch.Addr, sigsetsize uint, flags int32) (uintptr, *kernel.SyscallControl, error) {
// Copy in the signal mask.
mask, err := CopyInSigSet(t, sigset, sigsetsize)
if err != nil {
diff --git a/pkg/sentry/syscalls/linux/sys_socket.go b/pkg/sentry/syscalls/linux/sys_socket.go
index c6adfe06b..9bdf6d3d8 100644
--- a/pkg/sentry/syscalls/linux/sys_socket.go
+++ b/pkg/sentry/syscalls/linux/sys_socket.go
@@ -18,6 +18,7 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -117,7 +118,7 @@ type multipleMessageHeader64 struct {
// CaptureAddress allocates memory for and copies a socket address structure
// from the untrusted address space range.
-func CaptureAddress(t *kernel.Task, addr usermem.Addr, addrlen uint32) ([]byte, error) {
+func CaptureAddress(t *kernel.Task, addr hostarch.Addr, addrlen uint32) ([]byte, error) {
if addrlen > maxAddrLen {
return nil, syserror.EINVAL
}
@@ -133,7 +134,7 @@ func CaptureAddress(t *kernel.Task, addr usermem.Addr, addrlen uint32) ([]byte,
// writeAddress writes a sockaddr structure and its length to an output buffer
// in the unstrusted address space range. If the address is bigger than the
// buffer, it is truncated.
-func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr usermem.Addr, addrLenPtr usermem.Addr) error {
+func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr hostarch.Addr, addrLenPtr hostarch.Addr) error {
// Get the buffer length.
var bufLen uint32
if _, err := primitive.CopyUint32In(t, addrLenPtr, &bufLen); err != nil {
@@ -276,7 +277,7 @@ func Connect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// accept is the implementation of the accept syscall. It is called by accept
// and accept4 syscall handlers.
-func accept(t *kernel.Task, fd int32, addr usermem.Addr, addrLen usermem.Addr, flags int) (uintptr, error) {
+func accept(t *kernel.Task, fd int32, addr hostarch.Addr, addrLen hostarch.Addr, flags int) (uintptr, error) {
// Check that no unsupported flags are passed in.
if flags & ^(linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
return 0, syserror.EINVAL
@@ -472,7 +473,7 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// getSockOpt tries to handle common socket options, or dispatches to a specific
// socket implementation.
-func getSockOpt(t *kernel.Task, s socket.Socket, level, name int, optValAddr usermem.Addr, len int) (marshal.Marshallable, *syserr.Error) {
+func getSockOpt(t *kernel.Task, s socket.Socket, level, name int, optValAddr hostarch.Addr, len int) (marshal.Marshallable, *syserr.Error) {
if level == linux.SOL_SOCKET {
switch name {
case linux.SO_TYPE, linux.SO_DOMAIN, linux.SO_PROTOCOL:
@@ -735,7 +736,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return uintptr(count), nil, nil
}
-func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) {
+func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr hostarch.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) {
// Capture the message header and io vectors.
var msg MessageHeader64
if _, err := msg.CopyIn(t, msgPtr); err != nil {
@@ -745,7 +746,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i
if msg.IovLen > linux.UIO_MAXIOV {
return 0, syserror.EMSGSIZE
}
- dst, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
+ dst, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
AddressSpaceActive: true,
})
if err != nil {
@@ -796,7 +797,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i
// Copy the address to the caller.
if msg.NameLen != 0 {
- if err := writeAddress(t, sender, senderLen, usermem.Addr(msg.Name), usermem.Addr(msgPtr+nameLenOffset)); err != nil {
+ if err := writeAddress(t, sender, senderLen, hostarch.Addr(msg.Name), hostarch.Addr(msgPtr+nameLenOffset)); err != nil {
return 0, err
}
}
@@ -806,7 +807,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i
return 0, err
}
if len(controlData) > 0 {
- if _, err := t.CopyOutBytes(usermem.Addr(msg.Control), controlData); err != nil {
+ if _, err := t.CopyOutBytes(hostarch.Addr(msg.Control), controlData); err != nil {
return 0, err
}
}
@@ -821,7 +822,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i
// recvFrom is the implementation of the recvfrom syscall. It is called by
// recvfrom and recv syscall handlers.
-func recvFrom(t *kernel.Task, fd int32, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLenPtr usermem.Addr) (uintptr, error) {
+func recvFrom(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLenPtr hostarch.Addr) (uintptr, error) {
if int(bufLen) < 0 {
return 0, syserror.EINVAL
}
@@ -997,7 +998,7 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return uintptr(count), nil, nil
}
-func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr usermem.Addr, flags int32) (uintptr, error) {
+func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr hostarch.Addr, flags int32) (uintptr, error) {
// Capture the message header.
var msg MessageHeader64
if _, err := msg.CopyIn(t, msgPtr); err != nil {
@@ -1011,7 +1012,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme
return 0, syserror.ENOBUFS
}
controlData = make([]byte, msg.ControlLen)
- if _, err := t.CopyInBytes(usermem.Addr(msg.Control), controlData); err != nil {
+ if _, err := t.CopyInBytes(hostarch.Addr(msg.Control), controlData); err != nil {
return 0, err
}
}
@@ -1020,7 +1021,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme
var to []byte
if msg.NameLen != 0 {
var err error
- to, err = CaptureAddress(t, usermem.Addr(msg.Name), msg.NameLen)
+ to, err = CaptureAddress(t, hostarch.Addr(msg.Name), msg.NameLen)
if err != nil {
return 0, err
}
@@ -1030,7 +1031,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme
if msg.IovLen > linux.UIO_MAXIOV {
return 0, syserror.EMSGSIZE
}
- src, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
+ src, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
AddressSpaceActive: true,
})
if err != nil {
@@ -1064,7 +1065,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme
// sendTo is the implementation of the sendto syscall. It is called by sendto
// and send syscall handlers.
-func sendTo(t *kernel.Task, fd int32, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLen uint32) (uintptr, error) {
+func sendTo(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLen uint32) (uintptr, error) {
bl := int(bufLen)
if bl < 0 {
return 0, syserror.EINVAL
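
Editor's note: the writeAddress helper touched above follows the usual sockaddr copy-out contract — truncate the address to the caller's declared buffer length, but still report the full length back. The sketch below reproduces that behavior over plain byte slices standing in for untrusted user memory; the helper names are hypothetical and binary.LittleEndian merely stands in for the task's byte order.

package main

import (
	"encoding/binary"
	"fmt"
)

// writeAddressSketch copies addr into userBuf, truncating if userBuf's
// declared length (stored little-endian in userLen) is smaller, then writes
// the real address length back into userLen, as recvmsg(2) and
// getpeername(2) require.
func writeAddressSketch(addr, userBuf, userLen []byte) {
	bufLen := binary.LittleEndian.Uint32(userLen)
	n := uint32(len(addr))
	if n > bufLen {
		n = bufLen // truncate, but still report the full length below
	}
	copy(userBuf[:n], addr[:n])
	binary.LittleEndian.PutUint32(userLen, uint32(len(addr)))
}

func main() {
	addr := []byte{0x02, 0x00, 0x1f, 0x90, 127, 0, 0, 1} // fake sockaddr_in prefix
	userBuf := make([]byte, 4)                           // caller's buffer is too small
	userLen := make([]byte, 4)
	binary.LittleEndian.PutUint32(userLen, uint32(len(userBuf)))
	writeAddressSketch(addr, userBuf, userLen)
	fmt.Println(userBuf, binary.LittleEndian.Uint32(userLen)) // [2 0 31 144] 8
}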
diff --git a/pkg/sentry/syscalls/linux/sys_stat.go b/pkg/sentry/syscalls/linux/sys_stat.go
index cda29a8b5..2338ba44b 100644
--- a/pkg/sentry/syscalls/linux/sys_stat.go
+++ b/pkg/sentry/syscalls/linux/sys_stat.go
@@ -16,11 +16,11 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// LINT.IfChange
@@ -106,7 +106,7 @@ func Fstat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
}
// stat implements stat from the given *fs.Dirent.
-func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr usermem.Addr) error {
+func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr hostarch.Addr) error {
if dirPath && !fs.IsDir(d.Inode.StableAttr) {
return syserror.ENOTDIR
}
@@ -120,7 +120,7 @@ func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr usermem.Addr) err
}
// fstat implements fstat for the given *fs.File.
-func fstat(t *kernel.Task, f *fs.File, statAddr usermem.Addr) error {
+func fstat(t *kernel.Task, f *fs.File, statAddr hostarch.Addr) error {
uattr, err := f.UnstableAttr(t)
if err != nil {
return err
@@ -180,7 +180,7 @@ func Statx(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
})
}
-func statx(t *kernel.Task, sattr fs.StableAttr, uattr fs.UnstableAttr, statxAddr usermem.Addr) error {
+func statx(t *kernel.Task, sattr fs.StableAttr, uattr fs.UnstableAttr, statxAddr hostarch.Addr) error {
// "[T]he kernel may return fields that weren't requested and may fail to
// return fields that were requested, depending on what the backing
// filesystem supports.
@@ -257,7 +257,7 @@ func Fstatfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// statfsImpl implements the linux syscall statfs and fstatfs based on a Dirent,
// copying the statfs structure out to addr on success, otherwise an error is
// returned.
-func statfsImpl(t *kernel.Task, d *fs.Dirent, addr usermem.Addr) error {
+func statfsImpl(t *kernel.Task, d *fs.Dirent, addr hostarch.Addr) error {
info, err := d.Inode.StatFS(t)
if err != nil {
return err
diff --git a/pkg/sentry/syscalls/linux/sys_thread.go b/pkg/sentry/syscalls/linux/sys_thread.go
index b5f920949..3185ea527 100644
--- a/pkg/sentry/syscalls/linux/sys_thread.go
+++ b/pkg/sentry/syscalls/linux/sys_thread.go
@@ -19,6 +19,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -46,7 +47,7 @@ var (
ExecMaxTotalSize = 2 * 1024 * 1024
// ExecMaxElemSize is the maximum length of a single argv or envv entry.
- ExecMaxElemSize = 32 * usermem.PageSize
+ ExecMaxElemSize = 32 * hostarch.PageSize
)
// Getppid implements linux syscall getppid(2).
@@ -88,7 +89,7 @@ func Execveat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return execveat(t, dirFD, pathnameAddr, argvAddr, envvAddr, flags)
}
-func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr usermem.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) {
+func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr hostarch.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) {
pathname, err := t.CopyInString(pathnameAddr, linux.PATH_MAX)
if err != nil {
return 0, nil, err
@@ -199,7 +200,7 @@ func ExitGroup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
}
// clone is used by Clone, Fork, and VFork.
-func clone(t *kernel.Task, flags int, stack usermem.Addr, parentTID usermem.Addr, childTID usermem.Addr, tls usermem.Addr) (uintptr, *kernel.SyscallControl, error) {
+func clone(t *kernel.Task, flags int, stack hostarch.Addr, parentTID hostarch.Addr, childTID hostarch.Addr, tls hostarch.Addr) (uintptr, *kernel.SyscallControl, error) {
opts := kernel.CloneOptions{
SharingOptions: kernel.SharingOptions{
NewAddressSpace: flags&linux.CLONE_VM == 0,
@@ -274,7 +275,7 @@ func parseCommonWaitOptions(wopts *kernel.WaitOptions, options int) error {
}
// wait4 waits for the given child process to exit.
-func wait4(t *kernel.Task, pid int, statusAddr usermem.Addr, options int, rusageAddr usermem.Addr) (uintptr, error) {
+func wait4(t *kernel.Task, pid int, statusAddr hostarch.Addr, options int, rusageAddr hostarch.Addr) (uintptr, error) {
if options&^(linux.WNOHANG|linux.WUNTRACED|linux.WCONTINUED|linux.WNOTHREAD|linux.WALL|linux.WCLONE) != 0 {
return 0, syserror.EINVAL
}
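
Editor's note: the wait4 hunk above uses the "options &^ allowed != 0" idiom to reject any flag bit outside the supported set. A minimal standalone illustration of that check follows; the constants mirror Linux's wait(2) option values but are written out here only for the example.

package main

import (
	"errors"
	"fmt"
)

const (
	wNOHANG    = 0x1
	wUNTRACED  = 0x2
	wCONTINUED = 0x8
)

var errInvalid = errors.New("EINVAL")

// checkWaitOptions rejects any bit outside the supported set, the same
// AND-NOT mask pattern used by wait4 above.
func checkWaitOptions(options int) error {
	const allowed = wNOHANG | wUNTRACED | wCONTINUED
	if options&^allowed != 0 {
		return errInvalid
	}
	return nil
}

func main() {
	fmt.Println(checkWaitOptions(wNOHANG))        // <nil>
	fmt.Println(checkWaitOptions(wNOHANG | 0x40)) // EINVAL
}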
diff --git a/pkg/sentry/syscalls/linux/sys_time.go b/pkg/sentry/syscalls/linux/sys_time.go
index c5054d2f1..83b777bbd 100644
--- a/pkg/sentry/syscalls/linux/sys_time.go
+++ b/pkg/sentry/syscalls/linux/sys_time.go
@@ -19,12 +19,12 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// The most significant 29 bits hold either a pid or a file descriptor.
@@ -165,7 +165,7 @@ func Time(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
addr := args[0].Pointer()
r := t.Kernel().RealtimeClock().Now().TimeT()
- if addr == usermem.Addr(0) {
+ if addr == hostarch.Addr(0) {
return uintptr(r), nil, nil
}
@@ -182,7 +182,7 @@ func Time(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
type clockNanosleepRestartBlock struct {
c ktime.Clock
duration time.Duration
- rem usermem.Addr
+ rem hostarch.Addr
}
// Restart implements kernel.SyscallRestartBlock.Restart.
@@ -221,7 +221,7 @@ func clockNanosleepUntil(t *kernel.Task, c ktime.Clock, ts linux.Timespec) error
//
// If blocking is interrupted, the syscall is restarted with the remaining
// duration timeout.
-func clockNanosleepFor(t *kernel.Task, c ktime.Clock, dur time.Duration, rem usermem.Addr) error {
+func clockNanosleepFor(t *kernel.Task, c ktime.Clock, dur time.Duration, rem hostarch.Addr) error {
timer, start, tchan := ktime.After(c, dur)
err := t.BlockWithTimer(nil, tchan)
@@ -324,14 +324,14 @@ func Gettimeofday(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
tv := args[0].Pointer()
tz := args[1].Pointer()
- if tv != usermem.Addr(0) {
+ if tv != hostarch.Addr(0) {
nowTv := t.Kernel().RealtimeClock().Now().Timeval()
if err := copyTimevalOut(t, tv, &nowTv); err != nil {
return 0, nil, err
}
}
- if tz != usermem.Addr(0) {
+ if tz != hostarch.Addr(0) {
// Ask the time package for the timezone.
_, offset := time.Now().Zone()
// This int32 array mimics linux's struct timezone.
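
Editor's note: the Gettimeofday hunk above fills a two-element int32 array shaped like Linux's struct timezone from Go's time package. The sketch below shows the same conversion in isolation: the offset returned by Zone() is seconds east of UTC, so tz_minuteswest is its negation in minutes; treating tz_dsttime as zero is an assumption (the field is obsolete and commonly reported as 0).

package main

import (
	"fmt"
	"time"
)

// gettimeofdaySketch returns the current seconds/microseconds plus a
// [2]int32 packed like struct timezone: {tz_minuteswest, tz_dsttime}.
func gettimeofdaySketch() (sec, usec int64, tz [2]int32) {
	now := time.Now()
	sec = now.Unix()
	usec = int64(now.Nanosecond() / 1000)
	_, offsetSeconds := now.Zone()     // seconds east of UTC
	tz[0] = int32(-offsetSeconds / 60) // minutes west of Greenwich
	tz[1] = 0                          // tz_dsttime: obsolete, assumed zero
	return sec, usec, tz
}

func main() {
	sec, usec, tz := gettimeofdaySketch()
	fmt.Println(sec, usec, tz)
}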
diff --git a/pkg/sentry/syscalls/linux/sys_xattr.go b/pkg/sentry/syscalls/linux/sys_xattr.go
index 97474fd3c..28ad6a60e 100644
--- a/pkg/sentry/syscalls/linux/sys_xattr.go
+++ b/pkg/sentry/syscalls/linux/sys_xattr.go
@@ -18,11 +18,11 @@ import (
"strings"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// LINT.IfChange
@@ -87,7 +87,7 @@ func getXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSymlink
}
// getXattr implements getxattr(2) from the given *fs.Dirent.
-func getXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr usermem.Addr, size uint64) (int, error) {
+func getXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr hostarch.Addr, size uint64) (int, error) {
name, err := copyInXattrName(t, nameAddr)
if err != nil {
return 0, err
@@ -180,7 +180,7 @@ func setXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSymlink
}
// setXattr implements setxattr(2) from the given *fs.Dirent.
-func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr usermem.Addr, size uint64, flags uint32) error {
+func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr hostarch.Addr, size uint64, flags uint32) error {
if flags&^(linux.XATTR_CREATE|linux.XATTR_REPLACE) != 0 {
return syserror.EINVAL
}
@@ -214,7 +214,7 @@ func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr usermem.Addr, si
return nil
}
-func copyInXattrName(t *kernel.Task, nameAddr usermem.Addr) (string, error) {
+func copyInXattrName(t *kernel.Task, nameAddr hostarch.Addr) (string, error) {
name, err := t.CopyInString(nameAddr, linux.XATTR_NAME_MAX+1)
if err != nil {
if err == syserror.ENAMETOOLONG {
@@ -306,7 +306,7 @@ func listXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSymlin
return uintptr(n), nil, nil
}
-func listXattr(t *kernel.Task, d *fs.Dirent, addr usermem.Addr, size uint64) (int, error) {
+func listXattr(t *kernel.Task, d *fs.Dirent, addr hostarch.Addr, size uint64) (int, error) {
if !xattrFileTypeOk(d.Inode) {
return 0, nil
}
@@ -408,7 +408,7 @@ func removeXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSyml
}
// removeXattr implements removexattr(2) from the given *fs.Dirent.
-func removeXattr(t *kernel.Task, d *fs.Dirent, nameAddr usermem.Addr) error {
+func removeXattr(t *kernel.Task, d *fs.Dirent, nameAddr hostarch.Addr) error {
name, err := copyInXattrName(t, nameAddr)
if err != nil {
return err
diff --git a/pkg/sentry/syscalls/linux/timespec.go b/pkg/sentry/syscalls/linux/timespec.go
index ddc3ee26e..3edc922eb 100644
--- a/pkg/sentry/syscalls/linux/timespec.go
+++ b/pkg/sentry/syscalls/linux/timespec.go
@@ -18,13 +18,13 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// copyTimespecIn copies a Timespec from the untrusted app range to the kernel.
-func copyTimespecIn(t *kernel.Task, addr usermem.Addr) (linux.Timespec, error) {
+func copyTimespecIn(t *kernel.Task, addr hostarch.Addr) (linux.Timespec, error) {
switch t.Arch().Width() {
case 8:
ts := linux.Timespec{}
@@ -33,8 +33,8 @@ func copyTimespecIn(t *kernel.Task, addr usermem.Addr) (linux.Timespec, error) {
if err != nil {
return ts, err
}
- ts.Sec = int64(usermem.ByteOrder.Uint64(in[0:]))
- ts.Nsec = int64(usermem.ByteOrder.Uint64(in[8:]))
+ ts.Sec = int64(hostarch.ByteOrder.Uint64(in[0:]))
+ ts.Nsec = int64(hostarch.ByteOrder.Uint64(in[8:]))
return ts, nil
default:
return linux.Timespec{}, syserror.ENOSYS
@@ -42,12 +42,12 @@ func copyTimespecIn(t *kernel.Task, addr usermem.Addr) (linux.Timespec, error) {
}
// copyTimespecOut copies a Timespec to the untrusted app range.
-func copyTimespecOut(t *kernel.Task, addr usermem.Addr, ts *linux.Timespec) error {
+func copyTimespecOut(t *kernel.Task, addr hostarch.Addr, ts *linux.Timespec) error {
switch t.Arch().Width() {
case 8:
out := t.CopyScratchBuffer(16)
- usermem.ByteOrder.PutUint64(out[0:], uint64(ts.Sec))
- usermem.ByteOrder.PutUint64(out[8:], uint64(ts.Nsec))
+ hostarch.ByteOrder.PutUint64(out[0:], uint64(ts.Sec))
+ hostarch.ByteOrder.PutUint64(out[8:], uint64(ts.Nsec))
_, err := t.CopyOutBytes(addr, out)
return err
default:
@@ -56,7 +56,7 @@ func copyTimespecOut(t *kernel.Task, addr usermem.Addr, ts *linux.Timespec) erro
}
// copyTimevalIn copies a Timeval from the untrusted app range to the kernel.
-func copyTimevalIn(t *kernel.Task, addr usermem.Addr) (linux.Timeval, error) {
+func copyTimevalIn(t *kernel.Task, addr hostarch.Addr) (linux.Timeval, error) {
switch t.Arch().Width() {
case 8:
tv := linux.Timeval{}
@@ -65,8 +65,8 @@ func copyTimevalIn(t *kernel.Task, addr usermem.Addr) (linux.Timeval, error) {
if err != nil {
return tv, err
}
- tv.Sec = int64(usermem.ByteOrder.Uint64(in[0:]))
- tv.Usec = int64(usermem.ByteOrder.Uint64(in[8:]))
+ tv.Sec = int64(hostarch.ByteOrder.Uint64(in[0:]))
+ tv.Usec = int64(hostarch.ByteOrder.Uint64(in[8:]))
return tv, nil
default:
return linux.Timeval{}, syserror.ENOSYS
@@ -74,12 +74,12 @@ func copyTimevalIn(t *kernel.Task, addr usermem.Addr) (linux.Timeval, error) {
}
// copyTimevalOut copies a Timeval to the untrusted app range.
-func copyTimevalOut(t *kernel.Task, addr usermem.Addr, tv *linux.Timeval) error {
+func copyTimevalOut(t *kernel.Task, addr hostarch.Addr, tv *linux.Timeval) error {
switch t.Arch().Width() {
case 8:
out := t.CopyScratchBuffer(16)
- usermem.ByteOrder.PutUint64(out[0:], uint64(tv.Sec))
- usermem.ByteOrder.PutUint64(out[8:], uint64(tv.Usec))
+ hostarch.ByteOrder.PutUint64(out[0:], uint64(tv.Sec))
+ hostarch.ByteOrder.PutUint64(out[8:], uint64(tv.Usec))
_, err := t.CopyOutBytes(addr, out)
return err
default:
@@ -94,7 +94,7 @@ func copyTimevalOut(t *kernel.Task, addr usermem.Addr, tv *linux.Timeval) error
// returned value is the maximum that Duration will allow.
//
// If timespecAddr is NULL, the returned value is negative.
-func copyTimespecInToDuration(t *kernel.Task, timespecAddr usermem.Addr) (time.Duration, error) {
+func copyTimespecInToDuration(t *kernel.Task, timespecAddr hostarch.Addr) (time.Duration, error) {
// Use a negative Duration to indicate "no timeout".
timeout := time.Duration(-1)
if timespecAddr != 0 {
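
Editor's note: the timespec.go hunks above all marshal a 64-bit struct timespec/timeval as two 8-byte fields through a 16-byte scratch buffer. The standalone sketch below shows exactly that layout, with binary.LittleEndian standing in for hostarch.ByteOrder (an assumption that the host is little-endian).

package main

import (
	"encoding/binary"
	"fmt"
)

// timespec mirrors the 16-byte layout read and written by
// copyTimespecIn/copyTimespecOut above.
type timespec struct {
	Sec  int64
	Nsec int64
}

func marshalTimespec(ts timespec) []byte {
	out := make([]byte, 16)
	binary.LittleEndian.PutUint64(out[0:8], uint64(ts.Sec))
	binary.LittleEndian.PutUint64(out[8:16], uint64(ts.Nsec))
	return out
}

func unmarshalTimespec(in []byte) timespec {
	return timespec{
		Sec:  int64(binary.LittleEndian.Uint64(in[0:8])),
		Nsec: int64(binary.LittleEndian.Uint64(in[8:16])),
	}
}

func main() {
	buf := marshalTimespec(timespec{Sec: 1616868944, Nsec: 500000000})
	fmt.Printf("%x\n", buf)
	fmt.Println(unmarshalTimespec(buf)) // {1616868944 500000000}
}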
diff --git a/pkg/sentry/syscalls/linux/vfs2/aio.go b/pkg/sentry/syscalls/linux/vfs2/aio.go
index de6789a65..fd1863ef3 100644
--- a/pkg/sentry/syscalls/linux/vfs2/aio.go
+++ b/pkg/sentry/syscalls/linux/vfs2/aio.go
@@ -26,6 +26,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// IoSubmit implements linux syscall io_submit(2).
@@ -40,7 +42,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
for i := int32(0); i < nrEvents; i++ {
// Copy in the callback address.
- var cbAddr usermem.Addr
+ var cbAddr hostarch.Addr
switch t.Arch().Width() {
case 8:
var cbAddrP primitive.Uint64
@@ -52,7 +54,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Nothing done.
return 0, nil, err
}
- cbAddr = usermem.Addr(cbAddrP)
+ cbAddr = hostarch.Addr(cbAddrP)
default:
return 0, nil, syserror.ENOSYS
}
@@ -79,14 +81,14 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
}
// Advance to the next one.
- addr += usermem.Addr(t.Arch().Width())
+ addr += hostarch.Addr(t.Arch().Width())
}
return uintptr(nrEvents), nil, nil
}
// submitCallback processes a single callback.
-func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr usermem.Addr) error {
+func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr hostarch.Addr) error {
if cb.Reserved2 != 0 {
return syserror.EINVAL
}
@@ -148,7 +150,7 @@ func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr user
return nil
}
-func getAIOCallback(t *kernel.Task, fd, eventFD *vfs.FileDescription, cbAddr usermem.Addr, cb *linux.IOCallback, ioseq usermem.IOSequence, aioCtx *mm.AIOContext) kernel.AIOCallback {
+func getAIOCallback(t *kernel.Task, fd, eventFD *vfs.FileDescription, cbAddr hostarch.Addr, cb *linux.IOCallback, ioseq usermem.IOSequence, aioCtx *mm.AIOContext) kernel.AIOCallback {
return func(ctx context.Context) {
// Release references after completing the callback.
defer fd.DecRef(ctx)
@@ -206,12 +208,12 @@ func memoryFor(t *kernel.Task, cb *linux.IOCallback) (usermem.IOSequence, error)
// I/O.
switch cb.OpCode {
case linux.IOCB_CMD_PREAD, linux.IOCB_CMD_PWRITE:
- return t.SingleIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{
+ return t.SingleIOSequence(hostarch.Addr(cb.Buf), bytes, usermem.IOOpts{
AddressSpaceActive: false,
})
case linux.IOCB_CMD_PREADV, linux.IOCB_CMD_PWRITEV:
- return t.IovecsIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{
+ return t.IovecsIOSequence(hostarch.Addr(cb.Buf), bytes, usermem.IOOpts{
AddressSpaceActive: false,
})
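
Editor's note: the IoSubmit hunks above copy in one pointer-sized callback address per iteration and advance the cursor by the architecture's pointer width. The sketch below reproduces that loop over a byte slice standing in for user memory on a 64-bit task; the names are illustrative, not gVisor APIs.

package main

import (
	"encoding/binary"
	"fmt"
)

// readIOCBPointers reads nrEvents pointer values from userMem, advancing by
// the task's pointer width each time, and rejects widths it does not handle,
// like the switch on t.Arch().Width() above.
func readIOCBPointers(userMem []byte, nrEvents, width int) ([]uint64, error) {
	if width != 8 {
		return nil, fmt.Errorf("ENOSYS: unsupported pointer width %d", width)
	}
	addrs := make([]uint64, 0, nrEvents)
	cursor := 0
	for i := 0; i < nrEvents; i++ {
		addrs = append(addrs, binary.LittleEndian.Uint64(userMem[cursor:cursor+8]))
		cursor += width // advance to the next iocb pointer
	}
	return addrs, nil
}

func main() {
	mem := make([]byte, 16)
	binary.LittleEndian.PutUint64(mem[0:8], 0x1000)
	binary.LittleEndian.PutUint64(mem[8:16], 0x2000)
	fmt.Println(readIOCBPointers(mem, 2, 8)) // [4096 8192] <nil>
}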
diff --git a/pkg/sentry/syscalls/linux/vfs2/execve.go b/pkg/sentry/syscalls/linux/vfs2/execve.go
index 7a409620d..3315398a4 100644
--- a/pkg/sentry/syscalls/linux/vfs2/execve.go
+++ b/pkg/sentry/syscalls/linux/vfs2/execve.go
@@ -24,7 +24,8 @@ import (
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Execve implements linux syscall execve(2).
@@ -45,7 +46,7 @@ func Execveat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return execveat(t, dirfd, pathnameAddr, argvAddr, envvAddr, flags)
}
-func execveat(t *kernel.Task, dirfd int32, pathnameAddr, argvAddr, envvAddr usermem.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) {
+func execveat(t *kernel.Task, dirfd int32, pathnameAddr, argvAddr, envvAddr hostarch.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) {
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 {
return 0, nil, syserror.EINVAL
}
diff --git a/pkg/sentry/syscalls/linux/vfs2/filesystem.go b/pkg/sentry/syscalls/linux/vfs2/filesystem.go
index 01e0f9010..36aa1d3ae 100644
--- a/pkg/sentry/syscalls/linux/vfs2/filesystem.go
+++ b/pkg/sentry/syscalls/linux/vfs2/filesystem.go
@@ -20,7 +20,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Link implements Linux syscall link(2).
@@ -40,7 +41,7 @@ func Linkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
return 0, nil, linkat(t, olddirfd, oldpathAddr, newdirfd, newpathAddr, flags)
}
-func linkat(t *kernel.Task, olddirfd int32, oldpathAddr usermem.Addr, newdirfd int32, newpathAddr usermem.Addr, flags int32) error {
+func linkat(t *kernel.Task, olddirfd int32, oldpathAddr hostarch.Addr, newdirfd int32, newpathAddr hostarch.Addr, flags int32) error {
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_FOLLOW) != 0 {
return syserror.EINVAL
}
@@ -86,7 +87,7 @@ func Mkdirat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
return 0, nil, mkdirat(t, dirfd, addr, mode)
}
-func mkdirat(t *kernel.Task, dirfd int32, addr usermem.Addr, mode uint) error {
+func mkdirat(t *kernel.Task, dirfd int32, addr hostarch.Addr, mode uint) error {
path, err := copyInPath(t, addr)
if err != nil {
return err
@@ -118,7 +119,7 @@ func Mknodat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
return 0, nil, mknodat(t, dirfd, addr, linux.FileMode(mode), dev)
}
-func mknodat(t *kernel.Task, dirfd int32, addr usermem.Addr, mode linux.FileMode, dev uint32) error {
+func mknodat(t *kernel.Task, dirfd int32, addr hostarch.Addr, mode linux.FileMode, dev uint32) error {
path, err := copyInPath(t, addr)
if err != nil {
return err
@@ -165,7 +166,7 @@ func Creat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return openat(t, linux.AT_FDCWD, addr, linux.O_WRONLY|linux.O_CREAT|linux.O_TRUNC, mode)
}
-func openat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr, flags uint32, mode uint) (uintptr, *kernel.SyscallControl, error) {
+func openat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, flags uint32, mode uint) (uintptr, *kernel.SyscallControl, error) {
path, err := copyInPath(t, pathAddr)
if err != nil {
return 0, nil, err
@@ -217,7 +218,7 @@ func Renameat2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return 0, nil, renameat(t, olddirfd, oldpathAddr, newdirfd, newpathAddr, flags)
}
-func renameat(t *kernel.Task, olddirfd int32, oldpathAddr usermem.Addr, newdirfd int32, newpathAddr usermem.Addr, flags uint32) error {
+func renameat(t *kernel.Task, olddirfd int32, oldpathAddr hostarch.Addr, newdirfd int32, newpathAddr hostarch.Addr, flags uint32) error {
oldpath, err := copyInPath(t, oldpathAddr)
if err != nil {
return err
@@ -250,7 +251,7 @@ func Rmdir(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, rmdirat(t, linux.AT_FDCWD, pathAddr)
}
-func rmdirat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr) error {
+func rmdirat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr) error {
path, err := copyInPath(t, pathAddr)
if err != nil {
return err
@@ -269,7 +270,7 @@ func Unlink(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
return 0, nil, unlinkat(t, linux.AT_FDCWD, pathAddr)
}
-func unlinkat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr) error {
+func unlinkat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr) error {
path, err := copyInPath(t, pathAddr)
if err != nil {
return err
@@ -313,7 +314,7 @@ func Symlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return 0, nil, symlinkat(t, targetAddr, newdirfd, linkpathAddr)
}
-func symlinkat(t *kernel.Task, targetAddr usermem.Addr, newdirfd int32, linkpathAddr usermem.Addr) error {
+func symlinkat(t *kernel.Task, targetAddr hostarch.Addr, newdirfd int32, linkpathAddr hostarch.Addr) error {
target, err := t.CopyInString(targetAddr, linux.PATH_MAX)
if err != nil {
return err
diff --git a/pkg/sentry/syscalls/linux/vfs2/getdents.go b/pkg/sentry/syscalls/linux/vfs2/getdents.go
index 5517595b5..b41a3056a 100644
--- a/pkg/sentry/syscalls/linux/vfs2/getdents.go
+++ b/pkg/sentry/syscalls/linux/vfs2/getdents.go
@@ -22,7 +22,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Getdents implements Linux syscall getdents(2).
@@ -58,7 +59,7 @@ func getdents(t *kernel.Task, args arch.SyscallArguments, isGetdents64 bool) (ui
type getdentsCallback struct {
t *kernel.Task
- addr usermem.Addr
+ addr hostarch.Addr
remaining int
isGetdents64 bool
}
@@ -69,7 +70,7 @@ var getdentsCallbackPool = sync.Pool{
},
}
-func getGetdentsCallback(t *kernel.Task, addr usermem.Addr, size int, isGetdents64 bool) *getdentsCallback {
+func getGetdentsCallback(t *kernel.Task, addr hostarch.Addr, size int, isGetdents64 bool) *getdentsCallback {
cb := getdentsCallbackPool.Get().(*getdentsCallback)
*cb = getdentsCallback{
t: t,
@@ -102,9 +103,9 @@ func (cb *getdentsCallback) Handle(dirent vfs.Dirent) error {
return syserror.EINVAL
}
buf = cb.t.CopyScratchBuffer(size)
- usermem.ByteOrder.PutUint64(buf[0:8], dirent.Ino)
- usermem.ByteOrder.PutUint64(buf[8:16], uint64(dirent.NextOff))
- usermem.ByteOrder.PutUint16(buf[16:18], uint16(size))
+ hostarch.ByteOrder.PutUint64(buf[0:8], dirent.Ino)
+ hostarch.ByteOrder.PutUint64(buf[8:16], uint64(dirent.NextOff))
+ hostarch.ByteOrder.PutUint16(buf[16:18], uint16(size))
buf[18] = dirent.Type
copy(buf[19:], dirent.Name)
// Zero out all remaining bytes in buf, including the NUL terminator
@@ -136,9 +137,9 @@ func (cb *getdentsCallback) Handle(dirent vfs.Dirent) error {
return syserror.EINVAL
}
buf = cb.t.CopyScratchBuffer(size)
- usermem.ByteOrder.PutUint64(buf[0:8], dirent.Ino)
- usermem.ByteOrder.PutUint64(buf[8:16], uint64(dirent.NextOff))
- usermem.ByteOrder.PutUint16(buf[16:18], uint16(size))
+ hostarch.ByteOrder.PutUint64(buf[0:8], dirent.Ino)
+ hostarch.ByteOrder.PutUint64(buf[8:16], uint64(dirent.NextOff))
+ hostarch.ByteOrder.PutUint16(buf[16:18], uint16(size))
copy(buf[18:], dirent.Name)
// Zero out all remaining bytes in buf, including the NUL terminator
// after dirent.Name and the zero padding byte between the name and
@@ -155,7 +156,7 @@ func (cb *getdentsCallback) Handle(dirent vfs.Dirent) error {
// cb.remaining.
return err
}
- cb.addr += usermem.Addr(n)
+ cb.addr += hostarch.Addr(n)
cb.remaining -= n
return nil
}
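
Editor's note: the getdents callback above writes records with the getdents64 layout — d_ino (8 bytes), d_off (8), d_reclen (2), d_type (1), then the NUL-terminated name. The sketch below builds one such record; rounding the record length up to 8-byte alignment follows Linux's definition and is an assumption here, since the size computation is elided from the hunks above.

package main

import (
	"encoding/binary"
	"fmt"
)

// direntRecord lays out a single linux_dirent64-style record.
func direntRecord(ino uint64, off int64, typ uint8, name string) []byte {
	size := 8 + 8 + 2 + 1 + len(name) + 1 // fixed header + name + NUL
	size = (size + 7) &^ 7                // align to 8 bytes (assumed, per Linux)
	buf := make([]byte, size)             // trailing padding stays zeroed
	binary.LittleEndian.PutUint64(buf[0:8], ino)
	binary.LittleEndian.PutUint64(buf[8:16], uint64(off))
	binary.LittleEndian.PutUint16(buf[16:18], uint16(size))
	buf[18] = typ
	copy(buf[19:], name)
	return buf
}

func main() {
	rec := direntRecord(42, 1, 4 /* DT_DIR */, "subdir")
	fmt.Println(len(rec), rec[16:18]) // 32 [32 0]
}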
diff --git a/pkg/sentry/syscalls/linux/vfs2/mmap.go b/pkg/sentry/syscalls/linux/vfs2/mmap.go
index 9d9dbf775..c961545f6 100644
--- a/pkg/sentry/syscalls/linux/vfs2/mmap.go
+++ b/pkg/sentry/syscalls/linux/vfs2/mmap.go
@@ -21,7 +21,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Mmap implements Linux syscall mmap(2).
@@ -48,12 +49,12 @@ func Mmap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
Unmap: fixed,
Map32Bit: map32bit,
Private: private,
- Perms: usermem.AccessType{
+ Perms: hostarch.AccessType{
Read: linux.PROT_READ&prot != 0,
Write: linux.PROT_WRITE&prot != 0,
Execute: linux.PROT_EXEC&prot != 0,
},
- MaxPerms: usermem.AnyAccess,
+ MaxPerms: hostarch.AnyAccess,
GrowsDown: linux.MAP_GROWSDOWN&flags != 0,
Precommit: linux.MAP_POPULATE&flags != 0,
}
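
Editor's note: the Mmap hunk above translates the PROT_* bits of the prot argument into a read/write/execute triple. The minimal sketch below shows that decoding on its own; the accessType struct is a stand-in for the package's permissions type, not the real one.

package main

import "fmt"

// PROT_* values as defined by Linux.
const (
	protRead  = 0x1
	protWrite = 0x2
	protExec  = 0x4
)

// accessType mirrors the Read/Write/Execute triple that Perms is built from.
type accessType struct {
	Read, Write, Execute bool
}

func protToAccess(prot int) accessType {
	return accessType{
		Read:    prot&protRead != 0,
		Write:   prot&protWrite != 0,
		Execute: prot&protExec != 0,
	}
}

func main() {
	fmt.Printf("%+v\n", protToAccess(protRead|protWrite)) // {Read:true Write:true Execute:false}
}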
diff --git a/pkg/sentry/syscalls/linux/vfs2/mount.go b/pkg/sentry/syscalls/linux/vfs2/mount.go
index 769c9b92f..dd93430e2 100644
--- a/pkg/sentry/syscalls/linux/vfs2/mount.go
+++ b/pkg/sentry/syscalls/linux/vfs2/mount.go
@@ -20,7 +20,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Mount implements Linux syscall mount(2).
@@ -33,11 +34,11 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// For null-terminated strings related to mount(2), Linux copies in at most
// a page worth of data. See fs/namespace.c:copy_mount_string().
- fsType, err := t.CopyInString(typeAddr, usermem.PageSize)
+ fsType, err := t.CopyInString(typeAddr, hostarch.PageSize)
if err != nil {
return 0, nil, err
}
- source, err := t.CopyInString(sourceAddr, usermem.PageSize)
+ source, err := t.CopyInString(sourceAddr, hostarch.PageSize)
if err != nil {
return 0, nil, err
}
@@ -53,7 +54,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// character placement, and the address is passed to each file system.
// Most file systems always treat this data as a string, though, and so
// do all of the ones we implement.
- data, err = t.CopyInString(dataAddr, usermem.PageSize)
+ data, err = t.CopyInString(dataAddr, hostarch.PageSize)
if err != nil {
return 0, nil, err
}
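
Editor's note: the Mount hunk above caps each null-terminated string at one page, matching fs/namespace.c:copy_mount_string(). The sketch below shows a bounded NUL-terminated copy over a plain byte slice; the 4096-byte page size and the choice of ENAMETOOLONG for an unterminated string are assumptions for illustration, not necessarily the errno the real CopyInString reports.

package main

import (
	"errors"
	"fmt"
)

// copyInString reads a NUL-terminated string out of a buffer standing in for
// user memory, refusing anything longer than maxLen.
func copyInString(userMem []byte, maxLen int) (string, error) {
	limit := maxLen
	if len(userMem) < limit {
		limit = len(userMem)
	}
	for i := 0; i < limit; i++ {
		if userMem[i] == 0 {
			return string(userMem[:i]), nil
		}
	}
	return "", errors.New("ENAMETOOLONG") // assumed errno for illustration
}

func main() {
	mem := append([]byte("tmpfs"), 0)
	s, err := copyInString(mem, 4096)
	fmt.Println(s, err) // tmpfs <nil>
}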
diff --git a/pkg/sentry/syscalls/linux/vfs2/path.go b/pkg/sentry/syscalls/linux/vfs2/path.go
index 90a511d9a..2aaf1ed74 100644
--- a/pkg/sentry/syscalls/linux/vfs2/path.go
+++ b/pkg/sentry/syscalls/linux/vfs2/path.go
@@ -20,10 +20,11 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
-func copyInPath(t *kernel.Task, addr usermem.Addr) (fspath.Path, error) {
+func copyInPath(t *kernel.Task, addr hostarch.Addr) (fspath.Path, error) {
pathname, err := t.CopyInString(addr, linux.PATH_MAX)
if err != nil {
return fspath.Path{}, err
diff --git a/pkg/sentry/syscalls/linux/vfs2/pipe.go b/pkg/sentry/syscalls/linux/vfs2/pipe.go
index 6986e39fe..c6fc1954c 100644
--- a/pkg/sentry/syscalls/linux/vfs2/pipe.go
+++ b/pkg/sentry/syscalls/linux/vfs2/pipe.go
@@ -22,7 +22,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Pipe implements Linux syscall pipe(2).
@@ -38,7 +39,7 @@ func Pipe2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, pipe2(t, addr, flags)
}
-func pipe2(t *kernel.Task, addr usermem.Addr, flags int32) error {
+func pipe2(t *kernel.Task, addr hostarch.Addr, flags int32) error {
if flags&^(linux.O_NONBLOCK|linux.O_CLOEXEC) != 0 {
return syserror.EINVAL
}
diff --git a/pkg/sentry/syscalls/linux/vfs2/poll.go b/pkg/sentry/syscalls/linux/vfs2/poll.go
index c22e4ce54..a69c80edd 100644
--- a/pkg/sentry/syscalls/linux/vfs2/poll.go
+++ b/pkg/sentry/syscalls/linux/vfs2/poll.go
@@ -25,8 +25,9 @@ import (
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// fileCap is the maximum allowable files for poll & select. This has no
@@ -158,7 +159,7 @@ func pollBlock(t *kernel.Task, pfd []linux.PollFD, timeout time.Duration) (time.
}
// copyInPollFDs copies an array of struct pollfd unless nfds exceeds the max.
-func copyInPollFDs(t *kernel.Task, addr usermem.Addr, nfds uint) ([]linux.PollFD, error) {
+func copyInPollFDs(t *kernel.Task, addr hostarch.Addr, nfds uint) ([]linux.PollFD, error) {
if uint64(nfds) > t.ThreadGroup().Limits().GetCapped(limits.NumberOfFiles, fileCap) {
return nil, syserror.EINVAL
}
@@ -173,7 +174,7 @@ func copyInPollFDs(t *kernel.Task, addr usermem.Addr, nfds uint) ([]linux.PollFD
return pfd, nil
}
-func doPoll(t *kernel.Task, addr usermem.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) {
+func doPoll(t *kernel.Task, addr hostarch.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) {
pfd, err := copyInPollFDs(t, addr, nfds)
if err != nil {
return timeout, 0, err
@@ -201,7 +202,7 @@ func doPoll(t *kernel.Task, addr usermem.Addr, nfds uint, timeout time.Duration)
}
// CopyInFDSet copies an fd set from select(2)/pselect(2).
-func CopyInFDSet(t *kernel.Task, addr usermem.Addr, nBytes, nBitsInLastPartialByte int) ([]byte, error) {
+func CopyInFDSet(t *kernel.Task, addr hostarch.Addr, nBytes, nBitsInLastPartialByte int) ([]byte, error) {
set := make([]byte, nBytes)
if addr != 0 {
@@ -218,7 +219,7 @@ func CopyInFDSet(t *kernel.Task, addr usermem.Addr, nBytes, nBitsInLastPartialBy
return set, nil
}
-func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs usermem.Addr, timeout time.Duration) (uintptr, error) {
+func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs hostarch.Addr, timeout time.Duration) (uintptr, error) {
if nfds < 0 || nfds > fileCap {
return 0, syserror.EINVAL
}
@@ -368,7 +369,7 @@ func timeoutRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration)
// copyOutTimespecRemaining copies the time remaining in timeout to timespecAddr.
//
// startNs must be from CLOCK_MONOTONIC.
-func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timespecAddr usermem.Addr) error {
+func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timespecAddr hostarch.Addr) error {
if timeout <= 0 {
return nil
}
@@ -381,7 +382,7 @@ func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.D
// copyOutTimevalRemaining copies the time remaining in timeout to timevalAddr.
//
// startNs must be from CLOCK_MONOTONIC.
-func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timevalAddr usermem.Addr) error {
+func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timevalAddr hostarch.Addr) error {
if timeout <= 0 {
return nil
}
@@ -396,7 +397,7 @@ func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Du
//
// +stateify savable
type pollRestartBlock struct {
- pfdAddr usermem.Addr
+ pfdAddr hostarch.Addr
nfds uint
timeout time.Duration
}
@@ -406,7 +407,7 @@ func (p *pollRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
return poll(t, p.pfdAddr, p.nfds, p.timeout)
}
-func poll(t *kernel.Task, pfdAddr usermem.Addr, nfds uint, timeout time.Duration) (uintptr, error) {
+func poll(t *kernel.Task, pfdAddr hostarch.Addr, nfds uint, timeout time.Duration) (uintptr, error) {
remainingTimeout, n, err := doPoll(t, pfdAddr, nfds, timeout)
// On an interrupt poll(2) is restarted with the remaining timeout.
if err == syserror.EINTR {
@@ -530,7 +531,7 @@ func Pselect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
if _, err := maskStruct.CopyIn(t, maskWithSizeAddr); err != nil {
return 0, nil, err
}
- if err := setTempSignalSet(t, usermem.Addr(maskStruct.sigsetAddr), uint(maskStruct.sizeofSigset)); err != nil {
+ if err := setTempSignalSet(t, hostarch.Addr(maskStruct.sigsetAddr), uint(maskStruct.sizeofSigset)); err != nil {
return 0, nil, err
}
}
@@ -551,7 +552,7 @@ func Pselect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// returned value is the maximum that Duration will allow.
//
// If timespecAddr is NULL, the returned value is negative.
-func copyTimespecInToDuration(t *kernel.Task, timespecAddr usermem.Addr) (time.Duration, error) {
+func copyTimespecInToDuration(t *kernel.Task, timespecAddr hostarch.Addr) (time.Duration, error) {
// Use a negative Duration to indicate "no timeout".
timeout := time.Duration(-1)
if timespecAddr != 0 {
@@ -567,7 +568,7 @@ func copyTimespecInToDuration(t *kernel.Task, timespecAddr usermem.Addr) (time.D
return timeout, nil
}
-func setTempSignalSet(t *kernel.Task, maskAddr usermem.Addr, maskSize uint) error {
+func setTempSignalSet(t *kernel.Task, maskAddr hostarch.Addr, maskSize uint) error {
if maskAddr == 0 {
return nil
}
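
Editor's note: CopyInFDSet above takes the byte count and the number of valid bits in the last partial byte, because nfds rarely lands on a byte boundary. The sketch below shows the sizing math and one way to mask off the bits past nfds; the masking detail is not visible in the hunks above, so treat it as an illustration rather than the implementation.

package main

import "fmt"

// maskFDSet clears any bits beyond nfds in the final partial byte of a
// select(2)-style descriptor bitmap.
func maskFDSet(set []byte, nfds int) {
	nBitsInLastPartialByte := nfds % 8
	if nBitsInLastPartialByte != 0 {
		set[len(set)-1] &^= byte(0xff) << nBitsInLastPartialByte
	}
}

func main() {
	nfds := 10
	nBytes := (nfds + 7) / 8 // 2 bytes cover 10 descriptors
	set := make([]byte, nBytes)
	for i := range set {
		set[i] = 0xff
	}
	maskFDSet(set, nfds)
	fmt.Printf("%08b\n", set) // [11111111 00000011]
}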
diff --git a/pkg/sentry/syscalls/linux/vfs2/setstat.go b/pkg/sentry/syscalls/linux/vfs2/setstat.go
index 903169dc2..c6330c21a 100644
--- a/pkg/sentry/syscalls/linux/vfs2/setstat.go
+++ b/pkg/sentry/syscalls/linux/vfs2/setstat.go
@@ -23,7 +23,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
const chmodMask = 0777 | linux.S_ISUID | linux.S_ISGID | linux.S_ISVTX
@@ -43,7 +44,7 @@ func Fchmodat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return 0, nil, fchmodat(t, dirfd, pathAddr, mode)
}
-func fchmodat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr, mode uint) error {
+func fchmodat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, mode uint) error {
path, err := copyInPath(t, pathAddr)
if err != nil {
return err
@@ -102,7 +103,7 @@ func Fchownat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return 0, nil, fchownat(t, dirfd, pathAddr, owner, group, flags)
}
-func fchownat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr, owner, group, flags int32) error {
+func fchownat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, owner, group, flags int32) error {
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 {
return syserror.EINVAL
}
@@ -327,7 +328,7 @@ func Futimesat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return 0, nil, setstatat(t, dirfd, path, shouldAllowEmptyPath, followFinalSymlink, &opts)
}
-func populateSetStatOptionsForUtimes(t *kernel.Task, timesAddr usermem.Addr, opts *vfs.SetStatOptions) error {
+func populateSetStatOptionsForUtimes(t *kernel.Task, timesAddr hostarch.Addr, opts *vfs.SetStatOptions) error {
if timesAddr == 0 {
opts.Stat.Mask = linux.STATX_ATIME | linux.STATX_MTIME
opts.Stat.Atime.Nsec = linux.UTIME_NOW
@@ -391,7 +392,7 @@ func Utimensat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return 0, nil, setstatat(t, dirfd, path, shouldAllowEmptyPath, shouldFollowFinalSymlink(flags&linux.AT_SYMLINK_NOFOLLOW == 0), &opts)
}
-func populateSetStatOptionsForUtimens(t *kernel.Task, timesAddr usermem.Addr, opts *vfs.SetStatOptions) error {
+func populateSetStatOptionsForUtimens(t *kernel.Task, timesAddr hostarch.Addr, opts *vfs.SetStatOptions) error {
if timesAddr == 0 {
opts.Stat.Mask = linux.STATX_ATIME | linux.STATX_MTIME
opts.Stat.Atime.Nsec = linux.UTIME_NOW
diff --git a/pkg/sentry/syscalls/linux/vfs2/signal.go b/pkg/sentry/syscalls/linux/vfs2/signal.go
index b89f34cdb..6163da103 100644
--- a/pkg/sentry/syscalls/linux/vfs2/signal.go
+++ b/pkg/sentry/syscalls/linux/vfs2/signal.go
@@ -21,11 +21,12 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// sharedSignalfd is shared between the two calls.
-func sharedSignalfd(t *kernel.Task, fd int32, sigset usermem.Addr, sigsetsize uint, flags int32) (uintptr, *kernel.SyscallControl, error) {
+func sharedSignalfd(t *kernel.Task, fd int32, sigset hostarch.Addr, sigsetsize uint, flags int32) (uintptr, *kernel.SyscallControl, error) {
// Copy in the signal mask.
mask, err := slinux.CopyInSigSet(t, sigset, sigsetsize)
if err != nil {
diff --git a/pkg/sentry/syscalls/linux/vfs2/socket.go b/pkg/sentry/syscalls/linux/vfs2/socket.go
index 346fd1cea..a87a66146 100644
--- a/pkg/sentry/syscalls/linux/vfs2/socket.go
+++ b/pkg/sentry/syscalls/linux/vfs2/socket.go
@@ -31,6 +31,8 @@ import (
"gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// minListenBacklog is the minimum reasonable backlog for listening sockets.
@@ -116,7 +118,7 @@ type multipleMessageHeader64 struct {
// CaptureAddress allocates memory for and copies a socket address structure
// from the untrusted address space range.
-func CaptureAddress(t *kernel.Task, addr usermem.Addr, addrlen uint32) ([]byte, error) {
+func CaptureAddress(t *kernel.Task, addr hostarch.Addr, addrlen uint32) ([]byte, error) {
if addrlen > maxAddrLen {
return nil, syserror.EINVAL
}
@@ -132,7 +134,7 @@ func CaptureAddress(t *kernel.Task, addr usermem.Addr, addrlen uint32) ([]byte,
// writeAddress writes a sockaddr structure and its length to an output buffer
// in the untrusted address space range. If the address is bigger than the
// buffer, it is truncated.
-func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr usermem.Addr, addrLenPtr usermem.Addr) error {
+func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr hostarch.Addr, addrLenPtr hostarch.Addr) error {
// Get the buffer length.
var bufLen uint32
if _, err := primitive.CopyUint32In(t, addrLenPtr, &bufLen); err != nil {
@@ -279,7 +281,7 @@ func Connect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// accept is the implementation of the accept syscall. It is called by accept
// and accept4 syscall handlers.
-func accept(t *kernel.Task, fd int32, addr usermem.Addr, addrLen usermem.Addr, flags int) (uintptr, error) {
+func accept(t *kernel.Task, fd int32, addr hostarch.Addr, addrLen hostarch.Addr, flags int) (uintptr, error) {
// Check that no unsupported flags are passed in.
if flags & ^(linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
return 0, syserror.EINVAL
@@ -475,7 +477,7 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// getSockOpt tries to handle common socket options, or dispatches to a specific
// socket implementation.
-func getSockOpt(t *kernel.Task, s socket.SocketVFS2, level, name int, optValAddr usermem.Addr, len int) (marshal.Marshallable, *syserr.Error) {
+func getSockOpt(t *kernel.Task, s socket.SocketVFS2, level, name int, optValAddr hostarch.Addr, len int) (marshal.Marshallable, *syserr.Error) {
if level == linux.SOL_SOCKET {
switch name {
case linux.SO_TYPE, linux.SO_DOMAIN, linux.SO_PROTOCOL:
@@ -738,7 +740,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return uintptr(count), nil, nil
}
-func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) {
+func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr hostarch.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) {
// Capture the message header and io vectors.
var msg MessageHeader64
if _, err := msg.CopyIn(t, msgPtr); err != nil {
@@ -748,7 +750,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, fla
if msg.IovLen > linux.UIO_MAXIOV {
return 0, syserror.EMSGSIZE
}
- dst, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
+ dst, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
AddressSpaceActive: true,
})
if err != nil {
@@ -799,7 +801,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, fla
// Copy the address to the caller.
if msg.NameLen != 0 {
- if err := writeAddress(t, sender, senderLen, usermem.Addr(msg.Name), usermem.Addr(msgPtr+nameLenOffset)); err != nil {
+ if err := writeAddress(t, sender, senderLen, hostarch.Addr(msg.Name), hostarch.Addr(msgPtr+nameLenOffset)); err != nil {
return 0, err
}
}
@@ -809,7 +811,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, fla
return 0, err
}
if len(controlData) > 0 {
- if _, err := t.CopyOutBytes(usermem.Addr(msg.Control), controlData); err != nil {
+ if _, err := t.CopyOutBytes(hostarch.Addr(msg.Control), controlData); err != nil {
return 0, err
}
}
@@ -824,7 +826,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, fla
// recvFrom is the implementation of the recvfrom syscall. It is called by
// recvfrom and recv syscall handlers.
-func recvFrom(t *kernel.Task, fd int32, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLenPtr usermem.Addr) (uintptr, error) {
+func recvFrom(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLenPtr hostarch.Addr) (uintptr, error) {
if int(bufLen) < 0 {
return 0, syserror.EINVAL
}
@@ -1000,7 +1002,7 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return uintptr(count), nil, nil
}
-func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescription, msgPtr usermem.Addr, flags int32) (uintptr, error) {
+func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescription, msgPtr hostarch.Addr, flags int32) (uintptr, error) {
// Capture the message header.
var msg MessageHeader64
if _, err := msg.CopyIn(t, msgPtr); err != nil {
@@ -1014,7 +1016,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio
return 0, syserror.ENOBUFS
}
controlData = make([]byte, msg.ControlLen)
- if _, err := t.CopyInBytes(usermem.Addr(msg.Control), controlData); err != nil {
+ if _, err := t.CopyInBytes(hostarch.Addr(msg.Control), controlData); err != nil {
return 0, err
}
}
@@ -1023,7 +1025,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio
var to []byte
if msg.NameLen != 0 {
var err error
- to, err = CaptureAddress(t, usermem.Addr(msg.Name), msg.NameLen)
+ to, err = CaptureAddress(t, hostarch.Addr(msg.Name), msg.NameLen)
if err != nil {
return 0, err
}
@@ -1033,7 +1035,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio
if msg.IovLen > linux.UIO_MAXIOV {
return 0, syserror.EMSGSIZE
}
- src, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
+ src, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
AddressSpaceActive: true,
})
if err != nil {
@@ -1067,7 +1069,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio
// sendTo is the implementation of the sendto syscall. It is called by sendto
// and send syscall handlers.
-func sendTo(t *kernel.Task, fd int32, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLen uint32) (uintptr, error) {
+func sendTo(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLen uint32) (uintptr, error) {
bl := int(bufLen)
if bl < 0 {
return 0, syserror.EINVAL
diff --git a/pkg/sentry/syscalls/linux/vfs2/stat.go b/pkg/sentry/syscalls/linux/vfs2/stat.go
index 0f5d5189c..69e77fa99 100644
--- a/pkg/sentry/syscalls/linux/vfs2/stat.go
+++ b/pkg/sentry/syscalls/linux/vfs2/stat.go
@@ -24,7 +24,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Stat implements Linux syscall stat(2).
@@ -50,7 +51,7 @@ func Newfstatat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
return 0, nil, fstatat(t, dirfd, pathAddr, statAddr, flags)
}
-func fstatat(t *kernel.Task, dirfd int32, pathAddr, statAddr usermem.Addr, flags int32) error {
+func fstatat(t *kernel.Task, dirfd int32, pathAddr, statAddr hostarch.Addr, flags int32) error {
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 {
return syserror.EINVAL
}
@@ -264,7 +265,7 @@ func Faccessat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return 0, nil, accessAt(t, dirfd, addr, mode)
}
-func accessAt(t *kernel.Task, dirfd int32, pathAddr usermem.Addr, mode uint) error {
+func accessAt(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, mode uint) error {
const rOK = 4
const wOK = 2
const xOK = 1
@@ -312,7 +313,7 @@ func Readlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
return readlinkat(t, dirfd, pathAddr, bufAddr, size)
}
-func readlinkat(t *kernel.Task, dirfd int32, pathAddr, bufAddr usermem.Addr, size uint) (uintptr, *kernel.SyscallControl, error) {
+func readlinkat(t *kernel.Task, dirfd int32, pathAddr, bufAddr hostarch.Addr, size uint) (uintptr, *kernel.SyscallControl, error) {
if int(size) <= 0 {
return 0, nil, syserror.EINVAL
}
diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
index 8f217bc36..d9e2dd3d9 100644
--- a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
+++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
@@ -10,9 +10,9 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -31,17 +31,17 @@ func (s *sigSetWithSize) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *sigSetWithSize) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.sigsetAddr))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.sigsetAddr))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.sizeofSigset))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.sizeofSigset))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *sigSetWithSize) UnmarshalBytes(src []byte) {
- s.sigsetAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.sigsetAddr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.sizeofSigset = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.sizeofSigset = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -63,7 +63,7 @@ func (s *sigSetWithSize) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *sigSetWithSize) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *sigSetWithSize) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -80,13 +80,13 @@ func (s *sigSetWithSize) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *sigSetWithSize) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *sigSetWithSize) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *sigSetWithSize) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *sigSetWithSize) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -124,21 +124,21 @@ func (m *MessageHeader64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (m *MessageHeader64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
@@ -146,21 +146,21 @@ func (m *MessageHeader64) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (m *MessageHeader64) UnmarshalBytes(src []byte) {
- m.Name = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.Name = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ m.NameLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.Iov = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.IovLen = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.Control = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.Control = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.ControlLen = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.Flags = int32(usermem.ByteOrder.Uint32(src[:4]))
+ m.Flags = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -184,7 +184,7 @@ func (m *MessageHeader64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -201,13 +201,13 @@ func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -248,7 +248,7 @@ func (m *multipleMessageHeader64) SizeBytes() int {
func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()])
dst = dst[m.msgHdr.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
@@ -258,7 +258,7 @@ func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()])
src = src[m.msgHdr.SizeBytes():]
- m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ m.msgLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -292,7 +292,7 @@ func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !m.msgHdr.Packed() {
// Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
@@ -316,13 +316,13 @@ func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !m.msgHdr.Packed() {
// Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
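The generated MarshalBytes/UnmarshalBytes methods above all follow the same fixed-offset pattern: each field is encoded at a known offset in the target byte order, with explicit padding slots so the Go view matches the 56-byte C layout. Below is a minimal standalone sketch of that pattern, mirroring the MessageHeader64 fields from the hunks above; encoding/binary.LittleEndian stands in for hostarch.ByteOrder, and the lower-case names are illustrative only, not the generated API.

// Sketch only: mirrors the MessageHeader64 wire layout seen in the diff above.
package main

import (
	"encoding/binary"
	"fmt"
)

type messageHeader64 struct {
	Name       uint64
	NameLen    uint32
	_          uint32 // padding, matches the generated "// Padding:" slot
	Iov        uint64
	IovLen     uint64
	Control    uint64
	ControlLen uint64
	Flags      int32
	_          int32 // trailing padding
}

const sizeofMessageHeader64 = 56

func (m *messageHeader64) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[0:8], m.Name)
	binary.LittleEndian.PutUint32(dst[8:12], m.NameLen)
	// dst[12:16] is padding and left zeroed.
	binary.LittleEndian.PutUint64(dst[16:24], m.Iov)
	binary.LittleEndian.PutUint64(dst[24:32], m.IovLen)
	binary.LittleEndian.PutUint64(dst[32:40], m.Control)
	binary.LittleEndian.PutUint64(dst[40:48], m.ControlLen)
	binary.LittleEndian.PutUint32(dst[48:52], uint32(m.Flags))
	// dst[52:56] is padding and left zeroed.
}

func (m *messageHeader64) unmarshalBytes(src []byte) {
	m.Name = binary.LittleEndian.Uint64(src[0:8])
	m.NameLen = binary.LittleEndian.Uint32(src[8:12])
	// src[12:16] is padding and skipped.
	m.Iov = binary.LittleEndian.Uint64(src[16:24])
	m.IovLen = binary.LittleEndian.Uint64(src[24:32])
	m.Control = binary.LittleEndian.Uint64(src[32:40])
	m.ControlLen = binary.LittleEndian.Uint64(src[40:48])
	m.Flags = int32(binary.LittleEndian.Uint32(src[48:52]))
	// src[52:56] is padding and skipped.
}

func main() {
	in := messageHeader64{Name: 1, NameLen: 16, Iov: 0xdead, IovLen: 2, Flags: 1}
	buf := make([]byte, sizeofMessageHeader64)
	in.marshalBytes(buf)
	var out messageHeader64
	out.unmarshalBytes(buf)
	fmt.Printf("round trip ok: %v\n", in == out)
}

A round trip through marshalBytes and unmarshalBytes reproduces the original value; the padding bytes are simply skipped on both sides, which is exactly what the generated code's "// Padding:" comments indicate.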
diff --git a/pkg/sentry/syscalls/linux/vfs2/xattr.go b/pkg/sentry/syscalls/linux/vfs2/xattr.go
index e05723ef9..c261050c6 100644
--- a/pkg/sentry/syscalls/linux/vfs2/xattr.go
+++ b/pkg/sentry/syscalls/linux/vfs2/xattr.go
@@ -23,7 +23,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// ListXattr implements Linux syscall listxattr(2).
@@ -291,7 +292,7 @@ func Fremovexattr(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
return 0, nil, file.RemoveXattr(t, name)
}
-func copyInXattrName(t *kernel.Task, nameAddr usermem.Addr) (string, error) {
+func copyInXattrName(t *kernel.Task, nameAddr hostarch.Addr) (string, error) {
name, err := t.CopyInString(nameAddr, linux.XATTR_NAME_MAX+1)
if err != nil {
if err == syserror.ENAMETOOLONG {
@@ -305,7 +306,7 @@ func copyInXattrName(t *kernel.Task, nameAddr usermem.Addr) (string, error) {
return name, nil
}
-func copyOutXattrNameList(t *kernel.Task, listAddr usermem.Addr, size uint, names []string) (int, error) {
+func copyOutXattrNameList(t *kernel.Task, listAddr hostarch.Addr, size uint, names []string) (int, error) {
if size > linux.XATTR_LIST_MAX {
size = linux.XATTR_LIST_MAX
}
@@ -327,7 +328,7 @@ func copyOutXattrNameList(t *kernel.Task, listAddr usermem.Addr, size uint, name
return t.CopyOutBytes(listAddr, buf.Bytes())
}
-func copyInXattrValue(t *kernel.Task, valueAddr usermem.Addr, size uint) (string, error) {
+func copyInXattrValue(t *kernel.Task, valueAddr hostarch.Addr, size uint) (string, error) {
if size > linux.XATTR_SIZE_MAX {
return "", syserror.E2BIG
}
@@ -338,7 +339,7 @@ func copyInXattrValue(t *kernel.Task, valueAddr usermem.Addr, size uint) (string
return gohacks.StringFromImmutableBytes(buf), nil
}
-func copyOutXattrValue(t *kernel.Task, valueAddr usermem.Addr, size uint, value string) (int, error) {
+func copyOutXattrValue(t *kernel.Task, valueAddr hostarch.Addr, size uint, value string) (int, error) {
if size > linux.XATTR_SIZE_MAX {
size = linux.XATTR_SIZE_MAX
}
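The xattr helpers being converted above share one more pattern worth noting: before touching user memory, each copy-out clamps the caller-supplied size to the corresponding Linux limit (XATTR_LIST_MAX or XATTR_SIZE_MAX). Here is a minimal standalone sketch of that clamp-then-copy shape; xattrSizeMax and copyOutBytes are stand-ins for illustration, not the sentry's API.

// Sketch only: the clamp-then-copy shape of copyOutXattrValue /
// copyOutXattrNameList shown in the diff above.
package main

import "fmt"

const xattrSizeMax = 65536 // assumed cap, analogous to linux.XATTR_SIZE_MAX

// copyOutBytes stands in for writing to user memory at a hostarch.Addr; here
// it just copies into a local buffer and reports how many bytes were written.
func copyOutBytes(dst, src []byte) (int, error) {
	return copy(dst, src), nil
}

// copyOutValue clamps the caller-supplied size to the cap before copying,
// mirroring the shape of the helpers in the diff.
func copyOutValue(userBuf []byte, size uint, value string) (int, error) {
	if size > xattrSizeMax {
		size = xattrSizeMax
	}
	if uint(len(userBuf)) > size {
		userBuf = userBuf[:size]
	}
	return copyOutBytes(userBuf, []byte(value))
}

func main() {
	buf := make([]byte, 64)
	n, _ := copyOutValue(buf, 4, "security.selinux")
	fmt.Println(n, string(buf[:n])) // prints: 4 secu
}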