author    gVisor bot <gvisor-bot@google.com>  2021-03-29 20:35:44 +0000
committer gVisor bot <gvisor-bot@google.com>  2021-03-29 20:35:44 +0000
commit    08cc017c088017546ed712cce700bf4374c864c0 (patch)
tree      af024e69d8855f4f867ef435ced35532b368a981
parent    6a422755602daeaef4be60969c1acddc8b7b3041 (diff)
parent    8a2f7e716dcc62f04d2808e8ade34941c94fc956 (diff)
Merge release-20210322.0-29-g8a2f7e716 (automated)
-rw-r--r--  pkg/abi/linux/linux_abi_autogen_unsafe.go  2034
-rw-r--r--  pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go  206
-rw-r--r--  pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go  106
-rw-r--r--  pkg/coverage/coverage.go  4
-rw-r--r--  pkg/hostarch/access_type.go (renamed from pkg/usermem/access_type.go)  2
-rw-r--r--  pkg/hostarch/addr.go (renamed from pkg/usermem/addr.go)  4
-rw-r--r--  pkg/hostarch/addr_range.go (renamed from pkg/usermem/addr_range.go)  2
-rw-r--r--  pkg/hostarch/addr_range_seq_unsafe.go (renamed from pkg/usermem/addr_range_seq_unsafe.go)  2
-rw-r--r--  pkg/hostarch/hostarch.go  7
-rw-r--r--  pkg/hostarch/hostarch_arm64.go (renamed from pkg/usermem/usermem_arm64.go)  2
-rw-r--r--  pkg/hostarch/hostarch_arm64_state_autogen.go (renamed from pkg/usermem/usermem_arm64_state_autogen.go)  2
-rw-r--r--  pkg/hostarch/hostarch_state_autogen.go  80
-rw-r--r--  pkg/hostarch/hostarch_unsafe_state_autogen.go  3
-rw-r--r--  pkg/hostarch/hostarch_x86.go (renamed from pkg/usermem/usermem_x86.go)  2
-rw-r--r--  pkg/hostarch/hostarch_x86_state_autogen.go (renamed from pkg/usermem/usermem_x86_state_autogen.go)  2
-rw-r--r--  pkg/marshal/marshal.go  20
-rw-r--r--  pkg/marshal/marshal_impl_util.go  8
-rw-r--r--  pkg/marshal/primitive/primitive.go  53
-rw-r--r--  pkg/marshal/primitive/primitive_abi_autogen_unsafe.go  106
-rw-r--r--  pkg/ring0/defs_impl_amd64.go  4
-rw-r--r--  pkg/ring0/defs_impl_arm64.go  4
-rw-r--r--  pkg/ring0/kernel_amd64.go  16
-rw-r--r--  pkg/ring0/pagetables/allocator_unsafe.go  10
-rw-r--r--  pkg/ring0/pagetables/pagetables.go  18
-rw-r--r--  pkg/ring0/pagetables/pagetables_aarch64.go  6
-rw-r--r--  pkg/ring0/pagetables/pagetables_x86.go  6
-rw-r--r--  pkg/sentry/arch/arch.go  20
-rw-r--r--  pkg/sentry/arch/arch_abi_autogen_unsafe.go  56
-rw-r--r--  pkg/sentry/arch/arch_amd64.go  32
-rw-r--r--  pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go  130
-rw-r--r--  pkg/sentry/arch/arch_arm64.go  28
-rw-r--r--  pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go  74
-rw-r--r--  pkg/sentry/arch/auxv.go  4
-rw-r--r--  pkg/sentry/arch/fpu/fpu_amd64.go  16
-rw-r--r--  pkg/sentry/arch/signal.go  50
-rw-r--r--  pkg/sentry/arch/signal_amd64.go  8
-rw-r--r--  pkg/sentry/arch/signal_arm64.go  6
-rw-r--r--  pkg/sentry/arch/signal_stack.go  10
-rw-r--r--  pkg/sentry/arch/stack.go  44
-rw-r--r--  pkg/sentry/arch/stack_unsafe.go  6
-rw-r--r--  pkg/sentry/devices/tundev/tundev.go  5
-rw-r--r--  pkg/sentry/fs/anon/anon.go  4
-rw-r--r--  pkg/sentry/fs/copy_up.go  3
-rw-r--r--  pkg/sentry/fs/dev/dev.go  11
-rw-r--r--  pkg/sentry/fs/dev/net_tun.go  5
-rw-r--r--  pkg/sentry/fs/fsutil/dirty_set.go  4
-rw-r--r--  pkg/sentry/fs/fsutil/file_range_set.go  8
-rw-r--r--  pkg/sentry/fs/fsutil/host_file_mapper.go  6
-rw-r--r--  pkg/sentry/fs/fsutil/host_mappable.go  13
-rw-r--r--  pkg/sentry/fs/fsutil/inode_cached.go  21
-rw-r--r--  pkg/sentry/fs/gofer/attr.go  4
-rw-r--r--  pkg/sentry/fs/inotify.go  3
-rw-r--r--  pkg/sentry/fs/inotify_event.go  9
-rw-r--r--  pkg/sentry/fs/offset.go  4
-rw-r--r--  pkg/sentry/fs/overlay.go  10
-rw-r--r--  pkg/sentry/fs/proc/exec_args.go  7
-rw-r--r--  pkg/sentry/fs/proc/inode.go  4
-rw-r--r--  pkg/sentry/fs/proc/meminfo.go  4
-rw-r--r--  pkg/sentry/fs/proc/net.go  20
-rw-r--r--  pkg/sentry/fs/proc/seqfile/seqfile.go  3
-rw-r--r--  pkg/sentry/fs/proc/sys_net.go  21
-rw-r--r--  pkg/sentry/fs/proc/task.go  13
-rw-r--r--  pkg/sentry/fs/proc/uid_gid_map.go  3
-rw-r--r--  pkg/sentry/fs/ramfs/tree.go  4
-rw-r--r--  pkg/sentry/fs/sys/sys.go  6
-rw-r--r--  pkg/sentry/fs/timerfd/timerfd.go  3
-rw-r--r--  pkg/sentry/fs/tmpfs/inode_file.go  27
-rw-r--r--  pkg/sentry/fs/tmpfs/tmpfs.go  16
-rw-r--r--  pkg/sentry/fs/tty/dir.go  3
-rw-r--r--  pkg/sentry/fsimpl/eventfd/eventfd.go  7
-rw-r--r--  pkg/sentry/fsimpl/fuse/read_write.go  20
-rw-r--r--  pkg/sentry/fsimpl/fuse/request_response.go  20
-rw-r--r--  pkg/sentry/fsimpl/gofer/directory.go  4
-rw-r--r--  pkg/sentry/fsimpl/gofer/gofer.go  8
-rw-r--r--  pkg/sentry/fsimpl/gofer/regular_file.go  29
-rw-r--r--  pkg/sentry/fsimpl/gofer/save_restore.go  4
-rw-r--r--  pkg/sentry/fsimpl/host/host.go  5
-rw-r--r--  pkg/sentry/fsimpl/host/save_restore.go  4
-rw-r--r--  pkg/sentry/fsimpl/kernfs/inode_impl_util.go  4
-rw-r--r--  pkg/sentry/fsimpl/kernfs/mmap_util.go  14
-rw-r--r--  pkg/sentry/fsimpl/overlay/copy_up.go  6
-rw-r--r--  pkg/sentry/fsimpl/overlay/regular_file.go  9
-rw-r--r--  pkg/sentry/fsimpl/pipefs/pipefs.go  4
-rw-r--r--  pkg/sentry/fsimpl/proc/task_files.go  31
-rw-r--r--  pkg/sentry/fsimpl/proc/task_net.go  20
-rw-r--r--  pkg/sentry/fsimpl/proc/tasks_files.go  4
-rw-r--r--  pkg/sentry/fsimpl/proc/tasks_sys.go  11
-rw-r--r--  pkg/sentry/fsimpl/proc/yama.go  3
-rw-r--r--  pkg/sentry/fsimpl/timerfd/timerfd.go  3
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/regular_file.go  25
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/tmpfs.go  14
-rw-r--r--  pkg/sentry/hostmm/hostmm.go  6
-rw-r--r--  pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go  26
-rw-r--r--  pkg/sentry/kernel/eventfd/eventfd.go  7
-rw-r--r--  pkg/sentry/kernel/futex/futex.go  48
-rw-r--r--  pkg/sentry/kernel/kcov.go  8
-rw-r--r--  pkg/sentry/kernel/kernel_abi_autogen_unsafe.go  50
-rw-r--r--  pkg/sentry/kernel/pipe/pipe.go  6
-rw-r--r--  pkg/sentry/kernel/pipe/vfs.go  21
-rw-r--r--  pkg/sentry/kernel/ptrace.go  11
-rw-r--r--  pkg/sentry/kernel/ptrace_amd64.go  3
-rw-r--r--  pkg/sentry/kernel/ptrace_arm64.go  4
-rw-r--r--  pkg/sentry/kernel/rseq.go  35
-rw-r--r--  pkg/sentry/kernel/seccomp.go  10
-rw-r--r--  pkg/sentry/kernel/shm/shm.go  30
-rw-r--r--  pkg/sentry/kernel/syscalls.go  8
-rw-r--r--  pkg/sentry/kernel/task.go  12
-rw-r--r--  pkg/sentry/kernel/task_clone.go  11
-rw-r--r--  pkg/sentry/kernel/task_futex.go  27
-rw-r--r--  pkg/sentry/kernel/task_image.go  4
-rw-r--r--  pkg/sentry/kernel/task_log.go  13
-rw-r--r--  pkg/sentry/kernel/task_run.go  8
-rw-r--r--  pkg/sentry/kernel/task_signals.go  16
-rw-r--r--  pkg/sentry/kernel/task_start.go  4
-rw-r--r--  pkg/sentry/kernel/task_syscall.go  12
-rw-r--r--  pkg/sentry/kernel/task_usermem.go  67
-rw-r--r--  pkg/sentry/kernel/vdso.go  4
-rw-r--r--  pkg/sentry/loader/elf.go  49
-rw-r--r--  pkg/sentry/loader/loader.go  11
-rw-r--r--  pkg/sentry/loader/vdso.go  25
-rw-r--r--  pkg/sentry/memmap/mapping_set.go  28
-rw-r--r--  pkg/sentry/memmap/memmap.go  38
-rw-r--r--  pkg/sentry/mm/address_space.go  10
-rw-r--r--  pkg/sentry/mm/aio_context.go  21
-rw-r--r--  pkg/sentry/mm/io.go  75
-rw-r--r--  pkg/sentry/mm/lifecycle.go  4
-rw-r--r--  pkg/sentry/mm/metadata.go  18
-rw-r--r--  pkg/sentry/mm/mm.go  24
-rw-r--r--  pkg/sentry/mm/pma.go  74
-rw-r--r--  pkg/sentry/mm/pma_set.go  2
-rw-r--r--  pkg/sentry/mm/procfs.go  20
-rw-r--r--  pkg/sentry/mm/shm.go  6
-rw-r--r--  pkg/sentry/mm/special_mappable.go  14
-rw-r--r--  pkg/sentry/mm/syscalls.go  106
-rw-r--r--  pkg/sentry/mm/vma.go  86
-rw-r--r--  pkg/sentry/mm/vma_set.go  2
-rw-r--r--  pkg/sentry/pgalloc/pgalloc.go  34
-rw-r--r--  pkg/sentry/pgalloc/save_restore.go  10
-rw-r--r--  pkg/sentry/platform/kvm/address_space.go  18
-rw-r--r--  pkg/sentry/platform/kvm/bluepill_fault.go  4
-rw-r--r--  pkg/sentry/platform/kvm/context.go  6
-rw-r--r--  pkg/sentry/platform/kvm/kvm.go  10
-rw-r--r--  pkg/sentry/platform/kvm/machine.go  6
-rw-r--r--  pkg/sentry/platform/kvm/machine_amd64.go  46
-rw-r--r--  pkg/sentry/platform/kvm/machine_arm64.go  16
-rw-r--r--  pkg/sentry/platform/kvm/machine_arm64_unsafe.go  14
-rw-r--r--  pkg/sentry/platform/kvm/physical_map.go  10
-rw-r--r--  pkg/sentry/platform/kvm/virtual_map.go  6
-rw-r--r--  pkg/sentry/platform/mmap_min_addr.go  8
-rw-r--r--  pkg/sentry/platform/platform.go  43
-rw-r--r--  pkg/sentry/platform/ptrace/ptrace.go  26
-rw-r--r--  pkg/sentry/platform/ptrace/ptrace_unsafe.go  4
-rw-r--r--  pkg/sentry/platform/ptrace/stub_unsafe.go  8
-rw-r--r--  pkg/sentry/platform/ptrace/subprocess.go  8
-rw-r--r--  pkg/sentry/socket/control/control.go  26
-rw-r--r--  pkg/sentry/socket/hostinet/socket.go  19
-rw-r--r--  pkg/sentry/socket/hostinet/socket_unsafe.go  3
-rw-r--r--  pkg/sentry/socket/hostinet/stack.go  10
-rw-r--r--  pkg/sentry/socket/netfilter/extensions.go  4
-rw-r--r--  pkg/sentry/socket/netfilter/ipv4.go  4
-rw-r--r--  pkg/sentry/socket/netfilter/ipv6.go  4
-rw-r--r--  pkg/sentry/socket/netfilter/netfilter.go  14
-rw-r--r--  pkg/sentry/socket/netfilter/owner_matcher.go  6
-rw-r--r--  pkg/sentry/socket/netfilter/targets.go  24
-rw-r--r--  pkg/sentry/socket/netfilter/tcp_matcher.go  6
-rw-r--r--  pkg/sentry/socket/netfilter/udp_matcher.go  6
-rw-r--r--  pkg/sentry/socket/netlink/message.go  14
-rw-r--r--  pkg/sentry/socket/netlink/socket.go  9
-rw-r--r--  pkg/sentry/socket/netstack/netstack.go  101
-rw-r--r--  pkg/sentry/socket/netstack/netstack_vfs2.go  7
-rw-r--r--  pkg/sentry/socket/socket.go  21
-rw-r--r--  pkg/sentry/socket/unix/unix.go  3
-rw-r--r--  pkg/sentry/socket/unix/unix_vfs2.go  3
-rw-r--r--  pkg/sentry/strace/epoll.go  7
-rw-r--r--  pkg/sentry/strace/poll.go  5
-rw-r--r--  pkg/sentry/strace/select.go  5
-rw-r--r--  pkg/sentry/strace/signal.go  9
-rw-r--r--  pkg/sentry/strace/socket.go  33
-rw-r--r--  pkg/sentry/strace/strace.go  39
-rw-r--r--  pkg/sentry/syscalls/linux/linux64.go  6
-rw-r--r--  pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go  108
-rw-r--r--  pkg/sentry/syscalls/linux/sigset.go  16
-rw-r--r--  pkg/sentry/syscalls/linux/sys_aio.go  17
-rw-r--r--  pkg/sentry/syscalls/linux/sys_file.go  34
-rw-r--r--  pkg/sentry/syscalls/linux/sys_futex.go  12
-rw-r--r--  pkg/sentry/syscalls/linux/sys_getdents.go  3
-rw-r--r--  pkg/sentry/syscalls/linux/sys_mempolicy.go  15
-rw-r--r--  pkg/sentry/syscalls/linux/sys_mmap.go  15
-rw-r--r--  pkg/sentry/syscalls/linux/sys_mount.go  7
-rw-r--r--  pkg/sentry/syscalls/linux/sys_pipe.go  4
-rw-r--r--  pkg/sentry/syscalls/linux/sys_poll.go  18
-rw-r--r--  pkg/sentry/syscalls/linux/sys_random.go  4
-rw-r--r--  pkg/sentry/syscalls/linux/sys_rlimit.go  6
-rw-r--r--  pkg/sentry/syscalls/linux/sys_seccomp.go  10
-rw-r--r--  pkg/sentry/syscalls/linux/sys_sem.go  6
-rw-r--r--  pkg/sentry/syscalls/linux/sys_signal.go  4
-rw-r--r--  pkg/sentry/syscalls/linux/sys_socket.go  29
-rw-r--r--  pkg/sentry/syscalls/linux/sys_stat.go  10
-rw-r--r--  pkg/sentry/syscalls/linux/sys_thread.go  9
-rw-r--r--  pkg/sentry/syscalls/linux/sys_time.go  12
-rw-r--r--  pkg/sentry/syscalls/linux/sys_xattr.go  12
-rw-r--r--  pkg/sentry/syscalls/linux/timespec.go  28
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/aio.go  16
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/execve.go  5
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/filesystem.go  19
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/getdents.go  21
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/mmap.go  7
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/mount.go  9
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/path.go  5
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/pipe.go  5
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/poll.go  25
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/setstat.go  11
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/signal.go  5
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/socket.go  30
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/stat.go  9
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go  60
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/xattr.go  11
-rw-r--r--  pkg/sentry/vfs/anonfs.go  4
-rw-r--r--  pkg/sentry/vfs/filesystem_impl_util.go  4
-rw-r--r--  pkg/sentry/vfs/inotify.go  11
-rw-r--r--  pkg/usermem/bytes_io.go  21
-rw-r--r--  pkg/usermem/bytes_io_unsafe.go  7
-rw-r--r--  pkg/usermem/usermem.go  52
-rw-r--r--  pkg/usermem/usermem_state_autogen.go  77
224 files changed, 3155 insertions, 3053 deletions
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 17801750c..15ec4e8b6 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -10,9 +10,9 @@ package linux
import (
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -118,57 +118,57 @@ func (i *IOCallback) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (i *IOCallback) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Data))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Data))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Key))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Key))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(i.OpCode))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.OpCode))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(i.ReqPrio))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.ReqPrio))
dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.FD))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.FD))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Buf))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Buf))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Bytes))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Bytes))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Offset))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Offset))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Reserved2))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Reserved2))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Flags))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ResFD))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.ResFD))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (i *IOCallback) UnmarshalBytes(src []byte) {
- i.Data = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ i.Data = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- i.Key = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.Key = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- i.OpCode = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ i.OpCode = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- i.ReqPrio = int16(usermem.ByteOrder.Uint16(src[:2]))
+ i.ReqPrio = int16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- i.FD = int32(usermem.ByteOrder.Uint32(src[:4]))
+ i.FD = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- i.Buf = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ i.Buf = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- i.Bytes = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ i.Bytes = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- i.Offset = int64(usermem.ByteOrder.Uint64(src[:8]))
+ i.Offset = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- i.Reserved2 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ i.Reserved2 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- i.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- i.ResFD = int32(usermem.ByteOrder.Uint32(src[:4]))
+ i.ResFD = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -190,7 +190,7 @@ func (i *IOCallback) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *IOCallback) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *IOCallback) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -207,13 +207,13 @@ func (i *IOCallback) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *IOCallback) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IOCallback) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *IOCallback) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IOCallback) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -251,25 +251,25 @@ func (i *IOEvent) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (i *IOEvent) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Data))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Data))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Obj))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Obj))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Result))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Result))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Result2))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Result2))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (i *IOEvent) UnmarshalBytes(src []byte) {
- i.Data = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ i.Data = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- i.Obj = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ i.Obj = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- i.Result = int64(usermem.ByteOrder.Uint64(src[:8]))
+ i.Result = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- i.Result2 = int64(usermem.ByteOrder.Uint64(src[:8]))
+ i.Result2 = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -291,7 +291,7 @@ func (i *IOEvent) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *IOEvent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *IOEvent) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -308,13 +308,13 @@ func (i *IOEvent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *IOEvent) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IOEvent) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *IOEvent) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IOEvent) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -352,25 +352,25 @@ func (b *BPFInstruction) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (b *BPFInstruction) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(b.OpCode))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(b.OpCode))
dst = dst[2:]
dst[0] = byte(b.JumpIfTrue)
dst = dst[1:]
dst[0] = byte(b.JumpIfFalse)
dst = dst[1:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(b.K))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(b.K))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (b *BPFInstruction) UnmarshalBytes(src []byte) {
- b.OpCode = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ b.OpCode = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
b.JumpIfTrue = uint8(src[0])
src = src[1:]
b.JumpIfFalse = uint8(src[0])
src = src[1:]
- b.K = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ b.K = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -392,7 +392,7 @@ func (b *BPFInstruction) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (b *BPFInstruction) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (b *BPFInstruction) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -409,13 +409,13 @@ func (b *BPFInstruction) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (b *BPFInstruction) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (b *BPFInstruction) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return b.CopyOutN(cc, addr, b.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (b *BPFInstruction) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (b *BPFInstruction) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -447,7 +447,7 @@ func (b *BPFInstruction) WriteTo(writer io.Writer) (int64, error) {
}
// CopyBPFInstructionSliceIn copies in a slice of BPFInstruction objects from the task's memory.
-func CopyBPFInstructionSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []BPFInstruction) (int, error) {
+func CopyBPFInstructionSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []BPFInstruction) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -472,7 +472,7 @@ func CopyBPFInstructionSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []
}
// CopyBPFInstructionSliceOut copies a slice of BPFInstruction objects to the task's memory.
-func CopyBPFInstructionSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []BPFInstruction) (int, error) {
+func CopyBPFInstructionSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []BPFInstruction) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -539,21 +539,21 @@ func (c *CapUserData) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (c *CapUserData) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.Effective))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Effective))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.Permitted))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Permitted))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.Inheritable))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Inheritable))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (c *CapUserData) UnmarshalBytes(src []byte) {
- c.Effective = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ c.Effective = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- c.Permitted = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ c.Permitted = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- c.Inheritable = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ c.Inheritable = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -575,7 +575,7 @@ func (c *CapUserData) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (c *CapUserData) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (c *CapUserData) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -592,13 +592,13 @@ func (c *CapUserData) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (c *CapUserData) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (c *CapUserData) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return c.CopyOutN(cc, addr, c.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (c *CapUserData) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (c *CapUserData) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -630,7 +630,7 @@ func (c *CapUserData) WriteTo(writer io.Writer) (int64, error) {
}
// CopyCapUserDataSliceIn copies in a slice of CapUserData objects from the task's memory.
-func CopyCapUserDataSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []CapUserData) (int, error) {
+func CopyCapUserDataSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []CapUserData) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -655,7 +655,7 @@ func CopyCapUserDataSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Cap
}
// CopyCapUserDataSliceOut copies a slice of CapUserData objects to the task's memory.
-func CopyCapUserDataSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []CapUserData) (int, error) {
+func CopyCapUserDataSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []CapUserData) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -722,17 +722,17 @@ func (c *CapUserHeader) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (c *CapUserHeader) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.Version))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Version))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.Pid))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Pid))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (c *CapUserHeader) UnmarshalBytes(src []byte) {
- c.Version = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ c.Version = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- c.Pid = int32(usermem.ByteOrder.Uint32(src[:4]))
+ c.Pid = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -754,7 +754,7 @@ func (c *CapUserHeader) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (c *CapUserHeader) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (c *CapUserHeader) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -771,13 +771,13 @@ func (c *CapUserHeader) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (c *CapUserHeader) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (c *CapUserHeader) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return c.CopyOutN(cc, addr, c.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (c *CapUserHeader) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (c *CapUserHeader) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -859,7 +859,7 @@ func (s *SockErrCMsgIPv4) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockErrCMsgIPv4) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SockErrCMsgIPv4) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.Offender.Packed() && s.SockExtendedErr.Packed() {
// Type SockErrCMsgIPv4 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -883,13 +883,13 @@ func (s *SockErrCMsgIPv4) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockErrCMsgIPv4) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockErrCMsgIPv4) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockErrCMsgIPv4) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockErrCMsgIPv4) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.Offender.Packed() && s.SockExtendedErr.Packed() {
// Type SockErrCMsgIPv4 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -989,7 +989,7 @@ func (s *SockErrCMsgIPv6) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockErrCMsgIPv6) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SockErrCMsgIPv6) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.Offender.Packed() && s.SockExtendedErr.Packed() {
// Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -1013,13 +1013,13 @@ func (s *SockErrCMsgIPv6) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockErrCMsgIPv6) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockErrCMsgIPv6) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockErrCMsgIPv6) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockErrCMsgIPv6) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.Offender.Packed() && s.SockExtendedErr.Packed() {
// Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -1075,7 +1075,7 @@ func (s *SockExtendedErr) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SockExtendedErr) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
dst = dst[4:]
dst[0] = byte(s.Origin)
dst = dst[1:]
@@ -1085,15 +1085,15 @@ func (s *SockExtendedErr) MarshalBytes(dst []byte) {
dst = dst[1:]
dst[0] = byte(s.Pad)
dst = dst[1:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Info))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Info))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Data))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Data))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SockExtendedErr) UnmarshalBytes(src []byte) {
- s.Errno = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Errno = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
s.Origin = uint8(src[0])
src = src[1:]
@@ -1103,9 +1103,9 @@ func (s *SockExtendedErr) UnmarshalBytes(src []byte) {
src = src[1:]
s.Pad = uint8(src[0])
src = src[1:]
- s.Info = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Info = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Data = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Data = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -1127,7 +1127,7 @@ func (s *SockExtendedErr) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockExtendedErr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SockExtendedErr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1144,13 +1144,13 @@ func (s *SockExtendedErr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockExtendedErr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockExtendedErr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockExtendedErr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockExtendedErr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1188,17 +1188,17 @@ func (f *FOwnerEx) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FOwnerEx) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FOwnerEx) UnmarshalBytes(src []byte) {
- f.Type = int32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Type = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.PID = int32(usermem.ByteOrder.Uint32(src[:4]))
+ f.PID = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -1220,7 +1220,7 @@ func (f *FOwnerEx) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FOwnerEx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FOwnerEx) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1237,13 +1237,13 @@ func (f *FOwnerEx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FOwnerEx) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FOwnerEx) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FOwnerEx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FOwnerEx) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1283,17 +1283,17 @@ func (f *Flock) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *Flock) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.Type))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(f.Type))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.Whence))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(f.Whence))
dst = dst[2:]
// Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
dst = dst[1*(4):]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Start))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Start))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Len))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Len))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
dst = dst[4:]
// Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
dst = dst[1*(4):]
@@ -1301,17 +1301,17 @@ func (f *Flock) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *Flock) UnmarshalBytes(src []byte) {
- f.Type = int16(usermem.ByteOrder.Uint16(src[:2]))
+ f.Type = int16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- f.Whence = int16(usermem.ByteOrder.Uint16(src[:2]))
+ f.Whence = int16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
// Padding: ~ copy([4]byte(f._), src[:sizeof(byte)*4])
src = src[1*(4):]
- f.Start = int64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Start = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Len = int64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Len = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.PID = int32(usermem.ByteOrder.Uint32(src[:4]))
+ f.PID = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: ~ copy([4]byte(f._), src[:sizeof(byte)*4])
src = src[1*(4):]
@@ -1335,7 +1335,7 @@ func (f *Flock) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *Flock) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *Flock) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1352,13 +1352,13 @@ func (f *Flock) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *Flock) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *Flock) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *Flock) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *Flock) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1400,29 +1400,29 @@ func (s *Statx) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *Statx) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Mask))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Mask))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Blksize))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Blksize))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Attributes))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Attributes))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Nlink))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Nlink))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Mode))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Mode))
dst = dst[2:]
// Padding: dst[:sizeof(uint16)] ~= uint16(0)
dst = dst[2:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.AttributesMask))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.AttributesMask))
dst = dst[8:]
s.Atime.MarshalBytes(dst[:s.Atime.SizeBytes()])
dst = dst[s.Atime.SizeBytes():]
@@ -1432,41 +1432,41 @@ func (s *Statx) MarshalBytes(dst []byte) {
dst = dst[s.Ctime.SizeBytes():]
s.Mtime.MarshalBytes(dst[:s.Mtime.SizeBytes()])
dst = dst[s.Mtime.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.RdevMajor))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.RdevMajor))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.RdevMinor))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.RdevMinor))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.DevMajor))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.DevMajor))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.DevMinor))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.DevMinor))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *Statx) UnmarshalBytes(src []byte) {
- s.Mask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Mask = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Blksize = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Blksize = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Attributes = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Attributes = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Nlink = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Mode = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Mode = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
// Padding: var _ uint16 ~= src[:sizeof(uint16)]
src = src[2:]
- s.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Size = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Blocks = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.AttributesMask = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.AttributesMask = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
s.Atime.UnmarshalBytes(src[:s.Atime.SizeBytes()])
src = src[s.Atime.SizeBytes():]
@@ -1476,13 +1476,13 @@ func (s *Statx) UnmarshalBytes(src []byte) {
src = src[s.Ctime.SizeBytes():]
s.Mtime.UnmarshalBytes(src[:s.Mtime.SizeBytes()])
src = src[s.Mtime.SizeBytes():]
- s.RdevMajor = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.RdevMajor = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.RdevMinor = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.RdevMinor = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.DevMajor = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.DevMajor = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.DevMinor = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.DevMinor = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -1514,7 +1514,7 @@ func (s *Statx) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *Statx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *Statx) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -1538,13 +1538,13 @@ func (s *Statx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *Statx) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Statx) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *Statx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Statx) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -1602,64 +1602,64 @@ func (s *Statfs) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *Statfs) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Type))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Type))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.BlockSize))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.BlockSize))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.BlocksFree))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.BlocksFree))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.BlocksAvailable))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.BlocksAvailable))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Files))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Files))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FilesFree))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FilesFree))
dst = dst[8:]
for idx := 0; idx < 2; idx++ {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.FSID[idx]))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.FSID[idx]))
dst = dst[4:]
}
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.NameLength))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.NameLength))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FragmentSize))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FragmentSize))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Flags))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Flags))
dst = dst[8:]
for idx := 0; idx < 4; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Spare[idx]))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Spare[idx]))
dst = dst[8:]
}
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *Statfs) UnmarshalBytes(src []byte) {
- s.Type = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Type = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.BlockSize = int64(usermem.ByteOrder.Uint64(src[:8]))
+ s.BlockSize = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Blocks = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.BlocksFree = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.BlocksFree = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.BlocksAvailable = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.BlocksAvailable = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Files = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Files = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.FilesFree = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.FilesFree = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
for idx := 0; idx < 2; idx++ {
- s.FSID[idx] = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.FSID[idx] = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
- s.NameLength = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.NameLength = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.FragmentSize = int64(usermem.ByteOrder.Uint64(src[:8]))
+ s.FragmentSize = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Flags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
for idx := 0; idx < 4; idx++ {
- s.Spare[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Spare[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
}
@@ -1682,7 +1682,7 @@ func (s *Statfs) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *Statfs) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *Statfs) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1699,13 +1699,13 @@ func (s *Statfs) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *Statfs) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Statfs) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *Statfs) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Statfs) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1743,35 +1743,35 @@ func (f *FUSEAttr) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEAttr) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
@@ -1779,35 +1779,35 @@ func (f *FUSEAttr) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEAttr) UnmarshalBytes(src []byte) {
- f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Size = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Blocks = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Atime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Mtime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Ctime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.AtimeNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.MtimeNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.CtimeNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Nlink = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Rdev = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.BlkSize = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -1831,7 +1831,7 @@ func (f *FUSEAttr) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1848,13 +1848,13 @@ func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1892,11 +1892,11 @@ func (f *FUSECreateMeta) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSECreateMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
@@ -1904,11 +1904,11 @@ func (f *FUSECreateMeta) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSECreateMeta) UnmarshalBytes(src []byte) {
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Umask = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -1932,7 +1932,7 @@ func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1949,13 +1949,13 @@ func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1993,25 +1993,25 @@ func (f *FUSEDirentMeta) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEDirentMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Off))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Off))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) {
- f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Off = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Off = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.NameLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Type = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Type = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -2033,7 +2033,7 @@ func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2050,13 +2050,13 @@ func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2095,17 +2095,17 @@ func (f *FUSEEntryOut) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEEntryOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Generation))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Generation))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.EntryValid))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.EntryValid))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.EntryValidNSec))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.EntryValidNSec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNSec))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNSec))
dst = dst[4:]
f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
dst = dst[f.Attr.SizeBytes():]
@@ -2113,17 +2113,17 @@ func (f *FUSEEntryOut) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEEntryOut) UnmarshalBytes(src []byte) {
- f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.NodeID = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Generation = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Generation = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.EntryValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.EntryValid = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.AttrValid = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.EntryValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.EntryValidNSec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.AttrValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.AttrValidNSec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
src = src[f.Attr.SizeBytes():]
@@ -2157,7 +2157,7 @@ func (f *FUSEEntryOut) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !f.Attr.Packed() {
// Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
@@ -2181,13 +2181,13 @@ func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEEntryOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEEntryOut) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !f.Attr.Packed() {
// Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
@@ -2243,21 +2243,21 @@ func (f *FUSEGetAttrIn) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) {
- f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.GetAttrFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Fh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -2279,7 +2279,7 @@ func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2296,13 +2296,13 @@ func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2341,9 +2341,9 @@ func (f *FUSEGetAttrOut) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
@@ -2353,9 +2353,9 @@ func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEGetAttrOut) UnmarshalBytes(src []byte) {
- f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.AttrValid = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.AttrValidNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.AttrValidNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -2391,7 +2391,7 @@ func (f *FUSEGetAttrOut) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !f.Attr.Packed() {
// Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
@@ -2415,13 +2415,13 @@ func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEGetAttrOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrOut) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !f.Attr.Packed() {
// Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
@@ -2479,19 +2479,19 @@ func (f *FUSEHeaderIn) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEHeaderIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
dst = dst[4:]
f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()])
dst = dst[f.Opcode.SizeBytes():]
f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
dst = dst[f.Unique.SizeBytes():]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
@@ -2499,19 +2499,19 @@ func (f *FUSEHeaderIn) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) {
- f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Len = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()])
src = src[f.Opcode.SizeBytes():]
f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
src = src[f.Unique.SizeBytes():]
- f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.NodeID = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.PID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.PID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -2545,7 +2545,7 @@ func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !f.Opcode.Packed() && f.Unique.Packed() {
// Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
@@ -2569,13 +2569,13 @@ func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !f.Opcode.Packed() && f.Unique.Packed() {
// Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
@@ -2632,9 +2632,9 @@ func (f *FUSEHeaderOut) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEHeaderOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Error))
dst = dst[4:]
f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
dst = dst[f.Unique.SizeBytes():]
@@ -2642,9 +2642,9 @@ func (f *FUSEHeaderOut) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) {
- f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Len = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Error = int32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Error = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
src = src[f.Unique.SizeBytes():]
@@ -2678,7 +2678,7 @@ func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !f.Unique.Packed() {
// Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
@@ -2702,13 +2702,13 @@ func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !f.Unique.Packed() {
// Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
@@ -2764,25 +2764,25 @@ func (f *FUSEInitIn) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEInitIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEInitIn) UnmarshalBytes(src []byte) {
- f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Major = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Minor = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.MaxReadahead = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -2804,7 +2804,7 @@ func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2821,13 +2821,13 @@ func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2866,23 +2866,23 @@ func (f *FUSEInitOut) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEInitOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold))
dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages))
dst = dst[2:]
// Padding: dst[:sizeof(uint16)] ~= uint16(0)
dst = dst[2:]
@@ -2892,23 +2892,23 @@ func (f *FUSEInitOut) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEInitOut) UnmarshalBytes(src []byte) {
- f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Major = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Minor = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.MaxReadahead = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ f.MaxBackground = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ f.CongestionThreshold = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.MaxWrite = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.TimeGran = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ f.MaxPages = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
// Padding: var _ uint16 ~= src[:sizeof(uint16)]
src = src[2:]
@@ -2934,7 +2934,7 @@ func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2951,13 +2951,13 @@ func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2995,17 +2995,17 @@ func (f *FUSEMkdirMeta) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) {
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Umask = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -3027,7 +3027,7 @@ func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3044,13 +3044,13 @@ func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3088,11 +3088,11 @@ func (f *FUSEMknodMeta) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEMknodMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
@@ -3100,11 +3100,11 @@ func (f *FUSEMknodMeta) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Rdev = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Umask = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -3128,7 +3128,7 @@ func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3145,13 +3145,13 @@ func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3190,12 +3190,12 @@ func (f *FUSEOpID) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEOpID) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*f))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(*f))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEOpID) UnmarshalBytes(src []byte) {
- *f = FUSEOpID(uint64(usermem.ByteOrder.Uint64(src[:8])))
+ *f = FUSEOpID(uint64(hostarch.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -3217,7 +3217,7 @@ func (f *FUSEOpID) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3234,13 +3234,13 @@ func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3279,12 +3279,12 @@ func (f *FUSEOpcode) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEOpcode) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*f))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*f))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEOpcode) UnmarshalBytes(src []byte) {
- *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4])))
+ *f = FUSEOpcode(uint32(hostarch.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -3306,7 +3306,7 @@ func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3323,13 +3323,13 @@ func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3367,7 +3367,7 @@ func (f *FUSEOpenIn) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEOpenIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
@@ -3375,7 +3375,7 @@ func (f *FUSEOpenIn) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEOpenIn) UnmarshalBytes(src []byte) {
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -3399,7 +3399,7 @@ func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3416,13 +3416,13 @@ func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3460,9 +3460,9 @@ func (f *FUSEOpenOut) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEOpenOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
@@ -3470,9 +3470,9 @@ func (f *FUSEOpenOut) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEOpenOut) UnmarshalBytes(src []byte) {
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Fh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.OpenFlag = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -3496,7 +3496,7 @@ func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3513,13 +3513,13 @@ func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3557,17 +3557,17 @@ func (f *FUSEReadIn) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEReadIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
@@ -3575,17 +3575,17 @@ func (f *FUSEReadIn) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEReadIn) UnmarshalBytes(src []byte) {
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Fh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Offset = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.ReadFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.ReadFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.LockOwner = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -3609,7 +3609,7 @@ func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3626,13 +3626,13 @@ func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3670,25 +3670,25 @@ func (f *FUSEReleaseIn) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEReleaseIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) {
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Fh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.ReleaseFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.LockOwner = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -3710,7 +3710,7 @@ func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3727,13 +3727,13 @@ func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3771,35 +3771,35 @@ func (f *FUSESetAttrIn) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSESetAttrIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Valid))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Valid))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
@@ -3807,35 +3807,35 @@ func (f *FUSESetAttrIn) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSESetAttrIn) UnmarshalBytes(src []byte) {
- f.Valid = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Valid = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Fh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Size = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.LockOwner = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Atime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Mtime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Ctime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.AtimeNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.MtimeNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.CtimeNsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -3859,7 +3859,7 @@ func (f *FUSESetAttrIn) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3876,13 +3876,13 @@ func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSESetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSESetAttrIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3920,17 +3920,17 @@ func (f *FUSEWriteIn) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEWriteIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
@@ -3938,17 +3938,17 @@ func (f *FUSEWriteIn) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEWriteIn) UnmarshalBytes(src []byte) {
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Fh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Offset = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.WriteFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.WriteFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.LockOwner = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -3972,7 +3972,7 @@ func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3989,13 +3989,13 @@ func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4033,7 +4033,7 @@ func (f *FUSEWriteOut) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (f *FUSEWriteOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
@@ -4041,7 +4041,7 @@ func (f *FUSEWriteOut) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (f *FUSEWriteOut) UnmarshalBytes(src []byte) {
- f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -4065,7 +4065,7 @@ func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4082,13 +4082,13 @@ func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4126,21 +4126,21 @@ func (r *RobustListHead) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (r *RobustListHead) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.List))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.List))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.FutexOffset))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.FutexOffset))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.ListOpPending))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.ListOpPending))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (r *RobustListHead) UnmarshalBytes(src []byte) {
- r.List = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ r.List = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.FutexOffset = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ r.FutexOffset = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.ListOpPending = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ r.ListOpPending = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -4162,7 +4162,7 @@ func (r *RobustListHead) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (r *RobustListHead) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (r *RobustListHead) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4179,13 +4179,13 @@ func (r *RobustListHead) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (r *RobustListHead) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (r *RobustListHead) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return r.CopyOutN(cc, addr, r.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (r *RobustListHead) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (r *RobustListHead) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4223,17 +4223,17 @@ func (d *DigestMetadata) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (d *DigestMetadata) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(d.DigestAlgorithm))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(d.DigestAlgorithm))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(d.DigestSize))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(d.DigestSize))
dst = dst[2:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (d *DigestMetadata) UnmarshalBytes(src []byte) {
- d.DigestAlgorithm = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ d.DigestAlgorithm = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- d.DigestSize = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ d.DigestSize = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
}
@@ -4255,7 +4255,7 @@ func (d *DigestMetadata) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (d *DigestMetadata) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (d *DigestMetadata) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4272,13 +4272,13 @@ func (d *DigestMetadata) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (d *DigestMetadata) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (d *DigestMetadata) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return d.CopyOutN(cc, addr, d.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (d *DigestMetadata) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (d *DigestMetadata) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4316,57 +4316,57 @@ func (i *IPCPerm) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (i *IPCPerm) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Key))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Key))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.UID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.UID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.GID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.GID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.CUID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.CUID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.CGID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.CGID))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(i.Mode))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.Mode))
dst = dst[2:]
// Padding: dst[:sizeof(uint16)] ~= uint16(0)
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(i.Seq))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.Seq))
dst = dst[2:]
// Padding: dst[:sizeof(uint16)] ~= uint16(0)
dst = dst[2:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.unused1))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.unused1))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.unused2))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.unused2))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (i *IPCPerm) UnmarshalBytes(src []byte) {
- i.Key = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.Key = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- i.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- i.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- i.CUID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.CUID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- i.CGID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.CGID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- i.Mode = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ i.Mode = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
// Padding: var _ uint16 ~= src[:sizeof(uint16)]
src = src[2:]
- i.Seq = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ i.Seq = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
// Padding: var _ uint16 ~= src[:sizeof(uint16)]
src = src[2:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- i.unused1 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ i.unused1 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- i.unused2 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ i.unused2 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -4388,7 +4388,7 @@ func (i *IPCPerm) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *IPCPerm) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *IPCPerm) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4405,13 +4405,13 @@ func (i *IPCPerm) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *IPCPerm) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IPCPerm) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *IPCPerm) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IPCPerm) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4451,65 +4451,65 @@ func (s *Sysinfo) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *Sysinfo) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Uptime))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Uptime))
dst = dst[8:]
for idx := 0; idx < 3; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Loads[idx]))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Loads[idx]))
dst = dst[8:]
}
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.TotalRAM))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.TotalRAM))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FreeRAM))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FreeRAM))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.SharedRAM))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.SharedRAM))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.BufferRAM))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.BufferRAM))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.TotalSwap))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.TotalSwap))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FreeSwap))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FreeSwap))
dst = dst[8:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Procs))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Procs))
dst = dst[2:]
// Padding: dst[:sizeof(byte)*6] ~= [6]byte{0}
dst = dst[1*(6):]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.TotalHigh))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.TotalHigh))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FreeHigh))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FreeHigh))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Unit))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Unit))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *Sysinfo) UnmarshalBytes(src []byte) {
- s.Uptime = int64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Uptime = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
for idx := 0; idx < 3; idx++ {
- s.Loads[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Loads[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
- s.TotalRAM = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.TotalRAM = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.FreeRAM = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.FreeRAM = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.SharedRAM = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.SharedRAM = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.BufferRAM = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.BufferRAM = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.TotalSwap = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.TotalSwap = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.FreeSwap = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.FreeSwap = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Procs = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Procs = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
// Padding: ~ copy([6]byte(s._), src[:sizeof(byte)*6])
src = src[1*(6):]
- s.TotalHigh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.TotalHigh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.FreeHigh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.FreeHigh = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Unit = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Unit = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -4533,7 +4533,7 @@ func (s *Sysinfo) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *Sysinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *Sysinfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Type Sysinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
s.MarshalBytes(buf) // escapes: fallback.
@@ -4542,13 +4542,13 @@ func (s *Sysinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *Sysinfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Sysinfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *Sysinfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Sysinfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Type Sysinfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -4575,12 +4575,12 @@ func (n *NumaPolicy) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (n *NumaPolicy) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*n))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*n))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (n *NumaPolicy) UnmarshalBytes(src []byte) {
- *n = NumaPolicy(int32(usermem.ByteOrder.Uint32(src[:4])))
+ *n = NumaPolicy(int32(hostarch.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -4602,7 +4602,7 @@ func (n *NumaPolicy) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (n *NumaPolicy) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (n *NumaPolicy) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4619,13 +4619,13 @@ func (n *NumaPolicy) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (n *NumaPolicy) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (n *NumaPolicy) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return n.CopyOutN(cc, addr, n.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (n *NumaPolicy) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (n *NumaPolicy) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4664,21 +4664,21 @@ func (i *IFConf) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (i *IFConf) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Len))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Len))
dst = dst[4:]
// Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
dst = dst[1*(4):]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Ptr))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Ptr))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (i *IFConf) UnmarshalBytes(src []byte) {
- i.Len = int32(usermem.ByteOrder.Uint32(src[:4]))
+ i.Len = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: ~ copy([4]byte(i._), src[:sizeof(byte)*4])
src = src[1*(4):]
- i.Ptr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ i.Ptr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -4700,7 +4700,7 @@ func (i *IFConf) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *IFConf) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *IFConf) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4717,13 +4717,13 @@ func (i *IFConf) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *IFConf) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IFConf) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *IFConf) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IFConf) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4803,7 +4803,7 @@ func (ifr *IFReq) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (ifr *IFReq) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (ifr *IFReq) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4820,13 +4820,13 @@ func (ifr *IFReq) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (ifr *IFReq) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (ifr *IFReq) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return ifr.CopyOutN(cc, addr, ifr.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (ifr *IFReq) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (ifr *IFReq) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4898,7 +4898,7 @@ func (en *ExtensionName) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (en *ExtensionName) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (en *ExtensionName) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4915,13 +4915,13 @@ func (en *ExtensionName) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (en *ExtensionName) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (en *ExtensionName) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return en.CopyOutN(cc, addr, en.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (en *ExtensionName) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (en *ExtensionName) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4963,13 +4963,13 @@ func (i *IPTEntry) SizeBytes() int {
func (i *IPTEntry) MarshalBytes(dst []byte) {
i.IP.MarshalBytes(dst[:i.IP.SizeBytes()])
dst = dst[i.IP.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NFCache))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NFCache))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(i.TargetOffset))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.TargetOffset))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(i.NextOffset))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.NextOffset))
dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Comeback))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Comeback))
dst = dst[4:]
i.Counters.MarshalBytes(dst[:i.Counters.SizeBytes()])
dst = dst[i.Counters.SizeBytes():]
@@ -4979,13 +4979,13 @@ func (i *IPTEntry) MarshalBytes(dst []byte) {
func (i *IPTEntry) UnmarshalBytes(src []byte) {
i.IP.UnmarshalBytes(src[:i.IP.SizeBytes()])
src = src[i.IP.SizeBytes():]
- i.NFCache = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.NFCache = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- i.TargetOffset = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ i.TargetOffset = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- i.NextOffset = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ i.NextOffset = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- i.Comeback = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.Comeback = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
i.Counters.UnmarshalBytes(src[:i.Counters.SizeBytes()])
src = src[i.Counters.SizeBytes():]
@@ -5019,7 +5019,7 @@ func (i *IPTEntry) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *IPTEntry) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *IPTEntry) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !i.Counters.Packed() && i.IP.Packed() {
// Type IPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -5043,13 +5043,13 @@ func (i *IPTEntry) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *IPTEntry) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IPTEntry) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *IPTEntry) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IPTEntry) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !i.Counters.Packed() && i.IP.Packed() {
// Type IPTEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -5109,7 +5109,7 @@ func (i *IPTGetEntries) SizeBytes() int {
func (i *IPTGetEntries) MarshalBytes(dst []byte) {
i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
dst = dst[i.Name.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
dst = dst[4:]
// Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
dst = dst[1*(4):]
@@ -5119,7 +5119,7 @@ func (i *IPTGetEntries) MarshalBytes(dst []byte) {
func (i *IPTGetEntries) UnmarshalBytes(src []byte) {
i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
src = src[i.Name.SizeBytes():]
- i.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: ~ copy([4]byte(i._), src[:sizeof(byte)*4])
src = src[1*(4):]
@@ -5153,7 +5153,7 @@ func (i *IPTGetEntries) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *IPTGetEntries) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *IPTGetEntries) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !i.Name.Packed() {
// Type IPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -5177,13 +5177,13 @@ func (i *IPTGetEntries) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *IPTGetEntries) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IPTGetEntries) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *IPTGetEntries) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IPTGetEntries) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !i.Name.Packed() {
// Type IPTGetEntries doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -5244,19 +5244,19 @@ func (i *IPTGetinfo) SizeBytes() int {
func (i *IPTGetinfo) MarshalBytes(dst []byte) {
i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
dst = dst[i.Name.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks))
dst = dst[4:]
for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx]))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx]))
dst = dst[4:]
}
for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx]))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx]))
dst = dst[4:]
}
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
dst = dst[4:]
}
@@ -5264,19 +5264,19 @@ func (i *IPTGetinfo) MarshalBytes(dst []byte) {
func (i *IPTGetinfo) UnmarshalBytes(src []byte) {
i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
src = src[i.Name.SizeBytes():]
- i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.ValidHooks = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.HookEntry[idx] = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.Underflow[idx] = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
- i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.NumEntries = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- i.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -5308,7 +5308,7 @@ func (i *IPTGetinfo) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !i.Name.Packed() {
// Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -5332,13 +5332,13 @@ func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *IPTGetinfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IPTGetinfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *IPTGetinfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IPTGetinfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !i.Name.Packed() {
// Type IPTGetinfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -5426,7 +5426,7 @@ func (i *IPTIP) MarshalBytes(dst []byte) {
dst[0] = byte(i.OutputInterfaceMask[idx])
dst = dst[1:]
}
- usermem.ByteOrder.PutUint16(dst[:2], uint16(i.Protocol))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.Protocol))
dst = dst[2:]
dst[0] = byte(i.Flags)
dst = dst[1:]
@@ -5460,7 +5460,7 @@ func (i *IPTIP) UnmarshalBytes(src []byte) {
i.OutputInterfaceMask[idx] = src[0]
src = src[1:]
}
- i.Protocol = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ i.Protocol = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
i.Flags = uint8(src[0])
src = src[1:]
@@ -5496,7 +5496,7 @@ func (i *IPTIP) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *IPTIP) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *IPTIP) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
// Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -5520,13 +5520,13 @@ func (i *IPTIP) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *IPTIP) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IPTIP) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *IPTIP) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IPTIP) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
// Type IPTIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -5596,7 +5596,7 @@ func (ke *KernelIPTEntry) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (ke *KernelIPTEntry) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (ke *KernelIPTEntry) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Type KernelIPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
ke.MarshalBytes(buf) // escapes: fallback.
@@ -5605,13 +5605,13 @@ func (ke *KernelIPTEntry) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (ke *KernelIPTEntry) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (ke *KernelIPTEntry) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return ke.CopyOutN(cc, addr, ke.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (ke *KernelIPTEntry) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (ke *KernelIPTEntry) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Type KernelIPTEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -5651,7 +5651,7 @@ func (ke *KernelIPTGetEntries) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (ke *KernelIPTGetEntries) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (ke *KernelIPTGetEntries) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Type KernelIPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
ke.MarshalBytes(buf) // escapes: fallback.
@@ -5660,13 +5660,13 @@ func (ke *KernelIPTGetEntries) CopyOutN(cc marshal.CopyContext, addr usermem.Add
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (ke *KernelIPTGetEntries) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (ke *KernelIPTGetEntries) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return ke.CopyOutN(cc, addr, ke.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (ke *KernelIPTGetEntries) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (ke *KernelIPTGetEntries) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Type KernelIPTGetEntries doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -5726,7 +5726,7 @@ func (tn *TableName) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (tn *TableName) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (tn *TableName) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -5743,13 +5743,13 @@ func (tn *TableName) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (tn *TableName) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (tn *TableName) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return tn.CopyOutN(cc, addr, tn.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (tn *TableName) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (tn *TableName) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -5787,17 +5787,17 @@ func (x *XTCounters) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (x *XTCounters) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(x.Pcnt))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(x.Pcnt))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(x.Bcnt))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(x.Bcnt))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (x *XTCounters) UnmarshalBytes(src []byte) {
- x.Pcnt = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ x.Pcnt = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- x.Bcnt = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ x.Bcnt = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -5819,7 +5819,7 @@ func (x *XTCounters) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (x *XTCounters) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (x *XTCounters) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -5836,13 +5836,13 @@ func (x *XTCounters) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (x *XTCounters) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (x *XTCounters) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return x.CopyOutN(cc, addr, x.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (x *XTCounters) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (x *XTCounters) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -5923,7 +5923,7 @@ func (x *XTGetRevision) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (x *XTGetRevision) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (x *XTGetRevision) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !x.Name.Packed() {
// Type XTGetRevision doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
@@ -5947,13 +5947,13 @@ func (x *XTGetRevision) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (x *XTGetRevision) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (x *XTGetRevision) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return x.CopyOutN(cc, addr, x.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (x *XTGetRevision) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (x *XTGetRevision) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !x.Name.Packed() {
// Type XTGetRevision doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
@@ -6014,13 +6014,13 @@ func (i *IP6TEntry) SizeBytes() int {
func (i *IP6TEntry) MarshalBytes(dst []byte) {
i.IPv6.MarshalBytes(dst[:i.IPv6.SizeBytes()])
dst = dst[i.IPv6.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NFCache))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NFCache))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(i.TargetOffset))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.TargetOffset))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(i.NextOffset))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.NextOffset))
dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Comeback))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Comeback))
dst = dst[4:]
// Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
dst = dst[1*(4):]
@@ -6032,13 +6032,13 @@ func (i *IP6TEntry) MarshalBytes(dst []byte) {
func (i *IP6TEntry) UnmarshalBytes(src []byte) {
i.IPv6.UnmarshalBytes(src[:i.IPv6.SizeBytes()])
src = src[i.IPv6.SizeBytes():]
- i.NFCache = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.NFCache = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- i.TargetOffset = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ i.TargetOffset = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- i.NextOffset = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ i.NextOffset = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- i.Comeback = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.Comeback = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: ~ copy([4]byte(i._), src[:sizeof(byte)*4])
src = src[1*(4):]
@@ -6074,7 +6074,7 @@ func (i *IP6TEntry) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *IP6TEntry) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *IP6TEntry) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !i.Counters.Packed() && i.IPv6.Packed() {
// Type IP6TEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -6098,13 +6098,13 @@ func (i *IP6TEntry) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *IP6TEntry) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IP6TEntry) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *IP6TEntry) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IP6TEntry) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !i.Counters.Packed() && i.IPv6.Packed() {
// Type IP6TEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -6193,7 +6193,7 @@ func (i *IP6TIP) MarshalBytes(dst []byte) {
dst[0] = byte(i.OutputInterfaceMask[idx])
dst = dst[1:]
}
- usermem.ByteOrder.PutUint16(dst[:2], uint16(i.Protocol))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.Protocol))
dst = dst[2:]
dst[0] = byte(i.TOS)
dst = dst[1:]
@@ -6231,7 +6231,7 @@ func (i *IP6TIP) UnmarshalBytes(src []byte) {
i.OutputInterfaceMask[idx] = src[0]
src = src[1:]
}
- i.Protocol = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ i.Protocol = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
i.TOS = uint8(src[0])
src = src[1:]
@@ -6271,7 +6271,7 @@ func (i *IP6TIP) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *IP6TIP) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *IP6TIP) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
// Type IP6TIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -6295,13 +6295,13 @@ func (i *IP6TIP) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *IP6TIP) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IP6TIP) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *IP6TIP) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IP6TIP) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
// Type IP6TIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -6362,23 +6362,23 @@ func (i *IP6TReplace) SizeBytes() int {
func (i *IP6TReplace) MarshalBytes(dst []byte) {
i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
dst = dst[i.Name.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
dst = dst[4:]
for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx]))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx]))
dst = dst[4:]
}
for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx]))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx]))
dst = dst[4:]
}
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumCounters))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NumCounters))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Counters))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Counters))
dst = dst[8:]
}
@@ -6386,23 +6386,23 @@ func (i *IP6TReplace) MarshalBytes(dst []byte) {
func (i *IP6TReplace) UnmarshalBytes(src []byte) {
i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
src = src[i.Name.SizeBytes():]
- i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.ValidHooks = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.NumEntries = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- i.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.HookEntry[idx] = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.Underflow[idx] = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
- i.NumCounters = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.NumCounters = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- i.Counters = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ i.Counters = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -6434,7 +6434,7 @@ func (i *IP6TReplace) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !i.Name.Packed() {
// Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -6458,13 +6458,13 @@ func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *IP6TReplace) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IP6TReplace) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *IP6TReplace) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IP6TReplace) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !i.Name.Packed() {
// Type IP6TReplace doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -6534,7 +6534,7 @@ func (ke *KernelIP6TGetEntries) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (ke *KernelIP6TGetEntries) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (ke *KernelIP6TGetEntries) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Type KernelIP6TGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
ke.MarshalBytes(buf) // escapes: fallback.
@@ -6543,13 +6543,13 @@ func (ke *KernelIP6TGetEntries) CopyOutN(cc marshal.CopyContext, addr usermem.Ad
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (ke *KernelIP6TGetEntries) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (ke *KernelIP6TGetEntries) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return ke.CopyOutN(cc, addr, ke.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (ke *KernelIP6TGetEntries) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (ke *KernelIP6TGetEntries) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Type KernelIP6TGetEntries doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(ke.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -6575,25 +6575,25 @@ func (s *SockAddrNetlink) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SockAddrNetlink) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
dst = dst[2:]
// Padding: dst[:sizeof(uint16)] ~= uint16(0)
dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.PortID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.PortID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Groups))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Groups))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SockAddrNetlink) UnmarshalBytes(src []byte) {
- s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Family = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
// Padding: var _ uint16 ~= src[:sizeof(uint16)]
src = src[2:]
- s.PortID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.PortID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Groups = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Groups = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -6615,7 +6615,7 @@ func (s *SockAddrNetlink) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockAddrNetlink) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SockAddrNetlink) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -6632,13 +6632,13 @@ func (s *SockAddrNetlink) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockAddrNetlink) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrNetlink) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockAddrNetlink) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrNetlink) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -6676,21 +6676,21 @@ func (p *PollFD) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (p *PollFD) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(p.FD))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(p.FD))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(p.Events))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(p.Events))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(p.REvents))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(p.REvents))
dst = dst[2:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (p *PollFD) UnmarshalBytes(src []byte) {
- p.FD = int32(usermem.ByteOrder.Uint32(src[:4]))
+ p.FD = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- p.Events = int16(usermem.ByteOrder.Uint16(src[:2]))
+ p.Events = int16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- p.REvents = int16(usermem.ByteOrder.Uint16(src[:2]))
+ p.REvents = int16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
}
@@ -6712,7 +6712,7 @@ func (p *PollFD) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (p *PollFD) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (p *PollFD) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -6729,13 +6729,13 @@ func (p *PollFD) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (p *PollFD) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (p *PollFD) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return p.CopyOutN(cc, addr, p.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (p *PollFD) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (p *PollFD) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -6767,7 +6767,7 @@ func (p *PollFD) WriteTo(writer io.Writer) (int64, error) {
}
// CopyPollFDSliceIn copies in a slice of PollFD objects from the task's memory.
-func CopyPollFDSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []PollFD) (int, error) {
+func CopyPollFDSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []PollFD) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -6792,7 +6792,7 @@ func CopyPollFDSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []PollFD)
}
// CopyPollFDSliceOut copies a slice of PollFD objects to the task's memory.
-func CopyPollFDSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []PollFD) (int, error) {
+func CopyPollFDSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []PollFD) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -6859,29 +6859,29 @@ func (r *RSeqCriticalSection) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (r *RSeqCriticalSection) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(r.Version))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(r.Version))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(r.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(r.Flags))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.Start))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.Start))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.PostCommitOffset))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.PostCommitOffset))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.Abort))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.Abort))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (r *RSeqCriticalSection) UnmarshalBytes(src []byte) {
- r.Version = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ r.Version = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- r.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ r.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- r.Start = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ r.Start = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.PostCommitOffset = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ r.PostCommitOffset = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.Abort = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ r.Abort = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -6903,7 +6903,7 @@ func (r *RSeqCriticalSection) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (r *RSeqCriticalSection) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (r *RSeqCriticalSection) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -6920,13 +6920,13 @@ func (r *RSeqCriticalSection) CopyOutN(cc marshal.CopyContext, addr usermem.Addr
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (r *RSeqCriticalSection) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (r *RSeqCriticalSection) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return r.CopyOutN(cc, addr, r.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (r *RSeqCriticalSection) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (r *RSeqCriticalSection) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -6970,33 +6970,33 @@ func (r *Rusage) MarshalBytes(dst []byte) {
dst = dst[r.UTime.SizeBytes():]
r.STime.MarshalBytes(dst[:r.STime.SizeBytes()])
dst = dst[r.STime.SizeBytes():]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.MaxRSS))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.MaxRSS))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.IXRSS))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.IXRSS))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.IDRSS))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.IDRSS))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.ISRSS))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.ISRSS))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.MinFlt))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.MinFlt))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.MajFlt))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.MajFlt))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.NSwap))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.NSwap))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.InBlock))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.InBlock))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.OuBlock))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.OuBlock))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.MsgSnd))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.MsgSnd))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.MsgRcv))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.MsgRcv))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.NSignals))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.NSignals))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.NVCSw))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.NVCSw))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.NIvCSw))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.NIvCSw))
dst = dst[8:]
}
@@ -7006,33 +7006,33 @@ func (r *Rusage) UnmarshalBytes(src []byte) {
src = src[r.UTime.SizeBytes():]
r.STime.UnmarshalBytes(src[:r.STime.SizeBytes()])
src = src[r.STime.SizeBytes():]
- r.MaxRSS = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.MaxRSS = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.IXRSS = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.IXRSS = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.IDRSS = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.IDRSS = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.ISRSS = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.ISRSS = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.MinFlt = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.MinFlt = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.MajFlt = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.MajFlt = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.NSwap = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.NSwap = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.InBlock = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.InBlock = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.OuBlock = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.OuBlock = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.MsgSnd = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.MsgSnd = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.MsgRcv = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.MsgRcv = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.NSignals = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.NSignals = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.NVCSw = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.NVCSw = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.NIvCSw = int64(usermem.ByteOrder.Uint64(src[:8]))
+ r.NIvCSw = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -7064,7 +7064,7 @@ func (r *Rusage) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (r *Rusage) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (r *Rusage) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !r.STime.Packed() && r.UTime.Packed() {
// Type Rusage doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(r.SizeBytes()) // escapes: okay.
@@ -7088,13 +7088,13 @@ func (r *Rusage) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (r *Rusage) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (r *Rusage) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return r.CopyOutN(cc, addr, r.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (r *Rusage) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (r *Rusage) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !r.STime.Packed() && r.UTime.Packed() {
// Type Rusage doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(r.SizeBytes()) // escapes: okay.
@@ -7151,28 +7151,28 @@ func (s *SeccompData) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SeccompData) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Nr))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Nr))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Arch))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Arch))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.InstructionPointer))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.InstructionPointer))
dst = dst[8:]
for idx := 0; idx < 6; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Args[idx]))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Args[idx]))
dst = dst[8:]
}
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SeccompData) UnmarshalBytes(src []byte) {
- s.Nr = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Nr = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Arch = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Arch = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.InstructionPointer = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.InstructionPointer = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
for idx := 0; idx < 6; idx++ {
- s.Args[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Args[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
}
@@ -7195,7 +7195,7 @@ func (s *SeccompData) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SeccompData) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SeccompData) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7212,13 +7212,13 @@ func (s *SeccompData) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SeccompData) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SeccompData) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SeccompData) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SeccompData) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7256,49 +7256,49 @@ func (s *SemInfo) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SemInfo) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.SemMap))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemMap))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.SemMni))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemMni))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.SemMns))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemMns))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.SemMnu))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemMnu))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.SemMsl))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemMsl))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.SemOpm))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemOpm))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.SemUme))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemUme))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.SemUsz))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemUsz))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.SemVmx))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemVmx))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.SemAem))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.SemAem))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SemInfo) UnmarshalBytes(src []byte) {
- s.SemMap = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.SemMap = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.SemMni = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.SemMni = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.SemMns = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.SemMns = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.SemMnu = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.SemMnu = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.SemMsl = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.SemMsl = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.SemOpm = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.SemOpm = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.SemUme = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.SemUme = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.SemUsz = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.SemUsz = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.SemVmx = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.SemVmx = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.SemAem = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.SemAem = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -7320,7 +7320,7 @@ func (s *SemInfo) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SemInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SemInfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7337,13 +7337,13 @@ func (s *SemInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SemInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SemInfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SemInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SemInfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7381,21 +7381,21 @@ func (s *Sembuf) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *Sembuf) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.SemNum))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.SemNum))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.SemOp))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.SemOp))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.SemFlg))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.SemFlg))
dst = dst[2:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *Sembuf) UnmarshalBytes(src []byte) {
- s.SemNum = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.SemNum = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.SemOp = int16(usermem.ByteOrder.Uint16(src[:2]))
+ s.SemOp = int16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.SemFlg = int16(usermem.ByteOrder.Uint16(src[:2]))
+ s.SemFlg = int16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
}
@@ -7417,7 +7417,7 @@ func (s *Sembuf) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *Sembuf) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *Sembuf) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7434,13 +7434,13 @@ func (s *Sembuf) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *Sembuf) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Sembuf) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *Sembuf) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Sembuf) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7472,7 +7472,7 @@ func (s *Sembuf) WriteTo(writer io.Writer) (int64, error) {
}
// CopySembufSliceIn copies in a slice of Sembuf objects from the task's memory.
-func CopySembufSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Sembuf) (int, error) {
+func CopySembufSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []Sembuf) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -7497,7 +7497,7 @@ func CopySembufSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Sembuf)
}
// CopySembufSliceOut copies a slice of Sembuf objects to the task's memory.
-func CopySembufSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []Sembuf) (int, error) {
+func CopySembufSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []Sembuf) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -7565,37 +7565,37 @@ func (s *ShmInfo) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *ShmInfo) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UsedIDs))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.UsedIDs))
dst = dst[4:]
// Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
dst = dst[1*(4):]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmTot))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmTot))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmRss))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmRss))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSwp))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSwp))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.SwapAttempts))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.SwapAttempts))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.SwapSuccesses))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.SwapSuccesses))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *ShmInfo) UnmarshalBytes(src []byte) {
- s.UsedIDs = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.UsedIDs = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: ~ copy([4]byte(s._), src[:sizeof(byte)*4])
src = src[1*(4):]
- s.ShmTot = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmTot = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.ShmRss = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmRss = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.ShmSwp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmSwp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.SwapAttempts = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.SwapAttempts = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.SwapSuccesses = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.SwapSuccesses = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -7617,7 +7617,7 @@ func (s *ShmInfo) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *ShmInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *ShmInfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7634,13 +7634,13 @@ func (s *ShmInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *ShmInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *ShmInfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *ShmInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *ShmInfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7678,29 +7678,29 @@ func (s *ShmParams) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *ShmParams) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMax))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMax))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMin))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMin))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMni))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMni))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSeg))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSeg))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmAll))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmAll))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *ShmParams) UnmarshalBytes(src []byte) {
- s.ShmMax = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmMax = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.ShmMin = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmMin = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.ShmMni = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmMni = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.ShmSeg = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmSeg = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.ShmAll = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmAll = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -7722,7 +7722,7 @@ func (s *ShmParams) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *ShmParams) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *ShmParams) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7739,13 +7739,13 @@ func (s *ShmParams) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *ShmParams) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *ShmParams) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *ShmParams) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *ShmParams) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7789,7 +7789,7 @@ func (s *ShmidDS) SizeBytes() int {
func (s *ShmidDS) MarshalBytes(dst []byte) {
s.ShmPerm.MarshalBytes(dst[:s.ShmPerm.SizeBytes()])
dst = dst[s.ShmPerm.SizeBytes():]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSegsz))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSegsz))
dst = dst[8:]
s.ShmAtime.MarshalBytes(dst[:s.ShmAtime.SizeBytes()])
dst = dst[s.ShmAtime.SizeBytes():]
@@ -7797,15 +7797,15 @@ func (s *ShmidDS) MarshalBytes(dst []byte) {
dst = dst[s.ShmDtime.SizeBytes():]
s.ShmCtime.MarshalBytes(dst[:s.ShmCtime.SizeBytes()])
dst = dst[s.ShmCtime.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.ShmCpid))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.ShmCpid))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.ShmLpid))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.ShmLpid))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmNattach))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.ShmNattach))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Unused4))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Unused4))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Unused5))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Unused5))
dst = dst[8:]
}
@@ -7813,7 +7813,7 @@ func (s *ShmidDS) MarshalBytes(dst []byte) {
func (s *ShmidDS) UnmarshalBytes(src []byte) {
s.ShmPerm.UnmarshalBytes(src[:s.ShmPerm.SizeBytes()])
src = src[s.ShmPerm.SizeBytes():]
- s.ShmSegsz = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmSegsz = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
s.ShmAtime.UnmarshalBytes(src[:s.ShmAtime.SizeBytes()])
src = src[s.ShmAtime.SizeBytes():]
@@ -7821,15 +7821,15 @@ func (s *ShmidDS) UnmarshalBytes(src []byte) {
src = src[s.ShmDtime.SizeBytes():]
s.ShmCtime.UnmarshalBytes(src[:s.ShmCtime.SizeBytes()])
src = src[s.ShmCtime.SizeBytes():]
- s.ShmCpid = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.ShmCpid = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.ShmLpid = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.ShmLpid = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.ShmNattach = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmNattach = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Unused4 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Unused4 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Unused5 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Unused5 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -7861,7 +7861,7 @@ func (s *ShmidDS) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *ShmidDS) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *ShmidDS) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
// Type ShmidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -7885,13 +7885,13 @@ func (s *ShmidDS) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *ShmidDS) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *ShmidDS) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *ShmidDS) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *ShmidDS) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
// Type ShmidDS doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -7948,13 +7948,13 @@ func (s *Sigevent) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *Sigevent) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Value))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Value))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Notify))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Notify))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Tid))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Tid))
dst = dst[4:]
for idx := 0; idx < 44; idx++ {
dst[0] = byte(s.UnRemainder[idx])
@@ -7964,13 +7964,13 @@ func (s *Sigevent) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *Sigevent) UnmarshalBytes(src []byte) {
- s.Value = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Value = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Signo = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Signo = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Notify = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Notify = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Tid = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Tid = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
for idx := 0; idx < 44; idx++ {
s.UnRemainder[idx] = src[0]
@@ -7996,7 +7996,7 @@ func (s *Sigevent) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *Sigevent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *Sigevent) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8013,13 +8013,13 @@ func (s *Sigevent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *Sigevent) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Sigevent) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *Sigevent) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Sigevent) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8058,12 +8058,12 @@ func (s *SignalSet) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SignalSet) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*s))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(*s))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SignalSet) UnmarshalBytes(src []byte) {
- *s = SignalSet(uint64(usermem.ByteOrder.Uint64(src[:8])))
+ *s = SignalSet(uint64(hostarch.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -8085,7 +8085,7 @@ func (s *SignalSet) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SignalSet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SignalSet) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8102,13 +8102,13 @@ func (s *SignalSet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SignalSet) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalSet) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SignalSet) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalSet) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8147,39 +8147,39 @@ func (s *SignalfdSiginfo) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SignalfdSiginfo) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.PID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.PID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.FD))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.FD))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.TID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.TID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Band))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Band))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Overrun))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Overrun))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.TrapNo))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.TrapNo))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Status))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Status))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Int))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Int))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Ptr))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Ptr))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.UTime))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.UTime))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.STime))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.STime))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Addr))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Addr))
dst = dst[8:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.AddrLSB))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.AddrLSB))
dst = dst[2:]
// Padding: dst[:sizeof(uint8)*48] ~= [48]uint8{0}
dst = dst[1*(48):]
@@ -8187,39 +8187,39 @@ func (s *SignalfdSiginfo) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SignalfdSiginfo) UnmarshalBytes(src []byte) {
- s.Signo = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Signo = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Errno = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Errno = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Code = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Code = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.PID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.PID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.FD = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.FD = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.TID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.TID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Band = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Band = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Overrun = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Overrun = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.TrapNo = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.TrapNo = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Status = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Status = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Int = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Int = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Ptr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Ptr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.UTime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.UTime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.STime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.STime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Addr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Addr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.AddrLSB = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.AddrLSB = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
// Padding: ~ copy([48]uint8(s._), src[:sizeof(uint8)*48])
src = src[1*(48):]
@@ -8245,7 +8245,7 @@ func (s *SignalfdSiginfo) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SignalfdSiginfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SignalfdSiginfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Type SignalfdSiginfo doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
s.MarshalBytes(buf) // escapes: fallback.
@@ -8254,13 +8254,13 @@ func (s *SignalfdSiginfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SignalfdSiginfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalfdSiginfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SignalfdSiginfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalfdSiginfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Type SignalfdSiginfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -8286,21 +8286,21 @@ func (c *ControlMessageCredentials) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (c *ControlMessageCredentials) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.PID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.PID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.UID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.UID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.GID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.GID))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) {
- c.PID = int32(usermem.ByteOrder.Uint32(src[:4]))
+ c.PID = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- c.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ c.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- c.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ c.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -8322,7 +8322,7 @@ func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8339,13 +8339,13 @@ func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr userme
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return c.CopyOutN(cc, addr, c.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8417,7 +8417,7 @@ func (i *Inet6Addr) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Inet6Addr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Inet6Addr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8434,13 +8434,13 @@ func (i *Inet6Addr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Inet6Addr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Inet6Addr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Inet6Addr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Inet6Addr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8512,7 +8512,7 @@ func (i *InetAddr) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8529,13 +8529,13 @@ func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *InetAddr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *InetAddr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8573,17 +8573,17 @@ func (l *Linger) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (l *Linger) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(l.OnOff))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(l.OnOff))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(l.Linger))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(l.Linger))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (l *Linger) UnmarshalBytes(src []byte) {
- l.OnOff = int32(usermem.ByteOrder.Uint32(src[:4]))
+ l.OnOff = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- l.Linger = int32(usermem.ByteOrder.Uint32(src[:4]))
+ l.Linger = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -8605,7 +8605,7 @@ func (l *Linger) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (l *Linger) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (l *Linger) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8622,13 +8622,13 @@ func (l *Linger) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (l *Linger) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (l *Linger) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return l.CopyOutN(cc, addr, l.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (l *Linger) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (l *Linger) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8668,9 +8668,9 @@ func (s *SockAddrInet) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SockAddrInet) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Port))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Port))
dst = dst[2:]
s.Addr.MarshalBytes(dst[:s.Addr.SizeBytes()])
dst = dst[s.Addr.SizeBytes():]
@@ -8680,9 +8680,9 @@ func (s *SockAddrInet) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SockAddrInet) UnmarshalBytes(src []byte) {
- s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Family = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.Port = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Port = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
s.Addr.UnmarshalBytes(src[:s.Addr.SizeBytes()])
src = src[s.Addr.SizeBytes():]
@@ -8718,7 +8718,7 @@ func (s *SockAddrInet) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockAddrInet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SockAddrInet) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.Addr.Packed() {
// Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -8742,13 +8742,13 @@ func (s *SockAddrInet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockAddrInet) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrInet) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockAddrInet) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrInet) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.Addr.Packed() {
// Type SockAddrInet doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -8805,33 +8805,33 @@ func (s *SockAddrInet6) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SockAddrInet6) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Port))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Port))
dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Flowinfo))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Flowinfo))
dst = dst[4:]
for idx := 0; idx < 16; idx++ {
dst[0] = byte(s.Addr[idx])
dst = dst[1:]
}
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Scope_id))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Scope_id))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SockAddrInet6) UnmarshalBytes(src []byte) {
- s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Family = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.Port = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Port = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.Flowinfo = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Flowinfo = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
for idx := 0; idx < 16; idx++ {
s.Addr[idx] = src[0]
src = src[1:]
}
- s.Scope_id = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Scope_id = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -8853,7 +8853,7 @@ func (s *SockAddrInet6) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockAddrInet6) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SockAddrInet6) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8870,13 +8870,13 @@ func (s *SockAddrInet6) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockAddrInet6) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrInet6) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockAddrInet6) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrInet6) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8915,13 +8915,13 @@ func (s *SockAddrLink) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SockAddrLink) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Protocol))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Protocol))
dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.InterfaceIndex))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.InterfaceIndex))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.ARPHardwareType))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.ARPHardwareType))
dst = dst[2:]
dst[0] = byte(s.PacketType)
dst = dst[1:]
@@ -8935,13 +8935,13 @@ func (s *SockAddrLink) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SockAddrLink) UnmarshalBytes(src []byte) {
- s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Family = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.Protocol = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Protocol = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.InterfaceIndex = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.InterfaceIndex = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.ARPHardwareType = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.ARPHardwareType = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
s.PacketType = src[0]
src = src[1:]
@@ -8971,7 +8971,7 @@ func (s *SockAddrLink) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockAddrLink) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SockAddrLink) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8988,13 +8988,13 @@ func (s *SockAddrLink) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockAddrLink) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrLink) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockAddrLink) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrLink) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9033,7 +9033,7 @@ func (s *SockAddrUnix) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SockAddrUnix) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
dst = dst[2:]
for idx := 0; idx < UnixPathMax; idx++ {
dst[0] = byte(s.Path[idx])
@@ -9043,7 +9043,7 @@ func (s *SockAddrUnix) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SockAddrUnix) UnmarshalBytes(src []byte) {
- s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Family = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
for idx := 0; idx < UnixPathMax; idx++ {
s.Path[idx] = int8(src[0])
@@ -9069,7 +9069,7 @@ func (s *SockAddrUnix) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockAddrUnix) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SockAddrUnix) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9086,13 +9086,13 @@ func (s *SockAddrUnix) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockAddrUnix) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrUnix) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockAddrUnix) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrUnix) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9146,93 +9146,93 @@ func (t *TCPInfo) MarshalBytes(dst []byte) {
dst = dst[1:]
dst[0] = byte(t.DeliveryRateAppLimited)
dst = dst[1:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTO))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RTO))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.ATO))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.ATO))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndMss))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.SndMss))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvMss))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RcvMss))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Unacked))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Unacked))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Sacked))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Sacked))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Lost))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Lost))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Retrans))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Retrans))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Fackets))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Fackets))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataSent))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataSent))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckSent))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckSent))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataRecv))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataRecv))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckRecv))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckRecv))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.PMTU))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.PMTU))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSsthresh))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSsthresh))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTT))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RTT))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTTVar))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RTTVar))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndSsthresh))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.SndSsthresh))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndCwnd))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.SndCwnd))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Advmss))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Advmss))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Reordering))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Reordering))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvRTT))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RcvRTT))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSpace))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSpace))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.TotalRetrans))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.TotalRetrans))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.PacingRate))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.PacingRate))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.MaxPacingRate))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.MaxPacingRate))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesAcked))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.BytesAcked))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesReceived))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.BytesReceived))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsOut))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.SegsOut))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsIn))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.SegsIn))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.NotSentBytes))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.NotSentBytes))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.MinRTT))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.MinRTT))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsIn))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsIn))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsOut))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsOut))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.DeliveryRate))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.DeliveryRate))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BusyTime))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.BusyTime))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.RwndLimited))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.RwndLimited))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.SndBufLimited))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.SndBufLimited))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Delivered))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Delivered))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DeliveredCE))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.DeliveredCE))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesSent))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.BytesSent))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesRetrans))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(t.BytesRetrans))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DSACKDups))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.DSACKDups))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.ReordSeen))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.ReordSeen))
dst = dst[4:]
}
@@ -9254,93 +9254,93 @@ func (t *TCPInfo) UnmarshalBytes(src []byte) {
src = src[1:]
t.DeliveryRateAppLimited = uint8(src[0])
src = src[1:]
- t.RTO = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.RTO = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.ATO = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.ATO = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.SndMss = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.SndMss = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.RcvMss = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.RcvMss = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.Unacked = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.Unacked = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.Sacked = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.Sacked = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.Lost = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.Lost = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.Retrans = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.Retrans = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.Fackets = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.Fackets = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.LastDataSent = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.LastDataSent = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.LastAckSent = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.LastAckSent = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.LastDataRecv = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.LastDataRecv = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.LastAckRecv = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.LastAckRecv = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.PMTU = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.PMTU = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.RcvSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.RcvSsthresh = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.RTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.RTT = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.RTTVar = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.RTTVar = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.SndSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.SndSsthresh = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.SndCwnd = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.SndCwnd = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.Advmss = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.Advmss = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.Reordering = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.Reordering = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.RcvRTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.RcvRTT = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.RcvSpace = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.RcvSpace = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.TotalRetrans = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.TotalRetrans = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.PacingRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ t.PacingRate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- t.MaxPacingRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ t.MaxPacingRate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- t.BytesAcked = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ t.BytesAcked = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- t.BytesReceived = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ t.BytesReceived = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- t.SegsOut = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.SegsOut = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.SegsIn = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.SegsIn = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.NotSentBytes = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.NotSentBytes = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.MinRTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.MinRTT = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.DataSegsIn = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.DataSegsIn = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.DataSegsOut = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.DataSegsOut = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.DeliveryRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ t.DeliveryRate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- t.BusyTime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ t.BusyTime = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- t.RwndLimited = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ t.RwndLimited = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- t.SndBufLimited = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ t.SndBufLimited = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- t.Delivered = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.Delivered = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.DeliveredCE = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.DeliveredCE = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.BytesSent = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ t.BytesSent = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- t.BytesRetrans = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ t.BytesRetrans = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- t.DSACKDups = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.DSACKDups = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.ReordSeen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.ReordSeen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -9362,7 +9362,7 @@ func (t *TCPInfo) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (t *TCPInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (t *TCPInfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9379,13 +9379,13 @@ func (t *TCPInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (t *TCPInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *TCPInfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return t.CopyOutN(cc, addr, t.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (t *TCPInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *TCPInfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9424,12 +9424,12 @@ func (c *ClockT) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (c *ClockT) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*c))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(*c))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (c *ClockT) UnmarshalBytes(src []byte) {
- *c = ClockT(int64(usermem.ByteOrder.Uint64(src[:8])))
+ *c = ClockT(int64(hostarch.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -9451,7 +9451,7 @@ func (c *ClockT) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (c *ClockT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (c *ClockT) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9468,13 +9468,13 @@ func (c *ClockT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (c *ClockT) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (c *ClockT) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return c.CopyOutN(cc, addr, c.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (c *ClockT) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (c *ClockT) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9556,7 +9556,7 @@ func (i *ItimerVal) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !i.Interval.Packed() && i.Value.Packed() {
// Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -9580,13 +9580,13 @@ func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !i.Interval.Packed() && i.Value.Packed() {
// Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -9686,7 +9686,7 @@ func (i *Itimerspec) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Itimerspec) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Itimerspec) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !i.Interval.Packed() && i.Value.Packed() {
// Type Itimerspec doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -9710,13 +9710,13 @@ func (i *Itimerspec) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Itimerspec) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Itimerspec) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Itimerspec) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Itimerspec) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !i.Interval.Packed() && i.Value.Packed() {
// Type Itimerspec doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
@@ -9772,9 +9772,9 @@ func (sxts *StatxTimestamp) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (sxts *StatxTimestamp) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(sxts.Sec))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(sxts.Sec))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(sxts.Nsec))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(sxts.Nsec))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
@@ -9782,9 +9782,9 @@ func (sxts *StatxTimestamp) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (sxts *StatxTimestamp) UnmarshalBytes(src []byte) {
- sxts.Sec = int64(usermem.ByteOrder.Uint64(src[:8]))
+ sxts.Sec = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- sxts.Nsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ sxts.Nsec = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -9808,7 +9808,7 @@ func (sxts *StatxTimestamp) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (sxts *StatxTimestamp) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (sxts *StatxTimestamp) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9825,13 +9825,13 @@ func (sxts *StatxTimestamp) CopyOutN(cc marshal.CopyContext, addr usermem.Addr,
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (sxts *StatxTimestamp) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (sxts *StatxTimestamp) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return sxts.CopyOutN(cc, addr, sxts.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (sxts *StatxTimestamp) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (sxts *StatxTimestamp) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9870,12 +9870,12 @@ func (t *TimeT) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (t *TimeT) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*t))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(*t))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (t *TimeT) UnmarshalBytes(src []byte) {
- *t = TimeT(int64(usermem.ByteOrder.Uint64(src[:8])))
+ *t = TimeT(int64(hostarch.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -9897,7 +9897,7 @@ func (t *TimeT) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9914,13 +9914,13 @@ func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (t *TimeT) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *TimeT) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return t.CopyOutN(cc, addr, t.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (t *TimeT) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *TimeT) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9959,12 +9959,12 @@ func (t *TimerID) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (t *TimerID) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*t))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*t))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (t *TimerID) UnmarshalBytes(src []byte) {
- *t = TimerID(int32(usermem.ByteOrder.Uint32(src[:4])))
+ *t = TimerID(int32(hostarch.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -9986,7 +9986,7 @@ func (t *TimerID) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10003,13 +10003,13 @@ func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (t *TimerID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *TimerID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return t.CopyOutN(cc, addr, t.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (t *TimerID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *TimerID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10047,17 +10047,17 @@ func (ts *Timespec) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (ts *Timespec) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(ts.Sec))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(ts.Sec))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(ts.Nsec))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(ts.Nsec))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (ts *Timespec) UnmarshalBytes(src []byte) {
- ts.Sec = int64(usermem.ByteOrder.Uint64(src[:8]))
+ ts.Sec = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- ts.Nsec = int64(usermem.ByteOrder.Uint64(src[:8]))
+ ts.Nsec = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -10079,7 +10079,7 @@ func (ts *Timespec) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (ts *Timespec) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (ts *Timespec) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10096,13 +10096,13 @@ func (ts *Timespec) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (ts *Timespec) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (ts *Timespec) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return ts.CopyOutN(cc, addr, ts.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (ts *Timespec) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (ts *Timespec) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10134,7 +10134,7 @@ func (ts *Timespec) WriteTo(writer io.Writer) (int64, error) {
}
// CopyTimespecSliceIn copies in a slice of Timespec objects from the task's memory.
-func CopyTimespecSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Timespec) (int, error) {
+func CopyTimespecSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []Timespec) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -10159,7 +10159,7 @@ func CopyTimespecSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Timesp
}
// CopyTimespecSliceOut copies a slice of Timespec objects to the task's memory.
-func CopyTimespecSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []Timespec) (int, error) {
+func CopyTimespecSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []Timespec) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -10226,17 +10226,17 @@ func (tv *Timeval) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (tv *Timeval) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(tv.Sec))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(tv.Sec))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(tv.Usec))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(tv.Usec))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (tv *Timeval) UnmarshalBytes(src []byte) {
- tv.Sec = int64(usermem.ByteOrder.Uint64(src[:8]))
+ tv.Sec = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- tv.Usec = int64(usermem.ByteOrder.Uint64(src[:8]))
+ tv.Usec = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -10258,7 +10258,7 @@ func (tv *Timeval) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (tv *Timeval) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (tv *Timeval) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10275,13 +10275,13 @@ func (tv *Timeval) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (tv *Timeval) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (tv *Timeval) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return tv.CopyOutN(cc, addr, tv.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (tv *Timeval) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (tv *Timeval) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10313,7 +10313,7 @@ func (tv *Timeval) WriteTo(writer io.Writer) (int64, error) {
}
// CopyTimevalSliceIn copies in a slice of Timeval objects from the task's memory.
-func CopyTimevalSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Timeval) (int, error) {
+func CopyTimevalSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []Timeval) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -10338,7 +10338,7 @@ func CopyTimevalSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Timeval
}
// CopyTimevalSliceOut copies a slice of Timeval objects to the task's memory.
-func CopyTimevalSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []Timeval) (int, error) {
+func CopyTimevalSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []Timeval) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -10459,7 +10459,7 @@ func (t *Tms) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (t *Tms) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (t *Tms) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
// Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
@@ -10483,13 +10483,13 @@ func (t *Tms) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (t *Tms) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *Tms) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return t.CopyOutN(cc, addr, t.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (t *Tms) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *Tms) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
// Type Tms doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
@@ -10545,17 +10545,17 @@ func (u *Utime) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (u *Utime) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Actime))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Actime))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Modtime))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Modtime))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (u *Utime) UnmarshalBytes(src []byte) {
- u.Actime = int64(usermem.ByteOrder.Uint64(src[:8]))
+ u.Actime = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- u.Modtime = int64(usermem.ByteOrder.Uint64(src[:8]))
+ u.Modtime = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -10577,7 +10577,7 @@ func (u *Utime) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Utime) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Utime) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10594,13 +10594,13 @@ func (u *Utime) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Utime) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Utime) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Utime) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Utime) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10639,13 +10639,13 @@ func (t *Termios) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (t *Termios) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.InputFlags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.InputFlags))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.OutputFlags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.OutputFlags))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.ControlFlags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.ControlFlags))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LocalFlags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.LocalFlags))
dst = dst[4:]
dst[0] = byte(t.LineDiscipline)
dst = dst[1:]
@@ -10657,13 +10657,13 @@ func (t *Termios) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (t *Termios) UnmarshalBytes(src []byte) {
- t.InputFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.InputFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.OutputFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.OutputFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.ControlFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.ControlFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- t.LocalFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ t.LocalFlags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
t.LineDiscipline = uint8(src[0])
src = src[1:]
@@ -10691,7 +10691,7 @@ func (t *Termios) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (t *Termios) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (t *Termios) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10708,13 +10708,13 @@ func (t *Termios) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (t *Termios) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *Termios) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return t.CopyOutN(cc, addr, t.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (t *Termios) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *Termios) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10753,9 +10753,9 @@ func (w *WindowSize) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (w *WindowSize) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(w.Rows))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(w.Rows))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(w.Cols))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(w.Cols))
dst = dst[2:]
// Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
dst = dst[1*(4):]
@@ -10763,9 +10763,9 @@ func (w *WindowSize) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (w *WindowSize) UnmarshalBytes(src []byte) {
- w.Rows = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ w.Rows = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- w.Cols = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ w.Cols = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
// Padding: ~ copy([4]byte(w._), src[:sizeof(byte)*4])
src = src[1*(4):]
@@ -10789,7 +10789,7 @@ func (w *WindowSize) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (w *WindowSize) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (w *WindowSize) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10806,13 +10806,13 @@ func (w *WindowSize) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (w *WindowSize) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (w *WindowSize) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return w.CopyOutN(cc, addr, w.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (w *WindowSize) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (w *WindowSize) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10850,25 +10850,25 @@ func (w *Winsize) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (w *Winsize) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(w.Row))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(w.Row))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(w.Col))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(w.Col))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(w.Xpixel))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(w.Xpixel))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(w.Ypixel))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(w.Ypixel))
dst = dst[2:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (w *Winsize) UnmarshalBytes(src []byte) {
- w.Row = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ w.Row = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- w.Col = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ w.Col = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- w.Xpixel = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ w.Xpixel = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- w.Ypixel = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ w.Ypixel = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
}
@@ -10890,7 +10890,7 @@ func (w *Winsize) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (w *Winsize) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (w *Winsize) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10907,13 +10907,13 @@ func (w *Winsize) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (w *Winsize) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (w *Winsize) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return w.CopyOutN(cc, addr, w.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (w *Winsize) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (w *Winsize) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -11029,7 +11029,7 @@ func (u *UtsName) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *UtsName) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *UtsName) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -11046,13 +11046,13 @@ func (u *UtsName) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *UtsName) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *UtsName) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *UtsName) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *UtsName) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
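
The hunks above are a mechanical rename: the generated marshalling code now reads and writes fields through hostarch.ByteOrder instead of usermem.ByteOrder, and the copy helpers take hostarch.Addr. A minimal sketch of that field-by-field pattern for a hypothetical two-field struct (illustrative only, not part of this change; it assumes hostarch.ByteOrder behaves as an encoding/binary ByteOrder, as the generated code uses it):

package example

import "gvisor.dev/gvisor/pkg/hostarch"

type pair struct {
	A uint32
	B uint64
}

// MarshalBytes writes A then B in host byte order, advancing dst as it goes,
// mirroring the generated MarshalBytes bodies above.
func (p *pair) MarshalBytes(dst []byte) {
	hostarch.ByteOrder.PutUint32(dst[:4], p.A)
	dst = dst[4:]
	hostarch.ByteOrder.PutUint64(dst[:8], p.B)
}

// UnmarshalBytes is the inverse, reading the fields back in the same order.
func (p *pair) UnmarshalBytes(src []byte) {
	p.A = hostarch.ByteOrder.Uint32(src[:4])
	src = src[4:]
	p.B = hostarch.ByteOrder.Uint64(src[:8])
}
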
diff --git a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
index 0e1218af1..1cfe7c011 100644
--- a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
@@ -16,9 +16,9 @@ package linux
import (
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -42,20 +42,20 @@ func (e *EpollEvent) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (e *EpollEvent) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(e.Events))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Events))
dst = dst[4:]
for idx := 0; idx < 2; idx++ {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(e.Data[idx]))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Data[idx]))
dst = dst[4:]
}
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (e *EpollEvent) UnmarshalBytes(src []byte) {
- e.Events = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ e.Events = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
for idx := 0; idx < 2; idx++ {
- e.Data[idx] = int32(usermem.ByteOrder.Uint32(src[:4]))
+ e.Data[idx] = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
}
@@ -78,7 +78,7 @@ func (e *EpollEvent) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (e *EpollEvent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (e *EpollEvent) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -95,13 +95,13 @@ func (e *EpollEvent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (e *EpollEvent) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (e *EpollEvent) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return e.CopyOutN(cc, addr, e.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (e *EpollEvent) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (e *EpollEvent) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -133,7 +133,7 @@ func (e *EpollEvent) WriteTo(writer io.Writer) (int64, error) {
}
// CopyEpollEventSliceIn copies in a slice of EpollEvent objects from the task's memory.
-func CopyEpollEventSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []EpollEvent) (int, error) {
+func CopyEpollEventSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []EpollEvent) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -158,7 +158,7 @@ func CopyEpollEventSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Epol
}
// CopyEpollEventSliceOut copies a slice of EpollEvent objects to the task's memory.
-func CopyEpollEventSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []EpollEvent) (int, error) {
+func CopyEpollEventSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []EpollEvent) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -229,27 +229,27 @@ func (s *Stat) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *Stat) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Dev))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Dev))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Nlink))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Nlink))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Mode))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Mode))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rdev))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rdev))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Blksize))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Blksize))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
dst = dst[8:]
s.ATime.MarshalBytes(dst[:s.ATime.SizeBytes()])
dst = dst[s.ATime.SizeBytes():]
@@ -263,27 +263,27 @@ func (s *Stat) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *Stat) UnmarshalBytes(src []byte) {
- s.Dev = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Dev = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Nlink = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Nlink = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
- s.Rdev = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rdev = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Size = int64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Size = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Blksize = int64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Blksize = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Blocks = int64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Blocks = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
s.ATime.UnmarshalBytes(src[:s.ATime.SizeBytes()])
src = src[s.ATime.SizeBytes():]
@@ -323,7 +323,7 @@ func (s *Stat) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *Stat) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *Stat) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -347,13 +347,13 @@ func (s *Stat) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *Stat) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Stat) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *Stat) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Stat) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -409,117 +409,117 @@ func (p *PtraceRegs) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (p *PtraceRegs) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R15))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R15))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R14))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R14))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R13))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R13))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R12))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R12))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rbp))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rbp))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rbx))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rbx))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R11))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R11))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R10))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R10))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R9))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R9))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R8))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.R8))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rax))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rax))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rcx))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rcx))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rdx))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rdx))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rsi))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rsi))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rdi))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rdi))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Orig_rax))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Orig_rax))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rip))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rip))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Cs))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Cs))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Eflags))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Eflags))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rsp))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Rsp))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Ss))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Ss))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Fs_base))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Fs_base))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Gs_base))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Gs_base))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Ds))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Ds))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Es))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Es))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Fs))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Fs))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Gs))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Gs))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (p *PtraceRegs) UnmarshalBytes(src []byte) {
- p.R15 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.R15 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.R14 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.R14 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.R13 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.R13 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.R12 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.R12 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Rbp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Rbp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Rbx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Rbx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.R11 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.R11 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.R10 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.R10 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.R9 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.R9 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.R8 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.R8 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Rax = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Rax = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Rcx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Rcx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Rdx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Rdx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Rsi = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Rsi = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Rdi = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Rdi = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Orig_rax = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Orig_rax = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Rip = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Rip = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Cs = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Cs = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Eflags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Eflags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Rsp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Rsp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Ss = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Ss = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Fs_base = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Fs_base = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Gs_base = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Gs_base = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Ds = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Ds = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Es = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Es = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Fs = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Fs = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Gs = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Gs = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -541,7 +541,7 @@ func (p *PtraceRegs) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (p *PtraceRegs) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (p *PtraceRegs) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -558,13 +558,13 @@ func (p *PtraceRegs) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (p *PtraceRegs) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (p *PtraceRegs) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return p.CopyOutN(cc, addr, p.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (p *PtraceRegs) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (p *PtraceRegs) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -609,17 +609,17 @@ func (s *SemidDS) MarshalBytes(dst []byte) {
dst = dst[s.SemPerm.SizeBytes():]
s.SemOTime.MarshalBytes(dst[:s.SemOTime.SizeBytes()])
dst = dst[s.SemOTime.SizeBytes():]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.unused1))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused1))
dst = dst[8:]
s.SemCTime.MarshalBytes(dst[:s.SemCTime.SizeBytes()])
dst = dst[s.SemCTime.SizeBytes():]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.unused2))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused2))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.SemNSems))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.SemNSems))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.unused3))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused3))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.unused4))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused4))
dst = dst[8:]
}
@@ -629,17 +629,17 @@ func (s *SemidDS) UnmarshalBytes(src []byte) {
src = src[s.SemPerm.SizeBytes():]
s.SemOTime.UnmarshalBytes(src[:s.SemOTime.SizeBytes()])
src = src[s.SemOTime.SizeBytes():]
- s.unused1 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.unused1 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
s.SemCTime.UnmarshalBytes(src[:s.SemCTime.SizeBytes()])
src = src[s.SemCTime.SizeBytes():]
- s.unused2 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.unused2 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.SemNSems = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.SemNSems = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.unused3 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.unused3 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.unused4 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.unused4 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -671,7 +671,7 @@ func (s *SemidDS) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SemidDS) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SemidDS) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
// Type SemidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -695,13 +695,13 @@ func (s *SemidDS) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SemidDS) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SemidDS) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SemidDS) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SemidDS) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
// Type SemidDS doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
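
Beyond the byte-order calls, each CopyOutN/CopyIn body begins by aliasing the struct's memory as a []byte through a reflect.SliceHeader, as the truncated hunks above show ("Construct a slice backed by dst's underlying memory"). A minimal self-contained sketch of that aliasing step, with a hypothetical timeval stand-in and a plain copy standing in for the CopyContext methods the real generated code calls:

package example

import (
	"reflect"
	"runtime"
	"unsafe"
)

type timeval struct {
	Sec  int64
	Usec int64
}

// copyOutBytes copies tv's raw bytes into dst by building a slice that
// aliases tv's memory, the same trick the generated CopyOutN uses.
func copyOutBytes(tv *timeval, dst []byte) int {
	// Construct a slice backed by tv's underlying memory (no allocation).
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(tv))
	hdr.Len = int(unsafe.Sizeof(*tv))
	hdr.Cap = int(unsafe.Sizeof(*tv))

	n := copy(dst, buf)
	// Keep tv reachable until the aliasing slice is no longer in use.
	runtime.KeepAlive(tv)
	return n
}
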
diff --git a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
index 38087c1a0..ab6a7c106 100644
--- a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
@@ -15,9 +15,9 @@ package linux
import (
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -41,24 +41,24 @@ func (e *EpollEvent) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (e *EpollEvent) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(e.Events))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Events))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
for idx := 0; idx < 2; idx++ {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(e.Data[idx]))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Data[idx]))
dst = dst[4:]
}
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (e *EpollEvent) UnmarshalBytes(src []byte) {
- e.Events = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ e.Events = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
for idx := 0; idx < 2; idx++ {
- e.Data[idx] = int32(usermem.ByteOrder.Uint32(src[:4]))
+ e.Data[idx] = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
}
@@ -81,7 +81,7 @@ func (e *EpollEvent) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (e *EpollEvent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (e *EpollEvent) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -98,13 +98,13 @@ func (e *EpollEvent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (e *EpollEvent) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (e *EpollEvent) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return e.CopyOutN(cc, addr, e.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (e *EpollEvent) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (e *EpollEvent) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -136,7 +136,7 @@ func (e *EpollEvent) WriteTo(writer io.Writer) (int64, error) {
}
// CopyEpollEventSliceIn copies in a slice of EpollEvent objects from the task's memory.
-func CopyEpollEventSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []EpollEvent) (int, error) {
+func CopyEpollEventSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []EpollEvent) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -161,7 +161,7 @@ func CopyEpollEventSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Epol
}
// CopyEpollEventSliceOut copies a slice of EpollEvent objects to the task's memory.
-func CopyEpollEventSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []EpollEvent) (int, error) {
+func CopyEpollEventSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []EpollEvent) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -232,29 +232,29 @@ func (s *Stat) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *Stat) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Dev))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Dev))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Mode))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Mode))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Nlink))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Nlink))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rdev))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rdev))
dst = dst[8:]
// Padding: dst[:sizeof(uint64)] ~= uint64(0)
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Blksize))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Blksize))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
dst = dst[8:]
s.ATime.MarshalBytes(dst[:s.ATime.SizeBytes()])
dst = dst[s.ATime.SizeBytes():]
@@ -268,29 +268,29 @@ func (s *Stat) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *Stat) UnmarshalBytes(src []byte) {
- s.Dev = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Dev = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Nlink = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Rdev = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rdev = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
// Padding: var _ uint64 ~= src[:sizeof(uint64)]
src = src[8:]
- s.Size = int64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Size = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Blksize = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Blksize = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
- s.Blocks = int64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Blocks = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
s.ATime.UnmarshalBytes(src[:s.ATime.SizeBytes()])
src = src[s.ATime.SizeBytes():]
@@ -330,7 +330,7 @@ func (s *Stat) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *Stat) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *Stat) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -354,13 +354,13 @@ func (s *Stat) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *Stat) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Stat) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *Stat) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Stat) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -418,28 +418,28 @@ func (p *PtraceRegs) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (p *PtraceRegs) MarshalBytes(dst []byte) {
for idx := 0; idx < 31; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Regs[idx]))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Regs[idx]))
dst = dst[8:]
}
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Sp))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Sp))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Pc))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Pc))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Pstate))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Pstate))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (p *PtraceRegs) UnmarshalBytes(src []byte) {
for idx := 0; idx < 31; idx++ {
- p.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Regs[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
- p.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Sp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Pc = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- p.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ p.Pstate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -461,7 +461,7 @@ func (p *PtraceRegs) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (p *PtraceRegs) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (p *PtraceRegs) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -478,13 +478,13 @@ func (p *PtraceRegs) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (p *PtraceRegs) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (p *PtraceRegs) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return p.CopyOutN(cc, addr, p.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (p *PtraceRegs) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (p *PtraceRegs) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -531,11 +531,11 @@ func (s *SemidDS) MarshalBytes(dst []byte) {
dst = dst[s.SemOTime.SizeBytes():]
s.SemCTime.MarshalBytes(dst[:s.SemCTime.SizeBytes()])
dst = dst[s.SemCTime.SizeBytes():]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.SemNSems))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.SemNSems))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.unused3))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused3))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.unused4))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused4))
dst = dst[8:]
}
@@ -547,11 +547,11 @@ func (s *SemidDS) UnmarshalBytes(src []byte) {
src = src[s.SemOTime.SizeBytes():]
s.SemCTime.UnmarshalBytes(src[:s.SemCTime.SizeBytes()])
src = src[s.SemCTime.SizeBytes():]
- s.SemNSems = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.SemNSems = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.unused3 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.unused3 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.unused4 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.unused4 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -583,7 +583,7 @@ func (s *SemidDS) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SemidDS) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SemidDS) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
// Type SemidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -607,13 +607,13 @@ func (s *SemidDS) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SemidDS) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SemidDS) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SemidDS) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SemidDS) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
// Type SemidDS doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
diff --git a/pkg/coverage/coverage.go b/pkg/coverage/coverage.go
index 6f3d72e83..a6778a005 100644
--- a/pkg/coverage/coverage.go
+++ b/pkg/coverage/coverage.go
@@ -28,8 +28,8 @@ import (
"sort"
"testing"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
"github.com/bazelbuild/rules_go/go/tools/coverdata"
)
@@ -141,7 +141,7 @@ func ConsumeCoverageData(w io.Writer) int {
// Non-zero coverage data found; consume it and report as a PC.
counters[index] = 0
pc := globalData.syntheticPCs[fileNum][index]
- usermem.ByteOrder.PutUint64(pcBuffer[:], pc)
+ hostarch.ByteOrder.PutUint64(pcBuffer[:], pc)
n, err := w.Write(pcBuffer[:])
if err != nil {
if err == io.EOF {
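The coverage hunk above only swaps which package provides the byte-order helper; the encoding of synthetic PCs is unchanged. A minimal standalone sketch of that encoding, assuming hostarch.ByteOrder is binary.LittleEndian (as it is on the architectures gVisor supports) and using an illustrative PC value:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// byteOrder stands in for hostarch.ByteOrder, which is binary.LittleEndian
// on supported hosts.
var byteOrder = binary.LittleEndian

func main() {
	pc := uint64(0x4005d0) // illustrative synthetic PC
	var pcBuffer [8]byte
	byteOrder.PutUint64(pcBuffer[:], pc)

	var w bytes.Buffer
	if _, err := w.Write(pcBuffer[:]); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", w.Bytes()) // d0 05 40 00 00 00 00 00
}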
diff --git a/pkg/usermem/access_type.go b/pkg/hostarch/access_type.go
index 2cfca29af..e30476840 100644
--- a/pkg/usermem/access_type.go
+++ b/pkg/hostarch/access_type.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package usermem
+package hostarch
import "golang.org/x/sys/unix"
diff --git a/pkg/usermem/addr.go b/pkg/hostarch/addr.go
index c4100481e..0cf0f3c81 100644
--- a/pkg/usermem/addr.go
+++ b/pkg/hostarch/addr.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package usermem
+package hostarch
import (
"fmt"
@@ -57,7 +57,7 @@ func (v Addr) RoundUp() (addr Addr, ok bool) {
func (v Addr) MustRoundUp() Addr {
addr, ok := v.RoundUp()
if !ok {
- panic(fmt.Sprintf("usermem.Addr(%d).RoundUp() wraps", v))
+ panic(fmt.Sprintf("hostarch.Addr(%d).RoundUp() wraps", v))
}
return addr
}
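Only the panic message changes in this hunk, but the rounding semantics matter for later hunks that call RoundUp and RoundDown. A self-contained sketch that mirrors hostarch.Addr's page rounding, assuming a 4KiB PageSize and arbitrary addresses:

package main

import "fmt"

// Addr mirrors hostarch.Addr for illustration only.
type Addr uintptr

const PageSize = 4096

// RoundDown returns the page-aligned address at or below v.
func (v Addr) RoundDown() Addr { return v &^ (PageSize - 1) }

// RoundUp returns the page-aligned address at or above v; ok is false when
// the addition wraps, which is the case MustRoundUp panics on.
func (v Addr) RoundUp() (Addr, bool) {
	addr := (v + PageSize - 1).RoundDown()
	return addr, addr >= v
}

func main() {
	v := Addr(0x1234)
	fmt.Printf("down=%#x\n", v.RoundDown()) // down=0x1000
	up, ok := v.RoundUp()
	fmt.Printf("up=%#x ok=%v\n", up, ok) // up=0x2000 ok=true

	_, ok = Addr(^uintptr(0) - 5).RoundUp()
	fmt.Println("wraps:", !ok) // wraps: true
}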
diff --git a/pkg/usermem/addr_range.go b/pkg/hostarch/addr_range.go
index 6e030127b..8a771d6fc 100644
--- a/pkg/usermem/addr_range.go
+++ b/pkg/hostarch/addr_range.go
@@ -1,4 +1,4 @@
-package usermem
+package hostarch
// A Range represents a contiguous range of T.
//
diff --git a/pkg/usermem/addr_range_seq_unsafe.go b/pkg/hostarch/addr_range_seq_unsafe.go
index c9a1415a0..ecc17d595 100644
--- a/pkg/usermem/addr_range_seq_unsafe.go
+++ b/pkg/hostarch/addr_range_seq_unsafe.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package usermem
+package hostarch
import (
"bytes"
diff --git a/pkg/hostarch/hostarch.go b/pkg/hostarch/hostarch.go
new file mode 100644
index 000000000..fdd29c567
--- /dev/null
+++ b/pkg/hostarch/hostarch.go
@@ -0,0 +1,7 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hostarch contains host arch address operations for user memory.
+package hostarch
diff --git a/pkg/usermem/usermem_arm64.go b/pkg/hostarch/hostarch_arm64.go
index 7e7529585..a31a8aeeb 100644
--- a/pkg/usermem/usermem_arm64.go
+++ b/pkg/hostarch/hostarch_arm64.go
@@ -14,7 +14,7 @@
// +build arm64
-package usermem
+package hostarch
import (
"encoding/binary"
diff --git a/pkg/usermem/usermem_arm64_state_autogen.go b/pkg/hostarch/hostarch_arm64_state_autogen.go
index d7c365e5d..ca13bb4d4 100644
--- a/pkg/usermem/usermem_arm64_state_autogen.go
+++ b/pkg/hostarch/hostarch_arm64_state_autogen.go
@@ -2,4 +2,4 @@
// +build arm64
-package usermem
+package hostarch
diff --git a/pkg/hostarch/hostarch_state_autogen.go b/pkg/hostarch/hostarch_state_autogen.go
new file mode 100644
index 000000000..32939e06b
--- /dev/null
+++ b/pkg/hostarch/hostarch_state_autogen.go
@@ -0,0 +1,80 @@
+// automatically generated by stateify.
+
+package hostarch
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (a *AccessType) StateTypeName() string {
+ return "pkg/hostarch.AccessType"
+}
+
+func (a *AccessType) StateFields() []string {
+ return []string{
+ "Read",
+ "Write",
+ "Execute",
+ }
+}
+
+func (a *AccessType) beforeSave() {}
+
+// +checklocksignore
+func (a *AccessType) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+ stateSinkObject.Save(0, &a.Read)
+ stateSinkObject.Save(1, &a.Write)
+ stateSinkObject.Save(2, &a.Execute)
+}
+
+func (a *AccessType) afterLoad() {}
+
+// +checklocksignore
+func (a *AccessType) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &a.Read)
+ stateSourceObject.Load(1, &a.Write)
+ stateSourceObject.Load(2, &a.Execute)
+}
+
+func (v *Addr) StateTypeName() string {
+ return "pkg/hostarch.Addr"
+}
+
+func (v *Addr) StateFields() []string {
+ return nil
+}
+
+func (r *AddrRange) StateTypeName() string {
+ return "pkg/hostarch.AddrRange"
+}
+
+func (r *AddrRange) StateFields() []string {
+ return []string{
+ "Start",
+ "End",
+ }
+}
+
+func (r *AddrRange) beforeSave() {}
+
+// +checklocksignore
+func (r *AddrRange) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.Start)
+ stateSinkObject.Save(1, &r.End)
+}
+
+func (r *AddrRange) afterLoad() {}
+
+// +checklocksignore
+func (r *AddrRange) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.Start)
+ stateSourceObject.Load(1, &r.End)
+}
+
+func init() {
+ state.Register((*AccessType)(nil))
+ state.Register((*Addr)(nil))
+ state.Register((*AddrRange)(nil))
+}
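The generated file above follows the usual stateify shape: each field is saved and loaded by index. A toy Sink/Source pair, purely for illustration (the real pkg/state interfaces carry type metadata and support deferred loads), showing how such methods round-trip an AccessType:

package main

import "fmt"

// sink and source are toy stand-ins for state.Sink and state.Source.
type sink struct{ fields map[int]bool }

func (s *sink) Save(idx int, v *bool) { s.fields[idx] = *v }

type source struct{ fields map[int]bool }

func (s *source) Load(idx int, v *bool) { *v = s.fields[idx] }

// AccessType mirrors hostarch.AccessType.
type AccessType struct{ Read, Write, Execute bool }

// StateSave and StateLoad have the same shape as the generated methods.
func (a *AccessType) StateSave(s *sink) {
	s.Save(0, &a.Read)
	s.Save(1, &a.Write)
	s.Save(2, &a.Execute)
}

func (a *AccessType) StateLoad(s *source) {
	s.Load(0, &a.Read)
	s.Load(1, &a.Write)
	s.Load(2, &a.Execute)
}

func main() {
	orig := AccessType{Read: true, Execute: true}
	snk := &sink{fields: map[int]bool{}}
	orig.StateSave(snk)

	var restored AccessType
	restored.StateLoad(&source{fields: snk.fields})
	fmt.Printf("%+v\n", restored) // {Read:true Write:false Execute:true}
}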
diff --git a/pkg/hostarch/hostarch_unsafe_state_autogen.go b/pkg/hostarch/hostarch_unsafe_state_autogen.go
new file mode 100644
index 000000000..419f24913
--- /dev/null
+++ b/pkg/hostarch/hostarch_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package hostarch
diff --git a/pkg/usermem/usermem_x86.go b/pkg/hostarch/hostarch_x86.go
index d96f829fb..af6ef2b7f 100644
--- a/pkg/usermem/usermem_x86.go
+++ b/pkg/hostarch/hostarch_x86.go
@@ -14,7 +14,7 @@
// +build amd64 386
-package usermem
+package hostarch
import "encoding/binary"
diff --git a/pkg/usermem/usermem_x86_state_autogen.go b/pkg/hostarch/hostarch_x86_state_autogen.go
index cca386593..98bb9050d 100644
--- a/pkg/usermem/usermem_x86_state_autogen.go
+++ b/pkg/hostarch/hostarch_x86_state_autogen.go
@@ -2,4 +2,4 @@
// +build amd64 386
-package usermem
+package hostarch
diff --git a/pkg/marshal/marshal.go b/pkg/marshal/marshal.go
index d8cb44b40..eb036feae 100644
--- a/pkg/marshal/marshal.go
+++ b/pkg/marshal/marshal.go
@@ -23,7 +23,7 @@ package marshal
import (
"io"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// CopyContext defines the memory operations required to marshal to and from
@@ -36,11 +36,11 @@ type CopyContext interface {
// CopyOutBytes writes the contents of b to the task's memory. See
// kernel.CopyOutBytes.
- CopyOutBytes(addr usermem.Addr, b []byte) (int, error)
+ CopyOutBytes(addr hostarch.Addr, b []byte) (int, error)
// CopyInBytes reads the contents of the task's memory to b. See
// kernel.CopyInBytes.
- CopyInBytes(addr usermem.Addr, b []byte) (int, error)
+ CopyInBytes(addr hostarch.Addr, b []byte) (int, error)
}
// Marshallable represents operations on a type that can be marshalled to and
@@ -108,7 +108,7 @@ type Marshallable interface {
// If the copy-in from the task memory is only partially successful, CopyIn
// should still attempt to deserialize as much data as possible. See comment
// for UnmarshalBytes.
- CopyIn(cc CopyContext, addr usermem.Addr) (int, error)
+ CopyIn(cc CopyContext, addr hostarch.Addr) (int, error)
// CopyOut serializes a Marshallable type to a task's memory. This may only
// be called from a task goroutine. This is more efficient than calling
@@ -119,7 +119,7 @@ type Marshallable interface {
// The copy-out to the task memory may be partially successful, in which
// case CopyOut returns how much data was serialized. See comment for
// MarshalBytes for implications.
- CopyOut(cc CopyContext, addr usermem.Addr) (int, error)
+ CopyOut(cc CopyContext, addr hostarch.Addr) (int, error)
// CopyOutN is like CopyOut, but explicitly requests a partial
// copy-out. Note that this may yield unexpected results for non-packed
@@ -127,7 +127,7 @@ type Marshallable interface {
// comment on MarshalBytes.
//
// The limit must be less than or equal to SizeBytes().
- CopyOutN(cc CopyContext, addr usermem.Addr, limit int) (int, error)
+ CopyOutN(cc CopyContext, addr hostarch.Addr, limit int) (int, error)
}
// go-marshal generates additional functions for a type based on additional
@@ -157,10 +157,10 @@ type Marshallable interface {
// func UnmarshalUnsafeFooSlice(dst []Foo, src []byte) (int, error) { ... }
//
// // CopyFooSliceIn copies in a slice of Foo objects from the task's memory.
-// func CopyFooSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Foo) (int, error) { ... }
+// func CopyFooSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []Foo) (int, error) { ... }
//
// // CopyFooSliceOut copies out a slice of Foo objects to the task's memory.
-// func CopyFooSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []Foo) (int, error) { ... }
+// func CopyFooSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []Foo) (int, error) { ... }
//
// The names of the functions are of the format "Copy%sIn" and "Copy%sOut", where
// %s is the first argument to the slice clause. This directive is not supported
@@ -175,10 +175,10 @@ type Marshallable interface {
// This is only valid on newtypes on primitives, and causes the generated
// functions to accept slices of the inner type instead:
//
-// func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (int, error) { ... }
+// func CopyInt32SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []int32) (int, error) { ... }
//
// Without "inner", they would instead be:
//
-// func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Int32) (int, error) { ... }
+// func CopyInt32SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []Int32) (int, error) { ... }
//
// This may help avoid a cast depending on how the generated functions are used.
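The doc comment above describes the slice clause and the inner modifier abstractly; for orientation, this is roughly how the directives look on declarations. Foo, its fields, and FooSlice are hypothetical, and the Copy*Slice* helpers only exist after go-marshal has been run over the package:

package foo

// Foo is a hypothetical marshallable struct. "+marshal" asks go-marshal to
// generate SizeBytes/MarshalBytes/CopyIn/CopyOut and friends, and the
// "slice:FooSlice" clause additionally requests CopyFooSliceIn and
// CopyFooSliceOut.
//
// +marshal slice:FooSlice
type Foo struct {
	A uint64
	B int32
	_ int32 // explicit padding keeps the layout packed
}

// Int32 is a newtype on a primitive; with the inner modifier the generated
// slice helpers accept []int32 rather than []Int32, avoiding a cast at the
// call sites.
//
// +marshal slice:Int32Slice,inner
type Int32 int32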
diff --git a/pkg/marshal/marshal_impl_util.go b/pkg/marshal/marshal_impl_util.go
index ea75e09f2..9e6a6fa29 100644
--- a/pkg/marshal/marshal_impl_util.go
+++ b/pkg/marshal/marshal_impl_util.go
@@ -17,7 +17,7 @@ package marshal
import (
"io"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// StubMarshallable implements the Marshallable interface.
@@ -63,16 +63,16 @@ func (StubMarshallable) UnmarshalUnsafe(src []byte) {
}
// CopyIn implements Marshallable.CopyIn.
-func (StubMarshallable) CopyIn(cc CopyContext, addr usermem.Addr) (int, error) {
+func (StubMarshallable) CopyIn(cc CopyContext, addr hostarch.Addr) (int, error) {
panic("Please implement your own CopyIn function")
}
// CopyOut implements Marshallable.CopyOut.
-func (StubMarshallable) CopyOut(cc CopyContext, addr usermem.Addr) (int, error) {
+func (StubMarshallable) CopyOut(cc CopyContext, addr hostarch.Addr) (int, error) {
panic("Please implement your own CopyOut function")
}
// CopyOutN implements Marshallable.CopyOutN.
-func (StubMarshallable) CopyOutN(cc CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (StubMarshallable) CopyOutN(cc CopyContext, addr hostarch.Addr, limit int) (int, error) {
panic("Please implement your own CopyOutN function")
}
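StubMarshallable is easiest to read with a user in mind: embedding it satisfies the whole Marshallable interface with panicking stubs, so a dynamically sized type only hand-writes the methods it can actually support. A sketch under that assumption (dynamicBlob is hypothetical):

package foo

import (
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/marshal"
)

// dynamicBlob is a hypothetical variable-length type. The embedded
// StubMarshallable supplies panicking implementations for everything not
// overridden below.
type dynamicBlob struct {
	marshal.StubMarshallable
	data []byte
}

// SizeBytes reports the current length of the blob.
func (b *dynamicBlob) SizeBytes() int { return len(b.data) }

// CopyOut writes the blob to the task's memory at addr.
func (b *dynamicBlob) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
	return cc.CopyOutBytes(addr, b.data)
}

// CopyIn fills the blob from the task's memory at addr.
func (b *dynamicBlob) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
	return cc.CopyInBytes(addr, b.data)
}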
diff --git a/pkg/marshal/primitive/primitive.go b/pkg/marshal/primitive/primitive.go
index 4b342de6b..32c8ed138 100644
--- a/pkg/marshal/primitive/primitive.go
+++ b/pkg/marshal/primitive/primitive.go
@@ -20,6 +20,7 @@ import (
"io"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -102,17 +103,17 @@ func (b *ByteSlice) UnmarshalUnsafe(src []byte) {
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (b *ByteSlice) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (b *ByteSlice) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return cc.CopyInBytes(addr, *b)
}
// CopyOut implements marshal.Marshallable.CopyOut.
-func (b *ByteSlice) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (b *ByteSlice) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return cc.CopyOutBytes(addr, *b)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
-func (b *ByteSlice) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (b *ByteSlice) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
return cc.CopyOutBytes(addr, (*b)[:limit])
}
@@ -131,7 +132,7 @@ var _ marshal.Marshallable = (*ByteSlice)(nil)
// CopyInt8In is a convenient wrapper for copying in an int8 from the task's
// memory.
-func CopyInt8In(cc marshal.CopyContext, addr usermem.Addr, dst *int8) (int, error) {
+func CopyInt8In(cc marshal.CopyContext, addr hostarch.Addr, dst *int8) (int, error) {
var buf Int8
n, err := buf.CopyIn(cc, addr)
if err != nil {
@@ -143,14 +144,14 @@ func CopyInt8In(cc marshal.CopyContext, addr usermem.Addr, dst *int8) (int, erro
// CopyInt8Out is a convenient wrapper for copying out an int8 to the task's
// memory.
-func CopyInt8Out(cc marshal.CopyContext, addr usermem.Addr, src int8) (int, error) {
+func CopyInt8Out(cc marshal.CopyContext, addr hostarch.Addr, src int8) (int, error) {
srcP := Int8(src)
return srcP.CopyOut(cc, addr)
}
// CopyUint8In is a convenient wrapper for copying in a uint8 from the task's
// memory.
-func CopyUint8In(cc marshal.CopyContext, addr usermem.Addr, dst *uint8) (int, error) {
+func CopyUint8In(cc marshal.CopyContext, addr hostarch.Addr, dst *uint8) (int, error) {
var buf Uint8
n, err := buf.CopyIn(cc, addr)
if err != nil {
@@ -162,7 +163,7 @@ func CopyUint8In(cc marshal.CopyContext, addr usermem.Addr, dst *uint8) (int, er
// CopyUint8Out is a convenient wrapper for copying out a uint8 to the task's
// memory.
-func CopyUint8Out(cc marshal.CopyContext, addr usermem.Addr, src uint8) (int, error) {
+func CopyUint8Out(cc marshal.CopyContext, addr hostarch.Addr, src uint8) (int, error) {
srcP := Uint8(src)
return srcP.CopyOut(cc, addr)
}
@@ -171,7 +172,7 @@ func CopyUint8Out(cc marshal.CopyContext, addr usermem.Addr, src uint8) (int, er
// CopyInt16In is a convenient wrapper for copying in an int16 from the task's
// memory.
-func CopyInt16In(cc marshal.CopyContext, addr usermem.Addr, dst *int16) (int, error) {
+func CopyInt16In(cc marshal.CopyContext, addr hostarch.Addr, dst *int16) (int, error) {
var buf Int16
n, err := buf.CopyIn(cc, addr)
if err != nil {
@@ -183,14 +184,14 @@ func CopyInt16In(cc marshal.CopyContext, addr usermem.Addr, dst *int16) (int, er
// CopyInt16Out is a convenient wrapper for copying out an int16 to the task's
// memory.
-func CopyInt16Out(cc marshal.CopyContext, addr usermem.Addr, src int16) (int, error) {
+func CopyInt16Out(cc marshal.CopyContext, addr hostarch.Addr, src int16) (int, error) {
srcP := Int16(src)
return srcP.CopyOut(cc, addr)
}
// CopyUint16In is a convenient wrapper for copying in a uint16 from the task's
// memory.
-func CopyUint16In(cc marshal.CopyContext, addr usermem.Addr, dst *uint16) (int, error) {
+func CopyUint16In(cc marshal.CopyContext, addr hostarch.Addr, dst *uint16) (int, error) {
var buf Uint16
n, err := buf.CopyIn(cc, addr)
if err != nil {
@@ -202,7 +203,7 @@ func CopyUint16In(cc marshal.CopyContext, addr usermem.Addr, dst *uint16) (int,
// CopyUint16Out is a convenient wrapper for copying out a uint16 to the task's
// memory.
-func CopyUint16Out(cc marshal.CopyContext, addr usermem.Addr, src uint16) (int, error) {
+func CopyUint16Out(cc marshal.CopyContext, addr hostarch.Addr, src uint16) (int, error) {
srcP := Uint16(src)
return srcP.CopyOut(cc, addr)
}
@@ -211,7 +212,7 @@ func CopyUint16Out(cc marshal.CopyContext, addr usermem.Addr, src uint16) (int,
// CopyInt32In is a convenient wrapper for copying in an int32 from the task's
// memory.
-func CopyInt32In(cc marshal.CopyContext, addr usermem.Addr, dst *int32) (int, error) {
+func CopyInt32In(cc marshal.CopyContext, addr hostarch.Addr, dst *int32) (int, error) {
var buf Int32
n, err := buf.CopyIn(cc, addr)
if err != nil {
@@ -223,14 +224,14 @@ func CopyInt32In(cc marshal.CopyContext, addr usermem.Addr, dst *int32) (int, er
// CopyInt32Out is a convenient wrapper for copying out an int32 to the task's
// memory.
-func CopyInt32Out(cc marshal.CopyContext, addr usermem.Addr, src int32) (int, error) {
+func CopyInt32Out(cc marshal.CopyContext, addr hostarch.Addr, src int32) (int, error) {
srcP := Int32(src)
return srcP.CopyOut(cc, addr)
}
// CopyUint32In is a convenient wrapper for copying in a uint32 from the task's
// memory.
-func CopyUint32In(cc marshal.CopyContext, addr usermem.Addr, dst *uint32) (int, error) {
+func CopyUint32In(cc marshal.CopyContext, addr hostarch.Addr, dst *uint32) (int, error) {
var buf Uint32
n, err := buf.CopyIn(cc, addr)
if err != nil {
@@ -242,7 +243,7 @@ func CopyUint32In(cc marshal.CopyContext, addr usermem.Addr, dst *uint32) (int,
// CopyUint32Out is a convenient wrapper for copying out a uint32 to the task's
// memory.
-func CopyUint32Out(cc marshal.CopyContext, addr usermem.Addr, src uint32) (int, error) {
+func CopyUint32Out(cc marshal.CopyContext, addr hostarch.Addr, src uint32) (int, error) {
srcP := Uint32(src)
return srcP.CopyOut(cc, addr)
}
@@ -251,7 +252,7 @@ func CopyUint32Out(cc marshal.CopyContext, addr usermem.Addr, src uint32) (int,
// CopyInt64In is a convenient wrapper for copying in an int64 from the task's
// memory.
-func CopyInt64In(cc marshal.CopyContext, addr usermem.Addr, dst *int64) (int, error) {
+func CopyInt64In(cc marshal.CopyContext, addr hostarch.Addr, dst *int64) (int, error) {
var buf Int64
n, err := buf.CopyIn(cc, addr)
if err != nil {
@@ -263,14 +264,14 @@ func CopyInt64In(cc marshal.CopyContext, addr usermem.Addr, dst *int64) (int, er
// CopyInt64Out is a convenient wrapper for copying out an int64 to the task's
// memory.
-func CopyInt64Out(cc marshal.CopyContext, addr usermem.Addr, src int64) (int, error) {
+func CopyInt64Out(cc marshal.CopyContext, addr hostarch.Addr, src int64) (int, error) {
srcP := Int64(src)
return srcP.CopyOut(cc, addr)
}
// CopyUint64In is a convenient wrapper for copying in a uint64 from the task's
// memory.
-func CopyUint64In(cc marshal.CopyContext, addr usermem.Addr, dst *uint64) (int, error) {
+func CopyUint64In(cc marshal.CopyContext, addr hostarch.Addr, dst *uint64) (int, error) {
var buf Uint64
n, err := buf.CopyIn(cc, addr)
if err != nil {
@@ -282,14 +283,14 @@ func CopyUint64In(cc marshal.CopyContext, addr usermem.Addr, dst *uint64) (int,
// CopyUint64Out is a convenient wrapper for copying out a uint64 to the task's
// memory.
-func CopyUint64Out(cc marshal.CopyContext, addr usermem.Addr, src uint64) (int, error) {
+func CopyUint64Out(cc marshal.CopyContext, addr hostarch.Addr, src uint64) (int, error) {
srcP := Uint64(src)
return srcP.CopyOut(cc, addr)
}
// CopyByteSliceIn is a convenient wrapper for copying in a []byte from the
// task's memory.
-func CopyByteSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst *[]byte) (int, error) {
+func CopyByteSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst *[]byte) (int, error) {
var buf ByteSlice
n, err := buf.CopyIn(cc, addr)
if err != nil {
@@ -301,14 +302,14 @@ func CopyByteSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst *[]byte) (in
// CopyByteSliceOut is a convenient wrapper for copying out a []byte to the
// task's memory.
-func CopyByteSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []byte) (int, error) {
+func CopyByteSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []byte) (int, error) {
srcP := ByteSlice(src)
return srcP.CopyOut(cc, addr)
}
// CopyStringIn is a convenient wrapper for copying in a string from the
// task's memory.
-func CopyStringIn(cc marshal.CopyContext, addr usermem.Addr, dst *string) (int, error) {
+func CopyStringIn(cc marshal.CopyContext, addr hostarch.Addr, dst *string) (int, error) {
var buf ByteSlice
n, err := buf.CopyIn(cc, addr)
if err != nil {
@@ -320,12 +321,12 @@ func CopyStringIn(cc marshal.CopyContext, addr usermem.Addr, dst *string) (int,
// CopyStringOut is a convenient wrapper for copying out a string to the task's
// memory.
-func CopyStringOut(cc marshal.CopyContext, addr usermem.Addr, src string) (int, error) {
+func CopyStringOut(cc marshal.CopyContext, addr hostarch.Addr, src string) (int, error) {
srcP := ByteSlice(src)
return srcP.CopyOut(cc, addr)
}
// IOCopyContext wraps an object implementing usermem.IO to implement
// marshal.CopyContext.
type IOCopyContext struct {
Ctx context.Context
@@ -339,11 +340,11 @@ func (i *IOCopyContext) CopyScratchBuffer(size int) []byte {
}
// CopyOutBytes implements marshal.CopyContext.CopyOutBytes.
-func (i *IOCopyContext) CopyOutBytes(addr usermem.Addr, b []byte) (int, error) {
+func (i *IOCopyContext) CopyOutBytes(addr hostarch.Addr, b []byte) (int, error) {
return i.IO.CopyOut(i.Ctx, addr, b, i.Opts)
}
// CopyInBytes implements marshal.CopyContext.CopyInBytes.
-func (i *IOCopyContext) CopyInBytes(addr usermem.Addr, b []byte) (int, error) {
+func (i *IOCopyContext) CopyInBytes(addr hostarch.Addr, b []byte) (int, error) {
return i.IO.CopyIn(i.Ctx, addr, b, i.Opts)
}
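The renamed wrappers above are typically exercised through an IOCopyContext. A small sketch, assuming usermem.NewBytesIO and pkg/context.Background behave as they do in gVisor's tests; the address and value are arbitrary:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/marshal/primitive"
	"gvisor.dev/gvisor/pkg/usermem"
)

func main() {
	// A 16-byte BytesIO stands in for a task's address space.
	mem := usermem.NewBytesIO(make([]byte, 16))
	cc := &primitive.IOCopyContext{
		Ctx: context.Background(),
		IO:  mem,
	}

	// Copy a uint32 out to "address" 4, then read it back in.
	if _, err := primitive.CopyUint32Out(cc, 4, 0xabad1dea); err != nil {
		panic(err)
	}
	var v uint32
	if _, err := primitive.CopyUint32In(cc, 4, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%#x\n", v) // 0xabad1dea
}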
diff --git a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
index 7564cecec..8a3b4dd5d 100644
--- a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
+++ b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
@@ -10,9 +10,9 @@ package primitive
import (
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -37,12 +37,12 @@ func (i *Int16) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (i *Int16) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(*i))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(*i))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (i *Int16) UnmarshalBytes(src []byte) {
- *i = Int16(int16(usermem.ByteOrder.Uint16(src[:2])))
+ *i = Int16(int16(hostarch.ByteOrder.Uint16(src[:2])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -64,7 +64,7 @@ func (i *Int16) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int16) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -81,13 +81,13 @@ func (i *Int16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int16) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int16) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -120,7 +120,7 @@ func (i *Int16) WriteTo(w io.Writer) (int64, error) {
// CopyInt16SliceIn copies in a slice of int16 objects from the task's memory.
//go:nosplit
-func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (int, error) {
+func CopyInt16SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []int16) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -146,7 +146,7 @@ func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (i
// CopyInt16SliceOut copies a slice of int16 objects to the task's memory.
//go:nosplit
-func CopyInt16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int16) (int, error) {
+func CopyInt16SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []int16) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -214,12 +214,12 @@ func (i *Int32) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (i *Int32) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*i))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*i))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (i *Int32) UnmarshalBytes(src []byte) {
- *i = Int32(int32(usermem.ByteOrder.Uint32(src[:4])))
+ *i = Int32(int32(hostarch.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -241,7 +241,7 @@ func (i *Int32) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int32) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -258,13 +258,13 @@ func (i *Int32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int32) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int32) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -297,7 +297,7 @@ func (i *Int32) WriteTo(w io.Writer) (int64, error) {
// CopyInt32SliceIn copies in a slice of int32 objects from the task's memory.
//go:nosplit
-func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (int, error) {
+func CopyInt32SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []int32) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -323,7 +323,7 @@ func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (i
// CopyInt32SliceOut copies a slice of int32 objects to the task's memory.
//go:nosplit
-func CopyInt32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int32) (int, error) {
+func CopyInt32SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []int32) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -391,12 +391,12 @@ func (i *Int64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (i *Int64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*i))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(*i))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (i *Int64) UnmarshalBytes(src []byte) {
- *i = Int64(int64(usermem.ByteOrder.Uint64(src[:8])))
+ *i = Int64(int64(hostarch.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -418,7 +418,7 @@ func (i *Int64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -435,13 +435,13 @@ func (i *Int64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -474,7 +474,7 @@ func (i *Int64) WriteTo(w io.Writer) (int64, error) {
// CopyInt64SliceIn copies in a slice of int64 objects from the task's memory.
//go:nosplit
-func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (int, error) {
+func CopyInt64SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []int64) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -500,7 +500,7 @@ func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (i
// CopyInt64SliceOut copies a slice of int64 objects to the task's memory.
//go:nosplit
-func CopyInt64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int64) (int, error) {
+func CopyInt64SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []int64) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -595,7 +595,7 @@ func (i *Int8) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int8) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -612,13 +612,13 @@ func (i *Int8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int8) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int8) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -651,7 +651,7 @@ func (i *Int8) WriteTo(w io.Writer) (int64, error) {
// CopyInt8SliceIn copies in a slice of int8 objects from the task's memory.
//go:nosplit
-func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int, error) {
+func CopyInt8SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []int8) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -677,7 +677,7 @@ func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int
// CopyInt8SliceOut copies a slice of int8 objects to the task's memory.
//go:nosplit
-func CopyInt8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int8) (int, error) {
+func CopyInt8SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []int8) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -745,12 +745,12 @@ func (u *Uint16) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (u *Uint16) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(*u))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(*u))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (u *Uint16) UnmarshalBytes(src []byte) {
- *u = Uint16(uint16(usermem.ByteOrder.Uint16(src[:2])))
+ *u = Uint16(uint16(hostarch.ByteOrder.Uint16(src[:2])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -772,7 +772,7 @@ func (u *Uint16) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -789,13 +789,13 @@ func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint16) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint16) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -828,7 +828,7 @@ func (u *Uint16) WriteTo(w io.Writer) (int64, error) {
// CopyUint16SliceIn copies in a slice of uint16 objects from the task's memory.
//go:nosplit
-func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16) (int, error) {
+func CopyUint16SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []uint16) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -854,7 +854,7 @@ func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16)
// CopyUint16SliceOut copies a slice of uint16 objects to the task's memory.
//go:nosplit
-func CopyUint16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint16) (int, error) {
+func CopyUint16SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []uint16) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -922,12 +922,12 @@ func (u *Uint32) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (u *Uint32) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*u))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*u))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (u *Uint32) UnmarshalBytes(src []byte) {
- *u = Uint32(uint32(usermem.ByteOrder.Uint32(src[:4])))
+ *u = Uint32(uint32(hostarch.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -949,7 +949,7 @@ func (u *Uint32) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -966,13 +966,13 @@ func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint32) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint32) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1005,7 +1005,7 @@ func (u *Uint32) WriteTo(w io.Writer) (int64, error) {
// CopyUint32SliceIn copies in a slice of uint32 objects from the task's memory.
//go:nosplit
-func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32) (int, error) {
+func CopyUint32SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []uint32) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -1031,7 +1031,7 @@ func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32)
// CopyUint32SliceOut copies a slice of uint32 objects to the task's memory.
//go:nosplit
-func CopyUint32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint32) (int, error) {
+func CopyUint32SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []uint32) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -1099,12 +1099,12 @@ func (u *Uint64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (u *Uint64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*u))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(*u))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (u *Uint64) UnmarshalBytes(src []byte) {
- *u = Uint64(uint64(usermem.ByteOrder.Uint64(src[:8])))
+ *u = Uint64(uint64(hostarch.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -1126,7 +1126,7 @@ func (u *Uint64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1143,13 +1143,13 @@ func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1182,7 +1182,7 @@ func (u *Uint64) WriteTo(w io.Writer) (int64, error) {
// CopyUint64SliceIn copies in a slice of uint64 objects from the task's memory.
//go:nosplit
-func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64) (int, error) {
+func CopyUint64SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []uint64) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -1208,7 +1208,7 @@ func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64)
// CopyUint64SliceOut copies a slice of uint64 objects to the task's memory.
//go:nosplit
-func CopyUint64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint64) (int, error) {
+func CopyUint64SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []uint64) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -1303,7 +1303,7 @@ func (u *Uint8) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1320,13 +1320,13 @@ func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint8) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint8) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1359,7 +1359,7 @@ func (u *Uint8) WriteTo(w io.Writer) (int64, error) {
// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory.
//go:nosplit
-func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (int, error) {
+func CopyUint8SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []uint8) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -1385,7 +1385,7 @@ func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (i
// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory.
//go:nosplit
-func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (int, error) {
+func CopyUint8SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []uint8) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
diff --git a/pkg/ring0/defs_impl_amd64.go b/pkg/ring0/defs_impl_amd64.go
index 411ea9126..cd8f735c0 100644
--- a/pkg/ring0/defs_impl_amd64.go
+++ b/pkg/ring0/defs_impl_amd64.go
@@ -7,10 +7,10 @@ package ring0
import (
"fmt"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/ring0/pagetables"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
)
@@ -112,7 +112,7 @@ var (
UserspaceSize = uintptr(1) << (VirtualAddressBits() - 1)
// MaximumUserAddress is the largest possible user address.
- MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(usermem.PageSize-1)
+ MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(hostarch.PageSize-1)
// KernelStartAddress is the starting kernel address.
KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1)
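Only the package providing PageSize changes here, but the constant is easier to sanity-check with numbers plugged in. Assuming VirtualAddressBits() reports 48 and 4KiB pages (the common amd64 case), the MaximumUserAddress expression evaluates as:

package main

import "fmt"

func main() {
	const pageSize = 4096
	// uint64 mirrors the uintptr arithmetic on a 64-bit host.
	userspaceSize := uint64(1) << (48 - 1) // VirtualAddressBits() == 48 assumed
	maximumUserAddress := (userspaceSize - 1) &^ uint64(pageSize-1)
	fmt.Printf("%#x\n", maximumUserAddress) // 0x7ffffffff000
}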
diff --git a/pkg/ring0/defs_impl_arm64.go b/pkg/ring0/defs_impl_arm64.go
index 578c6b822..90f6af9fc 100644
--- a/pkg/ring0/defs_impl_arm64.go
+++ b/pkg/ring0/defs_impl_arm64.go
@@ -6,10 +6,10 @@ package ring0
import (
"fmt"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/ring0/pagetables"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
)
@@ -216,7 +216,7 @@ var (
UserspaceSize = uintptr(1) << (VirtualAddressBits())
// MaximumUserAddress is the largest possible user address.
- MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(usermem.PageSize-1)
+ MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(hostarch.PageSize-1)
// KernelStartAddress is the starting kernel address.
KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1)
diff --git a/pkg/ring0/kernel_amd64.go b/pkg/ring0/kernel_amd64.go
index 33c259757..92d2330cb 100644
--- a/pkg/ring0/kernel_amd64.go
+++ b/pkg/ring0/kernel_amd64.go
@@ -20,7 +20,7 @@ import (
"encoding/binary"
"reflect"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// init initializes architecture-specific state.
@@ -34,7 +34,7 @@ func (k *Kernel) init(maxCPUs int) {
entries = make([]kernelEntry, maxCPUs+padding-1)
totalSize := entrySize * uintptr(maxCPUs+padding-1)
addr := reflect.ValueOf(&entries[0]).Pointer()
- if addr&(usermem.PageSize-1) == 0 && totalSize >= usermem.PageSize {
+ if addr&(hostarch.PageSize-1) == 0 && totalSize >= hostarch.PageSize {
// The runtime forces power-of-2 alignment for allocations, and we are therefore
// safe once the first address is aligned and the chunk is at least a full page.
break
@@ -44,10 +44,10 @@ func (k *Kernel) init(maxCPUs int) {
k.cpuEntries = entries
k.globalIDT = &idt64{}
- if reflect.TypeOf(idt64{}).Size() != usermem.PageSize {
+ if reflect.TypeOf(idt64{}).Size() != hostarch.PageSize {
panic("Size of globalIDT should be PageSize")
}
- if reflect.ValueOf(k.globalIDT).Pointer()&(usermem.PageSize-1) != 0 {
+ if reflect.ValueOf(k.globalIDT).Pointer()&(hostarch.PageSize-1) != 0 {
panic("Allocated globalIDT should be page aligned")
}
@@ -71,13 +71,13 @@ func (k *Kernel) EntryRegions() map[uintptr]uintptr {
addr := reflect.ValueOf(&k.cpuEntries[0]).Pointer()
size := reflect.TypeOf(kernelEntry{}).Size() * uintptr(len(k.cpuEntries))
- end, _ := usermem.Addr(addr + size).RoundUp()
- regions[uintptr(usermem.Addr(addr).RoundDown())] = uintptr(end)
+ end, _ := hostarch.Addr(addr + size).RoundUp()
+ regions[uintptr(hostarch.Addr(addr).RoundDown())] = uintptr(end)
addr = reflect.ValueOf(k.globalIDT).Pointer()
size = reflect.TypeOf(idt64{}).Size()
- end, _ = usermem.Addr(addr + size).RoundUp()
- regions[uintptr(usermem.Addr(addr).RoundDown())] = uintptr(end)
+ end, _ = hostarch.Addr(addr + size).RoundUp()
+ regions[uintptr(hostarch.Addr(addr).RoundDown())] = uintptr(end)
return regions
}
diff --git a/pkg/ring0/pagetables/allocator_unsafe.go b/pkg/ring0/pagetables/allocator_unsafe.go
index d08bfdeb3..191d0942b 100644
--- a/pkg/ring0/pagetables/allocator_unsafe.go
+++ b/pkg/ring0/pagetables/allocator_unsafe.go
@@ -17,23 +17,23 @@ package pagetables
import (
"unsafe"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// newAlignedPTEs returns a set of aligned PTEs.
func newAlignedPTEs() *PTEs {
ptes := new(PTEs)
- offset := physicalFor(ptes) & (usermem.PageSize - 1)
+ offset := physicalFor(ptes) & (hostarch.PageSize - 1)
if offset == 0 {
// Already aligned.
return ptes
}
// Need to force an aligned allocation.
- unaligned := make([]byte, (2*usermem.PageSize)-1)
- offset = uintptr(unsafe.Pointer(&unaligned[0])) & (usermem.PageSize - 1)
+ unaligned := make([]byte, (2*hostarch.PageSize)-1)
+ offset = uintptr(unsafe.Pointer(&unaligned[0])) & (hostarch.PageSize - 1)
if offset != 0 {
- offset = usermem.PageSize - offset
+ offset = hostarch.PageSize - offset
}
return (*PTEs)(unsafe.Pointer(&unaligned[offset]))
}
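The alignment trick in newAlignedPTEs stands on its own: over-allocate 2*PageSize-1 bytes so that some offset into the buffer is page-aligned and still has a full page behind it. A standalone rendering with a fixed 4KiB page size (the real code then reinterprets the aligned pointer as *PTEs):

package main

import (
	"fmt"
	"unsafe"
)

const pageSize = 4096

// alignedBuf mirrors newAlignedPTEs' fallback path for an allocation that
// did not land on a page boundary.
func alignedBuf() unsafe.Pointer {
	unaligned := make([]byte, (2*pageSize)-1)
	offset := uintptr(unsafe.Pointer(&unaligned[0])) & (pageSize - 1)
	if offset != 0 {
		offset = pageSize - offset
	}
	return unsafe.Pointer(&unaligned[offset])
}

func main() {
	p := alignedBuf()
	fmt.Println("page aligned:", uintptr(p)&(pageSize-1) == 0) // true
}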
diff --git a/pkg/ring0/pagetables/pagetables.go b/pkg/ring0/pagetables/pagetables.go
index 8c0a6aa82..3f17fba49 100644
--- a/pkg/ring0/pagetables/pagetables.go
+++ b/pkg/ring0/pagetables/pagetables.go
@@ -21,7 +21,7 @@
package pagetables
import (
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// PageTables is a set of page tables.
@@ -142,7 +142,7 @@ func (*mapVisitor) requiresSplit() bool { return true }
//
// +checkescape:hard,stack
//go:nosplit
-func (p *PageTables) Map(addr usermem.Addr, length uintptr, opts MapOpts, physical uintptr) bool {
+func (p *PageTables) Map(addr hostarch.Addr, length uintptr, opts MapOpts, physical uintptr) bool {
if p.readOnlyShared {
panic("Should not modify read-only shared pagetables.")
}
@@ -198,7 +198,7 @@ func (v *unmapVisitor) visit(start uintptr, pte *PTE, align uintptr) bool {
//
// +checkescape:hard,stack
//go:nosplit
-func (p *PageTables) Unmap(addr usermem.Addr, length uintptr) bool {
+func (p *PageTables) Unmap(addr hostarch.Addr, length uintptr) bool {
if p.readOnlyShared {
panic("Should not modify read-only shared pagetables.")
}
@@ -249,7 +249,7 @@ func (v *emptyVisitor) visit(start uintptr, pte *PTE, align uintptr) bool {
//
// +checkescape:hard,stack
//go:nosplit
-func (p *PageTables) IsEmpty(addr usermem.Addr, length uintptr) bool {
+func (p *PageTables) IsEmpty(addr hostarch.Addr, length uintptr) bool {
w := emptyWalker{
pageTables: p,
}
@@ -298,9 +298,9 @@ func (*lookupVisitor) requiresSplit() bool { return false }
//
// +checkescape:hard,stack
//go:nosplit
-func (p *PageTables) Lookup(addr usermem.Addr, findFirst bool) (virtual usermem.Addr, physical, size uintptr, opts MapOpts) {
- mask := uintptr(usermem.PageSize - 1)
- addr &^= usermem.Addr(mask)
+func (p *PageTables) Lookup(addr hostarch.Addr, findFirst bool) (virtual hostarch.Addr, physical, size uintptr, opts MapOpts) {
+ mask := uintptr(hostarch.PageSize - 1)
+ addr &^= hostarch.Addr(mask)
w := lookupWalker{
pageTables: p,
visitor: lookupVisitor{
@@ -308,12 +308,12 @@ func (p *PageTables) Lookup(addr usermem.Addr, findFirst bool) (virtual usermem.
findFirst: findFirst,
},
}
- end := ^usermem.Addr(0) &^ usermem.Addr(mask)
+ end := ^hostarch.Addr(0) &^ hostarch.Addr(mask)
if !findFirst {
end = addr + 1
}
w.iterateRange(uintptr(addr), uintptr(end))
- return usermem.Addr(w.visitor.target), w.visitor.physical, w.visitor.size, w.visitor.opts
+ return hostarch.Addr(w.visitor.target), w.visitor.physical, w.visitor.size, w.visitor.opts
}
// MarkReadOnlyShared marks the pagetables read-only and can be shared.
diff --git a/pkg/ring0/pagetables/pagetables_aarch64.go b/pkg/ring0/pagetables/pagetables_aarch64.go
index 163a3aea3..86eb00a4f 100644
--- a/pkg/ring0/pagetables/pagetables_aarch64.go
+++ b/pkg/ring0/pagetables/pagetables_aarch64.go
@@ -19,7 +19,7 @@ package pagetables
import (
"sync/atomic"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// archPageTables is architecture-specific data.
@@ -85,7 +85,7 @@ const (
// MapOpts are arm64 options.
type MapOpts struct {
// AccessType defines permissions.
- AccessType usermem.AccessType
+ AccessType hostarch.AccessType
// Global indicates the page is globally accessible.
Global bool
@@ -120,7 +120,7 @@ func (p *PTE) Opts() MapOpts {
v := atomic.LoadUintptr((*uintptr)(p))
return MapOpts{
- AccessType: usermem.AccessType{
+ AccessType: hostarch.AccessType{
Read: true,
Write: v&readOnly == 0,
Execute: v&xn == 0,
diff --git a/pkg/ring0/pagetables/pagetables_x86.go b/pkg/ring0/pagetables/pagetables_x86.go
index 32edd2f0a..e43698173 100644
--- a/pkg/ring0/pagetables/pagetables_x86.go
+++ b/pkg/ring0/pagetables/pagetables_x86.go
@@ -19,7 +19,7 @@ package pagetables
import (
"sync/atomic"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// archPageTables is architecture-specific data.
@@ -63,7 +63,7 @@ const (
// MapOpts are x86 options.
type MapOpts struct {
// AccessType defines permissions.
- AccessType usermem.AccessType
+ AccessType hostarch.AccessType
// Global indicates the page is globally accessible.
Global bool
@@ -97,7 +97,7 @@ func (p *PTE) Valid() bool {
func (p *PTE) Opts() MapOpts {
v := atomic.LoadUintptr((*uintptr)(p))
return MapOpts{
- AccessType: usermem.AccessType{
+ AccessType: hostarch.AccessType{
Read: v&present != 0,
Write: v&writable != 0,
Execute: v&executeDisable == 0,
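MapOpts now carries a hostarch.AccessType, but the bit decoding in Opts is unchanged. A toy decoder over the standard x86-64 PTE flag positions (present is bit 0, writable bit 1, NX bit 63); the type and constants below are local mirrors of the unexported ones in this package:

package main

import "fmt"

// AccessType mirrors hostarch.AccessType.
type AccessType struct {
	Read, Write, Execute bool
}

const (
	present        = uint64(1) << 0
	writable       = uint64(1) << 1
	executeDisable = uint64(1) << 63
)

// opts decodes a raw PTE value the same way PTE.Opts does above.
func opts(pte uint64) AccessType {
	return AccessType{
		Read:    pte&present != 0,
		Write:   pte&writable != 0,
		Execute: pte&executeDisable == 0,
	}
}

func main() {
	// A present, writable, no-execute entry.
	fmt.Printf("%+v\n", opts(present|writable|executeDisable))
	// {Read:true Write:true Execute:false}
}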
diff --git a/pkg/sentry/arch/arch.go b/pkg/sentry/arch/arch.go
index 921151137..290863ee6 100644
--- a/pkg/sentry/arch/arch.go
+++ b/pkg/sentry/arch/arch.go
@@ -22,11 +22,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
"gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Arch describes an architecture.
@@ -188,11 +188,11 @@ type Context interface {
// returned layout must be no lower than min, and MaxAddr for the returned
// layout must be no higher than max. Repeated calls to NewMmapLayout may
// return different layouts.
- NewMmapLayout(min, max usermem.Addr, limits *limits.LimitSet) (MmapLayout, error)
+ NewMmapLayout(min, max hostarch.Addr, limits *limits.LimitSet) (MmapLayout, error)
// PIELoadAddress returns a preferred load address for a
// position-independent executable within l.
- PIELoadAddress(l MmapLayout) usermem.Addr
+ PIELoadAddress(l MmapLayout) hostarch.Addr
// FeatureSet returns the FeatureSet in use in this context.
FeatureSet() *cpuid.FeatureSet
@@ -257,18 +257,18 @@ const (
// +stateify savable
type MmapLayout struct {
// MinAddr is the lowest mappable address.
- MinAddr usermem.Addr
+ MinAddr hostarch.Addr
// MaxAddr is the highest mappable address.
- MaxAddr usermem.Addr
+ MaxAddr hostarch.Addr
// BottomUpBase is the lowest address that may be returned for a
// MmapBottomUp mmap.
- BottomUpBase usermem.Addr
+ BottomUpBase hostarch.Addr
// TopDownBase is the highest address that may be returned for a
// MmapTopDown mmap.
- TopDownBase usermem.Addr
+ TopDownBase hostarch.Addr
// DefaultDirection is the direction for most non-fixed mmaps in this
// layout.
@@ -316,9 +316,9 @@ type SyscallArgument struct {
// SyscallArguments represents the set of arguments passed to a syscall.
type SyscallArguments [6]SyscallArgument
-// Pointer returns the usermem.Addr representation of a pointer argument.
-func (a SyscallArgument) Pointer() usermem.Addr {
- return usermem.Addr(a.Value)
+// Pointer returns the hostarch.Addr representation of a pointer argument.
+func (a SyscallArgument) Pointer() hostarch.Addr {
+ return hostarch.Addr(a.Value)
}
// Int returns the int32 representation of a 32-bit signed integer argument.
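For callers, the Pointer() change above means syscall pointer arguments now come back as hostarch.Addr. A minimal compile-oriented sketch (the helper name is made up; only SyscallArguments, Pointer() and hostarch.Addr come from the hunks above):

package sample

import (
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/sentry/arch"
)

// userBufArg extracts the i-th syscall argument as a user address; handlers
// now receive hostarch.Addr where they previously received usermem.Addr.
func userBufArg(args arch.SyscallArguments, i int) hostarch.Addr {
	return args[i].Pointer()
}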
diff --git a/pkg/sentry/arch/arch_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
index 308a9dfb4..6ab83879c 100644
--- a/pkg/sentry/arch/arch_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
@@ -13,9 +13,9 @@ package arch
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -36,11 +36,11 @@ func (s *SignalAct) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SignalAct) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Handler))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Handler))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Flags))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Flags))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Restorer))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Restorer))
dst = dst[8:]
s.Mask.MarshalBytes(dst[:s.Mask.SizeBytes()])
dst = dst[s.Mask.SizeBytes():]
@@ -48,11 +48,11 @@ func (s *SignalAct) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SignalAct) UnmarshalBytes(src []byte) {
- s.Handler = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Handler = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Flags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Restorer = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Restorer = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
s.Mask.UnmarshalBytes(src[:s.Mask.SizeBytes()])
src = src[s.Mask.SizeBytes():]
@@ -86,7 +86,7 @@ func (s *SignalAct) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SignalAct) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SignalAct) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.Mask.Packed() {
// Type SignalAct doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -110,13 +110,13 @@ func (s *SignalAct) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SignalAct) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalAct) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SignalAct) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalAct) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.Mask.Packed() {
// Type SignalAct doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -173,11 +173,11 @@ func (s *SignalInfo) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SignalInfo) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
@@ -189,11 +189,11 @@ func (s *SignalInfo) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SignalInfo) UnmarshalBytes(src []byte) {
- s.Signo = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Signo = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Errno = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Errno = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Code = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Code = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -221,7 +221,7 @@ func (s *SignalInfo) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -238,13 +238,13 @@ func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -282,25 +282,25 @@ func (s *SignalStack) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SignalStack) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Addr))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Addr))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Flags))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SignalStack) UnmarshalBytes(src []byte) {
- s.Addr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Addr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- s.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Size = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -322,7 +322,7 @@ func (s *SignalStack) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SignalStack) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SignalStack) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -339,13 +339,13 @@ func (s *SignalStack) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SignalStack) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalStack) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SignalStack) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalStack) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
diff --git a/pkg/sentry/arch/arch_amd64.go b/pkg/sentry/arch/arch_amd64.go
index 2571be60f..d6b4d2357 100644
--- a/pkg/sentry/arch/arch_amd64.go
+++ b/pkg/sentry/arch/arch_amd64.go
@@ -23,11 +23,11 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
"gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Host specifies the host architecture.
@@ -37,7 +37,7 @@ const Host = AMD64
const (
// maxAddr64 is the maximum userspace address. It is TASK_SIZE in Linux
// for a 64-bit process.
- maxAddr64 usermem.Addr = (1 << 47) - usermem.PageSize
+ maxAddr64 hostarch.Addr = (1 << 47) - hostarch.PageSize
// maxStackRand64 is the maximum randomization to apply to the stack.
// It is defined by arch/x86/mm/mmap.c:stack_maxrandom_size in Linux.
@@ -45,7 +45,7 @@ const (
// maxMmapRand64 is the maximum randomization to apply to the mmap
// layout. It is defined by arch/x86/mm/mmap.c:arch_mmap_rnd in Linux.
- maxMmapRand64 = (1 << 28) * usermem.PageSize
+ maxMmapRand64 = (1 << 28) * hostarch.PageSize
// minGap64 is the minimum gap to leave at the top of the address space
// for the stack. It is defined by arch/x86/mm/mmap.c:MIN_GAP in Linux.
@@ -56,7 +56,7 @@ const (
//
// The Platform {Min,Max}UserAddress() may preclude loading at this
// address. See other preferredFoo comments below.
- preferredPIELoadAddr usermem.Addr = maxAddr64 / 3 * 2
+ preferredPIELoadAddr hostarch.Addr = maxAddr64 / 3 * 2
)
// These constants are selected as heuristics to help make the Platform's
@@ -92,13 +92,13 @@ const (
// This is all "preferred" because the layout min/max address may not
// allow us to select such a TopDownBase, in which case we have to fall
// back to a layout that TSAN may not be happy with.
- preferredTopDownAllocMin usermem.Addr = 0x7e8000000000
- preferredAllocationGap = 128 << 30 // 128 GB
- preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap
+ preferredTopDownAllocMin hostarch.Addr = 0x7e8000000000
+ preferredAllocationGap = 128 << 30 // 128 GB
+ preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap
// minMmapRand64 is the smallest we are willing to make the
// randomization to stay above preferredTopDownBaseMin.
- minMmapRand64 = (1 << 26) * usermem.PageSize
+ minMmapRand64 = (1 << 26) * hostarch.PageSize
)
// context64 represents an AMD64 context.
@@ -207,12 +207,12 @@ func (c *context64) FeatureSet() *cpuid.FeatureSet {
}
// mmapRand returns a random adjustment for randomizing an mmap layout.
-func mmapRand(max uint64) usermem.Addr {
- return usermem.Addr(rand.Int63n(int64(max))).RoundDown()
+func mmapRand(max uint64) hostarch.Addr {
+ return hostarch.Addr(rand.Int63n(int64(max))).RoundDown()
}
// NewMmapLayout implements Context.NewMmapLayout consistently with Linux.
-func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (MmapLayout, error) {
+func (c *context64) NewMmapLayout(min, max hostarch.Addr, r *limits.LimitSet) (MmapLayout, error) {
min, ok := min.RoundUp()
if !ok {
return MmapLayout{}, unix.EINVAL
@@ -230,7 +230,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm
// MAX_GAP in Linux.
maxGap := (max / 6) * 5
- gap := usermem.Addr(stackSize.Cur)
+ gap := hostarch.Addr(stackSize.Cur)
if gap < minGap64 {
gap = minGap64
}
@@ -243,7 +243,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm
}
topDownMin := max - gap - maxMmapRand64
- maxRand := usermem.Addr(maxMmapRand64)
+ maxRand := hostarch.Addr(maxMmapRand64)
if topDownMin < preferredTopDownBaseMin {
// Try to keep TopDownBase above preferredTopDownBaseMin by
// shrinking maxRand.
@@ -278,7 +278,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm
}
// PIELoadAddress implements Context.PIELoadAddress.
-func (c *context64) PIELoadAddress(l MmapLayout) usermem.Addr {
+func (c *context64) PIELoadAddress(l MmapLayout) hostarch.Addr {
base := preferredPIELoadAddr
max, ok := base.AddLength(maxMmapRand64)
if !ok {
@@ -311,7 +311,7 @@ func (c *context64) PtracePeekUser(addr uintptr) (marshal.Marshallable, error) {
regs := c.ptraceGetRegs()
buf := make([]byte, regs.SizeBytes())
regs.MarshalUnsafe(buf)
- return c.Native(uintptr(usermem.ByteOrder.Uint64(buf[addr:]))), nil
+ return c.Native(uintptr(hostarch.ByteOrder.Uint64(buf[addr:]))), nil
}
// Note: x86 debug registers are missing.
return c.Native(0), nil
@@ -326,7 +326,7 @@ func (c *context64) PtracePokeUser(addr, data uintptr) error {
regs := c.ptraceGetRegs()
buf := make([]byte, regs.SizeBytes())
regs.MarshalUnsafe(buf)
- usermem.ByteOrder.PutUint64(buf[addr:], uint64(data))
+ hostarch.ByteOrder.PutUint64(buf[addr:], uint64(data))
_, err := c.PtraceSetRegs(bytes.NewBuffer(buf))
return err
}
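The layout code above leans on a few hostarch.Addr helpers (RoundDown, RoundUp, AddLength, PageSize). A standalone sketch with made-up addresses, assuming a 64-bit host:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	a := hostarch.Addr(0x7f0000001234)

	// RoundDown truncates to the containing page, as mmapRand does with its
	// random offset.
	fmt.Printf("%#x\n", a.RoundDown())

	// RoundUp reports overflow via ok; NewMmapLayout checks this for min.
	if up, ok := a.RoundUp(); ok {
		fmt.Printf("%#x\n", up)
	}

	// AddLength also reports overflow, as PIELoadAddress does when adding
	// maxMmapRand64 to the preferred base.
	if end, ok := a.AddLength(64 * hostarch.PageSize); ok {
		fmt.Printf("%#x\n", end)
	}
}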
diff --git a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
index 767dbfe0b..d8f71795b 100644
--- a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
@@ -15,9 +15,9 @@ package arch
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -39,124 +39,124 @@ func (s *SignalContext64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SignalContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R8))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R8))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R9))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R9))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R10))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R10))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R11))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R11))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R12))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R12))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R13))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R13))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R14))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R14))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R15))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R15))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rdi))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rdi))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rsi))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rsi))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rbp))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rbp))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rbx))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rbx))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rdx))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rdx))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rax))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rax))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rcx))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rcx))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rsp))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rsp))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rip))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rip))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Eflags))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Eflags))
dst = dst[8:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Cs))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Cs))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Gs))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Gs))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Fs))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Fs))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Ss))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Ss))
dst = dst[2:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Err))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Err))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Trapno))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Trapno))
dst = dst[8:]
s.Oldmask.MarshalBytes(dst[:s.Oldmask.SizeBytes()])
dst = dst[s.Oldmask.SizeBytes():]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Cr2))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Cr2))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Fpstate))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Fpstate))
dst = dst[8:]
for idx := 0; idx < 8; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Reserved[idx]))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Reserved[idx]))
dst = dst[8:]
}
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SignalContext64) UnmarshalBytes(src []byte) {
- s.R8 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R8 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R9 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R9 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R10 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R10 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R11 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R11 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R12 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R12 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R13 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R13 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R14 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R14 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R15 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R15 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rdi = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rdi = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rsi = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rsi = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rbp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rbp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rbx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rbx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rdx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rdx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rax = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rax = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rcx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rcx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rsp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rsp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rip = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rip = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Eflags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Eflags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Cs = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Cs = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.Gs = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Gs = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.Fs = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Fs = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.Ss = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Ss = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.Err = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Err = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Trapno = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Trapno = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
s.Oldmask.UnmarshalBytes(src[:s.Oldmask.SizeBytes()])
src = src[s.Oldmask.SizeBytes():]
- s.Cr2 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Cr2 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Fpstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Fpstate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
for idx := 0; idx < 8; idx++ {
- s.Reserved[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Reserved[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
}
@@ -189,7 +189,7 @@ func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.Oldmask.Packed() {
// Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -213,13 +213,13 @@ func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.Oldmask.Packed() {
// Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -278,9 +278,9 @@ func (u *UContext64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (u *UContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
dst = dst[8:]
u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
dst = dst[u.Stack.SizeBytes():]
@@ -292,9 +292,9 @@ func (u *UContext64) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (u *UContext64) UnmarshalBytes(src []byte) {
- u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ u.Flags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ u.Link = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
src = src[u.Stack.SizeBytes():]
@@ -332,7 +332,7 @@ func (u *UContext64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
// Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
@@ -356,13 +356,13 @@ func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *UContext64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *UContext64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
// Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
diff --git a/pkg/sentry/arch/arch_arm64.go b/pkg/sentry/arch/arch_arm64.go
index 14ad9483b..348f238fd 100644
--- a/pkg/sentry/arch/arch_arm64.go
+++ b/pkg/sentry/arch/arch_arm64.go
@@ -22,11 +22,11 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
"gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Host specifies the host architecture.
@@ -36,7 +36,7 @@ const Host = ARM64
const (
// maxAddr64 is the maximum userspace address. It is TASK_SIZE in Linux
// for a 64-bit process.
- maxAddr64 usermem.Addr = (1 << 48)
+ maxAddr64 hostarch.Addr = (1 << 48)
// maxStackRand64 is the maximum randomization to apply to the stack.
// It is defined by arch/arm64/mm/mmap.c:(STACK_RND_MASK << PAGE_SHIFT) in Linux.
@@ -44,7 +44,7 @@ const (
// maxMmapRand64 is the maximum randomization to apply to the mmap
// layout. It is defined by arch/arm64/mm/mmap.c:arch_mmap_rnd in Linux.
- maxMmapRand64 = (1 << 33) * usermem.PageSize
+ maxMmapRand64 = (1 << 33) * hostarch.PageSize
// minGap64 is the minimum gap to leave at the top of the address space
// for the stack. It is defined by arch/arm64/mm/mmap.c:MIN_GAP in Linux.
@@ -55,7 +55,7 @@ const (
//
// The Platform {Min,Max}UserAddress() may preclude loading at this
// address. See other preferredFoo comments below.
- preferredPIELoadAddr usermem.Addr = maxAddr64 / 6 * 5
+ preferredPIELoadAddr hostarch.Addr = maxAddr64 / 6 * 5
)
var (
@@ -66,13 +66,13 @@ var (
// These constants are selected as heuristics to help make the Platform's
// potentially limited address space conform as closely to Linux as possible.
const (
- preferredTopDownAllocMin usermem.Addr = 0x7e8000000000
- preferredAllocationGap = 128 << 30 // 128 GB
- preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap
+ preferredTopDownAllocMin hostarch.Addr = 0x7e8000000000
+ preferredAllocationGap = 128 << 30 // 128 GB
+ preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap
// minMmapRand64 is the smallest we are willing to make the
// randomization to stay above preferredTopDownBaseMin.
- minMmapRand64 = (1 << 18) * usermem.PageSize
+ minMmapRand64 = (1 << 18) * hostarch.PageSize
)
// context64 represents an ARM64 context.
@@ -187,12 +187,12 @@ func (c *context64) FeatureSet() *cpuid.FeatureSet {
}
// mmapRand returns a random adjustment for randomizing an mmap layout.
-func mmapRand(max uint64) usermem.Addr {
- return usermem.Addr(rand.Int63n(int64(max))).RoundDown()
+func mmapRand(max uint64) hostarch.Addr {
+ return hostarch.Addr(rand.Int63n(int64(max))).RoundDown()
}
// NewMmapLayout implements Context.NewMmapLayout consistently with Linux.
-func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (MmapLayout, error) {
+func (c *context64) NewMmapLayout(min, max hostarch.Addr, r *limits.LimitSet) (MmapLayout, error) {
min, ok := min.RoundUp()
if !ok {
return MmapLayout{}, unix.EINVAL
@@ -210,7 +210,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm
// MAX_GAP in Linux.
maxGap := (max / 6) * 5
- gap := usermem.Addr(stackSize.Cur)
+ gap := hostarch.Addr(stackSize.Cur)
if gap < minGap64 {
gap = minGap64
}
@@ -223,7 +223,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm
}
topDownMin := max - gap - maxMmapRand64
- maxRand := usermem.Addr(maxMmapRand64)
+ maxRand := hostarch.Addr(maxMmapRand64)
if topDownMin < preferredTopDownBaseMin {
// Try to keep TopDownBase above preferredTopDownBaseMin by
// shrinking maxRand.
@@ -258,7 +258,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm
}
// PIELoadAddress implements Context.PIELoadAddress.
-func (c *context64) PIELoadAddress(l MmapLayout) usermem.Addr {
+func (c *context64) PIELoadAddress(l MmapLayout) hostarch.Addr {
base := preferredPIELoadAddr
max, ok := base.AddLength(maxMmapRand64)
if !ok {
diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
index 2faa3a852..40d0c1c75 100644
--- a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
@@ -15,9 +15,9 @@ package arch
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -43,12 +43,12 @@ func (f *FpsimdContext) SizeBytes() int {
func (f *FpsimdContext) MarshalBytes(dst []byte) {
f.Head.MarshalBytes(dst[:f.Head.SizeBytes()])
dst = dst[f.Head.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Fpsr))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Fpsr))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Fpcr))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Fpcr))
dst = dst[4:]
for idx := 0; idx < 64; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Vregs[idx]))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Vregs[idx]))
dst = dst[8:]
}
}
@@ -57,12 +57,12 @@ func (f *FpsimdContext) MarshalBytes(dst []byte) {
func (f *FpsimdContext) UnmarshalBytes(src []byte) {
f.Head.UnmarshalBytes(src[:f.Head.SizeBytes()])
src = src[f.Head.SizeBytes():]
- f.Fpsr = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Fpsr = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Fpcr = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Fpcr = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
for idx := 0; idx < 64; idx++ {
- f.Vregs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Vregs[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
}
@@ -95,7 +95,7 @@ func (f *FpsimdContext) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FpsimdContext) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FpsimdContext) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !f.Head.Packed() {
// Type FpsimdContext doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
@@ -119,13 +119,13 @@ func (f *FpsimdContext) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FpsimdContext) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FpsimdContext) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FpsimdContext) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FpsimdContext) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !f.Head.Packed() {
// Type FpsimdContext doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
@@ -184,17 +184,17 @@ func (s *SignalContext64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SignalContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
dst = dst[8:]
for idx := 0; idx < 31; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
dst = dst[8:]
}
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
dst = dst[8:]
for idx := 0; idx < 8; idx++ {
dst[0] = byte(s._pad[idx])
@@ -206,17 +206,17 @@ func (s *SignalContext64) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SignalContext64) UnmarshalBytes(src []byte) {
- s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.FaultAddr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
for idx := 0; idx < 31; idx++ {
- s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Regs[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
- s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Sp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Pc = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Pstate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
for idx := 0; idx < 8; idx++ {
s._pad[idx] = src[0]
@@ -254,7 +254,7 @@ func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.Fpsimd64.Packed() {
// Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -278,13 +278,13 @@ func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.Fpsimd64.Packed() {
// Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -345,9 +345,9 @@ func (u *UContext64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (u *UContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
dst = dst[8:]
u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
dst = dst[u.Stack.SizeBytes():]
@@ -367,9 +367,9 @@ func (u *UContext64) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (u *UContext64) UnmarshalBytes(src []byte) {
- u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ u.Flags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ u.Link = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
src = src[u.Stack.SizeBytes():]
@@ -415,7 +415,7 @@ func (u *UContext64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
// Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
@@ -439,13 +439,13 @@ func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *UContext64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *UContext64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
// Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
@@ -501,17 +501,17 @@ func (a *aarch64Ctx) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (a *aarch64Ctx) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(a.Magic))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(a.Magic))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(a.Size))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(a.Size))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (a *aarch64Ctx) UnmarshalBytes(src []byte) {
- a.Magic = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ a.Magic = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- a.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ a.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -533,7 +533,7 @@ func (a *aarch64Ctx) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (a *aarch64Ctx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (a *aarch64Ctx) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -550,13 +550,13 @@ func (a *aarch64Ctx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (a *aarch64Ctx) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (a *aarch64Ctx) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return a.CopyOutN(cc, addr, a.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (a *aarch64Ctx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (a *aarch64Ctx) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
diff --git a/pkg/sentry/arch/auxv.go b/pkg/sentry/arch/auxv.go
index 2b4c8f3fc..19ca18121 100644
--- a/pkg/sentry/arch/auxv.go
+++ b/pkg/sentry/arch/auxv.go
@@ -15,7 +15,7 @@
package arch
import (
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// An AuxEntry represents an entry in an ELF auxiliary vector.
@@ -23,7 +23,7 @@ import (
// +stateify savable
type AuxEntry struct {
Key uint64
- Value usermem.Addr
+ Value hostarch.Addr
}
// An Auxv represents an ELF auxiliary vector.
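AuxEntry.Value is now a hostarch.Addr, which is how loaders build the vector. A small sketch (the AT_PAGESZ entry is just an example; only the types come from the hunk above):

package sample

import (
	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/sentry/arch"
)

// pageSizeAux builds a single-entry auxiliary vector advertising the page
// size; Value is a hostarch.Addr after this change.
func pageSizeAux() arch.Auxv {
	return arch.Auxv{
		arch.AuxEntry{Key: linux.AT_PAGESZ, Value: hostarch.Addr(hostarch.PageSize)},
	}
}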
diff --git a/pkg/sentry/arch/fpu/fpu_amd64.go b/pkg/sentry/arch/fpu/fpu_amd64.go
index 3a62f51be..1e9625bee 100644
--- a/pkg/sentry/arch/fpu/fpu_amd64.go
+++ b/pkg/sentry/arch/fpu/fpu_amd64.go
@@ -21,9 +21,9 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// initX86FPState (defined in asm files) sets up initial state.
@@ -146,11 +146,11 @@ const (
// any of the reserved bits of the MXCSR register." - Intel SDM Vol. 1, Section
// 10.5.1.2 "SSE State")
func sanitizeMXCSR(f State) {
- mxcsr := usermem.ByteOrder.Uint32(f[mxcsrOffset:])
+ mxcsr := hostarch.ByteOrder.Uint32(f[mxcsrOffset:])
initMXCSRMask.Do(func() {
temp := State(alignedBytes(uint(ptraceFPRegsSize), 16))
initX86FPState(&temp[0], false /* useXsave */)
- mxcsrMask = usermem.ByteOrder.Uint32(temp[mxcsrMaskOffset:])
+ mxcsrMask = hostarch.ByteOrder.Uint32(temp[mxcsrMaskOffset:])
if mxcsrMask == 0 {
// "If the value of the MXCSR_MASK field is 00000000H, then the
// MXCSR_MASK value is the default value of 0000FFBFH." - Intel SDM
@@ -160,7 +160,7 @@ func sanitizeMXCSR(f State) {
}
})
mxcsr &= mxcsrMask
- usermem.ByteOrder.PutUint32(f[mxcsrOffset:], mxcsr)
+ hostarch.ByteOrder.PutUint32(f[mxcsrOffset:], mxcsr)
}
// PtraceGetXstateRegs implements ptrace(PTRACE_GETREGS, NT_X86_XSTATE) by
@@ -177,7 +177,7 @@ func (s *State) PtraceGetXstateRegs(dst io.Writer, maxlen int, featureSet *cpuid
// Area". Linux uses the first 8 bytes of this area to store the OS XSTATE
// mask. GDB relies on this: see
// gdb/x86-linux-nat.c:x86_linux_read_description().
- usermem.ByteOrder.PutUint64(f[userXstateXCR0Offset:], featureSet.ValidXCR0Mask())
+ hostarch.ByteOrder.PutUint64(f[userXstateXCR0Offset:], featureSet.ValidXCR0Mask())
if len(f) > maxlen {
f = f[:maxlen]
}
@@ -208,9 +208,9 @@ func (s *State) PtraceSetXstateRegs(src io.Reader, maxlen int, featureSet *cpuid
// Force reserved bits in MXCSR to 0. This is consistent with Linux.
sanitizeMXCSR(State(f))
// Users can't enable *more* XCR0 bits than what we, and the CPU, support.
- xstateBV := usermem.ByteOrder.Uint64(f[xstateBVOffset:])
+ xstateBV := hostarch.ByteOrder.Uint64(f[xstateBVOffset:])
xstateBV &= featureSet.ValidXCR0Mask()
- usermem.ByteOrder.PutUint64(f[xstateBVOffset:], xstateBV)
+ hostarch.ByteOrder.PutUint64(f[xstateBVOffset:], xstateBV)
// Force XCOMP_BV and reserved bytes in the XSAVE header to 0.
reserved := f[xsaveHeaderZeroedOffset : xsaveHeaderZeroedOffset+xsaveHeaderZeroedBytes]
for i := range reserved {
@@ -266,7 +266,7 @@ func (s *State) AfterLoad() {
// What was in use?
savedBV := fxsaveBV
if len(old) >= xstateBVOffset+8 {
- savedBV = usermem.ByteOrder.Uint64(old[xstateBVOffset:])
+ savedBV = hostarch.ByteOrder.Uint64(old[xstateBVOffset:])
}
// Supported features must be a superset of saved features.
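sanitizeMXCSR and the xstate helpers above are, mechanically, fixed-offset loads and stores through hostarch.ByteOrder. A minimal standalone sketch; the 24-byte MXCSR offset is the architectural FXSAVE layout, restated here as an assumption rather than read from the diff:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// mxcsrOffset is where MXCSR sits in the legacy FXSAVE area (assumed here).
const mxcsrOffset = 24

func main() {
	state := make([]byte, 512) // legacy FXSAVE region

	// Write then read MXCSR through hostarch.ByteOrder, the same pattern the
	// rewritten usermem.ByteOrder calls follow.
	hostarch.ByteOrder.PutUint32(state[mxcsrOffset:], 0x1f80)
	fmt.Printf("%#x\n", hostarch.ByteOrder.Uint32(state[mxcsrOffset:]))
}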
diff --git a/pkg/sentry/arch/signal.go b/pkg/sentry/arch/signal.go
index 35d2e07c3..67d7edf68 100644
--- a/pkg/sentry/arch/signal.go
+++ b/pkg/sentry/arch/signal.go
@@ -16,7 +16,7 @@ package arch
import (
"gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// SignalAct represents the action that should be taken when a signal is
@@ -154,107 +154,107 @@ func (s *SignalInfo) FixSignalCodeForUser() {
// PID returns the si_pid field.
func (s *SignalInfo) PID() int32 {
- return int32(usermem.ByteOrder.Uint32(s.Fields[0:4]))
+ return int32(hostarch.ByteOrder.Uint32(s.Fields[0:4]))
}
// SetPID mutates the si_pid field.
func (s *SignalInfo) SetPID(val int32) {
- usermem.ByteOrder.PutUint32(s.Fields[0:4], uint32(val))
+ hostarch.ByteOrder.PutUint32(s.Fields[0:4], uint32(val))
}
// UID returns the si_uid field.
func (s *SignalInfo) UID() int32 {
- return int32(usermem.ByteOrder.Uint32(s.Fields[4:8]))
+ return int32(hostarch.ByteOrder.Uint32(s.Fields[4:8]))
}
// SetUID mutates the si_uid field.
func (s *SignalInfo) SetUID(val int32) {
- usermem.ByteOrder.PutUint32(s.Fields[4:8], uint32(val))
+ hostarch.ByteOrder.PutUint32(s.Fields[4:8], uint32(val))
}
// Sigval returns the sigval field, which is aliased to both si_int and si_ptr.
func (s *SignalInfo) Sigval() uint64 {
- return usermem.ByteOrder.Uint64(s.Fields[8:16])
+ return hostarch.ByteOrder.Uint64(s.Fields[8:16])
}
// SetSigval mutates the sigval field.
func (s *SignalInfo) SetSigval(val uint64) {
- usermem.ByteOrder.PutUint64(s.Fields[8:16], val)
+ hostarch.ByteOrder.PutUint64(s.Fields[8:16], val)
}
// TimerID returns the si_timerid field.
func (s *SignalInfo) TimerID() linux.TimerID {
- return linux.TimerID(usermem.ByteOrder.Uint32(s.Fields[0:4]))
+ return linux.TimerID(hostarch.ByteOrder.Uint32(s.Fields[0:4]))
}
// SetTimerID sets the si_timerid field.
func (s *SignalInfo) SetTimerID(val linux.TimerID) {
- usermem.ByteOrder.PutUint32(s.Fields[0:4], uint32(val))
+ hostarch.ByteOrder.PutUint32(s.Fields[0:4], uint32(val))
}
// Overrun returns the si_overrun field.
func (s *SignalInfo) Overrun() int32 {
- return int32(usermem.ByteOrder.Uint32(s.Fields[4:8]))
+ return int32(hostarch.ByteOrder.Uint32(s.Fields[4:8]))
}
// SetOverrun sets the si_overrun field.
func (s *SignalInfo) SetOverrun(val int32) {
- usermem.ByteOrder.PutUint32(s.Fields[4:8], uint32(val))
+ hostarch.ByteOrder.PutUint32(s.Fields[4:8], uint32(val))
}
// Addr returns the si_addr field.
func (s *SignalInfo) Addr() uint64 {
- return usermem.ByteOrder.Uint64(s.Fields[0:8])
+ return hostarch.ByteOrder.Uint64(s.Fields[0:8])
}
// SetAddr sets the si_addr field.
func (s *SignalInfo) SetAddr(val uint64) {
- usermem.ByteOrder.PutUint64(s.Fields[0:8], val)
+ hostarch.ByteOrder.PutUint64(s.Fields[0:8], val)
}
// Status returns the si_status field.
func (s *SignalInfo) Status() int32 {
- return int32(usermem.ByteOrder.Uint32(s.Fields[8:12]))
+ return int32(hostarch.ByteOrder.Uint32(s.Fields[8:12]))
}
// SetStatus mutates the si_status field.
func (s *SignalInfo) SetStatus(val int32) {
- usermem.ByteOrder.PutUint32(s.Fields[8:12], uint32(val))
+ hostarch.ByteOrder.PutUint32(s.Fields[8:12], uint32(val))
}
// CallAddr returns the si_call_addr field.
func (s *SignalInfo) CallAddr() uint64 {
- return usermem.ByteOrder.Uint64(s.Fields[0:8])
+ return hostarch.ByteOrder.Uint64(s.Fields[0:8])
}
// SetCallAddr mutates the si_call_addr field.
func (s *SignalInfo) SetCallAddr(val uint64) {
- usermem.ByteOrder.PutUint64(s.Fields[0:8], val)
+ hostarch.ByteOrder.PutUint64(s.Fields[0:8], val)
}
// Syscall returns the si_syscall field.
func (s *SignalInfo) Syscall() int32 {
- return int32(usermem.ByteOrder.Uint32(s.Fields[8:12]))
+ return int32(hostarch.ByteOrder.Uint32(s.Fields[8:12]))
}
// SetSyscall mutates the si_syscall field.
func (s *SignalInfo) SetSyscall(val int32) {
- usermem.ByteOrder.PutUint32(s.Fields[8:12], uint32(val))
+ hostarch.ByteOrder.PutUint32(s.Fields[8:12], uint32(val))
}
// Arch returns the si_arch field.
func (s *SignalInfo) Arch() uint32 {
- return usermem.ByteOrder.Uint32(s.Fields[12:16])
+ return hostarch.ByteOrder.Uint32(s.Fields[12:16])
}
// SetArch mutates the si_arch field.
func (s *SignalInfo) SetArch(val uint32) {
- usermem.ByteOrder.PutUint32(s.Fields[12:16], val)
+ hostarch.ByteOrder.PutUint32(s.Fields[12:16], val)
}
// Band returns the si_band field.
func (s *SignalInfo) Band() int64 {
- return int64(usermem.ByteOrder.Uint64(s.Fields[0:8]))
+ return int64(hostarch.ByteOrder.Uint64(s.Fields[0:8]))
}
// SetBand mutates the si_band field.
@@ -262,15 +262,15 @@ func (s *SignalInfo) SetBand(val int64) {
// Note: this assumes the platform uses `long` as `__ARCH_SI_BAND_T`.
// On some platforms, which gVisor doesn't support, `__ARCH_SI_BAND_T` is
// `int`. See siginfo.h.
- usermem.ByteOrder.PutUint64(s.Fields[0:8], uint64(val))
+ hostarch.ByteOrder.PutUint64(s.Fields[0:8], uint64(val))
}
// FD returns the si_fd field.
func (s *SignalInfo) FD() uint32 {
- return usermem.ByteOrder.Uint32(s.Fields[8:12])
+ return hostarch.ByteOrder.Uint32(s.Fields[8:12])
}
// SetFD mutates the si_fd field.
func (s *SignalInfo) SetFD(val uint32) {
- usermem.ByteOrder.PutUint32(s.Fields[8:12], val)
+ hostarch.ByteOrder.PutUint32(s.Fields[8:12], val)
}
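The SignalInfo accessors above keep their shapes; only the byte-order helper underneath changed. A compile-oriented sketch of the usual fill pattern (the function name and values are made up):

package sample

import (
	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/sentry/arch"
)

// childExitInfo builds a SIGCHLD siginfo; SetPID, SetUID and SetStatus now
// encode their fields via hostarch.ByteOrder internally.
func childExitInfo(pid, uid, status int32) *arch.SignalInfo {
	info := &arch.SignalInfo{Signo: int32(linux.SIGCHLD)}
	info.SetPID(pid)
	info.SetUID(uid)
	info.SetStatus(status)
	return info
}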
diff --git a/pkg/sentry/arch/signal_amd64.go b/pkg/sentry/arch/signal_amd64.go
index ee3743483..082ed92b1 100644
--- a/pkg/sentry/arch/signal_amd64.go
+++ b/pkg/sentry/arch/signal_amd64.go
@@ -21,10 +21,10 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
- "gvisor.dev/gvisor/pkg/usermem"
)
// SignalContext64 is equivalent to struct sigcontext, the type passed as the
@@ -133,7 +133,7 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt
// space on the user stack naturally caps the amount of memory the
// sentry will allocate for this purpose.
fpSize, _ := c.fpuFrameSize()
- sp = (sp - usermem.Addr(fpSize)) & ^usermem.Addr(63)
+ sp = (sp - hostarch.Addr(fpSize)) & ^hostarch.Addr(63)
// Construct the UContext64 now since we need its size.
uc := &UContext64{
@@ -180,8 +180,8 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt
ucSize := uc.SizeBytes()
// st.Arch.Width() is for the restorer address. sizeof(siginfo) == 128.
frameSize := int(st.Arch.Width()) + ucSize + 128
- frameBottom := (sp-usermem.Addr(frameSize)) & ^usermem.Addr(15) - 8
- sp = frameBottom + usermem.Addr(frameSize)
+ frameBottom := (sp-hostarch.Addr(frameSize)) & ^hostarch.Addr(15) - 8
+ sp = frameBottom + hostarch.Addr(frameSize)
st.Bottom = sp
// Prior to proceeding, figure out if the frame will exhaust the range
diff --git a/pkg/sentry/arch/signal_arm64.go b/pkg/sentry/arch/signal_arm64.go
index 53281dcba..da71fb873 100644
--- a/pkg/sentry/arch/signal_arm64.go
+++ b/pkg/sentry/arch/signal_arm64.go
@@ -19,9 +19,9 @@ package arch
import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
- "gvisor.dev/gvisor/pkg/usermem"
)
// SignalContext64 is equivalent to struct sigcontext, the type passed as the
@@ -107,8 +107,8 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt
// sizeof(siginfo) == 128.
// R30 stores the restorer address.
frameSize := ucSize + 128
- frameBottom := (sp - usermem.Addr(frameSize)) & ^usermem.Addr(15)
- sp = frameBottom + usermem.Addr(frameSize)
+ frameBottom := (sp - hostarch.Addr(frameSize)) & ^hostarch.Addr(15)
+ sp = frameBottom + hostarch.Addr(frameSize)
st.Bottom = sp
// Prior to proceeding, figure out if the frame will exhaust the range
diff --git a/pkg/sentry/arch/signal_stack.go b/pkg/sentry/arch/signal_stack.go
index a1eae98f9..c732c7503 100644
--- a/pkg/sentry/arch/signal_stack.go
+++ b/pkg/sentry/arch/signal_stack.go
@@ -17,8 +17,8 @@
package arch
import (
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/usermem"
)
const (
@@ -36,8 +36,8 @@ func (s SignalStack) IsEnabled() bool {
}
// Top returns the stack's top address.
-func (s SignalStack) Top() usermem.Addr {
- return usermem.Addr(s.Addr + s.Size)
+func (s SignalStack) Top() hostarch.Addr {
+ return hostarch.Addr(s.Addr + s.Size)
}
// SetOnStack marks this signal stack as in use.
@@ -49,8 +49,8 @@ func (s *SignalStack) SetOnStack() {
}
// Contains checks if the stack pointer is within this stack.
-func (s *SignalStack) Contains(sp usermem.Addr) bool {
- return usermem.Addr(s.Addr) < sp && sp <= usermem.Addr(s.Addr+s.Size)
+func (s *SignalStack) Contains(sp hostarch.Addr) bool {
+ return hostarch.Addr(s.Addr) < sp && sp <= hostarch.Addr(s.Addr+s.Size)
}
// NativeSignalStack is a type that is equivalent to stack_t in the guest
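For completeness, the renamed SignalStack helpers above get consumed as plain hostarch.Addr comparisons (sketch; the wrapper names are made up):

package sample

import (
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/sentry/arch"
)

// onAltStack reports whether sp lies on the alternate signal stack, i.e.
// Addr < sp <= Top, per the Contains hunk above.
func onAltStack(ss arch.SignalStack, sp hostarch.Addr) bool {
	return ss.Contains(sp)
}

// altStackTop returns the top of the alternate stack as a hostarch.Addr.
func altStackTop(ss arch.SignalStack) hostarch.Addr {
	return ss.Top()
}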
diff --git a/pkg/sentry/arch/stack.go b/pkg/sentry/arch/stack.go
index 5f06c751d..65a794c7c 100644
--- a/pkg/sentry/arch/stack.go
+++ b/pkg/sentry/arch/stack.go
@@ -16,18 +16,20 @@ package arch
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
+
"gvisor.dev/gvisor/pkg/usermem"
)
-// Stack is a simple wrapper around a usermem.IO and an address. Stack
+// Stack is a simple wrapper around a hostarch.IO and an address. Stack
// implements marshal.CopyContext, and marshallable values can be pushed or
// popped from the stack through the marshal.Marshallable interface.
//
// Stack is not thread-safe.
type Stack struct {
// Our arch info.
- // We use this for automatic Native conversion of usermem.Addrs during
+ // We use this for automatic Native conversion of hostarch.Addrs during
// Push() and Pop().
Arch Context
@@ -35,7 +37,7 @@ type Stack struct {
IO usermem.IO
// Our current stack bottom.
- Bottom usermem.Addr
+ Bottom hostarch.Addr
// Scratch buffer used for marshalling to avoid having to repeatedly
// allocate scratch memory.
@@ -59,20 +61,20 @@ func (s *Stack) CopyScratchBuffer(size int) []byte {
// StackBottomMagic is the special address callers must pass to all stack
// marshalling operations to cause the src/dst address to be computed based on
// the current end of the stack.
-const StackBottomMagic = ^usermem.Addr(0) // usermem.Addr(-1)
+const StackBottomMagic = ^hostarch.Addr(0) // hostarch.Addr(-1)
// CopyOutBytes implements marshal.CopyContext.CopyOutBytes. CopyOutBytes
// computes an appropriate address based on the current end of the
// stack. Callers use the sentinel address StackBottomMagic to marshal methods
// to indicate this.
-func (s *Stack) CopyOutBytes(sentinel usermem.Addr, b []byte) (int, error) {
+func (s *Stack) CopyOutBytes(sentinel hostarch.Addr, b []byte) (int, error) {
if sentinel != StackBottomMagic {
panic("Attempted to copy out to stack with absolute address")
}
c := len(b)
- n, err := s.IO.CopyOut(context.Background(), s.Bottom-usermem.Addr(c), b, usermem.IOOpts{})
+ n, err := s.IO.CopyOut(context.Background(), s.Bottom-hostarch.Addr(c), b, usermem.IOOpts{})
if err == nil && n == c {
- s.Bottom -= usermem.Addr(n)
+ s.Bottom -= hostarch.Addr(n)
}
return n, err
}
@@ -81,21 +83,21 @@ func (s *Stack) CopyOutBytes(sentinel usermem.Addr, b []byte) (int, error) {
// an appropriate address based on the current end of the stack. Callers must
// use the sentinel address StackBottomMagic to marshal methods to indicate
// this.
-func (s *Stack) CopyInBytes(sentinel usermem.Addr, b []byte) (int, error) {
+func (s *Stack) CopyInBytes(sentinel hostarch.Addr, b []byte) (int, error) {
if sentinel != StackBottomMagic {
panic("Attempted to copy in from stack with absolute address")
}
n, err := s.IO.CopyIn(context.Background(), s.Bottom, b, usermem.IOOpts{})
if err == nil {
- s.Bottom += usermem.Addr(n)
+ s.Bottom += hostarch.Addr(n)
}
return n, err
}
// Align aligns the stack to the given offset.
func (s *Stack) Align(offset int) {
- if s.Bottom%usermem.Addr(offset) != 0 {
- s.Bottom -= (s.Bottom % usermem.Addr(offset))
+ if s.Bottom%hostarch.Addr(offset) != 0 {
+ s.Bottom -= (s.Bottom % hostarch.Addr(offset))
}
}
@@ -119,16 +121,16 @@ func (s *Stack) PushNullTerminatedByteSlice(bs []byte) (int, error) {
// stack.
type StackLayout struct {
// ArgvStart is the beginning of the argument vector.
- ArgvStart usermem.Addr
+ ArgvStart hostarch.Addr
// ArgvEnd is the end of the argument vector.
- ArgvEnd usermem.Addr
+ ArgvEnd hostarch.Addr
// EnvvStart is the beginning of the environment vector.
- EnvvStart usermem.Addr
+ EnvvStart hostarch.Addr
// EnvvEnd is the end of the environment vector.
- EnvvEnd usermem.Addr
+ EnvvEnd hostarch.Addr
}
// Load pushes the given args, env and aux vector to the stack using the
@@ -148,7 +150,7 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error)
// to be in this order. See: https://www.uclibc.org/docs/psABI-x86_64.pdf
// page 29.
l.EnvvEnd = s.Bottom
- envAddrs := make([]usermem.Addr, len(env))
+ envAddrs := make([]hostarch.Addr, len(env))
for i := len(env) - 1; i >= 0; i-- {
if _, err := s.PushNullTerminatedByteSlice([]byte(env[i])); err != nil {
return StackLayout{}, err
@@ -159,7 +161,7 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error)
// Push our strings.
l.ArgvEnd = s.Bottom
- argAddrs := make([]usermem.Addr, len(args))
+ argAddrs := make([]hostarch.Addr, len(args))
for i := len(args) - 1; i >= 0; i-- {
if _, err := s.PushNullTerminatedByteSlice([]byte(args[i])); err != nil {
return StackLayout{}, err
@@ -178,7 +180,7 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error)
argvSize := s.Arch.Width() * uint(len(args)+1)
envvSize := s.Arch.Width() * uint(len(env)+1)
auxvSize := s.Arch.Width() * 2 * uint(len(aux)+1)
- total := usermem.Addr(argvSize) + usermem.Addr(envvSize) + usermem.Addr(auxvSize) + usermem.Addr(s.Arch.Width())
+ total := hostarch.Addr(argvSize) + hostarch.Addr(envvSize) + hostarch.Addr(auxvSize) + hostarch.Addr(s.Arch.Width())
expectedBottom := s.Bottom - total
if expectedBottom%32 != 0 {
s.Bottom -= expectedBottom % 32
@@ -188,11 +190,11 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error)
// NOTE: We need an extra zero here per spec.
// The Push function will automatically terminate
// strings and arrays with a single null value.
- auxv := make([]usermem.Addr, 0, len(aux))
+ auxv := make([]hostarch.Addr, 0, len(aux))
for _, a := range aux {
- auxv = append(auxv, usermem.Addr(a.Key), a.Value)
+ auxv = append(auxv, hostarch.Addr(a.Key), a.Value)
}
- auxv = append(auxv, usermem.Addr(0))
+ auxv = append(auxv, hostarch.Addr(0))
_, err := s.pushAddrSliceAndTerminator(auxv)
if err != nil {
return StackLayout{}, err
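A standalone sketch (not gVisor code) of the sizing and alignment math in the Load hunk above: argv, envv and auxv occupy pointer-width slots plus null terminators, and Bottom is pre-adjusted so the final stack bottom lands on a 32-byte boundary. The counts and the starting address are illustrative.

package main

import "fmt"

func main() {
	const width = 8 // pointer width on amd64/arm64
	argc, envc, auxc := 3, 10, 6

	argvSize := width * uint64(argc+1)
	envvSize := width * uint64(envc+1)
	auxvSize := width * 2 * uint64(auxc+1)
	total := argvSize + envvSize + auxvSize + width // trailing slot for argc itself

	bottom := uint64(0x7fffffffe531) // illustrative current stack bottom
	expectedBottom := bottom - total
	if expectedBottom%32 != 0 {
		bottom -= expectedBottom % 32 // mirror the pre-adjustment in the hunk
	}
	fmt.Printf("adjusted bottom: %#x, final bottom: %#x\n", bottom, bottom-total)
}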
diff --git a/pkg/sentry/arch/stack_unsafe.go b/pkg/sentry/arch/stack_unsafe.go
index 0e478e434..f4712d58f 100644
--- a/pkg/sentry/arch/stack_unsafe.go
+++ b/pkg/sentry/arch/stack_unsafe.go
@@ -17,19 +17,19 @@ package arch
import (
"unsafe"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
- "gvisor.dev/gvisor/pkg/usermem"
)
// pushAddrSliceAndTerminator copies a slice of addresses to the stack, and
// also pushes an extra null address element at the end of the slice.
//
// Internally, we unsafely transmute the slice type from the arch-dependent
-// []usermem.Addr type, to a slice of fixed-sized ints so that we can pass it to
+// []hostarch.Addr type, to a slice of fixed-sized ints so that we can pass it to
// go-marshal.
//
// On error, the contents of the stack and the bottom cursor are undefined.
-func (s *Stack) pushAddrSliceAndTerminator(src []usermem.Addr) (int, error) {
+func (s *Stack) pushAddrSliceAndTerminator(src []hostarch.Addr) (int, error) {
// Note: Stack grows upwards, so push the terminator first.
switch s.Arch.Width() {
case 8:
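A minimal sketch (not the gVisor implementation) of the transmute that the pushAddrSliceAndTerminator comment describes: on a 64-bit host, []Addr and []uint64 share a memory layout, so the slice header can be reinterpreted and handed to a marshaller that only understands fixed-size integer slices. The local addr type stands in for hostarch.Addr.

package main

import (
	"fmt"
	"unsafe"
)

type addr uintptr // stand-in for hostarch.Addr

func asUint64Slice(src []addr) []uint64 {
	if unsafe.Sizeof(addr(0)) != 8 {
		panic("transmute only valid on 64-bit hosts")
	}
	// Reinterpret the slice header; element layout is identical on 64-bit.
	return *(*[]uint64)(unsafe.Pointer(&src))
}

func main() {
	addrs := []addr{0x1000, 0x2000, 0} // trailing null terminator, as above
	fmt.Println(asUint64Slice(addrs))  // [4096 8192 0]
}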
diff --git a/pkg/sentry/devices/tundev/tundev.go b/pkg/sentry/devices/tundev/tundev.go
index c43158aa4..a12eeb8e7 100644
--- a/pkg/sentry/devices/tundev/tundev.go
+++ b/pkg/sentry/devices/tundev/tundev.go
@@ -18,6 +18,7 @@ package tundev
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/devtmpfs"
"gvisor.dev/gvisor/pkg/sentry/inet"
@@ -89,7 +90,7 @@ func (fd *tunFD) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArg
}
// Validate flags.
- flags, err := netstack.LinuxToTUNFlags(usermem.ByteOrder.Uint16(req.Data[:]))
+ flags, err := netstack.LinuxToTUNFlags(hostarch.ByteOrder.Uint16(req.Data[:]))
if err != nil {
return 0, err
}
@@ -98,7 +99,7 @@ func (fd *tunFD) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArg
case linux.TUNGETIFF:
var req linux.IFReq
copy(req.IFName[:], fd.device.Name())
- usermem.ByteOrder.PutUint16(req.Data[:], netstack.TUNFlagsToLinux(fd.device.Flags()))
+ hostarch.ByteOrder.PutUint16(req.Data[:], netstack.TUNFlagsToLinux(fd.device.Flags()))
_, err := req.CopyOut(t, data)
return 0, err
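A small sketch of the hostarch.ByteOrder usage in the TUNSETIFF/TUNGETIFF paths above: it is simply the host's native binary.ByteOrder, used to pack and unpack the 16-bit flags field of the ifreq payload. The flag values below are illustrative, not the real IFF_* constants.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var byteOrder binary.ByteOrder = binary.LittleEndian // native order on x86/arm64

	var data [16]byte                           // stand-in for linux.IFReq.Data
	byteOrder.PutUint16(data[:], 0x0002|0x1000) // pack flags on the way out
	flags := byteOrder.Uint16(data[:])          // unpack on the way back in
	fmt.Printf("flags: %#04x\n", flags)
}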
diff --git a/pkg/sentry/fs/anon/anon.go b/pkg/sentry/fs/anon/anon.go
index 5c421f5fb..8bda22a8e 100644
--- a/pkg/sentry/fs/anon/anon.go
+++ b/pkg/sentry/fs/anon/anon.go
@@ -19,9 +19,9 @@ package anon
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
- "gvisor.dev/gvisor/pkg/usermem"
)
// NewInode constructs an anonymous Inode that is not associated
@@ -37,6 +37,6 @@ func NewInode(ctx context.Context) *fs.Inode {
Type: fs.Anonymous,
DeviceID: PseudoDevice.DeviceID(),
InodeID: PseudoDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
})
}
diff --git a/pkg/sentry/fs/copy_up.go b/pkg/sentry/fs/copy_up.go
index 58deb25fc..5aa668873 100644
--- a/pkg/sentry/fs/copy_up.go
+++ b/pkg/sentry/fs/copy_up.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
@@ -339,7 +340,7 @@ func cleanupUpper(ctx context.Context, parent *Inode, name string, copyUpErr err
// size is the same used by io.Copy.
var copyUpBuffers = sync.Pool{
New: func() interface{} {
- b := make([]byte, 8*usermem.PageSize)
+ b := make([]byte, 8*hostarch.PageSize)
return &b
},
}
diff --git a/pkg/sentry/fs/dev/dev.go b/pkg/sentry/fs/dev/dev.go
index acbd401a0..e84ba7a5d 100644
--- a/pkg/sentry/fs/dev/dev.go
+++ b/pkg/sentry/fs/dev/dev.go
@@ -19,6 +19,7 @@ import (
"math"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
"gvisor.dev/gvisor/pkg/sentry/fs/tmpfs"
@@ -49,7 +50,7 @@ func newCharacterDevice(ctx context.Context, iops fs.InodeOperations, msrc *fs.M
return fs.NewInode(ctx, iops, msrc, fs.StableAttr{
DeviceID: devDevice.DeviceID(),
InodeID: devDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.CharacterDevice,
DeviceFileMajor: major,
DeviceFileMinor: minor,
@@ -60,7 +61,7 @@ func newMemDevice(ctx context.Context, iops fs.InodeOperations, msrc *fs.MountSo
return fs.NewInode(ctx, iops, msrc, fs.StableAttr{
DeviceID: devDevice.DeviceID(),
InodeID: devDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.CharacterDevice,
DeviceFileMajor: memDevMajor,
DeviceFileMinor: minor,
@@ -72,7 +73,7 @@ func newDirectory(ctx context.Context, contents map[string]*fs.Inode, msrc *fs.M
return fs.NewInode(ctx, iops, msrc, fs.StableAttr{
DeviceID: devDevice.DeviceID(),
InodeID: devDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Directory,
})
}
@@ -82,7 +83,7 @@ func newSymlink(ctx context.Context, target string, msrc *fs.MountSource) *fs.In
return fs.NewInode(ctx, iops, msrc, fs.StableAttr{
DeviceID: devDevice.DeviceID(),
InodeID: devDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Symlink,
})
}
@@ -137,7 +138,7 @@ func New(ctx context.Context, msrc *fs.MountSource) *fs.Inode {
return fs.NewInode(ctx, iops, msrc, fs.StableAttr{
DeviceID: devDevice.DeviceID(),
InodeID: devDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Directory,
})
}
diff --git a/pkg/sentry/fs/dev/net_tun.go b/pkg/sentry/fs/dev/net_tun.go
index 11a2984d8..77e8d222a 100644
--- a/pkg/sentry/fs/dev/net_tun.go
+++ b/pkg/sentry/fs/dev/net_tun.go
@@ -17,6 +17,7 @@ package dev
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
@@ -110,7 +111,7 @@ func (n *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io user
}
// Validate flags.
- flags, err := netstack.LinuxToTUNFlags(usermem.ByteOrder.Uint16(req.Data[:]))
+ flags, err := netstack.LinuxToTUNFlags(hostarch.ByteOrder.Uint16(req.Data[:]))
if err != nil {
return 0, err
}
@@ -119,7 +120,7 @@ func (n *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io user
case linux.TUNGETIFF:
var req linux.IFReq
copy(req.IFName[:], n.device.Name())
- usermem.ByteOrder.PutUint16(req.Data[:], netstack.TUNFlagsToLinux(n.device.Flags()))
+ hostarch.ByteOrder.PutUint16(req.Data[:], netstack.TUNFlagsToLinux(n.device.Flags()))
_, err := req.CopyOut(t, data)
return 0, err
diff --git a/pkg/sentry/fs/fsutil/dirty_set.go b/pkg/sentry/fs/fsutil/dirty_set.go
index 2c9446c1d..38383e730 100644
--- a/pkg/sentry/fs/fsutil/dirty_set.go
+++ b/pkg/sentry/fs/fsutil/dirty_set.go
@@ -18,9 +18,9 @@ import (
"math"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/usermem"
)
// DirtySet maps offsets into a memmap.Mappable to DirtyInfo. It is used to
@@ -215,7 +215,7 @@ func syncDirtyRange(ctx context.Context, mr memmap.MappableRange, cache *FileRan
if max < wbr.Start {
break
}
- ims, err := mem.MapInternal(cseg.FileRangeOf(wbr), usermem.Read)
+ ims, err := mem.MapInternal(cseg.FileRangeOf(wbr), hostarch.Read)
if err != nil {
return err
}
diff --git a/pkg/sentry/fs/fsutil/file_range_set.go b/pkg/sentry/fs/fsutil/file_range_set.go
index 1dc409d38..fdaceb1db 100644
--- a/pkg/sentry/fs/fsutil/file_range_set.go
+++ b/pkg/sentry/fs/fsutil/file_range_set.go
@@ -20,11 +20,11 @@ import (
"math"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/usermem"
)
// FileRangeSet maps offsets into a memmap.Mappable to offsets into a
@@ -130,7 +130,7 @@ func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.Map
// MemoryFile.AllocateAndFill truncates down to a page
// boundary, but FileRangeSet.Fill is supposed to
// zero-fill to the end of the page in this case.
- donepgaddr, ok := usermem.Addr(done).RoundUp()
+ donepgaddr, ok := hostarch.Addr(done).RoundUp()
if donepg := uint64(donepgaddr); ok && donepg != done {
dsts.DropFirst64(donepg - done)
done = donepg
@@ -184,7 +184,7 @@ func (frs *FileRangeSet) DropAll(mf *pgalloc.MemoryFile) {
// bytes after the new EOF on the same page are zeroed, and pages after the new
// EOF are freed.
func (frs *FileRangeSet) Truncate(end uint64, mf *pgalloc.MemoryFile) {
- pgendaddr, ok := usermem.Addr(end).RoundUp()
+ pgendaddr, ok := hostarch.Addr(end).RoundUp()
if ok {
pgend := uint64(pgendaddr)
@@ -208,7 +208,7 @@ func (frs *FileRangeSet) Truncate(end uint64, mf *pgalloc.MemoryFile) {
if seg.Ok() {
fr := seg.FileRange()
fr.Start += end - seg.Start()
- ims, err := mf.MapInternal(fr, usermem.Write)
+ ims, err := mf.MapInternal(fr, hostarch.Write)
if err != nil {
// There's no good recourse from here. This means
// that we can't keep cached memory consistent with
diff --git a/pkg/sentry/fs/fsutil/host_file_mapper.go b/pkg/sentry/fs/fsutil/host_file_mapper.go
index 54f7b7cdc..23528bf25 100644
--- a/pkg/sentry/fs/fsutil/host_file_mapper.go
+++ b/pkg/sentry/fs/fsutil/host_file_mapper.go
@@ -18,11 +18,11 @@ import (
"fmt"
"golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
)
// HostFileMapper caches mappings of an arbitrary host file descriptor. It is
@@ -50,13 +50,13 @@ type HostFileMapper struct {
}
const (
- chunkShift = usermem.HugePageShift
+ chunkShift = hostarch.HugePageShift
chunkSize = 1 << chunkShift
chunkMask = chunkSize - 1
)
func pagesInChunk(mr memmap.MappableRange, chunkStart uint64) int32 {
- return int32(mr.Intersect(memmap.MappableRange{chunkStart, chunkStart + chunkSize}).Length() / usermem.PageSize)
+ return int32(mr.Intersect(memmap.MappableRange{chunkStart, chunkStart + chunkSize}).Length() / hostarch.PageSize)
}
type mapping struct {
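A standalone sketch of the chunk arithmetic above: HostFileMapper caches mappings in huge-page-sized chunks (2 MiB when HugePageShift is 21, as on x86_64/arm64), and pagesInChunk counts how many 4 KiB pages of a mappable range fall inside a given chunk. The range values are illustrative.

package main

import "fmt"

const (
	pageSize   = 4096
	chunkShift = 21 // hostarch.HugePageShift on x86_64/arm64
	chunkSize  = 1 << chunkShift
)

type rng struct{ start, end uint64 }

func (r rng) intersect(o rng) rng {
	if o.start > r.start {
		r.start = o.start
	}
	if o.end < r.end {
		r.end = o.end
	}
	if r.end < r.start {
		r.end = r.start
	}
	return r
}

func pagesInChunk(mr rng, chunkStart uint64) int32 {
	in := mr.intersect(rng{chunkStart, chunkStart + chunkSize})
	return int32((in.end - in.start) / pageSize)
}

func main() {
	// Pages of [0x1000, 0x300000) that fall in the second 2 MiB chunk: 256.
	fmt.Println(pagesInChunk(rng{0x1000, 0x300000}, 0x200000))
}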
diff --git a/pkg/sentry/fs/fsutil/host_mappable.go b/pkg/sentry/fs/fsutil/host_mappable.go
index c15d8a946..e1e38b498 100644
--- a/pkg/sentry/fs/fsutil/host_mappable.go
+++ b/pkg/sentry/fs/fsutil/host_mappable.go
@@ -18,6 +18,7 @@ import (
"math"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/memmap"
@@ -59,7 +60,7 @@ func NewHostMappable(backingFile CachedFileObject) *HostMappable {
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (h *HostMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {
+func (h *HostMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
// Hot path. Avoid defers.
h.mu.Lock()
mapped := h.mappings.AddMapping(ms, ar, offset, writable)
@@ -71,7 +72,7 @@ func (h *HostMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, a
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (h *HostMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {
+func (h *HostMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
// Hot path. Avoid defers.
h.mu.Lock()
unmapped := h.mappings.RemoveMapping(ms, ar, offset, writable)
@@ -82,18 +83,18 @@ func (h *HostMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (h *HostMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {
+func (h *HostMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
return h.AddMapping(ctx, ms, dstAR, offset, writable)
}
// Translate implements memmap.Mappable.Translate.
-func (h *HostMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (h *HostMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
return []memmap.Translation{
{
Source: optional,
File: h,
Offset: optional.Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
},
}, nil
}
@@ -124,7 +125,7 @@ func (h *HostMappable) NotifyChangeFD() error {
}
// MapInternal implements memmap.File.MapInternal.
-func (h *HostMappable) MapInternal(fr memmap.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) {
+func (h *HostMappable) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) {
return h.hostFileMapper.MapInternal(fr, h.backingFile.FD(), at.Write)
}
diff --git a/pkg/sentry/fs/fsutil/inode_cached.go b/pkg/sentry/fs/fsutil/inode_cached.go
index 0ed7aafa5..7856b354b 100644
--- a/pkg/sentry/fs/fsutil/inode_cached.go
+++ b/pkg/sentry/fs/fsutil/inode_cached.go
@@ -19,6 +19,7 @@ import (
"io"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -622,7 +623,7 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
switch {
case seg.Ok():
// Get internal mappings from the cache.
- ims, err := mem.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read)
+ ims, err := mem.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Read)
if err != nil {
unlock()
return done, err
@@ -647,7 +648,7 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
// Read into the cache, then re-enter the loop to read from the
// cache.
reqMR := memmap.MappableRange{
- Start: uint64(usermem.Addr(gapMR.Start).RoundDown()),
+ Start: uint64(hostarch.Addr(gapMR.Start).RoundDown()),
End: fs.OffsetPageEnd(int64(gapMR.End)),
}
optMR := gap.Range()
@@ -729,7 +730,7 @@ func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error
case seg.Ok() && seg.Start() < mr.End:
// Get internal mappings from the cache.
segMR := seg.Range().Intersect(mr)
- ims, err := mf.MapInternal(seg.FileRangeOf(segMR), usermem.Write)
+ ims, err := mf.MapInternal(seg.FileRangeOf(segMR), hostarch.Write)
if err != nil {
rw.maybeGrowFile()
rw.c.dataMu.Unlock()
@@ -786,7 +787,7 @@ func (c *CachingInodeOperations) useHostPageCache() bool {
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {
+func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
// Hot path. Avoid defers.
c.mapsMu.Lock()
mapped := c.mappings.AddMapping(ms, ar, offset, writable)
@@ -808,7 +809,7 @@ func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.Mappi
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {
+func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
// Hot path. Avoid defers.
c.mapsMu.Lock()
unmapped := c.mappings.RemoveMapping(ms, ar, offset, writable)
@@ -836,12 +837,12 @@ func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Ma
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (c *CachingInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {
+func (c *CachingInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
return c.AddMapping(ctx, ms, dstAR, offset, writable)
}
// Translate implements memmap.Mappable.Translate.
-func (c *CachingInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (c *CachingInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
// Hot path. Avoid defer.
if c.useHostPageCache() {
mr := optional
@@ -853,7 +854,7 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option
Source: mr,
File: c,
Offset: mr.Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
},
}, nil
}
@@ -885,7 +886,7 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option
segMR := seg.Range().Intersect(optional)
// TODO(jamieliu): Make Translations writable even if writability is
// not required if already kept-dirty by another writable translation.
- perms := usermem.AccessType{
+ perms := hostarch.AccessType{
Read: true,
Execute: true,
}
@@ -1050,7 +1051,7 @@ func (c *CachingInodeOperations) DecRef(fr memmap.FileRange) {
// MapInternal implements memmap.File.MapInternal. This is used when we
// directly map an underlying host fd and CachingInodeOperations is used as the
// memmap.File during translation.
-func (c *CachingInodeOperations) MapInternal(fr memmap.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) {
+func (c *CachingInodeOperations) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) {
return c.hostFileMapper.MapInternal(fr, c.backingFile.FD(), at.Write)
}
diff --git a/pkg/sentry/fs/gofer/attr.go b/pkg/sentry/fs/gofer/attr.go
index cffc756cc..d6bff3f40 100644
--- a/pkg/sentry/fs/gofer/attr.go
+++ b/pkg/sentry/fs/gofer/attr.go
@@ -17,11 +17,11 @@ package gofer
import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/usermem"
)
// getattr returns the 9p attributes of the p9.File. On success, Mode, Size, and RDev
@@ -98,7 +98,7 @@ func bsize(pattr p9.Attr) int64 {
// Some files, particularly those that are not on a local file system,
// may have no clue of their block size. Better not to report something
// misleading or buggy and have a safe default.
- return usermem.PageSize
+ return hostarch.PageSize
}
// ntype returns an fs.InodeType from 9p attributes.
diff --git a/pkg/sentry/fs/inotify.go b/pkg/sentry/fs/inotify.go
index fb81d903d..1b83643db 100644
--- a/pkg/sentry/fs/inotify.go
+++ b/pkg/sentry/fs/inotify.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/uniqueid"
@@ -216,7 +217,7 @@ func (i *Inotify) Ioctl(ctx context.Context, _ *File, io usermem.IO, args arch.S
n += uint32(e.sizeOf())
}
var buf [4]byte
- usermem.ByteOrder.PutUint32(buf[:], n)
+ hostarch.ByteOrder.PutUint32(buf[:], n)
_, err := io.CopyOut(ctx, args[2].Pointer(), buf[:], usermem.IOOpts{})
return 0, err
diff --git a/pkg/sentry/fs/inotify_event.go b/pkg/sentry/fs/inotify_event.go
index 686e1b1cd..399aff1ed 100644
--- a/pkg/sentry/fs/inotify_event.go
+++ b/pkg/sentry/fs/inotify_event.go
@@ -19,6 +19,7 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -100,10 +101,10 @@ func (e *Event) sizeOf() int {
// construct the output. We use a buffer allocated ahead of time for
// performance. buf must be at least inotifyEventBaseSize bytes.
func (e *Event) CopyTo(ctx context.Context, buf []byte, dst usermem.IOSequence) (int64, error) {
- usermem.ByteOrder.PutUint32(buf[0:], uint32(e.wd))
- usermem.ByteOrder.PutUint32(buf[4:], e.mask)
- usermem.ByteOrder.PutUint32(buf[8:], e.cookie)
- usermem.ByteOrder.PutUint32(buf[12:], e.len)
+ hostarch.ByteOrder.PutUint32(buf[0:], uint32(e.wd))
+ hostarch.ByteOrder.PutUint32(buf[4:], e.mask)
+ hostarch.ByteOrder.PutUint32(buf[8:], e.cookie)
+ hostarch.ByteOrder.PutUint32(buf[12:], e.len)
writeLen := 0
diff --git a/pkg/sentry/fs/offset.go b/pkg/sentry/fs/offset.go
index 53b5df175..3a8c97d8f 100644
--- a/pkg/sentry/fs/offset.go
+++ b/pkg/sentry/fs/offset.go
@@ -17,14 +17,14 @@ package fs
import (
"math"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// OffsetPageEnd returns the file offset rounded up to the nearest
// page boundary. OffsetPageEnd panics if rounding up causes overflow,
// which shouldn't be possible given that offset is an int64.
func OffsetPageEnd(offset int64) uint64 {
- end, ok := usermem.Addr(offset).RoundUp()
+ end, ok := hostarch.Addr(offset).RoundUp()
if !ok {
panic("impossible overflow")
}
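A sketch of what hostarch.Addr.RoundUp does for OffsetPageEnd above: round an offset up to the next 4 KiB boundary and report overflow through the bool, which OffsetPageEnd treats as impossible for int64 inputs.

package main

import "fmt"

const pageSize = 4096 // hostarch.PageSize

func roundUp(v uint64) (uint64, bool) {
	r := v + pageSize - 1
	if r < v {
		return 0, false // wrapped around: overflow
	}
	return r &^ (pageSize - 1), true
}

func main() {
	end, ok := roundUp(12345)
	fmt.Println(end, ok) // 16384 true
}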
diff --git a/pkg/sentry/fs/overlay.go b/pkg/sentry/fs/overlay.go
index 01a1235b8..f96f5a3e5 100644
--- a/pkg/sentry/fs/overlay.go
+++ b/pkg/sentry/fs/overlay.go
@@ -19,11 +19,11 @@ import (
"strings"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// The virtual filesystem implements an overlay configuration. For a high-level
@@ -274,7 +274,7 @@ func (o *overlayEntry) markDirectoryDirty() {
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (o *overlayEntry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {
+func (o *overlayEntry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
o.mapsMu.Lock()
defer o.mapsMu.Unlock()
if err := o.inodeLocked().Mappable().AddMapping(ctx, ms, ar, offset, writable); err != nil {
@@ -285,7 +285,7 @@ func (o *overlayEntry) AddMapping(ctx context.Context, ms memmap.MappingSpace, a
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (o *overlayEntry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {
+func (o *overlayEntry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
o.mapsMu.Lock()
defer o.mapsMu.Unlock()
o.inodeLocked().Mappable().RemoveMapping(ctx, ms, ar, offset, writable)
@@ -293,7 +293,7 @@ func (o *overlayEntry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (o *overlayEntry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {
+func (o *overlayEntry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
o.mapsMu.Lock()
defer o.mapsMu.Unlock()
if err := o.inodeLocked().Mappable().CopyMapping(ctx, ms, srcAR, dstAR, offset, writable); err != nil {
@@ -304,7 +304,7 @@ func (o *overlayEntry) CopyMapping(ctx context.Context, ms memmap.MappingSpace,
}
// Translate implements memmap.Mappable.Translate.
-func (o *overlayEntry) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (o *overlayEntry) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
o.dataMu.RLock()
defer o.dataMu.RUnlock()
return o.inodeLocked().Mappable().Translate(ctx, required, optional, at)
diff --git a/pkg/sentry/fs/proc/exec_args.go b/pkg/sentry/fs/proc/exec_args.go
index e6171dd1d..24426b225 100644
--- a/pkg/sentry/fs/proc/exec_args.go
+++ b/pkg/sentry/fs/proc/exec_args.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/kernel"
@@ -113,7 +114,7 @@ func (f *execArgFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequen
defer m.DecUsers(ctx)
// Figure out the bounds of the exec arg we are trying to read.
- var execArgStart, execArgEnd usermem.Addr
+ var execArgStart, execArgEnd hostarch.Addr
switch f.arg {
case cmdlineExecArg:
execArgStart, execArgEnd = m.ArgvStart(), m.ArgvEnd()
@@ -172,8 +173,8 @@ func (f *execArgFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequen
// https://elixir.bootlin.com/linux/v4.20/source/fs/proc/base.c#L208
// we'll return one page total between argv and envp because of the
// above page restrictions.
- if lengthEnvv > usermem.PageSize-len(buf) {
- lengthEnvv = usermem.PageSize - len(buf)
+ if lengthEnvv > hostarch.PageSize-len(buf) {
+ lengthEnvv = hostarch.PageSize - len(buf)
}
// Make a new buffer to fit the whole thing
tmp := make([]byte, length+lengthEnvv)
diff --git a/pkg/sentry/fs/proc/inode.go b/pkg/sentry/fs/proc/inode.go
index d2859a4c2..78132f7a5 100644
--- a/pkg/sentry/fs/proc/inode.go
+++ b/pkg/sentry/fs/proc/inode.go
@@ -17,13 +17,13 @@ package proc
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/mm"
- "gvisor.dev/gvisor/pkg/usermem"
)
// LINT.IfChange
@@ -125,7 +125,7 @@ func newProcInode(ctx context.Context, iops fs.InodeOperations, msrc *fs.MountSo
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: typ,
}
if t != nil {
diff --git a/pkg/sentry/fs/proc/meminfo.go b/pkg/sentry/fs/proc/meminfo.go
index 91617267d..7d975d333 100644
--- a/pkg/sentry/fs/proc/meminfo.go
+++ b/pkg/sentry/fs/proc/meminfo.go
@@ -19,10 +19,10 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/usermem"
)
// LINT.IfChange
@@ -53,7 +53,7 @@ func (d *meminfoData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle)
anon := snapshot.Anonymous + snapshot.Tmpfs
file := snapshot.PageCache + snapshot.Mapped
// We don't actually have active/inactive LRUs, so just make up numbers.
- activeFile := (file / 2) &^ (usermem.PageSize - 1)
+ activeFile := (file / 2) &^ (hostarch.PageSize - 1)
inactiveFile := file - activeFile
var buf bytes.Buffer
diff --git a/pkg/sentry/fs/proc/net.go b/pkg/sentry/fs/proc/net.go
index 203cfa061..91c35eea9 100644
--- a/pkg/sentry/fs/proc/net.go
+++ b/pkg/sentry/fs/proc/net.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile"
@@ -35,7 +36,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/usermem"
)
// LINT.IfChange
@@ -367,10 +367,10 @@ func (n *netRoute) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]
)
if len(rt.GatewayAddr) == header.IPv4AddressSize {
flags |= linux.RTF_GATEWAY
- gw = usermem.ByteOrder.Uint32(rt.GatewayAddr)
+ gw = hostarch.ByteOrder.Uint32(rt.GatewayAddr)
}
if len(rt.DstAddr) == header.IPv4AddressSize {
- prefix = usermem.ByteOrder.Uint32(rt.DstAddr)
+ prefix = hostarch.ByteOrder.Uint32(rt.DstAddr)
}
l := fmt.Sprintf(
"%s\t%08X\t%08X\t%04X\t%d\t%d\t%d\t%08X\t%d\t%d\t%d",
@@ -520,7 +520,7 @@ func networkToHost16(n uint16) uint16 {
// binary.BigEndian.Uint16() require a read of binary.BigEndian and an
// interface method call, defeating inlining.
buf := [2]byte{byte(n >> 8 & 0xff), byte(n & 0xff)}
- return usermem.ByteOrder.Uint16(buf[:])
+ return hostarch.ByteOrder.Uint16(buf[:])
}
func writeInetAddr(w io.Writer, family int, i linux.SockAddr) {
@@ -542,14 +542,14 @@ func writeInetAddr(w io.Writer, family int, i linux.SockAddr) {
// __be32 which is a typedef for an unsigned int, and is printed with
// %X. This means that for a little-endian machine, Linux prints the
// least-significant byte of the address first. To emulate this, we first
- // invert the byte order for the address using usermem.ByteOrder.Uint32,
+ // invert the byte order for the address using hostarch.ByteOrder.Uint32,
// which makes it have the equivalent encoding to a __be32 on a little
// endian machine. Note that this operation is a no-op on a big endian
// machine. Then similar to Linux, we format it with %X, which will print
// the most-significant byte of the __be32 address first, which is now
// actually the least-significant byte of the original address in
// linux.SockAddrInet.Addr on little endian machines, due to the conversion.
- addr := usermem.ByteOrder.Uint32(a.Addr[:])
+ addr := hostarch.ByteOrder.Uint32(a.Addr[:])
fmt.Fprintf(w, "%08X:%04X ", addr, port)
case linux.AF_INET6:
@@ -559,10 +559,10 @@ func writeInetAddr(w io.Writer, family int, i linux.SockAddr) {
}
port := networkToHost16(a.Port)
- addr0 := usermem.ByteOrder.Uint32(a.Addr[0:4])
- addr1 := usermem.ByteOrder.Uint32(a.Addr[4:8])
- addr2 := usermem.ByteOrder.Uint32(a.Addr[8:12])
- addr3 := usermem.ByteOrder.Uint32(a.Addr[12:16])
+ addr0 := hostarch.ByteOrder.Uint32(a.Addr[0:4])
+ addr1 := hostarch.ByteOrder.Uint32(a.Addr[4:8])
+ addr2 := hostarch.ByteOrder.Uint32(a.Addr[8:12])
+ addr3 := hostarch.ByteOrder.Uint32(a.Addr[12:16])
fmt.Fprintf(w, "%08X%08X%08X%08X:%04X ", addr0, addr1, addr2, addr3, port)
}
}
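A standalone sketch of the byte-order handling above: networkToHost16 swaps a big-endian (network order) port into host order without an interface call, and IPv4 addresses are reinterpreted with the host byte order before being printed with %08X, which matches Linux's /proc/net output on little-endian machines. The port and address values are illustrative.

package main

import (
	"encoding/binary"
	"fmt"
)

var byteOrder = binary.LittleEndian // hostarch.ByteOrder on x86/arm64

func networkToHost16(n uint16) uint16 {
	buf := [2]byte{byte(n >> 8), byte(n)}
	return byteOrder.Uint16(buf[:])
}

func main() {
	// A port field read natively from a sockaddr holds network-order bytes.
	port := networkToHost16(0x901F)                // -> 0x1F90 (8080)
	addr := byteOrder.Uint32([]byte{127, 0, 0, 1}) // 127.0.0.1, least-significant byte printed first
	fmt.Printf("%08X:%04X\n", addr, port)          // 0100007F:1F90, as in /proc/net/tcp
}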
diff --git a/pkg/sentry/fs/proc/seqfile/seqfile.go b/pkg/sentry/fs/proc/seqfile/seqfile.go
index 6121f0e95..b01688b1d 100644
--- a/pkg/sentry/fs/proc/seqfile/seqfile.go
+++ b/pkg/sentry/fs/proc/seqfile/seqfile.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
@@ -131,7 +132,7 @@ func NewSeqFileInode(ctx context.Context, source SeqSource, msrc *fs.MountSource
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, iops, msrc, sattr)
diff --git a/pkg/sentry/fs/proc/sys_net.go b/pkg/sentry/fs/proc/sys_net.go
index bbe282c03..1d09afdd7 100644
--- a/pkg/sentry/fs/proc/sys_net.go
+++ b/pkg/sentry/fs/proc/sys_net.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
@@ -76,7 +77,7 @@ func newTCPMemInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack, dir
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, tm, msrc, sattr)
@@ -136,7 +137,7 @@ func (f *tcpMemFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSequen
f.tcpMemInode.mu.Lock()
defer f.tcpMemInode.mu.Unlock()
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
size, err := readSize(f.tcpMemInode.dir, f.tcpMemInode.s)
if err != nil {
return 0, err
@@ -192,7 +193,7 @@ func newTCPSackInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *f
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, ts, msrc, sattr)
@@ -264,7 +265,7 @@ func (f *tcpSackFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSeque
// Only consider size of one memory page for input for performance reasons.
// We are only reading if it's zero or not anyway.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
var v int32
n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
@@ -294,7 +295,7 @@ func newTCPRecoveryInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, ts, msrc, sattr)
@@ -354,7 +355,7 @@ func (f *tcpRecoveryFile) Write(ctx context.Context, _ *fs.File, src usermem.IOS
if src.NumBytes() == 0 {
return 0, nil
}
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
var v int32
n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
@@ -413,7 +414,7 @@ func newIPForwardingInode(ctx context.Context, msrc *fs.MountSource, s inet.Stac
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, ipf, msrc, sattr)
@@ -486,7 +487,7 @@ func (f *ipForwardingFile) Write(ctx context.Context, _ *fs.File, src usermem.IO
// Only consider size of one memory page for input for performance reasons.
// We are only reading if it's zero or not anyway.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
var v int32
n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
@@ -524,7 +525,7 @@ func newPortRangeInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack)
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, ipf, msrc, sattr)
@@ -589,7 +590,7 @@ func (pf *portRangeFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSe
// Only consider size of one memory page for input for performance
// reasons.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
ports := make([]int32, 2)
n, err := usermem.CopyInt32StringsInVec(ctx, src.IO, src.Addrs, ports, src.Opts)
diff --git a/pkg/sentry/fs/proc/task.go b/pkg/sentry/fs/proc/task.go
index f43d6c221..ae5ed25f9 100644
--- a/pkg/sentry/fs/proc/task.go
+++ b/pkg/sentry/fs/proc/task.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
@@ -469,7 +470,7 @@ func (m *memDataFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequen
defer mm.DecUsers(ctx)
// Buffer the read data because of MM locks
buf := make([]byte, dst.NumBytes())
- n, readErr := mm.CopyIn(ctx, usermem.Addr(offset), buf, usermem.IOOpts{IgnorePermissions: true})
+ n, readErr := mm.CopyIn(ctx, hostarch.Addr(offset), buf, usermem.IOOpts{IgnorePermissions: true})
if n > 0 {
if _, err := dst.CopyOut(ctx, buf[:n]); err != nil {
return 0, syserror.EFAULT
@@ -632,7 +633,7 @@ func (s *taskStatData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle)
rss = mm.ResidentSetSize()
}
})
- fmt.Fprintf(&buf, "%d %d ", vss, rss/usermem.PageSize)
+ fmt.Fprintf(&buf, "%d %d ", vss, rss/hostarch.PageSize)
// rsslim.
fmt.Fprintf(&buf, "%d ", s.t.ThreadGroup().Limits().Get(limits.Rss).Cur)
@@ -684,7 +685,7 @@ func (s *statmData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([
})
var buf bytes.Buffer
- fmt.Fprintf(&buf, "%d %d 0 0 0 0 0\n", vss/usermem.PageSize, rss/usermem.PageSize)
+ fmt.Fprintf(&buf, "%d %d 0 0 0 0 0\n", vss/hostarch.PageSize, rss/hostarch.PageSize)
return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*statmData)(nil)}}, 0
}
@@ -939,8 +940,8 @@ func (f *auxvecFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequenc
buf := make([]byte, size)
for i, e := range auxv {
- usermem.ByteOrder.PutUint64(buf[16*i:], e.Key)
- usermem.ByteOrder.PutUint64(buf[16*i+8:], uint64(e.Value))
+ hostarch.ByteOrder.PutUint64(buf[16*i:], e.Key)
+ hostarch.ByteOrder.PutUint64(buf[16*i+8:], uint64(e.Value))
}
n, err := dst.CopyOut(ctx, buf[offset:])
@@ -1020,7 +1021,7 @@ func (f *oomScoreAdjFile) Write(ctx context.Context, _ *fs.File, src usermem.IOS
}
// Limit input size so as not to impact performance if input size is large.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
var v int32
n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
diff --git a/pkg/sentry/fs/proc/uid_gid_map.go b/pkg/sentry/fs/proc/uid_gid_map.go
index 2bc9485d8..30d5ad4cf 100644
--- a/pkg/sentry/fs/proc/uid_gid_map.go
+++ b/pkg/sentry/fs/proc/uid_gid_map.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/kernel"
@@ -132,7 +133,7 @@ func (imfo *idMapFileOperations) Write(ctx context.Context, file *fs.File, src u
// the system page size, and the write must be performed at the start of
// the file ..." - user_namespaces(7)
srclen := src.NumBytes()
- if srclen >= usermem.PageSize || offset != 0 {
+ if srclen >= hostarch.PageSize || offset != 0 {
return 0, syserror.EINVAL
}
b := make([]byte, srclen)
diff --git a/pkg/sentry/fs/ramfs/tree.go b/pkg/sentry/fs/ramfs/tree.go
index dfc9d3453..0ace636c9 100644
--- a/pkg/sentry/fs/ramfs/tree.go
+++ b/pkg/sentry/fs/ramfs/tree.go
@@ -20,9 +20,9 @@ import (
"strings"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/anon"
- "gvisor.dev/gvisor/pkg/usermem"
)
// MakeDirectoryTree constructs a ramfs tree of all directories containing
@@ -71,7 +71,7 @@ func emptyDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode {
return fs.NewInode(ctx, dir, msrc, fs.StableAttr{
DeviceID: anon.PseudoDevice.DeviceID(),
InodeID: anon.PseudoDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Directory,
})
}
diff --git a/pkg/sentry/fs/sys/sys.go b/pkg/sentry/fs/sys/sys.go
index 0891645e4..101779a7a 100644
--- a/pkg/sentry/fs/sys/sys.go
+++ b/pkg/sentry/fs/sys/sys.go
@@ -17,16 +17,16 @@ package sys
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
- "gvisor.dev/gvisor/pkg/usermem"
)
func newFile(ctx context.Context, node fs.InodeOperations, msrc *fs.MountSource) *fs.Inode {
sattr := fs.StableAttr{
DeviceID: sysfsDevice.DeviceID(),
InodeID: sysfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, node, msrc, sattr)
@@ -37,7 +37,7 @@ func newDir(ctx context.Context, msrc *fs.MountSource, contents map[string]*fs.I
return fs.NewInode(ctx, d, msrc, fs.StableAttr{
DeviceID: sysfsDevice.DeviceID(),
InodeID: sysfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialDirectory,
})
}
diff --git a/pkg/sentry/fs/timerfd/timerfd.go b/pkg/sentry/fs/timerfd/timerfd.go
index 46511a6ac..c8ebe256c 100644
--- a/pkg/sentry/fs/timerfd/timerfd.go
+++ b/pkg/sentry/fs/timerfd/timerfd.go
@@ -20,6 +20,7 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/anon"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
@@ -124,7 +125,7 @@ func (t *TimerOperations) Read(ctx context.Context, file *fs.File, dst usermem.I
}
if val := atomic.SwapUint64(&t.val, 0); val != 0 {
var buf [sizeofUint64]byte
- usermem.ByteOrder.PutUint64(buf[:], val)
+ hostarch.ByteOrder.PutUint64(buf[:], val)
if _, err := dst.CopyOut(ctx, buf[:]); err != nil {
// Linux does not undo consuming the number of expirations even if
// writing to userspace fails.
diff --git a/pkg/sentry/fs/tmpfs/inode_file.go b/pkg/sentry/fs/tmpfs/inode_file.go
index ad4aea282..f4de8c968 100644
--- a/pkg/sentry/fs/tmpfs/inode_file.go
+++ b/pkg/sentry/fs/tmpfs/inode_file.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
@@ -125,7 +126,7 @@ func NewMemfdInode(ctx context.Context, allowSeals bool) *fs.Inode {
Type: fs.RegularFile,
DeviceID: tmpfsDevice.DeviceID(),
InodeID: tmpfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
})
}
@@ -392,7 +393,7 @@ func (rw *fileReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
switch {
case seg.Ok():
// Get internal mappings.
- ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read)
+ ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Read)
if err != nil {
return done, err
}
@@ -463,7 +464,7 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
//
// See Linux, mm/filemap.c:generic_perform_write() and
// mm/shmem.c:shmem_write_begin().
- if pgstart := int64(usermem.Addr(rw.f.attr.Size).RoundDown()); end > pgstart {
+ if pgstart := int64(hostarch.Addr(rw.f.attr.Size).RoundDown()); end > pgstart {
end = pgstart
}
if end <= rw.offset {
@@ -483,8 +484,8 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
mf := rw.f.kernel.MemoryFile()
// Page-aligned mr for when we need to allocate memory. RoundUp can't
// overflow since end is an int64.
- pgstartaddr := usermem.Addr(rw.offset).RoundDown()
- pgendaddr, _ := usermem.Addr(end).RoundUp()
+ pgstartaddr := hostarch.Addr(rw.offset).RoundDown()
+ pgendaddr, _ := hostarch.Addr(end).RoundUp()
pgMR := memmap.MappableRange{uint64(pgstartaddr), uint64(pgendaddr)}
var done uint64
@@ -494,7 +495,7 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
switch {
case seg.Ok():
// Get internal mappings.
- ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Write)
+ ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Write)
if err != nil {
return done, err
}
@@ -527,7 +528,7 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {
+func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
f.mapsMu.Lock()
defer f.mapsMu.Unlock()
@@ -544,7 +545,7 @@ func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingS
pagesBefore := f.writableMappingPages
// ar is guaranteed to be page aligned per memmap.Mappable.
- f.writableMappingPages += uint64(ar.Length() / usermem.PageSize)
+ f.writableMappingPages += uint64(ar.Length() / hostarch.PageSize)
if f.writableMappingPages < pagesBefore {
panic(fmt.Sprintf("Overflow while mapping potentially writable pages pointing to a tmpfs file. Before %v, after %v", pagesBefore, f.writableMappingPages))
@@ -555,7 +556,7 @@ func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingS
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {
+func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
f.mapsMu.Lock()
defer f.mapsMu.Unlock()
@@ -565,7 +566,7 @@ func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Mappi
pagesBefore := f.writableMappingPages
// ar is guaranteed to be page aligned per memmap.Mappable.
- f.writableMappingPages -= uint64(ar.Length() / usermem.PageSize)
+ f.writableMappingPages -= uint64(ar.Length() / hostarch.PageSize)
if f.writableMappingPages > pagesBefore {
panic(fmt.Sprintf("Underflow while unmapping potentially writable pages pointing to a tmpfs file. Before %v, after %v", pagesBefore, f.writableMappingPages))
@@ -574,12 +575,12 @@ func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Mappi
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (f *fileInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {
+func (f *fileInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
return f.AddMapping(ctx, ms, dstAR, offset, writable)
}
// Translate implements memmap.Mappable.Translate.
-func (f *fileInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (f *fileInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
f.dataMu.Lock()
defer f.dataMu.Unlock()
@@ -612,7 +613,7 @@ func (f *fileInodeOperations) Translate(ctx context.Context, required, optional
Source: segMR,
File: mf,
Offset: seg.FileRangeOf(segMR).Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
})
translatedEnd = segMR.End
}
diff --git a/pkg/sentry/fs/tmpfs/tmpfs.go b/pkg/sentry/fs/tmpfs/tmpfs.go
index cf4ed5de0..577052888 100644
--- a/pkg/sentry/fs/tmpfs/tmpfs.go
+++ b/pkg/sentry/fs/tmpfs/tmpfs.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
var fsInfo = fs.Info{
@@ -41,8 +41,8 @@ var fsInfo = fs.Info{
// chosen to ensure that BlockSize * Blocks does not overflow int64 (which
// applications may also handle incorrectly).
// TODO(b/29637826): allow configuring a tmpfs size and enforce it.
- TotalBlocks: math.MaxInt64 / usermem.PageSize,
- FreeBlocks: math.MaxInt64 / usermem.PageSize,
+ TotalBlocks: math.MaxInt64 / hostarch.PageSize,
+ FreeBlocks: math.MaxInt64 / hostarch.PageSize,
}
// rename implements fs.InodeOperations.Rename for tmpfs nodes.
@@ -99,7 +99,7 @@ func NewDir(ctx context.Context, contents map[string]*fs.Inode, owner fs.FileOwn
return fs.NewInode(ctx, d, msrc, fs.StableAttr{
DeviceID: tmpfsDevice.DeviceID(),
InodeID: tmpfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Directory,
})
}
@@ -232,7 +232,7 @@ func (d *Dir) newCreateOps() *ramfs.CreateOps {
return fs.NewInode(ctx, iops, dir.MountSource, fs.StableAttr{
DeviceID: tmpfsDevice.DeviceID(),
InodeID: tmpfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.RegularFile,
}), nil
},
@@ -281,7 +281,7 @@ func NewSymlink(ctx context.Context, target string, owner fs.FileOwner, msrc *fs
return fs.NewInode(ctx, s, msrc, fs.StableAttr{
DeviceID: tmpfsDevice.DeviceID(),
InodeID: tmpfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Symlink,
})
}
@@ -311,7 +311,7 @@ func NewSocket(ctx context.Context, socket transport.BoundEndpoint, owner fs.Fil
return fs.NewInode(ctx, s, msrc, fs.StableAttr{
DeviceID: tmpfsDevice.DeviceID(),
InodeID: tmpfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Socket,
})
}
@@ -348,7 +348,7 @@ func NewFifo(ctx context.Context, owner fs.FileOwner, perms fs.FilePermissions,
return fs.NewInode(ctx, fifoIops, msrc, fs.StableAttr{
DeviceID: tmpfsDevice.DeviceID(),
InodeID: tmpfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Pipe,
})
}
diff --git a/pkg/sentry/fs/tty/dir.go b/pkg/sentry/fs/tty/dir.go
index c2da80bc2..13c9dbe7d 100644
--- a/pkg/sentry/fs/tty/dir.go
+++ b/pkg/sentry/fs/tty/dir.go
@@ -22,6 +22,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -122,7 +123,7 @@ func newDir(ctx context.Context, m *fs.MountSource) *fs.Inode {
// TODO(b/75267214): Since ptsDevice must be shared between
// different mounts, we must not assign fixed numbers.
InodeID: ptsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Directory,
})
}
diff --git a/pkg/sentry/fsimpl/eventfd/eventfd.go b/pkg/sentry/fsimpl/eventfd/eventfd.go
index 30bd05357..4f79cfcb7 100644
--- a/pkg/sentry/fsimpl/eventfd/eventfd.go
+++ b/pkg/sentry/fsimpl/eventfd/eventfd.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/fdnotifier"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
@@ -188,7 +189,7 @@ func (efd *EventFileDescription) read(ctx context.Context, dst usermem.IOSequenc
efd.queue.Notify(waiter.WritableEvents)
var buf [8]byte
- usermem.ByteOrder.PutUint64(buf[:], val)
+ hostarch.ByteOrder.PutUint64(buf[:], val)
_, err := dst.CopyOut(ctx, buf[:])
return err
}
@@ -196,7 +197,7 @@ func (efd *EventFileDescription) read(ctx context.Context, dst usermem.IOSequenc
// Preconditions: Must be called with efd.mu locked.
func (efd *EventFileDescription) hostWriteLocked(val uint64) error {
var buf [8]byte
- usermem.ByteOrder.PutUint64(buf[:], val)
+ hostarch.ByteOrder.PutUint64(buf[:], val)
_, err := unix.Write(efd.hostfd, buf[:])
if err == unix.EWOULDBLOCK {
return syserror.ErrWouldBlock
@@ -209,7 +210,7 @@ func (efd *EventFileDescription) write(ctx context.Context, src usermem.IOSequen
if _, err := src.CopyIn(ctx, buf[:]); err != nil {
return err
}
- val := usermem.ByteOrder.Uint64(buf[:])
+ val := hostarch.ByteOrder.Uint64(buf[:])
return efd.Signal(val)
}
diff --git a/pkg/sentry/fsimpl/fuse/read_write.go b/pkg/sentry/fsimpl/fuse/read_write.go
index 23ce91849..66ea889f9 100644
--- a/pkg/sentry/fsimpl/fuse/read_write.go
+++ b/pkg/sentry/fsimpl/fuse/read_write.go
@@ -20,11 +20,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// ReadInPages sends FUSE_READ requests for the size after round it up to
@@ -43,10 +43,10 @@ func (fs *filesystem) ReadInPages(ctx context.Context, fd *regularFileFD, off ui
}
// Round up to a multiple of page size.
- readSize, _ := usermem.PageRoundUp(uint64(size))
+ readSize, _ := hostarch.PageRoundUp(uint64(size))
// One request cannot exceed either maxRead or maxPages.
- maxPages := fs.conn.maxRead >> usermem.PageShift
+ maxPages := fs.conn.maxRead >> hostarch.PageShift
if maxPages > uint32(fs.conn.maxPages) {
maxPages = uint32(fs.conn.maxPages)
}
@@ -54,9 +54,9 @@ func (fs *filesystem) ReadInPages(ctx context.Context, fd *regularFileFD, off ui
var outs [][]byte
var sizeRead uint32
- // readSize is a multiple of usermem.PageSize.
+ // readSize is a multiple of hostarch.PageSize.
// Always request bytes as a multiple of pages.
- pagesRead, pagesToRead := uint32(0), uint32(readSize>>usermem.PageShift)
+ pagesRead, pagesToRead := uint32(0), uint32(readSize>>hostarch.PageShift)
// Reuse the same struct for unmarshalling to avoid unnecessary memory allocation.
in := linux.FUSEReadIn{
@@ -76,8 +76,8 @@ func (fs *filesystem) ReadInPages(ctx context.Context, fd *regularFileFD, off ui
pagesCanRead = maxPages
}
- in.Offset = off + (uint64(pagesRead) << usermem.PageShift)
- in.Size = pagesCanRead << usermem.PageShift
+ in.Offset = off + (uint64(pagesRead) << hostarch.PageShift)
+ in.Size = pagesCanRead << hostarch.PageShift
// TODO(gvisor.dev/issue/3247): support async read.
@@ -159,7 +159,7 @@ func (fs *filesystem) Write(ctx context.Context, fd *regularFileFD, off uint64,
}
// One request cannot exceed either maxWrite or maxPages.
- maxWrite := uint32(fs.conn.maxPages) << usermem.PageShift
+ maxWrite := uint32(fs.conn.maxPages) << hostarch.PageShift
if maxWrite > fs.conn.maxWrite {
maxWrite = fs.conn.maxWrite
}
@@ -188,8 +188,8 @@ func (fs *filesystem) Write(ctx context.Context, fd *regularFileFD, off uint64,
// Limit the write size to one page.
// Note that the bigWrites flag is obsolete,
// latest libfuse always sets it on.
- if !fs.conn.bigWrites && toWrite > usermem.PageSize {
- toWrite = usermem.PageSize
+ if !fs.conn.bigWrites && toWrite > hostarch.PageSize {
+ toWrite = hostarch.PageSize
}
// Limit the write size to maxWrite.
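A standalone sketch of the request sizing in ReadInPages above: the requested byte count is rounded up to whole 4 KiB pages, each request is capped at a per-request page budget, and successive requests advance the offset by whole pages. The sizes and the page budget are illustrative.

package main

import "fmt"

const (
	pageShift = 12 // hostarch.PageShift
	pageSize  = 1 << pageShift
)

func pageRoundUp(n uint64) uint64 { return (n + pageSize - 1) &^ (pageSize - 1) }

func main() {
	size := uint64(10000)         // bytes the caller asked to read
	readSize := pageRoundUp(size) // 12288: always request whole pages
	maxPages := uint32(2)         // per-request cap (maxRead >> PageShift, bounded by maxPages)

	pagesRead, pagesToRead := uint32(0), uint32(readSize>>pageShift)
	off := uint64(0)
	for pagesRead < pagesToRead {
		pagesCanRead := pagesToRead - pagesRead
		if pagesCanRead > maxPages {
			pagesCanRead = maxPages
		}
		fmt.Printf("request: offset=%d size=%d\n",
			off+(uint64(pagesRead)<<pageShift), pagesCanRead<<pageShift)
		pagesRead += pagesCanRead
	}
}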
diff --git a/pkg/sentry/fsimpl/fuse/request_response.go b/pkg/sentry/fsimpl/fuse/request_response.go
index 10fb9d7d2..8a72489fa 100644
--- a/pkg/sentry/fsimpl/fuse/request_response.go
+++ b/pkg/sentry/fsimpl/fuse/request_response.go
@@ -19,10 +19,10 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/usermem"
)
// fuseInitRes is a variable-length wrapper of linux.FUSEInitOut. The FUSE
@@ -45,29 +45,29 @@ func (r *fuseInitRes) UnmarshalBytes(src []byte) {
out := &r.initOut
// Introduced before FUSE kernel version 7.13.
- out.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ out.Major = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- out.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ out.Minor = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- out.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ out.MaxReadahead = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- out.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ out.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- out.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ out.MaxBackground = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- out.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ out.CongestionThreshold = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- out.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ out.MaxWrite = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Introduced in FUSE kernel version 7.23.
if len(src) >= 4 {
- out.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ out.TimeGran = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
// Introduced in FUSE kernel version 7.28.
if len(src) >= 2 {
- out.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ out.MaxPages = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
}
_ = src // Remove unused warning.
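A minimal sketch of the hand-rolled UnmarshalBytes pattern above: fixed-width fields are consumed from the front of src in host byte order, and trailing fields introduced by newer FUSE protocol revisions are decoded only when the payload is long enough to contain them. The struct and field selection here are illustrative, not the full FUSEInitOut layout.

package main

import (
	"encoding/binary"
	"fmt"
)

type initOut struct {
	Major, Minor uint32
	MaxPages     uint16 // present only in newer replies
}

func unmarshal(src []byte) initOut {
	bo := binary.LittleEndian // hostarch.ByteOrder on x86/arm64
	var out initOut
	out.Major = bo.Uint32(src[:4])
	src = src[4:]
	out.Minor = bo.Uint32(src[:4])
	src = src[4:]
	if len(src) >= 2 { // older servers omit this field entirely
		out.MaxPages = bo.Uint16(src[:2])
	}
	return out
}

func main() {
	older := []byte{7, 0, 0, 0, 13, 0, 0, 0}
	newer := []byte{7, 0, 0, 0, 31, 0, 0, 0, 0x20, 0}
	fmt.Printf("%+v\n%+v\n", unmarshal(older), unmarshal(newer))
}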
diff --git a/pkg/sentry/fsimpl/gofer/directory.go b/pkg/sentry/fsimpl/gofer/directory.go
index 9da01cba3..177e42649 100644
--- a/pkg/sentry/fsimpl/gofer/directory.go
+++ b/pkg/sentry/fsimpl/gofer/directory.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/refsvfs2"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
func (d *dentry) isDir() bool {
@@ -98,7 +98,7 @@ func (d *dentry) createSyntheticChildLocked(opts *createSyntheticOpts) {
mode: uint32(opts.mode),
uid: uint32(opts.kuid),
gid: uint32(opts.kgid),
- blockSize: usermem.PageSize, // arbitrary
+ blockSize: hostarch.PageSize, // arbitrary
atime: now,
mtime: now,
ctime: now,
diff --git a/pkg/sentry/fsimpl/gofer/gofer.go b/pkg/sentry/fsimpl/gofer/gofer.go
index 692da02c1..a0c05231a 100644
--- a/pkg/sentry/fsimpl/gofer/gofer.go
+++ b/pkg/sentry/fsimpl/gofer/gofer.go
@@ -44,6 +44,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/p9"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
@@ -60,7 +61,6 @@ import (
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/unet"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Name is the default filesystem name.
@@ -872,7 +872,7 @@ func (fs *filesystem) newDentry(ctx context.Context, file p9file, qid p9.QID, ma
mode: uint32(attr.Mode),
uid: uint32(fs.opts.dfltuid),
gid: uint32(fs.opts.dfltgid),
- blockSize: usermem.PageSize,
+ blockSize: hostarch.PageSize,
readFD: -1,
writeFD: -1,
mmapFD: -1,
@@ -1217,8 +1217,8 @@ func (d *dentry) updateSizeLocked(newSize uint64) {
// so we can't race with Write or another truncate.)
d.dataMu.Unlock()
if d.size < oldSize {
- oldpgend, _ := usermem.PageRoundUp(oldSize)
- newpgend, _ := usermem.PageRoundUp(d.size)
+ oldpgend, _ := hostarch.PageRoundUp(oldSize)
+ newpgend, _ := hostarch.PageRoundUp(d.size)
if oldpgend != newpgend {
d.mapsMu.Lock()
d.mappings.Invalidate(memmap.MappableRange{newpgend, oldpgend}, memmap.InvalidateOpts{
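updateSizeLocked invalidates mappings of pages that lie wholly beyond a shrunken file. A standalone sketch of the page-round-up arithmetic and the resulting invalidation range, assuming 4 KiB pages (the helper mirrors PageRoundUp's rounded-value-plus-ok shape, not its exact implementation):

package main

import "fmt"

const pageSize = 4096 // assume 4 KiB pages

// pageRoundUp rounds x up to the next page boundary and reports overflow.
func pageRoundUp(x uint64) (uint64, bool) {
	rounded := (x + pageSize - 1) &^ (pageSize - 1)
	return rounded, rounded >= x
}

func main() {
	oldSize, newSize := uint64(10000), uint64(3000)
	oldpgend, _ := pageRoundUp(oldSize) // 12288
	newpgend, _ := pageRoundUp(newSize) // 4096
	if oldpgend != newpgend {
		// Pages entirely past the new size are stale; their mappings
		// would be invalidated over [newpgend, oldpgend).
		fmt.Printf("invalidate [%d, %d)\n", newpgend, oldpgend)
	}
}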
diff --git a/pkg/sentry/fsimpl/gofer/regular_file.go b/pkg/sentry/fsimpl/gofer/regular_file.go
index 4f1ad0c88..47563538c 100644
--- a/pkg/sentry/fsimpl/gofer/regular_file.go
+++ b/pkg/sentry/fsimpl/gofer/regular_file.go
@@ -22,6 +22,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/safemem"
@@ -291,8 +292,8 @@ func (fd *regularFileFD) writeCache(ctx context.Context, d *dentry, offset int64
}
// Remove touched pages from the cache.
- pgstart := usermem.PageRoundDown(uint64(offset))
- pgend, ok := usermem.PageRoundUp(uint64(offset + src.NumBytes()))
+ pgstart := hostarch.PageRoundDown(uint64(offset))
+ pgend, ok := hostarch.PageRoundUp(uint64(offset + src.NumBytes()))
if !ok {
return syserror.EINVAL
}
@@ -408,7 +409,7 @@ func (rw *dentryReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error)
switch {
case seg.Ok():
// Get internal mappings from the cache.
- ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read)
+ ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Read)
if err != nil {
dataMuUnlock()
rw.d.handleMu.RUnlock()
@@ -434,9 +435,9 @@ func (rw *dentryReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error)
if fillCache {
// Read into the cache, then re-enter the loop to read from the
// cache.
- gapEnd, _ := usermem.PageRoundUp(gapMR.End)
+ gapEnd, _ := hostarch.PageRoundUp(gapMR.End)
reqMR := memmap.MappableRange{
- Start: usermem.PageRoundDown(gapMR.Start),
+ Start: hostarch.PageRoundDown(gapMR.Start),
End: gapEnd,
}
optMR := gap.Range()
@@ -527,7 +528,7 @@ func (rw *dentryReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, erro
case seg.Ok():
// Get internal mappings from the cache.
segMR := seg.Range().Intersect(mr)
- ims, err := mf.MapInternal(seg.FileRangeOf(segMR), usermem.Write)
+ ims, err := mf.MapInternal(seg.FileRangeOf(segMR), hostarch.Write)
if err != nil {
retErr = err
goto exitLoop
@@ -714,7 +715,7 @@ func (d *dentry) mayCachePages() bool {
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {
+func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
d.mapsMu.Lock()
mapped := d.mappings.AddMapping(ms, ar, offset, writable)
// Do this unconditionally since whether we have a host FD can change
@@ -735,7 +736,7 @@ func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar user
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {
+func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
d.mapsMu.Lock()
unmapped := d.mappings.RemoveMapping(ms, ar, offset, writable)
for _, r := range unmapped {
@@ -759,12 +760,12 @@ func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar u
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (d *dentry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {
+func (d *dentry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
return d.AddMapping(ctx, ms, dstAR, offset, writable)
}
// Translate implements memmap.Mappable.Translate.
-func (d *dentry) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (d *dentry) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
d.handleMu.RLock()
if d.mmapFD >= 0 && !d.fs.opts.forcePageCache {
d.handleMu.RUnlock()
@@ -777,7 +778,7 @@ func (d *dentry) Translate(ctx context.Context, required, optional memmap.Mappab
Source: mr,
File: &d.pf,
Offset: mr.Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
},
}, nil
}
@@ -786,7 +787,7 @@ func (d *dentry) Translate(ctx context.Context, required, optional memmap.Mappab
// Constrain translations to d.size (rounded up) to prevent translation to
// pages that may be concurrently truncated.
- pgend, _ := usermem.PageRoundUp(d.size)
+ pgend, _ := hostarch.PageRoundUp(d.size)
var beyondEOF bool
if required.End > pgend {
if required.Start >= pgend {
@@ -811,7 +812,7 @@ func (d *dentry) Translate(ctx context.Context, required, optional memmap.Mappab
segMR := seg.Range().Intersect(optional)
// TODO(jamieliu): Make Translations writable even if writability is
// not required if already kept-dirty by another writable translation.
- perms := usermem.AccessType{
+ perms := hostarch.AccessType{
Read: true,
Execute: true,
}
@@ -954,7 +955,7 @@ func (d *dentryPlatformFile) DecRef(fr memmap.FileRange) {
}
// MapInternal implements memmap.File.MapInternal.
-func (d *dentryPlatformFile) MapInternal(fr memmap.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) {
+func (d *dentryPlatformFile) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) {
d.handleMu.RLock()
defer d.handleMu.RUnlock()
return d.hostFileMapper.MapInternal(fr, int(d.mmapFD), at.Write)
diff --git a/pkg/sentry/fsimpl/gofer/save_restore.go b/pkg/sentry/fsimpl/gofer/save_restore.go
index c90071e4e..83e841a51 100644
--- a/pkg/sentry/fsimpl/gofer/save_restore.go
+++ b/pkg/sentry/fsimpl/gofer/save_restore.go
@@ -22,12 +22,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/fdnotifier"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/refsvfs2"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
type saveRestoreContextID int
@@ -85,7 +85,7 @@ func (fs *filesystem) PrepareSave(ctx context.Context) error {
func (fd *specialFileFD) savePipeData(ctx context.Context) error {
fd.bufMu.Lock()
defer fd.bufMu.Unlock()
- var buf [usermem.PageSize]byte
+ var buf [hostarch.PageSize]byte
for {
n, err := fd.handle.readToBlocksAt(ctx, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf[:])), ^uint64(0))
if n != 0 {
diff --git a/pkg/sentry/fsimpl/host/host.go b/pkg/sentry/fsimpl/host/host.go
index b9cce4181..3b90375b6 100644
--- a/pkg/sentry/fsimpl/host/host.go
+++ b/pkg/sentry/fsimpl/host/host.go
@@ -26,6 +26,7 @@ import (
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/fdnotifier"
"gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/hostfd"
@@ -431,8 +432,8 @@ func (i *inode) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *auth.Cre
}
oldSize := uint64(hostStat.Size)
if s.Size < oldSize {
- oldpgend, _ := usermem.PageRoundUp(oldSize)
- newpgend, _ := usermem.PageRoundUp(s.Size)
+ oldpgend, _ := hostarch.PageRoundUp(oldSize)
+ newpgend, _ := hostarch.PageRoundUp(s.Size)
if oldpgend != newpgend {
i.CachedMappable.InvalidateRange(memmap.MappableRange{newpgend, oldpgend})
}
diff --git a/pkg/sentry/fsimpl/host/save_restore.go b/pkg/sentry/fsimpl/host/save_restore.go
index 5688bddc8..31301c715 100644
--- a/pkg/sentry/fsimpl/host/save_restore.go
+++ b/pkg/sentry/fsimpl/host/save_restore.go
@@ -21,9 +21,9 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/fdnotifier"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/hostfd"
- "gvisor.dev/gvisor/pkg/usermem"
)
// beforeSave is invoked by stateify.
@@ -38,7 +38,7 @@ func (i *inode) beforeSave() {
// EBADF from the read.
i.bufMu.Lock()
defer i.bufMu.Unlock()
- var buf [usermem.PageSize]byte
+ var buf [hostarch.PageSize]byte
for {
n, err := hostfd.Preadv2(int32(i.hostFD), safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf[:])), -1 /* offset */, 0 /* flags */)
if n != 0 {
diff --git a/pkg/sentry/fsimpl/kernfs/inode_impl_util.go b/pkg/sentry/fsimpl/kernfs/inode_impl_util.go
index 6b890a39c..3d0866ecf 100644
--- a/pkg/sentry/fsimpl/kernfs/inode_impl_util.go
+++ b/pkg/sentry/fsimpl/kernfs/inode_impl_util.go
@@ -20,12 +20,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// InodeNoopRefCount partially implements the Inode interface, specifically the
@@ -206,7 +206,7 @@ func (a *InodeAttrs) Init(ctx context.Context, creds *auth.Credentials, devMajor
atomic.StoreUint32(&a.uid, uint32(creds.EffectiveKUID))
atomic.StoreUint32(&a.gid, uint32(creds.EffectiveKGID))
atomic.StoreUint32(&a.nlink, nlink)
- atomic.StoreUint32(&a.blockSize, usermem.PageSize)
+ atomic.StoreUint32(&a.blockSize, hostarch.PageSize)
now := ktime.NowFromContext(ctx).Nanoseconds()
atomic.StoreInt64(&a.atime, now)
atomic.StoreInt64(&a.mtime, now)
diff --git a/pkg/sentry/fsimpl/kernfs/mmap_util.go b/pkg/sentry/fsimpl/kernfs/mmap_util.go
index bd6a134b4..d1539d904 100644
--- a/pkg/sentry/fsimpl/kernfs/mmap_util.go
+++ b/pkg/sentry/fsimpl/kernfs/mmap_util.go
@@ -16,11 +16,11 @@ package kernfs
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
)
// inodePlatformFile implements memmap.File. It exists solely because inode
@@ -66,7 +66,7 @@ func (i *inodePlatformFile) DecRef(fr memmap.FileRange) {
}
// MapInternal implements memmap.File.MapInternal.
-func (i *inodePlatformFile) MapInternal(fr memmap.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) {
+func (i *inodePlatformFile) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) {
return i.fileMapper.MapInternal(fr, i.hostFD, at.Write)
}
@@ -100,7 +100,7 @@ func (i *CachedMappable) Init(hostFD int) {
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (i *CachedMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {
+func (i *CachedMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
i.mapsMu.Lock()
mapped := i.mappings.AddMapping(ms, ar, offset, writable)
for _, r := range mapped {
@@ -111,7 +111,7 @@ func (i *CachedMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace,
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (i *CachedMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {
+func (i *CachedMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
i.mapsMu.Lock()
unmapped := i.mappings.RemoveMapping(ms, ar, offset, writable)
for _, r := range unmapped {
@@ -121,19 +121,19 @@ func (i *CachedMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpa
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (i *CachedMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {
+func (i *CachedMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
return i.AddMapping(ctx, ms, dstAR, offset, writable)
}
// Translate implements memmap.Mappable.Translate.
-func (i *CachedMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (i *CachedMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
mr := optional
return []memmap.Translation{
{
Source: mr,
File: &i.pf,
Offset: mr.Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
},
}, nil
}
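Translate above returns a translation whose Perms is hostarch.AnyAccess. The sketch below shows how such permission structs compose; the field names follow the hunks above, and treating AnyAccess as granting every permission bit is an assumption of this illustration:

package main

import "fmt"

// accessType mirrors the shape of the AccessType values seen in the hunks
// above: a small struct of permission flags attached to a translation.
type accessType struct {
	Read, Write, Execute bool
}

// supersetOf reports whether a grants every permission that b requests.
func (a accessType) supersetOf(b accessType) bool {
	return (a.Read || !b.Read) && (a.Write || !b.Write) && (a.Execute || !b.Execute)
}

func main() {
	// anyAccess stands in for the AnyAccess value used for Translation.Perms.
	anyAccess := accessType{Read: true, Write: true, Execute: true}
	readExec := accessType{Read: true, Execute: true} // like the gofer Translate perms
	fmt.Println(anyAccess.supersetOf(readExec))               // true
	fmt.Println(readExec.supersetOf(accessType{Write: true})) // false
}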
diff --git a/pkg/sentry/fsimpl/overlay/copy_up.go b/pkg/sentry/fsimpl/overlay/copy_up.go
index 27b00cf6f..45aa5a494 100644
--- a/pkg/sentry/fsimpl/overlay/copy_up.go
+++ b/pkg/sentry/fsimpl/overlay/copy_up.go
@@ -21,11 +21,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
func (d *dentry) isCopiedUp() bool {
@@ -138,8 +138,8 @@ func (d *dentry) copyUpLocked(ctx context.Context) error {
// We may have memory mappings of the file on the lower layer.
// Switch to mapping the file on the upper layer instead.
mmapOpts = &memmap.MMapOpts{
- Perms: usermem.ReadWrite,
- MaxPerms: usermem.ReadWrite,
+ Perms: hostarch.ReadWrite,
+ MaxPerms: hostarch.ReadWrite,
}
if err := newFD.ConfigureMMap(ctx, mmapOpts); err != nil {
cleanupUndoCopyUp()
diff --git a/pkg/sentry/fsimpl/overlay/regular_file.go b/pkg/sentry/fsimpl/overlay/regular_file.go
index d791c06db..43bfd69a3 100644
--- a/pkg/sentry/fsimpl/overlay/regular_file.go
+++ b/pkg/sentry/fsimpl/overlay/regular_file.go
@@ -19,6 +19,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -445,7 +446,7 @@ func (fd *regularFileFD) ensureMappable(ctx context.Context, opts *memmap.MMapOp
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {
+func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
d.mapsMu.Lock()
defer d.mapsMu.Unlock()
if err := d.wrappedMappable.AddMapping(ctx, ms, ar, offset, writable); err != nil {
@@ -458,7 +459,7 @@ func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar user
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {
+func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
d.mapsMu.Lock()
defer d.mapsMu.Unlock()
d.wrappedMappable.RemoveMapping(ctx, ms, ar, offset, writable)
@@ -468,7 +469,7 @@ func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar u
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (d *dentry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {
+func (d *dentry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
d.mapsMu.Lock()
defer d.mapsMu.Unlock()
if err := d.wrappedMappable.CopyMapping(ctx, ms, srcAR, dstAR, offset, writable); err != nil {
@@ -481,7 +482,7 @@ func (d *dentry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR,
}
// Translate implements memmap.Mappable.Translate.
-func (d *dentry) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (d *dentry) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
d.dataMu.RLock()
defer d.dataMu.RUnlock()
return d.wrappedMappable.Translate(ctx, required, optional, at)
diff --git a/pkg/sentry/fsimpl/pipefs/pipefs.go b/pkg/sentry/fsimpl/pipefs/pipefs.go
index 3f05e444e..08aedc2ad 100644
--- a/pkg/sentry/fsimpl/pipefs/pipefs.go
+++ b/pkg/sentry/fsimpl/pipefs/pipefs.go
@@ -22,13 +22,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/kernel/pipe"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// +stateify savable
@@ -131,7 +131,7 @@ func (i *inode) Stat(_ context.Context, vfsfs *vfs.Filesystem, opts vfs.StatOpti
ts := linux.NsecToStatxTimestamp(i.ctime.Nanoseconds())
return linux.Statx{
Mask: linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_NLINK | linux.STATX_UID | linux.STATX_GID | linux.STATX_ATIME | linux.STATX_MTIME | linux.STATX_CTIME | linux.STATX_INO | linux.STATX_SIZE | linux.STATX_BLOCKS,
- Blksize: usermem.PageSize,
+ Blksize: hostarch.PageSize,
Nlink: 1,
UID: uint32(i.uid),
GID: uint32(i.gid),
diff --git a/pkg/sentry/fsimpl/proc/task_files.go b/pkg/sentry/fsimpl/proc/task_files.go
index fdae163d1..85909d551 100644
--- a/pkg/sentry/fsimpl/proc/task_files.go
+++ b/pkg/sentry/fsimpl/proc/task_files.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fsbridge"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
@@ -122,8 +123,8 @@ func (d *auxvData) Generate(ctx context.Context, buf *bytes.Buffer) error {
buf.Grow((len(auxv) + 1) * 16)
for _, e := range auxv {
var tmp [16]byte
- usermem.ByteOrder.PutUint64(tmp[:8], e.Key)
- usermem.ByteOrder.PutUint64(tmp[8:], uint64(e.Value))
+ hostarch.ByteOrder.PutUint64(tmp[:8], e.Key)
+ hostarch.ByteOrder.PutUint64(tmp[8:], uint64(e.Value))
buf.Write(tmp[:])
}
var atNull [16]byte
@@ -168,15 +169,15 @@ func (d *cmdlineData) Generate(ctx context.Context, buf *bytes.Buffer) error {
defer m.DecUsers(ctx)
// Figure out the bounds of the exec arg we are trying to read.
- var ar usermem.AddrRange
+ var ar hostarch.AddrRange
switch d.arg {
case cmdlineDataArg:
- ar = usermem.AddrRange{
+ ar = hostarch.AddrRange{
Start: m.ArgvStart(),
End: m.ArgvEnd(),
}
case environDataArg:
- ar = usermem.AddrRange{
+ ar = hostarch.AddrRange{
Start: m.EnvvStart(),
End: m.EnvvEnd(),
}
@@ -192,7 +193,7 @@ func (d *cmdlineData) Generate(ctx context.Context, buf *bytes.Buffer) error {
// until Linux 4.9 (272ddc8b3735 "proc: don't use FOLL_FORCE for reading
// cmdline and environment").
writer := &bufferWriter{buf: buf}
- if n, err := m.CopyInTo(ctx, usermem.AddrRangeSeqOf(ar), writer, usermem.IOOpts{}); n == 0 || err != nil {
+ if n, err := m.CopyInTo(ctx, hostarch.AddrRangeSeqOf(ar), writer, usermem.IOOpts{}); n == 0 || err != nil {
// Nothing to copy or something went wrong.
return err
}
@@ -209,7 +210,7 @@ func (d *cmdlineData) Generate(ctx context.Context, buf *bytes.Buffer) error {
}
// There is no NULL terminator in the string, return into envp.
- arEnvv := usermem.AddrRange{
+ arEnvv := hostarch.AddrRange{
Start: m.EnvvStart(),
End: m.EnvvEnd(),
}
@@ -218,11 +219,11 @@ func (d *cmdlineData) Generate(ctx context.Context, buf *bytes.Buffer) error {
// https://elixir.bootlin.com/linux/v4.20/source/fs/proc/base.c#L208
// we'll return one page total between argv and envp because of the
// above page restrictions.
- if buf.Len() >= usermem.PageSize {
+ if buf.Len() >= hostarch.PageSize {
// Returned at least one page already, nothing else to add.
return nil
}
- remaining := usermem.PageSize - buf.Len()
+ remaining := hostarch.PageSize - buf.Len()
if int(arEnvv.Length()) > remaining {
end, ok := arEnvv.Start.AddLength(uint64(remaining))
if !ok {
@@ -230,7 +231,7 @@ func (d *cmdlineData) Generate(ctx context.Context, buf *bytes.Buffer) error {
}
arEnvv.End = end
}
- if _, err := m.CopyInTo(ctx, usermem.AddrRangeSeqOf(arEnvv), writer, usermem.IOOpts{}); err != nil {
+ if _, err := m.CopyInTo(ctx, hostarch.AddrRangeSeqOf(arEnvv), writer, usermem.IOOpts{}); err != nil {
return err
}
@@ -323,7 +324,7 @@ func (d *idMapData) Write(ctx context.Context, src usermem.IOSequence, offset in
// the system page size, and the write must be performed at the start of
// the file ..." - user_namespaces(7)
srclen := src.NumBytes()
- if srclen >= usermem.PageSize || offset != 0 {
+ if srclen >= hostarch.PageSize || offset != 0 {
return 0, syserror.EINVAL
}
b := make([]byte, srclen)
@@ -481,7 +482,7 @@ func (fd *memFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64
defer m.DecUsers(ctx)
// Buffer the read data because of MM locks
buf := make([]byte, dst.NumBytes())
- n, readErr := m.CopyIn(ctx, usermem.Addr(offset), buf, usermem.IOOpts{IgnorePermissions: true})
+ n, readErr := m.CopyIn(ctx, hostarch.Addr(offset), buf, usermem.IOOpts{IgnorePermissions: true})
if n > 0 {
if _, err := dst.CopyOut(ctx, buf[:n]); err != nil {
return 0, syserror.EFAULT
@@ -613,7 +614,7 @@ func (s *taskStatData) Generate(ctx context.Context, buf *bytes.Buffer) error {
rss = mm.ResidentSetSize()
}
})
- fmt.Fprintf(buf, "%d %d ", vss, rss/usermem.PageSize)
+ fmt.Fprintf(buf, "%d %d ", vss, rss/hostarch.PageSize)
// rsslim.
fmt.Fprintf(buf, "%d ", s.task.ThreadGroup().Limits().Get(limits.Rss).Cur)
@@ -655,7 +656,7 @@ func (s *statmData) Generate(ctx context.Context, buf *bytes.Buffer) error {
}
})
- fmt.Fprintf(buf, "%d %d 0 0 0 0 0\n", vss/usermem.PageSize, rss/usermem.PageSize)
+ fmt.Fprintf(buf, "%d %d 0 0 0 0 0\n", vss/hostarch.PageSize, rss/hostarch.PageSize)
return nil
}
@@ -774,7 +775,7 @@ func (o *oomScoreAdj) Write(ctx context.Context, src usermem.IOSequence, offset
}
// Limit input size so as not to impact performance if input size is large.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
var v int32
n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
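The auxv generator above emits one 16-byte record per entry plus an all-zero AT_NULL terminator. A stdlib-only sketch of the same layout, with binary.LittleEndian standing in for the native byte order:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// auxEntry mirrors one auxiliary-vector entry: a 64-bit key and value.
type auxEntry struct {
	Key, Value uint64
}

func main() {
	// AT_PAGESZ=6 and AT_ENTRY=9 used as example keys.
	auxv := []auxEntry{{Key: 6, Value: 4096}, {Key: 9, Value: 0x400000}}
	var buf bytes.Buffer
	buf.Grow((len(auxv) + 1) * 16)
	bo := binary.LittleEndian // stand-in for the host's native byte order
	for _, e := range auxv {
		var tmp [16]byte
		bo.PutUint64(tmp[:8], e.Key)
		bo.PutUint64(tmp[8:], e.Value)
		buf.Write(tmp[:])
	}
	var atNull [16]byte // terminating AT_NULL entry is all zeroes
	buf.Write(atNull[:])
	fmt.Println(buf.Len(), "bytes") // (2+1)*16 = 48 bytes
}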
diff --git a/pkg/sentry/fsimpl/proc/task_net.go b/pkg/sentry/fsimpl/proc/task_net.go
index d4f6a5a9b..177cb828f 100644
--- a/pkg/sentry/fsimpl/proc/task_net.go
+++ b/pkg/sentry/fsimpl/proc/task_net.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/inet"
@@ -34,7 +35,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/usermem"
)
func (fs *filesystem) newTaskNetDir(ctx context.Context, task *kernel.Task) kernfs.Inode {
@@ -295,7 +295,7 @@ func networkToHost16(n uint16) uint16 {
// binary.BigEndian.Uint16() require a read of binary.BigEndian and an
// interface method call, defeating inlining.
buf := [2]byte{byte(n >> 8 & 0xff), byte(n & 0xff)}
- return usermem.ByteOrder.Uint16(buf[:])
+ return hostarch.ByteOrder.Uint16(buf[:])
}
func writeInetAddr(w io.Writer, family int, i linux.SockAddr) {
@@ -317,14 +317,14 @@ func writeInetAddr(w io.Writer, family int, i linux.SockAddr) {
// __be32 which is a typedef for an unsigned int, and is printed with
// %X. This means that for a little-endian machine, Linux prints the
// least-significant byte of the address first. To emulate this, we first
- // invert the byte order for the address using usermem.ByteOrder.Uint32,
+ // invert the byte order for the address using hostarch.ByteOrder.Uint32,
// which makes it have the equivalent encoding to a __be32 on a little
// endian machine. Note that this operation is a no-op on a big endian
// machine. Then similar to Linux, we format it with %X, which will print
// the most-significant byte of the __be32 address first, which is now
// actually the least-significant byte of the original address in
// linux.SockAddrInet.Addr on little endian machines, due to the conversion.
- addr := usermem.ByteOrder.Uint32(a.Addr[:])
+ addr := hostarch.ByteOrder.Uint32(a.Addr[:])
fmt.Fprintf(w, "%08X:%04X ", addr, port)
case linux.AF_INET6:
@@ -334,10 +334,10 @@ func writeInetAddr(w io.Writer, family int, i linux.SockAddr) {
}
port := networkToHost16(a.Port)
- addr0 := usermem.ByteOrder.Uint32(a.Addr[0:4])
- addr1 := usermem.ByteOrder.Uint32(a.Addr[4:8])
- addr2 := usermem.ByteOrder.Uint32(a.Addr[8:12])
- addr3 := usermem.ByteOrder.Uint32(a.Addr[12:16])
+ addr0 := hostarch.ByteOrder.Uint32(a.Addr[0:4])
+ addr1 := hostarch.ByteOrder.Uint32(a.Addr[4:8])
+ addr2 := hostarch.ByteOrder.Uint32(a.Addr[8:12])
+ addr3 := hostarch.ByteOrder.Uint32(a.Addr[12:16])
fmt.Fprintf(w, "%08X%08X%08X%08X:%04X ", addr0, addr1, addr2, addr3, port)
}
}
@@ -739,10 +739,10 @@ func (d *netRouteData) Generate(ctx context.Context, buf *bytes.Buffer) error {
)
if len(rt.GatewayAddr) == header.IPv4AddressSize {
flags |= linux.RTF_GATEWAY
- gw = usermem.ByteOrder.Uint32(rt.GatewayAddr)
+ gw = hostarch.ByteOrder.Uint32(rt.GatewayAddr)
}
if len(rt.DstAddr) == header.IPv4AddressSize {
- prefix = usermem.ByteOrder.Uint32(rt.DstAddr)
+ prefix = hostarch.ByteOrder.Uint32(rt.DstAddr)
}
l := fmt.Sprintf(
"%s\t%08X\t%08X\t%04X\t%d\t%d\t%d\t%08X\t%d\t%d\t%d",
diff --git a/pkg/sentry/fsimpl/proc/tasks_files.go b/pkg/sentry/fsimpl/proc/tasks_files.go
index 01b7a6678..f0029cda6 100644
--- a/pkg/sentry/fsimpl/proc/tasks_files.go
+++ b/pkg/sentry/fsimpl/proc/tasks_files.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// +stateify savable
@@ -270,7 +270,7 @@ func (*meminfoData) Generate(ctx context.Context, buf *bytes.Buffer) error {
anon := snapshot.Anonymous + snapshot.Tmpfs
file := snapshot.PageCache + snapshot.Mapped
// We don't actually have active/inactive LRUs, so just make up numbers.
- activeFile := (file / 2) &^ (usermem.PageSize - 1)
+ activeFile := (file / 2) &^ (hostarch.PageSize - 1)
inactiveFile := file - activeFile
fmt.Fprintf(buf, "MemTotal: %8d kB\n", totalSize/1024)
diff --git a/pkg/sentry/fsimpl/proc/tasks_sys.go b/pkg/sentry/fsimpl/proc/tasks_sys.go
index fb274b78e..9b14dd6b9 100644
--- a/pkg/sentry/fsimpl/proc/tasks_sys.go
+++ b/pkg/sentry/fsimpl/proc/tasks_sys.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/sentry/kernel"
@@ -214,7 +215,7 @@ func (d *tcpSackData) Write(ctx context.Context, src usermem.IOSequence, offset
}
// Limit the amount of memory allocated.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
var v int32
n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
@@ -262,7 +263,7 @@ func (d *tcpRecoveryData) Write(ctx context.Context, src usermem.IOSequence, off
}
// Limit the amount of memory allocated.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
var v int32
n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
@@ -318,7 +319,7 @@ func (d *tcpMemData) Write(ctx context.Context, src usermem.IOSequence, offset i
defer d.mu.Unlock()
// Limit the amount of memory allocated.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
size, err := d.readSizeLocked()
if err != nil {
return 0, err
@@ -406,7 +407,7 @@ func (ipf *ipForwarding) Write(ctx context.Context, src usermem.IOSequence, offs
}
// Limit input size so as not to impact performance if input size is large.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
var v int32
n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
@@ -463,7 +464,7 @@ func (pr *portRange) Write(ctx context.Context, src usermem.IOSequence, offset i
// Limit input size so as not to impact performance if input size is
// large.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
ports := make([]int32, 2)
n, err := usermem.CopyInt32StringsInVec(ctx, src.IO, src.Addrs, ports, src.Opts)
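Several of the sysctl-style Write handlers above cap the input at one page minus a byte before parsing an integer, so an oversized write cannot force an oversized allocation. A rough stdlib-only sketch of that pattern (parseProcInt32 and its trimming are illustrative, not the sentry's actual parser):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

const pageSize = 4096 // assume 4 KiB pages

// parseProcInt32 caps the input at one page minus a byte, then parses a
// single int32 from it.
func parseProcInt32(input string) (int32, error) {
	if len(input) > pageSize-1 {
		input = input[:pageSize-1]
	}
	v, err := strconv.ParseInt(strings.TrimSpace(input), 10, 32)
	if err != nil {
		return 0, err
	}
	return int32(v), nil
}

func main() {
	v, err := parseProcInt32("1\n")
	fmt.Println(v, err) // 1 <nil>
}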
diff --git a/pkg/sentry/fsimpl/proc/yama.go b/pkg/sentry/fsimpl/proc/yama.go
index aebfe8944..e039ec45e 100644
--- a/pkg/sentry/fsimpl/proc/yama.go
+++ b/pkg/sentry/fsimpl/proc/yama.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -62,7 +63,7 @@ func (s *yamaPtraceScope) Write(ctx context.Context, src usermem.IOSequence, off
}
// Limit the amount of memory allocated.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
var v int32
n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
diff --git a/pkg/sentry/fsimpl/timerfd/timerfd.go b/pkg/sentry/fsimpl/timerfd/timerfd.go
index 64d33c3a8..cbb8b67c5 100644
--- a/pkg/sentry/fsimpl/timerfd/timerfd.go
+++ b/pkg/sentry/fsimpl/timerfd/timerfd.go
@@ -19,6 +19,7 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
@@ -72,7 +73,7 @@ func (tfd *TimerFileDescription) Read(ctx context.Context, dst usermem.IOSequenc
}
if val := atomic.SwapUint64(&tfd.val, 0); val != 0 {
var buf [sizeofUint64]byte
- usermem.ByteOrder.PutUint64(buf[:], val)
+ hostarch.ByteOrder.PutUint64(buf[:], val)
if _, err := dst.CopyOut(ctx, buf[:]); err != nil {
// Linux does not undo consuming the number of
// expirations even if writing to userspace fails.
diff --git a/pkg/sentry/fsimpl/tmpfs/regular_file.go b/pkg/sentry/fsimpl/tmpfs/regular_file.go
index a6d161882..cd849e87e 100644
--- a/pkg/sentry/fsimpl/tmpfs/regular_file.go
+++ b/pkg/sentry/fsimpl/tmpfs/regular_file.go
@@ -22,6 +22,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
@@ -224,7 +225,7 @@ func (rf *regularFile) truncateLocked(newSize uint64) (bool, error) {
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (rf *regularFile) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {
+func (rf *regularFile) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
rf.mapsMu.Lock()
defer rf.mapsMu.Unlock()
rf.dataMu.RLock()
@@ -240,7 +241,7 @@ func (rf *regularFile) AddMapping(ctx context.Context, ms memmap.MappingSpace, a
pagesBefore := rf.writableMappingPages
// ar is guaranteed to be page aligned per memmap.Mappable.
- rf.writableMappingPages += uint64(ar.Length() / usermem.PageSize)
+ rf.writableMappingPages += uint64(ar.Length() / hostarch.PageSize)
if rf.writableMappingPages < pagesBefore {
panic(fmt.Sprintf("Overflow while mapping potentially writable pages pointing to a tmpfs file. Before %v, after %v", pagesBefore, rf.writableMappingPages))
@@ -251,7 +252,7 @@ func (rf *regularFile) AddMapping(ctx context.Context, ms memmap.MappingSpace, a
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (rf *regularFile) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {
+func (rf *regularFile) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
rf.mapsMu.Lock()
defer rf.mapsMu.Unlock()
@@ -261,7 +262,7 @@ func (rf *regularFile) RemoveMapping(ctx context.Context, ms memmap.MappingSpace
pagesBefore := rf.writableMappingPages
// ar is guaranteed to be page aligned per memmap.Mappable.
- rf.writableMappingPages -= uint64(ar.Length() / usermem.PageSize)
+ rf.writableMappingPages -= uint64(ar.Length() / hostarch.PageSize)
if rf.writableMappingPages > pagesBefore {
panic(fmt.Sprintf("Underflow while unmapping potentially writable pages pointing to a tmpfs file. Before %v, after %v", pagesBefore, rf.writableMappingPages))
@@ -270,12 +271,12 @@ func (rf *regularFile) RemoveMapping(ctx context.Context, ms memmap.MappingSpace
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (rf *regularFile) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {
+func (rf *regularFile) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
return rf.AddMapping(ctx, ms, dstAR, offset, writable)
}
// Translate implements memmap.Mappable.Translate.
-func (rf *regularFile) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (rf *regularFile) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
rf.dataMu.Lock()
defer rf.dataMu.Unlock()
@@ -307,7 +308,7 @@ func (rf *regularFile) Translate(ctx context.Context, required, optional memmap.
Source: segMR,
File: rf.memFile,
Offset: seg.FileRangeOf(segMR).Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
})
translatedEnd = segMR.End
}
@@ -539,7 +540,7 @@ func (rw *regularFileReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, er
switch {
case seg.Ok():
// Get internal mappings.
- ims, err := rw.file.memFile.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read)
+ ims, err := rw.file.memFile.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Read)
if err != nil {
return done, err
}
@@ -608,7 +609,7 @@ func (rw *regularFileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64,
//
// See Linux, mm/filemap.c:generic_perform_write() and
// mm/shmem.c:shmem_write_begin().
- if pgstart := uint64(usermem.Addr(rw.file.size).RoundDown()); end > pgstart {
+ if pgstart := uint64(hostarch.Addr(rw.file.size).RoundDown()); end > pgstart {
end = pgstart
}
if end <= rw.off {
@@ -619,8 +620,8 @@ func (rw *regularFileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64,
// Page-aligned mr for when we need to allocate memory. RoundUp can't
// overflow since end is an int64.
- pgstartaddr := usermem.Addr(rw.off).RoundDown()
- pgendaddr, _ := usermem.Addr(end).RoundUp()
+ pgstartaddr := hostarch.Addr(rw.off).RoundDown()
+ pgendaddr, _ := hostarch.Addr(end).RoundUp()
pgMR := memmap.MappableRange{uint64(pgstartaddr), uint64(pgendaddr)}
var (
@@ -633,7 +634,7 @@ func (rw *regularFileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64,
switch {
case seg.Ok():
// Get internal mappings.
- ims, err := rw.file.memFile.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Write)
+ ims, err := rw.file.memFile.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Write)
if err != nil {
retErr = err
goto exitLoop
diff --git a/pkg/sentry/fsimpl/tmpfs/tmpfs.go b/pkg/sentry/fsimpl/tmpfs/tmpfs.go
index 8df81f589..9ae25ce9e 100644
--- a/pkg/sentry/fsimpl/tmpfs/tmpfs.go
+++ b/pkg/sentry/fsimpl/tmpfs/tmpfs.go
@@ -36,6 +36,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
@@ -43,7 +44,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/vfs/memxattr"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Name is the default filesystem name.
@@ -252,8 +252,8 @@ func (d *dentry) releaseChildrenLocked(ctx context.Context) {
// immutable
var globalStatfs = linux.Statfs{
Type: linux.TMPFS_MAGIC,
- BlockSize: usermem.PageSize,
- FragmentSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
+ FragmentSize: hostarch.PageSize,
NameLength: linux.NAME_MAX,
// tmpfs currently does not support configurable size limits. In Linux,
@@ -263,9 +263,9 @@ var globalStatfs = linux.Statfs{
// chosen to ensure that BlockSize * Blocks does not overflow int64 (which
// applications may also handle incorrectly).
// TODO(b/29637826): allow configuring a tmpfs size and enforce it.
- Blocks: math.MaxInt64 / usermem.PageSize,
- BlocksFree: math.MaxInt64 / usermem.PageSize,
- BlocksAvailable: math.MaxInt64 / usermem.PageSize,
+ Blocks: math.MaxInt64 / hostarch.PageSize,
+ BlocksFree: math.MaxInt64 / hostarch.PageSize,
+ BlocksAvailable: math.MaxInt64 / hostarch.PageSize,
}
// dentry implements vfs.DentryImpl.
@@ -485,7 +485,7 @@ func (i *inode) statTo(stat *linux.Statx) {
linux.STATX_UID | linux.STATX_GID | linux.STATX_INO | linux.STATX_SIZE |
linux.STATX_BLOCKS | linux.STATX_ATIME | linux.STATX_CTIME |
linux.STATX_MTIME
- stat.Blksize = usermem.PageSize
+ stat.Blksize = hostarch.PageSize
stat.Nlink = atomic.LoadUint32(&i.nlink)
stat.UID = atomic.LoadUint32(&i.uid)
stat.GID = atomic.LoadUint32(&i.gid)
diff --git a/pkg/sentry/hostmm/hostmm.go b/pkg/sentry/hostmm/hostmm.go
index c47b96b54..285ea9050 100644
--- a/pkg/sentry/hostmm/hostmm.go
+++ b/pkg/sentry/hostmm/hostmm.go
@@ -23,8 +23,8 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/fd"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/usermem"
)
// NotifyCurrentMemcgPressureCallback requests that f is called whenever the
@@ -88,7 +88,7 @@ func NotifyCurrentMemcgPressureCallback(f func(), level string) (func(), error)
if n != sizeofUint64 {
panic(fmt.Sprintf("short read from memory pressure level eventfd: got %d bytes, wanted %d", n, sizeofUint64))
}
- val := usermem.ByteOrder.Uint64(buf[:])
+ val := hostarch.ByteOrder.Uint64(buf[:])
if val >= stopVal {
// Assume this was due to the notifier's "destructor" (the
// function returned by NotifyCurrentMemcgPressureCallback
@@ -103,7 +103,7 @@ func NotifyCurrentMemcgPressureCallback(f func(), level string) (func(), error)
return func() {
rw := fd.NewReadWriter(eventFD.FD())
var buf [sizeofUint64]byte
- usermem.ByteOrder.PutUint64(buf[:], stopVal)
+ hostarch.ByteOrder.PutUint64(buf[:], stopVal)
for {
n, err := rw.Write(buf[:])
if err != nil {
diff --git a/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go b/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go
index 71072539d..89239952c 100644
--- a/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go
+++ b/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go
@@ -10,9 +10,9 @@ package auth
import (
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -31,12 +31,12 @@ func (gid *GID) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (gid *GID) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*gid))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*gid))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (gid *GID) UnmarshalBytes(src []byte) {
- *gid = GID(uint32(usermem.ByteOrder.Uint32(src[:4])))
+ *gid = GID(uint32(hostarch.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -58,7 +58,7 @@ func (gid *GID) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (gid *GID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (gid *GID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -75,13 +75,13 @@ func (gid *GID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (gid *GID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (gid *GID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return gid.CopyOutN(cc, addr, gid.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (gid *GID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (gid *GID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -114,7 +114,7 @@ func (gid *GID) WriteTo(w io.Writer) (int64, error) {
// CopyGIDSliceIn copies in a slice of GID objects from the task's memory.
//go:nosplit
-func CopyGIDSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []GID) (int, error) {
+func CopyGIDSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []GID) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
@@ -140,7 +140,7 @@ func CopyGIDSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []GID) (int,
// CopyGIDSliceOut copies a slice of GID objects to the task's memory.
//go:nosplit
-func CopyGIDSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []GID) (int, error) {
+func CopyGIDSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []GID) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
@@ -208,12 +208,12 @@ func (uid *UID) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (uid *UID) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*uid))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*uid))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (uid *UID) UnmarshalBytes(src []byte) {
- *uid = UID(uint32(usermem.ByteOrder.Uint32(src[:4])))
+ *uid = UID(uint32(hostarch.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -235,7 +235,7 @@ func (uid *UID) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (uid *UID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (uid *UID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -252,13 +252,13 @@ func (uid *UID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (uid *UID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (uid *UID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return uid.CopyOutN(cc, addr, uid.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (uid *UID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (uid *UID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
diff --git a/pkg/sentry/kernel/eventfd/eventfd.go b/pkg/sentry/kernel/eventfd/eventfd.go
index 2aca02fd5..4466fbc9d 100644
--- a/pkg/sentry/kernel/eventfd/eventfd.go
+++ b/pkg/sentry/kernel/eventfd/eventfd.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/fdnotifier"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/anon"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
@@ -186,7 +187,7 @@ func (e *EventOperations) read(ctx context.Context, dst usermem.IOSequence) erro
e.wq.Notify(waiter.WritableEvents)
var buf [8]byte
- usermem.ByteOrder.PutUint64(buf[:], val)
+ hostarch.ByteOrder.PutUint64(buf[:], val)
_, err := dst.CopyOut(ctx, buf[:])
return err
}
@@ -194,7 +195,7 @@ func (e *EventOperations) read(ctx context.Context, dst usermem.IOSequence) erro
// Must be called with e.mu locked.
func (e *EventOperations) hostWrite(val uint64) error {
var buf [8]byte
- usermem.ByteOrder.PutUint64(buf[:], val)
+ hostarch.ByteOrder.PutUint64(buf[:], val)
_, err := unix.Write(e.hostfd, buf[:])
if err == unix.EWOULDBLOCK {
return syserror.ErrWouldBlock
@@ -207,7 +208,7 @@ func (e *EventOperations) write(ctx context.Context, src usermem.IOSequence) err
if _, err := src.CopyIn(ctx, buf[:]); err != nil {
return err
}
- val := usermem.ByteOrder.Uint64(buf[:])
+ val := hostarch.ByteOrder.Uint64(buf[:])
return e.Signal(val)
}
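eventfd reads and writes move exactly one 64-bit counter value in native byte order, which is why the hunks above only swap the ByteOrder package. A tiny sketch of the encode/decode step, with little-endian standing in for the native order:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	val := uint64(3)
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], val) // what a write sends to the host fd
	got := binary.LittleEndian.Uint64(buf[:])  // what a read recovers on the way back
	fmt.Println(got) // 3
}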
diff --git a/pkg/sentry/kernel/futex/futex.go b/pkg/sentry/kernel/futex/futex.go
index e4dcc4d40..0427cf3f4 100644
--- a/pkg/sentry/kernel/futex/futex.go
+++ b/pkg/sentry/kernel/futex/futex.go
@@ -20,10 +20,10 @@ package futex
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// KeyKind indicates the type of a Key.
@@ -83,8 +83,8 @@ func (k *Key) clone() Key {
}
// Preconditions: k.Kind == KindPrivate or KindSharedPrivate.
-func (k *Key) addr() usermem.Addr {
- return usermem.Addr(k.Offset)
+func (k *Key) addr() hostarch.Addr {
+ return hostarch.Addr(k.Offset)
}
// matches returns true if a wakeup on k2 should wake a waiter waiting on k.
@@ -97,14 +97,14 @@ func (k *Key) matches(k2 *Key) bool {
type Target interface {
context.Context
- // SwapUint32 gives access to usermem.IO.SwapUint32.
- SwapUint32(addr usermem.Addr, new uint32) (uint32, error)
+ // SwapUint32 gives access to hostarch.IO.SwapUint32.
+ SwapUint32(addr hostarch.Addr, new uint32) (uint32, error)
- // CompareAndSwap gives access to usermem.IO.CompareAndSwapUint32.
- CompareAndSwapUint32(addr usermem.Addr, old, new uint32) (uint32, error)
+ // CompareAndSwap gives access to hostarch.IO.CompareAndSwapUint32.
+ CompareAndSwapUint32(addr hostarch.Addr, old, new uint32) (uint32, error)
- // LoadUint32 gives access to usermem.IO.LoadUint32.
- LoadUint32(addr usermem.Addr) (uint32, error)
+ // LoadUint32 gives access to hostarch.IO.LoadUint32.
+ LoadUint32(addr hostarch.Addr) (uint32, error)
// GetSharedKey returns a Key with kind KindSharedPrivate or
// KindSharedMappable corresponding to the memory mapped at address addr.
@@ -112,11 +112,11 @@ type Target interface {
// If GetSharedKey returns a Key with a non-nil MappingIdentity, a
// reference is held on the MappingIdentity, which must be dropped by the
// caller when the Key is no longer in use.
- GetSharedKey(addr usermem.Addr) (Key, error)
+ GetSharedKey(addr hostarch.Addr) (Key, error)
}
// check performs a basic equality check on the given address.
-func check(t Target, addr usermem.Addr, val uint32) error {
+func check(t Target, addr hostarch.Addr, val uint32) error {
cur, err := t.LoadUint32(addr)
if err != nil {
return err
@@ -128,7 +128,7 @@ func check(t Target, addr usermem.Addr, val uint32) error {
}
// atomicOp performs a complex operation on the given address.
-func atomicOp(t Target, addr usermem.Addr, opIn uint32) (bool, error) {
+func atomicOp(t Target, addr hostarch.Addr, opIn uint32) (bool, error) {
opType := (opIn >> 28) & 0xf
cmp := (opIn >> 24) & 0xf
opArg := (opIn >> 12) & 0xfff
@@ -328,7 +328,7 @@ const (
)
// getKey returns a Key representing address addr in c.
-func getKey(t Target, addr usermem.Addr, private bool) (Key, error) {
+func getKey(t Target, addr hostarch.Addr, private bool) (Key, error) {
// Ensure the address is aligned.
// It must be a DWORD boundary.
if addr&0x3 != 0 {
@@ -341,7 +341,7 @@ func getKey(t Target, addr usermem.Addr, private bool) (Key, error) {
}
// bucketIndexForAddr returns the index into Manager.buckets for addr.
-func bucketIndexForAddr(addr usermem.Addr) uintptr {
+func bucketIndexForAddr(addr hostarch.Addr) uintptr {
// - The bottom 2 bits of addr must be 0, per getKey.
//
// - On amd64, the top 16 bits of addr (bits 48-63) must be equal to bit 47
@@ -448,7 +448,7 @@ func (m *Manager) lockBuckets(k1, k2 *Key) (*bucket, *bucket) {
// Wake wakes up to n waiters matching the bitmask on the given addr.
// The number of waiters woken is returned.
-func (m *Manager) Wake(t Target, addr usermem.Addr, private bool, bitmask uint32, n int) (int, error) {
+func (m *Manager) Wake(t Target, addr hostarch.Addr, private bool, bitmask uint32, n int) (int, error) {
// This function is very hot; avoid defer.
k, err := getKey(t, addr, private)
if err != nil {
@@ -463,7 +463,7 @@ func (m *Manager) Wake(t Target, addr usermem.Addr, private bool, bitmask uint32
return r, nil
}
-func (m *Manager) doRequeue(t Target, addr, naddr usermem.Addr, private bool, checkval bool, val uint32, nwake int, nreq int) (int, error) {
+func (m *Manager) doRequeue(t Target, addr, naddr hostarch.Addr, private bool, checkval bool, val uint32, nwake int, nreq int) (int, error) {
k1, err := getKey(t, addr, private)
if err != nil {
return 0, err
@@ -498,14 +498,14 @@ func (m *Manager) doRequeue(t Target, addr, naddr usermem.Addr, private bool, ch
// Requeue wakes up to nwake waiters on the given addr, and unconditionally
// requeues up to nreq waiters on naddr.
-func (m *Manager) Requeue(t Target, addr, naddr usermem.Addr, private bool, nwake int, nreq int) (int, error) {
+func (m *Manager) Requeue(t Target, addr, naddr hostarch.Addr, private bool, nwake int, nreq int) (int, error) {
return m.doRequeue(t, addr, naddr, private, false, 0, nwake, nreq)
}
// RequeueCmp atomically checks that the addr contains val (via the Target),
// wakes up to nwake waiters on addr and then unconditionally requeues nreq
// waiters on naddr.
-func (m *Manager) RequeueCmp(t Target, addr, naddr usermem.Addr, private bool, val uint32, nwake int, nreq int) (int, error) {
+func (m *Manager) RequeueCmp(t Target, addr, naddr hostarch.Addr, private bool, val uint32, nwake int, nreq int) (int, error) {
return m.doRequeue(t, addr, naddr, private, true, val, nwake, nreq)
}
@@ -513,7 +513,7 @@ func (m *Manager) RequeueCmp(t Target, addr, naddr usermem.Addr, private bool, v
// waiters unconditionally from addr1, and, based on the original value at addr2
// and a comparison encoded in op, wakes up to nwake2 waiters from addr2.
// It returns the total number of waiters woken.
-func (m *Manager) WakeOp(t Target, addr1, addr2 usermem.Addr, private bool, nwake1 int, nwake2 int, op uint32) (int, error) {
+func (m *Manager) WakeOp(t Target, addr1, addr2 hostarch.Addr, private bool, nwake1 int, nwake2 int, op uint32) (int, error) {
k1, err := getKey(t, addr1, private)
if err != nil {
return 0, err
@@ -553,7 +553,7 @@ func (m *Manager) WakeOp(t Target, addr1, addr2 usermem.Addr, private bool, nwak
// enqueues w to be woken by a send to w.C. If WaitPrepare returns nil, the
// Waiter must be subsequently removed by calling WaitComplete, whether or not
// a wakeup is received on w.C.
-func (m *Manager) WaitPrepare(w *Waiter, t Target, addr usermem.Addr, private bool, val uint32, bitmask uint32) error {
+func (m *Manager) WaitPrepare(w *Waiter, t Target, addr hostarch.Addr, private bool, val uint32, bitmask uint32) error {
k, err := getKey(t, addr, private)
if err != nil {
return err
@@ -631,7 +631,7 @@ func (m *Manager) WaitComplete(w *Waiter, t Target) {
// FUTEX_OWNER_DIED is only set by the Linux when robust lists are in use (see
// exit_robust_list()). Given we don't support robust lists, although handled
// below, it's never set.
-func (m *Manager) LockPI(w *Waiter, t Target, addr usermem.Addr, tid uint32, private, try bool) (bool, error) {
+func (m *Manager) LockPI(w *Waiter, t Target, addr hostarch.Addr, tid uint32, private, try bool) (bool, error) {
k, err := getKey(t, addr, private)
if err != nil {
return false, err
@@ -663,7 +663,7 @@ func (m *Manager) LockPI(w *Waiter, t Target, addr usermem.Addr, tid uint32, pri
return success, nil
}
-func (m *Manager) lockPILocked(w *Waiter, t Target, addr usermem.Addr, tid uint32, b *bucket, try bool) (bool, error) {
+func (m *Manager) lockPILocked(w *Waiter, t Target, addr hostarch.Addr, tid uint32, b *bucket, try bool) (bool, error) {
for {
cur, err := t.LoadUint32(addr)
if err != nil {
@@ -724,7 +724,7 @@ func (m *Manager) lockPILocked(w *Waiter, t Target, addr usermem.Addr, tid uint3
// The address provided must contain the caller's TID. If there are waiters,
// TID of the next waiter (FIFO) is set to the given address, and the waiter
// woken up. If there are no waiters, 0 is set to the address.
-func (m *Manager) UnlockPI(t Target, addr usermem.Addr, tid uint32, private bool) error {
+func (m *Manager) UnlockPI(t Target, addr hostarch.Addr, tid uint32, private bool) error {
k, err := getKey(t, addr, private)
if err != nil {
return err
@@ -738,7 +738,7 @@ func (m *Manager) UnlockPI(t Target, addr usermem.Addr, tid uint32, private bool
return err
}
-func (m *Manager) unlockPILocked(t Target, addr usermem.Addr, tid uint32, b *bucket, key *Key) error {
+func (m *Manager) unlockPILocked(t Target, addr hostarch.Addr, tid uint32, b *bucket, key *Key) error {
cur, err := t.LoadUint32(addr)
if err != nil {
return err
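getKey above rejects futex addresses that are not aligned to a 32-bit boundary. A minimal sketch of that check:

package main

import (
	"errors"
	"fmt"
)

var errInvalid = errors.New("EINVAL")

// checkFutexAddr applies the alignment rule from getKey: a futex word is a
// 32-bit value, so its address must sit on a 4-byte boundary.
func checkFutexAddr(addr uintptr) error {
	if addr&0x3 != 0 {
		return errInvalid
	}
	return nil
}

func main() {
	fmt.Println(checkFutexAddr(0x1000)) // <nil>
	fmt.Println(checkFutexAddr(0x1002)) // EINVAL
}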
diff --git a/pkg/sentry/kernel/kcov.go b/pkg/sentry/kernel/kcov.go
index 4fcdfc541..4b943106b 100644
--- a/pkg/sentry/kernel/kcov.go
+++ b/pkg/sentry/kernel/kcov.go
@@ -22,13 +22,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/coverage"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/mm"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// kcovAreaSizeMax is the maximum number of uint64 entries allowed in the kcov
@@ -130,7 +130,7 @@ func (kcov *Kcov) InitTrace(size uint64) error {
// To simplify all the logic around mapping, we require that the length of the
// shared region is a multiple of the system page size.
- if (8*size)&(usermem.PageSize-1) != 0 {
+ if (8*size)&(hostarch.PageSize-1) != 0 {
return syserror.EINVAL
}
@@ -286,7 +286,7 @@ func (rw *kcovReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
}
// Get internal mappings.
- bs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, usermem.Read)
+ bs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, hostarch.Read)
if err != nil {
return 0, err
}
@@ -314,7 +314,7 @@ func (rw *kcovReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
}
// Get internal mapping.
- bs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, usermem.Write)
+ bs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, hostarch.Write)
if err != nil {
return 0, err
}
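
InitTrace above rejects trace buffers whose byte size is not a whole number of pages, using the usual power-of-two mask trick. A self-contained sketch of the same check, assuming a 4 KiB page size in place of hostarch.PageSize:

package main

import "fmt"

// pageSize stands in for hostarch.PageSize; 4 KiB is assumed here, the
// real value comes from the host platform.
const pageSize = 4096

// traceSizeOK reports whether size uint64 entries (8 bytes each) cover a
// whole number of pages, mirroring the kcov InitTrace check.
func traceSizeOK(size uint64) bool {
	return (8*size)&(pageSize-1) == 0
}

func main() {
	fmt.Println(traceSizeOK(512))  // true: 4096 bytes, exactly one page
	fmt.Println(traceSizeOK(1000)) // false: 8000 bytes, not page-aligned
}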
diff --git a/pkg/sentry/kernel/kernel_abi_autogen_unsafe.go b/pkg/sentry/kernel/kernel_abi_autogen_unsafe.go
index 1d9e9413d..0494f7c31 100644
--- a/pkg/sentry/kernel/kernel_abi_autogen_unsafe.go
+++ b/pkg/sentry/kernel/kernel_abi_autogen_unsafe.go
@@ -10,9 +10,9 @@ package kernel
import (
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -31,12 +31,12 @@ func (tid *ThreadID) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (tid *ThreadID) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*tid))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*tid))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (tid *ThreadID) UnmarshalBytes(src []byte) {
- *tid = ThreadID(int32(usermem.ByteOrder.Uint32(src[:4])))
+ *tid = ThreadID(int32(hostarch.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
@@ -58,7 +58,7 @@ func (tid *ThreadID) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (tid *ThreadID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (tid *ThreadID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -75,13 +75,13 @@ func (tid *ThreadID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (tid *ThreadID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (tid *ThreadID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return tid.CopyOutN(cc, addr, tid.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (tid *ThreadID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (tid *ThreadID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -119,41 +119,41 @@ func (v *vdsoParams) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (v *vdsoParams) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(v.monotonicReady))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.monotonicReady))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(v.monotonicBaseCycles))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.monotonicBaseCycles))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(v.monotonicBaseRef))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.monotonicBaseRef))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(v.monotonicFrequency))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.monotonicFrequency))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(v.realtimeReady))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.realtimeReady))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(v.realtimeBaseCycles))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.realtimeBaseCycles))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(v.realtimeBaseRef))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.realtimeBaseRef))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(v.realtimeFrequency))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(v.realtimeFrequency))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (v *vdsoParams) UnmarshalBytes(src []byte) {
- v.monotonicReady = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ v.monotonicReady = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- v.monotonicBaseCycles = int64(usermem.ByteOrder.Uint64(src[:8]))
+ v.monotonicBaseCycles = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- v.monotonicBaseRef = int64(usermem.ByteOrder.Uint64(src[:8]))
+ v.monotonicBaseRef = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- v.monotonicFrequency = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ v.monotonicFrequency = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- v.realtimeReady = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ v.realtimeReady = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- v.realtimeBaseCycles = int64(usermem.ByteOrder.Uint64(src[:8]))
+ v.realtimeBaseCycles = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- v.realtimeBaseRef = int64(usermem.ByteOrder.Uint64(src[:8]))
+ v.realtimeBaseRef = int64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- v.realtimeFrequency = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ v.realtimeFrequency = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -175,7 +175,7 @@ func (v *vdsoParams) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (v *vdsoParams) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (v *vdsoParams) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -192,13 +192,13 @@ func (v *vdsoParams) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (v *vdsoParams) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (v *vdsoParams) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return v.CopyOutN(cc, addr, v.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (v *vdsoParams) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (v *vdsoParams) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
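
The generated MarshalBytes/UnmarshalBytes methods above simply walk the buffer eight bytes at a time in the native byte order. A minimal sketch of that pattern with the standard library, assuming little-endian as a stand-in for hostarch.ByteOrder and a two-field struct in place of vdsoParams:

package main

import (
	"encoding/binary"
	"fmt"
)

// params is a stand-in for vdsoParams: two fixed-width fields
// serialized back-to-back, as the generated marshalling code does.
type params struct {
	ready uint64
	base  int64
}

// order stands in for hostarch.ByteOrder; little-endian is assumed.
var order = binary.LittleEndian

func (p *params) marshalBytes(dst []byte) {
	order.PutUint64(dst[:8], p.ready)
	dst = dst[8:]
	order.PutUint64(dst[:8], uint64(p.base))
}

func (p *params) unmarshalBytes(src []byte) {
	p.ready = order.Uint64(src[:8])
	src = src[8:]
	p.base = int64(order.Uint64(src[:8]))
}

func main() {
	in := params{ready: 1, base: -42}
	buf := make([]byte, 16)
	in.marshalBytes(buf)
	var out params
	out.unmarshalBytes(buf)
	fmt.Println(out.ready, out.base) // 1 -42
}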
diff --git a/pkg/sentry/kernel/pipe/pipe.go b/pkg/sentry/kernel/pipe/pipe.go
index d004f2357..06769931a 100644
--- a/pkg/sentry/kernel/pipe/pipe.go
+++ b/pkg/sentry/kernel/pipe/pipe.go
@@ -22,18 +22,18 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
const (
// MinimumPipeSize is a hard limit of the minimum size of a pipe.
// It corresponds to fs/pipe.c:pipe_min_size.
- MinimumPipeSize = usermem.PageSize
+ MinimumPipeSize = hostarch.PageSize
// MaximumPipeSize is a hard limit on the maximum size of a pipe.
// It corresponds to fs/pipe.c:pipe_max_size.
@@ -41,7 +41,7 @@ const (
// DefaultPipeSize is the system-wide default size of a pipe in bytes.
// It corresponds to pipe_fs_i.h:PIPE_DEF_BUFFERS.
- DefaultPipeSize = 16 * usermem.PageSize
+ DefaultPipeSize = 16 * hostarch.PageSize
// atomicIOBytes is the maximum number of bytes that the pipe will
// guarantee to read or write atomically.
diff --git a/pkg/sentry/kernel/pipe/vfs.go b/pkg/sentry/kernel/pipe/vfs.go
index e524afad5..95b948edb 100644
--- a/pkg/sentry/kernel/pipe/vfs.go
+++ b/pkg/sentry/kernel/pipe/vfs.go
@@ -17,6 +17,7 @@ package pipe
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/vfs"
@@ -274,7 +275,7 @@ func (fd *VFSPipeFD) SpliceToNonPipe(ctx context.Context, out *vfs.FileDescripti
}
src := usermem.IOSequence{
IO: fd,
- Addrs: usermem.AddrRangeSeqOf(usermem.AddrRange{0, usermem.Addr(count)}),
+ Addrs: hostarch.AddrRangeSeqOf(hostarch.AddrRange{0, hostarch.Addr(count)}),
}
var (
@@ -302,7 +303,7 @@ func (fd *VFSPipeFD) SpliceToNonPipe(ctx context.Context, out *vfs.FileDescripti
func (fd *VFSPipeFD) SpliceFromNonPipe(ctx context.Context, in *vfs.FileDescription, off, count int64) (int64, error) {
dst := usermem.IOSequence{
IO: fd,
- Addrs: usermem.AddrRangeSeqOf(usermem.AddrRange{0, usermem.Addr(count)}),
+ Addrs: hostarch.AddrRangeSeqOf(hostarch.AddrRange{0, hostarch.Addr(count)}),
}
var (
@@ -328,7 +329,7 @@ func (fd *VFSPipeFD) SpliceFromNonPipe(ctx context.Context, in *vfs.FileDescript
// fd.pipe.Notify(waiter.WritableEvents) after the read is completed.
//
// Preconditions: fd.pipe.mu must be locked.
-func (fd *VFSPipeFD) CopyIn(ctx context.Context, addr usermem.Addr, dst []byte, opts usermem.IOOpts) (int, error) {
+func (fd *VFSPipeFD) CopyIn(ctx context.Context, addr hostarch.Addr, dst []byte, opts usermem.IOOpts) (int, error) {
n, err := fd.pipe.peekLocked(int64(len(dst)), func(srcs safemem.BlockSeq) (uint64, error) {
return safemem.CopySeq(safemem.BlockSeqOf(safemem.BlockFromSafeSlice(dst)), srcs)
})
@@ -340,7 +341,7 @@ func (fd *VFSPipeFD) CopyIn(ctx context.Context, addr usermem.Addr, dst []byte,
// is completed.
//
// Preconditions: fd.pipe.mu must be locked.
-func (fd *VFSPipeFD) CopyOut(ctx context.Context, addr usermem.Addr, src []byte, opts usermem.IOOpts) (int, error) {
+func (fd *VFSPipeFD) CopyOut(ctx context.Context, addr hostarch.Addr, src []byte, opts usermem.IOOpts) (int, error) {
n, err := fd.pipe.writeLocked(int64(len(src)), func(dsts safemem.BlockSeq) (uint64, error) {
return safemem.CopySeq(dsts, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(src)))
})
@@ -350,7 +351,7 @@ func (fd *VFSPipeFD) CopyOut(ctx context.Context, addr usermem.Addr, src []byte,
// ZeroOut implements usermem.IO.ZeroOut.
//
// Preconditions: fd.pipe.mu must be locked.
-func (fd *VFSPipeFD) ZeroOut(ctx context.Context, addr usermem.Addr, toZero int64, opts usermem.IOOpts) (int64, error) {
+func (fd *VFSPipeFD) ZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64, opts usermem.IOOpts) (int64, error) {
n, err := fd.pipe.writeLocked(toZero, func(dsts safemem.BlockSeq) (uint64, error) {
return safemem.ZeroSeq(dsts)
})
@@ -362,7 +363,7 @@ func (fd *VFSPipeFD) ZeroOut(ctx context.Context, addr usermem.Addr, toZero int6
// fd.pipe.Notify(waiter.WritableEvents) after the read is completed.
//
// Preconditions: fd.pipe.mu must be locked.
-func (fd *VFSPipeFD) CopyInTo(ctx context.Context, ars usermem.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) {
+func (fd *VFSPipeFD) CopyInTo(ctx context.Context, ars hostarch.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) {
return fd.pipe.peekLocked(ars.NumBytes(), func(srcs safemem.BlockSeq) (uint64, error) {
return dst.WriteFromBlocks(srcs)
})
@@ -373,25 +374,25 @@ func (fd *VFSPipeFD) CopyInTo(ctx context.Context, ars usermem.AddrRangeSeq, dst
// is completed.
//
// Preconditions: fd.pipe.mu must be locked.
-func (fd *VFSPipeFD) CopyOutFrom(ctx context.Context, ars usermem.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) {
+func (fd *VFSPipeFD) CopyOutFrom(ctx context.Context, ars hostarch.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) {
return fd.pipe.writeLocked(ars.NumBytes(), func(dsts safemem.BlockSeq) (uint64, error) {
return src.ReadToBlocks(dsts)
})
}
// SwapUint32 implements usermem.IO.SwapUint32.
-func (fd *VFSPipeFD) SwapUint32(ctx context.Context, addr usermem.Addr, new uint32, opts usermem.IOOpts) (uint32, error) {
+func (fd *VFSPipeFD) SwapUint32(ctx context.Context, addr hostarch.Addr, new uint32, opts usermem.IOOpts) (uint32, error) {
// How did a pipe get passed as the virtual address space to futex(2)?
panic("VFSPipeFD.SwapUint32 called unexpectedly")
}
// CompareAndSwapUint32 implements usermem.IO.CompareAndSwapUint32.
-func (fd *VFSPipeFD) CompareAndSwapUint32(ctx context.Context, addr usermem.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) {
+func (fd *VFSPipeFD) CompareAndSwapUint32(ctx context.Context, addr hostarch.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) {
panic("VFSPipeFD.CompareAndSwapUint32 called unexpectedly")
}
// LoadUint32 implements usermem.IO.LoadUint32.
-func (fd *VFSPipeFD) LoadUint32(ctx context.Context, addr usermem.Addr, opts usermem.IOOpts) (uint32, error) {
+func (fd *VFSPipeFD) LoadUint32(ctx context.Context, addr hostarch.Addr, opts usermem.IOOpts) (uint32, error) {
panic("VFSPipeFD.LoadUint32 called unexpectedly")
}
diff --git a/pkg/sentry/kernel/ptrace.go b/pkg/sentry/kernel/ptrace.go
index f5a60e749..57c7659e7 100644
--- a/pkg/sentry/kernel/ptrace.go
+++ b/pkg/sentry/kernel/ptrace.go
@@ -19,6 +19,7 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/mm"
@@ -1011,7 +1012,7 @@ func (t *Task) ptraceSetOptionsLocked(opts uintptr) error {
}
// Ptrace implements the ptrace system call.
-func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
+func (t *Task) Ptrace(req int64, pid ThreadID, addr, data hostarch.Addr) error {
// PTRACE_TRACEME ignores all other arguments.
if req == linux.PTRACE_TRACEME {
return t.ptraceTraceme()
@@ -1190,7 +1191,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
panic(fmt.Sprintf("%#x + %#x overflows. Invalid reg size > %#x", ar.Start, n, ar.Length()))
}
ar.End = end
- return t.CopyOutIovecs(data, usermem.AddrRangeSeqOf(ar))
+ return t.CopyOutIovecs(data, hostarch.AddrRangeSeqOf(ar))
case linux.PTRACE_SETREGSET:
ars, err := t.CopyInIovecs(data, 1)
@@ -1214,8 +1215,8 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
return err
}
t.p.FullStateChanged()
- ar.End -= usermem.Addr(n)
- return t.CopyOutIovecs(data, usermem.AddrRangeSeqOf(ar))
+ ar.End -= hostarch.Addr(n)
+ return t.CopyOutIovecs(data, hostarch.AddrRangeSeqOf(ar))
case linux.PTRACE_GETSIGINFO:
t.tg.pidns.owner.mu.RLock()
@@ -1267,7 +1268,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
case linux.PTRACE_GETEVENTMSG:
t.tg.pidns.owner.mu.RLock()
defer t.tg.pidns.owner.mu.RUnlock()
- _, err := primitive.CopyUint64Out(t, usermem.Addr(data), target.ptraceEventMsg)
+ _, err := primitive.CopyUint64Out(t, hostarch.Addr(data), target.ptraceEventMsg)
return err
// PEEKSIGINFO is unimplemented but seems to have no users anywhere.
diff --git a/pkg/sentry/kernel/ptrace_amd64.go b/pkg/sentry/kernel/ptrace_amd64.go
index 7aea3dcd8..5ae05b5c3 100644
--- a/pkg/sentry/kernel/ptrace_amd64.go
+++ b/pkg/sentry/kernel/ptrace_amd64.go
@@ -18,12 +18,13 @@ package kernel
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
// ptraceArch implements arch-specific ptrace commands.
-func (t *Task) ptraceArch(target *Task, req int64, addr, data usermem.Addr) error {
+func (t *Task) ptraceArch(target *Task, req int64, addr, data hostarch.Addr) error {
switch req {
case linux.PTRACE_PEEKUSR: // aka PTRACE_PEEKUSER
n, err := target.Arch().PtracePeekUser(uintptr(addr))
diff --git a/pkg/sentry/kernel/ptrace_arm64.go b/pkg/sentry/kernel/ptrace_arm64.go
index d971b96b3..46dd84cbc 100644
--- a/pkg/sentry/kernel/ptrace_arm64.go
+++ b/pkg/sentry/kernel/ptrace_arm64.go
@@ -17,11 +17,11 @@
package kernel
import (
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// ptraceArch implements arch-specific ptrace commands.
-func (t *Task) ptraceArch(target *Task, req int64, addr, data usermem.Addr) error {
+func (t *Task) ptraceArch(target *Task, req int64, addr, data hostarch.Addr) error {
return syserror.EIO
}
diff --git a/pkg/sentry/kernel/rseq.go b/pkg/sentry/kernel/rseq.go
index 2a9023fdf..4bc5bca44 100644
--- a/pkg/sentry/kernel/rseq.go
+++ b/pkg/sentry/kernel/rseq.go
@@ -18,6 +18,7 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/hostcpu"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
@@ -43,8 +44,8 @@ type OldRSeqCriticalRegion struct {
// application handler while its instruction pointer is in CriticalSection,
// set the instruction pointer to Restart and application register r10 (on
// amd64) to the former instruction pointer.
- CriticalSection usermem.AddrRange
- Restart usermem.Addr
+ CriticalSection hostarch.AddrRange
+ Restart hostarch.Addr
}
// RSeqAvailable returns true if t supports (old and new) restartable sequences.
@@ -55,7 +56,7 @@ func (t *Task) RSeqAvailable() bool {
// SetRSeq registers addr as this thread's rseq structure.
//
// Preconditions: The caller must be running on the task goroutine.
-func (t *Task) SetRSeq(addr usermem.Addr, length, signature uint32) error {
+func (t *Task) SetRSeq(addr hostarch.Addr, length, signature uint32) error {
if t.rseqAddr != 0 {
if t.rseqAddr != addr {
return syserror.EINVAL
@@ -100,7 +101,7 @@ func (t *Task) SetRSeq(addr usermem.Addr, length, signature uint32) error {
// ClearRSeq unregisters addr as this thread's rseq structure.
//
// Preconditions: The caller must be running on the task goroutine.
-func (t *Task) ClearRSeq(addr usermem.Addr, length, signature uint32) error {
+func (t *Task) ClearRSeq(addr hostarch.Addr, length, signature uint32) error {
if t.rseqAddr == 0 {
return syserror.EINVAL
}
@@ -166,7 +167,7 @@ func (t *Task) SetOldRSeqCriticalRegion(r OldRSeqCriticalRegion) error {
// CPU number.
//
// Preconditions: The caller must be running on the task goroutine.
-func (t *Task) OldRSeqCPUAddr() usermem.Addr {
+func (t *Task) OldRSeqCPUAddr() hostarch.Addr {
return t.oldRSeqCPUAddr
}
@@ -177,7 +178,7 @@ func (t *Task) OldRSeqCPUAddr() usermem.Addr {
// * t.RSeqAvailable() == true.
// * The caller must be running on the task goroutine.
// * t's AddressSpace must be active.
-func (t *Task) SetOldRSeqCPUAddr(addr usermem.Addr) error {
+func (t *Task) SetOldRSeqCPUAddr(addr hostarch.Addr) error {
t.oldRSeqCPUAddr = addr
// Check that addr is writable.
@@ -221,7 +222,7 @@ func (t *Task) oldRSeqCopyOutCPU() error {
}
buf := t.CopyScratchBuffer(4)
- usermem.ByteOrder.PutUint32(buf, uint32(t.rseqCPU))
+ hostarch.ByteOrder.PutUint32(buf, uint32(t.rseqCPU))
_, err := t.CopyOutBytes(t.oldRSeqCPUAddr, buf)
return err
}
@@ -236,8 +237,8 @@ func (t *Task) rseqCopyOutCPU() error {
buf := t.CopyScratchBuffer(8)
// CPUIDStart and CPUID are the first two fields in linux.RSeq.
- usermem.ByteOrder.PutUint32(buf, uint32(t.rseqCPU)) // CPUIDStart
- usermem.ByteOrder.PutUint32(buf[4:], uint32(t.rseqCPU)) // CPUID
+ hostarch.ByteOrder.PutUint32(buf, uint32(t.rseqCPU)) // CPUIDStart
+ hostarch.ByteOrder.PutUint32(buf[4:], uint32(t.rseqCPU)) // CPUID
// N.B. This write is not atomic, but since this occurs on the task
// goroutine then as long as userspace uses a single-instruction read
// it can't see an invalid value.
@@ -251,8 +252,8 @@ func (t *Task) rseqCopyOutCPU() error {
func (t *Task) rseqClearCPU() error {
buf := t.CopyScratchBuffer(8)
// CPUIDStart and CPUID are the first two fields in linux.RSeq.
- usermem.ByteOrder.PutUint32(buf, 0) // CPUIDStart
- usermem.ByteOrder.PutUint32(buf[4:], linux.RSEQ_CPU_ID_UNINITIALIZED) // CPUID
+ hostarch.ByteOrder.PutUint32(buf, 0) // CPUIDStart
+ hostarch.ByteOrder.PutUint32(buf[4:], linux.RSEQ_CPU_ID_UNINITIALIZED) // CPUID
// N.B. This write is not atomic, but since this occurs on the task
// goroutine then as long as userspace uses a single-instruction read
// it can't see an invalid value.
@@ -305,7 +306,7 @@ func (t *Task) rseqAddrInterrupt() {
return
}
- critAddr := usermem.Addr(usermem.ByteOrder.Uint64(buf))
+ critAddr := hostarch.Addr(hostarch.ByteOrder.Uint64(buf))
if critAddr == 0 {
return
}
@@ -325,7 +326,7 @@ func (t *Task) rseqAddrInterrupt() {
return
}
- start := usermem.Addr(cs.Start)
+ start := hostarch.Addr(cs.Start)
critRange, ok := start.ToRange(cs.PostCommitOffset)
if !ok {
t.Debugf("Invalid start and offset in %+v", cs)
@@ -334,7 +335,7 @@ func (t *Task) rseqAddrInterrupt() {
return
}
- abort := usermem.Addr(cs.Abort)
+ abort := hostarch.Addr(cs.Abort)
if critRange.Contains(abort) {
t.Debugf("Abort in critical section in %+v", cs)
t.forceSignal(linux.SIGSEGV, false /* unconditional */)
@@ -353,7 +354,7 @@ func (t *Task) rseqAddrInterrupt() {
return
}
- sig := usermem.ByteOrder.Uint32(buf)
+ sig := hostarch.ByteOrder.Uint32(buf)
if sig != t.rseqSignature {
t.Debugf("Mismatched rseq signature %d != %d", sig, t.rseqSignature)
t.forceSignal(linux.SIGSEGV, false /* unconditional */)
@@ -376,7 +377,7 @@ func (t *Task) rseqAddrInterrupt() {
}
// Finally we can actually decide whether or not to restart.
- if !critRange.Contains(usermem.Addr(t.Arch().IP())) {
+ if !critRange.Contains(hostarch.Addr(t.Arch().IP())) {
return
}
@@ -386,7 +387,7 @@ func (t *Task) rseqAddrInterrupt() {
// Preconditions: The caller must be running on the task goroutine.
func (t *Task) oldRSeqInterrupt() {
r := t.tg.oldRSeqCritical.Load().(*OldRSeqCriticalRegion)
- if ip := t.Arch().IP(); r.CriticalSection.Contains(usermem.Addr(ip)) {
+ if ip := t.Arch().IP(); r.CriticalSection.Contains(hostarch.Addr(ip)) {
t.Debugf("Interrupted rseq critical section at %#x; restarting at %#x", ip, r.Restart)
t.Arch().SetIP(uintptr(r.Restart))
t.Arch().SetOldRSeqInterruptedIP(ip)
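
rseqAddrInterrupt above builds the critical section range from cs.Start and cs.PostCommitOffset, refuses an abort address inside that range, and restarts only when the interrupted IP falls within it. A small sketch of just that decision, with a plain half-open range type standing in for hostarch.AddrRange:

package main

import "fmt"

// addrRange is a half-open range [start, end), an illustrative
// stand-in for hostarch.AddrRange.
type addrRange struct{ start, end uint64 }

func (r addrRange) contains(a uint64) bool { return a >= r.start && a < r.end }

// shouldRestart mirrors the rseq interrupt decision: restart only if
// the interrupted IP is inside the critical section and the abort
// handler itself is not (an abort inside the section would loop).
func shouldRestart(csStart, postCommitOffset, abort, ip uint64) (bool, error) {
	crit := addrRange{start: csStart, end: csStart + postCommitOffset}
	if crit.end < crit.start {
		return false, fmt.Errorf("critical section overflows")
	}
	if crit.contains(abort) {
		return false, fmt.Errorf("abort IP inside critical section")
	}
	return crit.contains(ip), nil
}

func main() {
	ok, err := shouldRestart(0x1000, 0x40, 0x2000, 0x1010)
	fmt.Println(ok, err) // true <nil>
}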
diff --git a/pkg/sentry/kernel/seccomp.go b/pkg/sentry/kernel/seccomp.go
index 8163a6132..a95e174a2 100644
--- a/pkg/sentry/kernel/seccomp.go
+++ b/pkg/sentry/kernel/seccomp.go
@@ -18,9 +18,9 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bpf"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
const maxSyscallFilterInstructions = 1 << 15
@@ -35,11 +35,11 @@ func dataAsBPFInput(t *Task, d *linux.SeccompData) bpf.Input {
return bpf.InputBytes{
Data: buf,
// Go-marshal always uses the native byte order.
- Order: usermem.ByteOrder,
+ Order: hostarch.ByteOrder,
}
}
-func seccompSiginfo(t *Task, errno, sysno int32, ip usermem.Addr) *arch.SignalInfo {
+func seccompSiginfo(t *Task, errno, sysno int32, ip hostarch.Addr) *arch.SignalInfo {
si := &arch.SignalInfo{
Signo: int32(linux.SIGSYS),
Errno: errno,
@@ -56,7 +56,7 @@ func seccompSiginfo(t *Task, errno, sysno int32, ip usermem.Addr) *arch.SignalIn
// in because vsyscalls do not use the values in t.Arch().)
//
// Preconditions: The caller must be running on the task goroutine.
-func (t *Task) checkSeccompSyscall(sysno int32, args arch.SyscallArguments, ip usermem.Addr) linux.BPFAction {
+func (t *Task) checkSeccompSyscall(sysno int32, args arch.SyscallArguments, ip hostarch.Addr) linux.BPFAction {
result := linux.BPFAction(t.evaluateSyscallFilters(sysno, args, ip))
action := result & linux.SECCOMP_RET_ACTION
switch action {
@@ -102,7 +102,7 @@ func (t *Task) checkSeccompSyscall(sysno int32, args arch.SyscallArguments, ip u
return action
}
-func (t *Task) evaluateSyscallFilters(sysno int32, args arch.SyscallArguments, ip usermem.Addr) uint32 {
+func (t *Task) evaluateSyscallFilters(sysno int32, args arch.SyscallArguments, ip hostarch.Addr) uint32 {
data := linux.SeccompData{
Nr: sysno,
Arch: t.image.st.AuditNumber,
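
checkSeccompSyscall above masks the filter result with SECCOMP_RET_ACTION before switching on it; the low 16 bits carry data such as an errno. A tiny sketch of that split, with constant values as defined in Linux's uapi seccomp.h:

package main

import "fmt"

// Constants mirroring the layout of Linux seccomp return values: the
// high bits select the action, the low 16 bits carry data (e.g. an
// errno value).
const (
	seccompRetAction = 0x7fff0000
	seccompRetData   = 0x0000ffff
	seccompRetErrno  = 0x00050000
	seccompRetAllow  = 0x7fff0000
)

// splitResult separates a raw BPF filter result into its action and
// data halves, as checkSeccompSyscall does before dispatching.
func splitResult(result uint32) (action, data uint32) {
	return result & seccompRetAction, result & seccompRetData
}

func main() {
	action, data := splitResult(seccompRetErrno | 13) // ERRNO carrying EACCES
	fmt.Printf("action=%#x data=%d\n", action, data)
	action, _ = splitResult(seccompRetAllow)
	fmt.Printf("allow=%v\n", action == seccompRetAllow)
}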
diff --git a/pkg/sentry/kernel/shm/shm.go b/pkg/sentry/kernel/shm/shm.go
index 92d60ba78..a73f1bdca 100644
--- a/pkg/sentry/kernel/shm/shm.go
+++ b/pkg/sentry/kernel/shm/shm.go
@@ -38,6 +38,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -47,7 +48,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Key represents a shm segment key. Analogous to a file name.
@@ -197,13 +197,13 @@ func (r *Registry) FindOrCreate(ctx context.Context, pid int32, key Key, size ui
}
var sizeAligned uint64
- if val, ok := usermem.Addr(size).RoundUp(); ok {
+ if val, ok := hostarch.Addr(size).RoundUp(); ok {
sizeAligned = uint64(val)
} else {
return nil, syserror.EINVAL
}
- if numPages := sizeAligned / usermem.PageSize; r.totalPages+numPages > linux.SHMALL {
+ if numPages := sizeAligned / hostarch.PageSize; r.totalPages+numPages > linux.SHMALL {
// "... allocating a segment of the requested size would cause the
// system to exceed the system-wide limit on shared memory (SHMALL)."
// - man shmget(2)
@@ -232,7 +232,7 @@ func (r *Registry) newShm(ctx context.Context, pid int32, key Key, creator fs.Fi
panic(fmt.Sprintf("context.Context %T lacks non-nil value for key %T", ctx, pgalloc.CtxMemoryFileProvider))
}
- effectiveSize := uint64(usermem.Addr(size).MustRoundUp())
+ effectiveSize := uint64(hostarch.Addr(size).MustRoundUp())
fr, err := mfp.MemoryFile().Allocate(effectiveSize, usage.Anonymous)
if err != nil {
return nil, err
@@ -267,7 +267,7 @@ func (r *Registry) newShm(ctx context.Context, pid int32, key Key, creator fs.Fi
r.shms[id] = shm
r.keysToShms[key] = shm
- r.totalPages += effectiveSize / usermem.PageSize
+ r.totalPages += effectiveSize / hostarch.PageSize
return shm, nil
}
@@ -318,7 +318,7 @@ func (r *Registry) remove(s *Shm) {
}
delete(r.shms, s.ID)
- r.totalPages -= s.effectiveSize / usermem.PageSize
+ r.totalPages -= s.effectiveSize / hostarch.PageSize
}
// Release drops the self-reference of each active shm segment in the registry.
@@ -386,7 +386,7 @@ type Shm struct {
// effectiveSize of the segment, rounding up to the next page
// boundary. Immutable.
//
- // Invariant: effectiveSize must be a multiple of usermem.PageSize.
+ // Invariant: effectiveSize must be a multiple of hostarch.PageSize.
effectiveSize uint64
// fr is the offset into mfp.MemoryFile() that backs the contents of this
@@ -467,7 +467,7 @@ func (s *Shm) Msync(context.Context, memmap.MappableRange) error {
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (s *Shm) AddMapping(ctx context.Context, _ memmap.MappingSpace, _ usermem.AddrRange, _ uint64, _ bool) error {
+func (s *Shm) AddMapping(ctx context.Context, _ memmap.MappingSpace, _ hostarch.AddrRange, _ uint64, _ bool) error {
s.mu.Lock()
defer s.mu.Unlock()
s.attachTime = ktime.NowFromContext(ctx)
@@ -482,7 +482,7 @@ func (s *Shm) AddMapping(ctx context.Context, _ memmap.MappingSpace, _ usermem.A
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (s *Shm) RemoveMapping(ctx context.Context, _ memmap.MappingSpace, _ usermem.AddrRange, _ uint64, _ bool) {
+func (s *Shm) RemoveMapping(ctx context.Context, _ memmap.MappingSpace, _ hostarch.AddrRange, _ uint64, _ bool) {
s.mu.Lock()
defer s.mu.Unlock()
// RemoveMapping may be called during task exit, when ctx
@@ -503,12 +503,12 @@ func (s *Shm) RemoveMapping(ctx context.Context, _ memmap.MappingSpace, _ userme
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (*Shm) CopyMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, usermem.AddrRange, uint64, bool) error {
+func (*Shm) CopyMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, hostarch.AddrRange, uint64, bool) error {
return nil
}
// Translate implements memmap.Mappable.Translate.
-func (s *Shm) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (s *Shm) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
var err error
if required.End > s.fr.Length() {
err = &memmap.BusError{syserror.EFAULT}
@@ -519,7 +519,7 @@ func (s *Shm) Translate(ctx context.Context, required, optional memmap.MappableR
Source: source,
File: s.mfp.MemoryFile(),
Offset: s.fr.Start + source.Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
},
}, err
}
@@ -543,7 +543,7 @@ type AttachOpts struct {
//
// Postconditions: The returned MMapOpts are valid only as long as a reference
// continues to be held on s.
-func (s *Shm) ConfigureAttach(ctx context.Context, addr usermem.Addr, opts AttachOpts) (memmap.MMapOpts, error) {
+func (s *Shm) ConfigureAttach(ctx context.Context, addr hostarch.Addr, opts AttachOpts) (memmap.MMapOpts, error) {
s.mu.Lock()
defer s.mu.Unlock()
if s.pendingDestruction && s.ReadRefs() == 0 {
@@ -565,12 +565,12 @@ func (s *Shm) ConfigureAttach(ctx context.Context, addr usermem.Addr, opts Attac
Offset: 0,
Addr: addr,
Fixed: opts.Remap,
- Perms: usermem.AccessType{
+ Perms: hostarch.AccessType{
Read: true,
Write: !opts.Readonly,
Execute: opts.Execute,
},
- MaxPerms: usermem.AnyAccess,
+ MaxPerms: hostarch.AnyAccess,
Mappable: s,
MappingIdentity: s,
}, nil
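
FindOrCreate and newShm above round the requested segment size up to a page boundary and account for it in whole pages against SHMALL. A minimal sketch of that rounding and page accounting, assuming a 4 KiB page in place of hostarch.PageSize:

package main

import "fmt"

// pageSize stands in for hostarch.PageSize (4 KiB assumed).
const pageSize = 4096

// roundUpToPages rounds size up to a page multiple and returns the
// page count, reporting failure on overflow, echoing the shmget path.
func roundUpToPages(size uint64) (pages uint64, ok bool) {
	aligned := size + pageSize - 1
	if aligned < size { // overflow
		return 0, false
	}
	aligned &^= pageSize - 1
	return aligned / pageSize, true
}

func main() {
	fmt.Println(roundUpToPages(1))           // 1 true
	fmt.Println(roundUpToPages(8192))        // 2 true
	fmt.Println(roundUpToPages(^uint64(0)))  // 0 false
}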
diff --git a/pkg/sentry/kernel/syscalls.go b/pkg/sentry/kernel/syscalls.go
index 332bdb8e8..953d4310e 100644
--- a/pkg/sentry/kernel/syscalls.go
+++ b/pkg/sentry/kernel/syscalls.go
@@ -20,9 +20,9 @@ import (
"gvisor.dev/gvisor/pkg/abi"
"gvisor.dev/gvisor/pkg/bits"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
)
// maxSyscallNum is the highest supported syscall number.
@@ -243,7 +243,7 @@ type SyscallTable struct {
// Emulate is a collection of instruction addresses to emulate. The
// keys are addresses, and the values are system call numbers.
- Emulate map[usermem.Addr]uintptr
+ Emulate map[hostarch.Addr]uintptr
// The function to call in case of a missing system call.
Missing MissingFn
@@ -316,7 +316,7 @@ func (s *SyscallTable) Init() {
}
if s.Emulate == nil {
// Ensure non-nil emulate table.
- s.Emulate = make(map[usermem.Addr]uintptr)
+ s.Emulate = make(map[hostarch.Addr]uintptr)
}
max := s.MaxSysno() // Checked during RegisterSyscallTable.
@@ -359,7 +359,7 @@ func (s *SyscallTable) LookupNo(name string) (uintptr, error) {
}
// LookupEmulate looks up an emulation syscall number.
-func (s *SyscallTable) LookupEmulate(addr usermem.Addr) (uintptr, bool) {
+func (s *SyscallTable) LookupEmulate(addr hostarch.Addr) (uintptr, bool) {
sysno, ok := s.Emulate[addr]
return sysno, ok
}
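
SyscallTable.Emulate above maps faulting instruction addresses to syscall numbers, and LookupEmulate is a plain probe of that map; this is how the fixed vsyscall page is emulated. A sketch with ordinary uint64 keys; the addresses are the classic Linux vsyscall entry points and the numbers are amd64 syscall numbers, used here only as sample data:

package main

import "fmt"

// emulate has the same shape as SyscallTable.Emulate
// (map[hostarch.Addr]uintptr): instruction address to syscall number.
var emulate = map[uint64]uintptr{
	0xffffffffff600000: 96,  // gettimeofday
	0xffffffffff600400: 201, // time
	0xffffffffff600800: 309, // getcpu
}

// lookupEmulate mirrors SyscallTable.LookupEmulate: a plain map probe.
func lookupEmulate(addr uint64) (uintptr, bool) {
	sysno, ok := emulate[addr]
	return sysno, ok
}

func main() {
	fmt.Println(lookupEmulate(0xffffffffff600000)) // 96 true
	fmt.Println(lookupEmulate(0xdeadbeef))         // 0 false
}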
diff --git a/pkg/sentry/kernel/task.go b/pkg/sentry/kernel/task.go
index 36141dd09..399985039 100644
--- a/pkg/sentry/kernel/task.go
+++ b/pkg/sentry/kernel/task.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bpf"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/inet"
@@ -33,7 +34,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -470,7 +470,7 @@ type Task struct {
// ThreadID to 0, and wake any futex waiters.
//
// cleartid is exclusive to the task goroutine.
- cleartid usermem.Addr
+ cleartid hostarch.Addr
// This is mostly a fake cpumask just for sched_set/getaffinity as we
// don't really control the affinity.
@@ -540,12 +540,12 @@ type Task struct {
// oldRSeqCPUAddr is a pointer to the userspace old rseq CPU variable.
//
// oldRSeqCPUAddr is exclusive to the task goroutine.
- oldRSeqCPUAddr usermem.Addr
+ oldRSeqCPUAddr hostarch.Addr
// rseqAddr is a pointer to the userspace linux.RSeq structure.
//
// rseqAddr is exclusive to the task goroutine.
- rseqAddr usermem.Addr
+ rseqAddr hostarch.Addr
// rseqSignature is the signature that the rseq abort IP must be signed
// with.
@@ -575,7 +575,7 @@ type Task struct {
// robustList is a pointer to the head of the task's robust futex
// list.
- robustList usermem.Addr
+ robustList hostarch.Addr
// startTime is the real time at which the task started. It is set when
// a Task is created or invokes execve(2).
@@ -652,7 +652,7 @@ func (t *Task) Kernel() *Kernel {
// SetClearTID sets t's cleartid.
//
// Preconditions: The caller must be running on the task goroutine.
-func (t *Task) SetClearTID(addr usermem.Addr) {
+func (t *Task) SetClearTID(addr hostarch.Addr) {
t.cleartid = addr
}
diff --git a/pkg/sentry/kernel/task_clone.go b/pkg/sentry/kernel/task_clone.go
index f305e69c0..405771f3f 100644
--- a/pkg/sentry/kernel/task_clone.go
+++ b/pkg/sentry/kernel/task_clone.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bpf"
"gvisor.dev/gvisor/pkg/cleanup"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
@@ -85,12 +86,12 @@ type CloneOptions struct {
// Stack is the initial stack pointer of the new task. If Stack is 0, the
// new task will start with the same stack pointer as its parent.
- Stack usermem.Addr
+ Stack hostarch.Addr
// If SetTLS is true, set the new task's TLS (thread-local storage)
// descriptor to TLS. If SetTLS is false, TLS is ignored.
SetTLS bool
- TLS usermem.Addr
+ TLS hostarch.Addr
// If ChildClearTID is true, when the child exits, 0 is written to the
// address ChildTID in the child's memory, and if the write is successful a
@@ -101,7 +102,7 @@ type CloneOptions struct {
// Linux, failed writes are silently ignored.)
ChildClearTID bool
ChildSetTID bool
- ChildTID usermem.Addr
+ ChildTID hostarch.Addr
// If ParentSetTID is true, the child's thread ID (in the parent's PID
// namespace) is written to address ParentTID in the parent's memory. (As
@@ -112,7 +113,7 @@ type CloneOptions struct {
// and child's memory, but this is a documentation error fixed by
// 87ab04792ced ("clone.2: Fix description of CLONE_PARENT_SETTID").
ParentSetTID bool
- ParentTID usermem.Addr
+ ParentTID hostarch.Addr
// If Vfork is true, place the parent in vforkStop until the cloned task
// releases its TaskImage.
@@ -268,7 +269,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {
}
tg := t.tg
- rseqAddr := usermem.Addr(0)
+ rseqAddr := hostarch.Addr(0)
rseqSignature := uint32(0)
if opts.NewThreadGroup {
if tg.mounts != nil {
diff --git a/pkg/sentry/kernel/task_futex.go b/pkg/sentry/kernel/task_futex.go
index 195c7da9b..4dc41b82b 100644
--- a/pkg/sentry/kernel/task_futex.go
+++ b/pkg/sentry/kernel/task_futex.go
@@ -16,6 +16,7 @@ package kernel
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/kernel/futex"
"gvisor.dev/gvisor/pkg/usermem"
@@ -30,33 +31,33 @@ func (t *Task) Futex() *futex.Manager {
}
// SwapUint32 implements futex.Target.SwapUint32.
-func (t *Task) SwapUint32(addr usermem.Addr, new uint32) (uint32, error) {
+func (t *Task) SwapUint32(addr hostarch.Addr, new uint32) (uint32, error) {
return t.MemoryManager().SwapUint32(t, addr, new, usermem.IOOpts{
AddressSpaceActive: true,
})
}
// CompareAndSwapUint32 implements futex.Target.CompareAndSwapUint32.
-func (t *Task) CompareAndSwapUint32(addr usermem.Addr, old, new uint32) (uint32, error) {
+func (t *Task) CompareAndSwapUint32(addr hostarch.Addr, old, new uint32) (uint32, error) {
return t.MemoryManager().CompareAndSwapUint32(t, addr, old, new, usermem.IOOpts{
AddressSpaceActive: true,
})
}
// LoadUint32 implements futex.Target.LoadUint32.
-func (t *Task) LoadUint32(addr usermem.Addr) (uint32, error) {
+func (t *Task) LoadUint32(addr hostarch.Addr) (uint32, error) {
return t.MemoryManager().LoadUint32(t, addr, usermem.IOOpts{
AddressSpaceActive: true,
})
}
// GetSharedKey implements futex.Target.GetSharedKey.
-func (t *Task) GetSharedKey(addr usermem.Addr) (futex.Key, error) {
+func (t *Task) GetSharedKey(addr hostarch.Addr) (futex.Key, error) {
return t.MemoryManager().GetSharedFutexKey(t, addr)
}
// GetRobustList returns the address of the task's robust futex list.
-func (t *Task) GetRobustList() usermem.Addr {
+func (t *Task) GetRobustList() hostarch.Addr {
t.mu.Lock()
addr := t.robustList
t.mu.Unlock()
@@ -64,7 +65,7 @@ func (t *Task) GetRobustList() usermem.Addr {
}
// SetRobustList sets the robust futex list for the task.
-func (t *Task) SetRobustList(addr usermem.Addr) {
+func (t *Task) SetRobustList(addr hostarch.Addr) {
t.mu.Lock()
t.robustList = addr
t.mu.Unlock()
@@ -84,28 +85,28 @@ func (t *Task) exitRobustList() {
}
var rl linux.RobustListHead
- if _, err := rl.CopyIn(t, usermem.Addr(addr)); err != nil {
+ if _, err := rl.CopyIn(t, hostarch.Addr(addr)); err != nil {
return
}
next := primitive.Uint64(rl.List)
done := 0
- var pendingLockAddr usermem.Addr
+ var pendingLockAddr hostarch.Addr
if rl.ListOpPending != 0 {
- pendingLockAddr = usermem.Addr(rl.ListOpPending + rl.FutexOffset)
+ pendingLockAddr = hostarch.Addr(rl.ListOpPending + rl.FutexOffset)
}
// Wake up normal elements.
- for usermem.Addr(next) != addr {
+ for hostarch.Addr(next) != addr {
// We traverse to the next element of the list before we
// actually wake anything. This prevents the race where waking
// this futex causes a modification of the list.
- thisLockAddr := usermem.Addr(uint64(next) + rl.FutexOffset)
+ thisLockAddr := hostarch.Addr(uint64(next) + rl.FutexOffset)
// Try to decode the next element in the list before waking the
// current futex. But don't check the error until after we've
// woken the current futex. Linux does it in this order too.
- _, nextErr := next.CopyIn(t, usermem.Addr(next))
+ _, nextErr := next.CopyIn(t, hostarch.Addr(next))
// Wakeup the current futex if it's not pending.
if thisLockAddr != pendingLockAddr {
@@ -133,7 +134,7 @@ func (t *Task) exitRobustList() {
}
// wakeRobustListOne wakes a single futex from the robust list.
-func (t *Task) wakeRobustListOne(addr usermem.Addr) {
+func (t *Task) wakeRobustListOne(addr hostarch.Addr) {
// Bit 0 in address signals PI futex.
pi := addr&1 == 1
addr = addr &^ 1
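
exitRobustList and wakeRobustListOne above compute each lock word address by adding the list's FutexOffset to the node address and strip bit 0, which Linux uses to flag a PI futex. The arithmetic alone, as an illustrative sketch:

package main

import "fmt"

// decodeRobustEntry turns a robust-list node address plus the list's
// futex offset into the lock word address, and strips bit 0, which
// flags a PI futex. Purely illustrative arithmetic.
func decodeRobustEntry(node, futexOffset uint64) (lockAddr uint64, pi bool) {
	lockAddr = node + futexOffset
	pi = lockAddr&1 == 1
	lockAddr &^= 1
	return lockAddr, pi
}

func main() {
	addr, pi := decodeRobustEntry(0x7f0000001000, 0x21)
	fmt.Printf("%#x pi=%v\n", addr, pi) // 0x7f0000001020 pi=true
}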
diff --git a/pkg/sentry/kernel/task_image.go b/pkg/sentry/kernel/task_image.go
index ce5fbd299..bd5543d4e 100644
--- a/pkg/sentry/kernel/task_image.go
+++ b/pkg/sentry/kernel/task_image.go
@@ -19,12 +19,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel/futex"
"gvisor.dev/gvisor/pkg/sentry/loader"
"gvisor.dev/gvisor/pkg/sentry/mm"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/usermem"
)
var errNoSyscalls = syserr.New("no syscall table found", linux.ENOEXEC)
@@ -129,7 +129,7 @@ func (t *Task) Stack() *arch.Stack {
return &arch.Stack{
Arch: t.Arch(),
IO: t.MemoryManager(),
- Bottom: usermem.Addr(t.Arch().Stack()),
+ Bottom: hostarch.Addr(t.Arch().Stack()),
}
}
diff --git a/pkg/sentry/kernel/task_log.go b/pkg/sentry/kernel/task_log.go
index c70e5e6ce..72b9a0384 100644
--- a/pkg/sentry/kernel/task_log.go
+++ b/pkg/sentry/kernel/task_log.go
@@ -20,6 +20,7 @@ import (
"sort"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -108,9 +109,9 @@ func (t *Task) debugDumpStack() {
return
}
t.Debugf("Stack:")
- start := usermem.Addr(t.Arch().Stack())
+ start := hostarch.Addr(t.Arch().Stack())
// Round addr down to a 16-byte boundary.
- start &= ^usermem.Addr(15)
+ start &= ^hostarch.Addr(15)
// Print 16 bytes per line, one byte at a time.
for offset := uint64(0); offset < maxStackDebugBytes; offset += 16 {
addr, ok := start.AddLength(offset)
@@ -127,7 +128,7 @@ func (t *Task) debugDumpStack() {
t.Debugf("%x: % x", addr, data[:n])
}
if err != nil {
- t.Debugf("Error reading stack at address %x: %v", addr+usermem.Addr(n), err)
+ t.Debugf("Error reading stack at address %x: %v", addr+hostarch.Addr(n), err)
break
}
}
@@ -147,9 +148,9 @@ func (t *Task) debugDumpCode() {
}
t.Debugf("Code:")
// Print code on both sides of the instruction register.
- start := usermem.Addr(t.Arch().IP()) - maxCodeDebugBytes/2
+ start := hostarch.Addr(t.Arch().IP()) - maxCodeDebugBytes/2
// Round addr down to a 16-byte boundary.
- start &= ^usermem.Addr(15)
+ start &= ^hostarch.Addr(15)
// Print 16 bytes per line, one byte at a time.
for offset := uint64(0); offset < maxCodeDebugBytes; offset += 16 {
addr, ok := start.AddLength(offset)
@@ -166,7 +167,7 @@ func (t *Task) debugDumpCode() {
t.Debugf("%x: % x", addr, data[:n])
}
if err != nil {
- t.Debugf("Error reading stack at address %x: %v", addr+usermem.Addr(n), err)
+ t.Debugf("Error reading stack at address %x: %v", addr+hostarch.Addr(n), err)
break
}
}
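
debugDumpStack and debugDumpCode above round the starting address down to a 16-byte boundary and print 16 bytes per line. A self-contained sketch of the same layout over a local slice standing in for task memory:

package main

import "fmt"

// dumpWindow prints data 16 bytes per line starting at base rounded
// down to a 16-byte boundary, echoing Task.debugDumpStack's layout.
// Here the "memory" is just a local slice indexed from base.
func dumpWindow(base uint64, mem []byte) {
	start := base &^ 15 // round down to a 16-byte boundary
	for off := uint64(0); off < uint64(len(mem)); off += 16 {
		end := off + 16
		if end > uint64(len(mem)) {
			end = uint64(len(mem))
		}
		fmt.Printf("%x: % x\n", start+off, mem[off:end])
	}
}

func main() {
	buf := make([]byte, 32)
	for i := range buf {
		buf[i] = byte(i)
	}
	dumpWindow(0x7ffdeadbeef3, buf)
}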
diff --git a/pkg/sentry/kernel/task_run.go b/pkg/sentry/kernel/task_run.go
index 3ccecf4b6..068f25af1 100644
--- a/pkg/sentry/kernel/task_run.go
+++ b/pkg/sentry/kernel/task_run.go
@@ -23,13 +23,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/goid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/hostcpu"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// A taskRunState is a reified state in the task state machine. See README.md
@@ -148,7 +148,7 @@ func (*runApp) handleCPUIDInstruction(t *Task) error {
region := trace.StartRegion(t.traceContext, cpuidRegion)
expected := arch.CPUIDInstruction[:]
found := make([]byte, len(expected))
- _, err := t.CopyInBytes(usermem.Addr(t.Arch().IP()), found)
+ _, err := t.CopyInBytes(hostarch.Addr(t.Arch().IP()), found)
if err == nil && bytes.Equal(expected, found) {
// Skip the cpuid instruction.
t.Arch().CPUIDEmulate(t)
@@ -307,8 +307,8 @@ func (app *runApp) execute(t *Task) taskRunState {
// normally.
if at.Any() {
region := trace.StartRegion(t.traceContext, faultRegion)
- addr := usermem.Addr(info.Addr())
- err := t.MemoryManager().HandleUserFault(t, addr, at, usermem.Addr(t.Arch().Stack()))
+ addr := hostarch.Addr(info.Addr())
+ err := t.MemoryManager().HandleUserFault(t, addr, at, hostarch.Addr(t.Arch().Stack()))
region.End()
if err == nil {
// The fault was handled appropriately.
diff --git a/pkg/sentry/kernel/task_signals.go b/pkg/sentry/kernel/task_signals.go
index 75af3af79..c2b9fc08f 100644
--- a/pkg/sentry/kernel/task_signals.go
+++ b/pkg/sentry/kernel/task_signals.go
@@ -23,11 +23,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/eventchannel"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
ucspb "gvisor.dev/gvisor/pkg/sentry/kernel/uncaught_signal_go_proto"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -243,7 +243,7 @@ func (t *Task) deliverSignalToHandler(info *arch.SignalInfo, act arch.SignalAct)
// Are we executing on the main stack,
// or the provided alternate stack?
- sp := usermem.Addr(t.Arch().Stack())
+ sp := hostarch.Addr(t.Arch().Stack())
// N.B. This is a *copy* of the alternate stack that the user's signal
// handler expects to see in its ucontext (even if it's not in use).
@@ -251,7 +251,7 @@ func (t *Task) deliverSignalToHandler(info *arch.SignalInfo, act arch.SignalAct)
if act.IsOnStack() && alt.IsEnabled() {
alt.SetOnStack()
if !alt.Contains(sp) {
- sp = usermem.Addr(alt.Top())
+ sp = hostarch.Addr(alt.Top())
}
}
@@ -652,7 +652,7 @@ func (t *Task) SignalStack() arch.SignalStack {
// onSignalStack returns true if the task is executing on the given signal stack.
func (t *Task) onSignalStack(alt arch.SignalStack) bool {
- sp := usermem.Addr(t.Arch().Stack())
+ sp := hostarch.Addr(t.Arch().Stack())
return alt.Contains(sp)
}
@@ -720,7 +720,7 @@ func (tg *ThreadGroup) SetSignalAct(sig linux.Signal, actptr *arch.SignalAct) (a
// CopyOutSignalAct converts the given SignalAct into an architecture-specific
// type and then copies it out to task memory.
-func (t *Task) CopyOutSignalAct(addr usermem.Addr, s *arch.SignalAct) error {
+func (t *Task) CopyOutSignalAct(addr hostarch.Addr, s *arch.SignalAct) error {
n := t.Arch().NewSignalAct()
n.SerializeFrom(s)
_, err := n.CopyOut(t, addr)
@@ -729,7 +729,7 @@ func (t *Task) CopyOutSignalAct(addr usermem.Addr, s *arch.SignalAct) error {
// CopyInSignalAct copies an architecture-specific sigaction type from task
// memory and then converts it into a SignalAct.
-func (t *Task) CopyInSignalAct(addr usermem.Addr) (arch.SignalAct, error) {
+func (t *Task) CopyInSignalAct(addr hostarch.Addr) (arch.SignalAct, error) {
n := t.Arch().NewSignalAct()
var s arch.SignalAct
if _, err := n.CopyIn(t, addr); err != nil {
@@ -741,7 +741,7 @@ func (t *Task) CopyInSignalAct(addr usermem.Addr) (arch.SignalAct, error) {
// CopyOutSignalStack converts the given SignalStack into an
// architecture-specific type and then copies it out to task memory.
-func (t *Task) CopyOutSignalStack(addr usermem.Addr, s *arch.SignalStack) error {
+func (t *Task) CopyOutSignalStack(addr hostarch.Addr, s *arch.SignalStack) error {
n := t.Arch().NewSignalStack()
n.SerializeFrom(s)
_, err := n.CopyOut(t, addr)
@@ -750,7 +750,7 @@ func (t *Task) CopyOutSignalStack(addr usermem.Addr, s *arch.SignalStack) error
// CopyInSignalStack copies an architecture-specific stack_t from task memory
// and then converts it into a SignalStack.
-func (t *Task) CopyInSignalStack(addr usermem.Addr) (arch.SignalStack, error) {
+func (t *Task) CopyInSignalStack(addr hostarch.Addr) (arch.SignalStack, error) {
n := t.Arch().NewSignalStack()
var s arch.SignalStack
if _, err := n.CopyIn(t, addr); err != nil {
diff --git a/pkg/sentry/kernel/task_start.go b/pkg/sentry/kernel/task_start.go
index 36e1384f1..fc18b6253 100644
--- a/pkg/sentry/kernel/task_start.go
+++ b/pkg/sentry/kernel/task_start.go
@@ -17,6 +17,7 @@ package kernel
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -25,7 +26,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// TaskConfig defines the configuration of a new Task (see below).
@@ -86,7 +86,7 @@ type TaskConfig struct {
MountNamespaceVFS2 *vfs.MountNamespace
// RSeqAddr is a pointer to the userspace linux.RSeq structure.
- RSeqAddr usermem.Addr
+ RSeqAddr hostarch.Addr
// RSeqSignature is the signature that the rseq abort IP must be signed
// with.
diff --git a/pkg/sentry/kernel/task_syscall.go b/pkg/sentry/kernel/task_syscall.go
index 2e84bd88a..2c658d001 100644
--- a/pkg/sentry/kernel/task_syscall.go
+++ b/pkg/sentry/kernel/task_syscall.go
@@ -22,12 +22,12 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bits"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/metric"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
var vsyscallCount = metric.MustCreateNewUint64Metric("/kernel/vsyscall_count", false /* sync */, "Number of times vsyscalls were invoked by the application")
@@ -153,7 +153,7 @@ func (t *Task) doSyscall() taskRunState {
// Check seccomp filters. The nil check is for performance (as seccomp use
// is rare), not needed for correctness.
if t.syscallFilters.Load() != nil {
- switch r := t.checkSeccompSyscall(int32(sysno), args, usermem.Addr(t.Arch().IP())); r {
+ switch r := t.checkSeccompSyscall(int32(sysno), args, hostarch.Addr(t.Arch().IP())); r {
case linux.SECCOMP_RET_ERRNO, linux.SECCOMP_RET_TRAP:
t.Debugf("Syscall %d: denied by seccomp", sysno)
return (*runSyscallExit)(nil)
@@ -283,12 +283,12 @@ func (*runSyscallExit) execute(t *Task) taskRunState {
// doVsyscall is the entry point for a vsyscall invocation of syscall sysno, as
// indicated by an execution fault at address addr. doVsyscall returns the
// task's next run state.
-func (t *Task) doVsyscall(addr usermem.Addr, sysno uintptr) taskRunState {
+func (t *Task) doVsyscall(addr hostarch.Addr, sysno uintptr) taskRunState {
vsyscallCount.Increment()
// Grab the caller up front, to make sure there's a sensible stack.
caller := t.Arch().Native(uintptr(0))
- if _, err := caller.CopyIn(t, usermem.Addr(t.Arch().Stack())); err != nil {
+ if _, err := caller.CopyIn(t, hostarch.Addr(t.Arch().Stack())); err != nil {
t.Debugf("vsyscall %d: error reading return address from stack: %v", sysno, err)
t.forceSignal(linux.SIGSEGV, false /* unconditional */)
t.SendSignal(SignalInfoPriv(linux.SIGSEGV))
@@ -322,7 +322,7 @@ func (t *Task) doVsyscall(addr usermem.Addr, sysno uintptr) taskRunState {
}
type runVsyscallAfterPtraceEventSeccomp struct {
- addr usermem.Addr
+ addr hostarch.Addr
sysno uintptr
caller marshal.Marshallable
}
@@ -337,7 +337,7 @@ func (r *runVsyscallAfterPtraceEventSeccomp) execute(t *Task) taskRunState {
// currently emulated call. ... The tracer MUST NOT modify rip or rsp." -
// Documentation/prctl/seccomp_filter.txt. On Linux, changing orig_ax or ip
// causes do_exit(SIGSYS), and changing sp is ignored.
- if (sysno != ^uintptr(0) && sysno != r.sysno) || usermem.Addr(t.Arch().IP()) != r.addr {
+ if (sysno != ^uintptr(0) && sysno != r.sysno) || hostarch.Addr(t.Arch().IP()) != r.addr {
t.PrepareExit(ExitStatus{Signo: int(linux.SIGSYS)})
return (*runExit)(nil)
}
diff --git a/pkg/sentry/kernel/task_usermem.go b/pkg/sentry/kernel/task_usermem.go
index 94dabbcd8..fc6d9438a 100644
--- a/pkg/sentry/kernel/task_usermem.go
+++ b/pkg/sentry/kernel/task_usermem.go
@@ -19,6 +19,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/mm"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
@@ -27,7 +28,7 @@ import (
// MAX_RW_COUNT is the maximum size in bytes of a single read or write.
// Reads and writes that exceed this size may be silently truncated.
// (Linux: include/linux/fs.h:MAX_RW_COUNT)
-var MAX_RW_COUNT = int(usermem.Addr(math.MaxInt32).RoundDown())
+var MAX_RW_COUNT = int(hostarch.Addr(math.MaxInt32).RoundDown())
// Activate ensures that the task has an active address space.
func (t *Task) Activate() {
@@ -49,7 +50,7 @@ func (t *Task) Deactivate() {
// data without reflection and pass in a byte slice.
//
// This Task's AddressSpace must be active.
-func (t *Task) CopyInBytes(addr usermem.Addr, dst []byte) (int, error) {
+func (t *Task) CopyInBytes(addr hostarch.Addr, dst []byte) (int, error) {
return t.MemoryManager().CopyIn(t, addr, dst, usermem.IOOpts{
AddressSpaceActive: true,
})
@@ -59,7 +60,7 @@ func (t *Task) CopyInBytes(addr usermem.Addr, dst []byte) (int, error) {
// data without reflection and pass in a byte slice.
//
// This Task's AddressSpace must be active.
-func (t *Task) CopyOutBytes(addr usermem.Addr, src []byte) (int, error) {
+func (t *Task) CopyOutBytes(addr hostarch.Addr, src []byte) (int, error) {
return t.MemoryManager().CopyOut(t, addr, src, usermem.IOOpts{
AddressSpaceActive: true,
})
@@ -70,7 +71,7 @@ func (t *Task) CopyOutBytes(addr usermem.Addr, src []byte) (int, error) {
// user memory that is unmapped or not readable by the user.
//
// This Task's AddressSpace must be active.
-func (t *Task) CopyInString(addr usermem.Addr, maxlen int) (string, error) {
+func (t *Task) CopyInString(addr hostarch.Addr, maxlen int) (string, error) {
return usermem.CopyStringIn(t, t.MemoryManager(), addr, maxlen, usermem.IOOpts{
AddressSpaceActive: true,
})
@@ -90,7 +91,7 @@ func (t *Task) CopyInString(addr usermem.Addr, maxlen int) (string, error) {
// { "abc" } => 4 (3 for length, 1 for elements)
//
// This Task's AddressSpace must be active.
-func (t *Task) CopyInVector(addr usermem.Addr, maxElemSize, maxTotalSize int) ([]string, error) {
+func (t *Task) CopyInVector(addr hostarch.Addr, maxElemSize, maxTotalSize int) ([]string, error) {
var v []string
for {
argAddr := t.Arch().Native(0)
@@ -109,12 +110,12 @@ func (t *Task) CopyInVector(addr usermem.Addr, maxElemSize, maxTotalSize int) ([
if maxTotalSize < thisMax {
thisMax = maxTotalSize
}
- arg, err := t.CopyInString(usermem.Addr(t.Arch().Value(argAddr)), thisMax)
+ arg, err := t.CopyInString(hostarch.Addr(t.Arch().Value(argAddr)), thisMax)
if err != nil {
return v, err
}
v = append(v, arg)
- addr += usermem.Addr(t.Arch().Width())
+ addr += hostarch.Addr(t.Arch().Width())
maxTotalSize -= len(arg) + 1
}
return v, nil
@@ -126,7 +127,7 @@ func (t *Task) CopyInVector(addr usermem.Addr, maxElemSize, maxTotalSize int) ([
// Preconditions: Same as usermem.IO.CopyOut, plus:
// * The caller must be running on the task goroutine.
// * t's AddressSpace must be active.
-func (t *Task) CopyOutIovecs(addr usermem.Addr, src usermem.AddrRangeSeq) error {
+func (t *Task) CopyOutIovecs(addr hostarch.Addr, src hostarch.AddrRangeSeq) error {
switch t.Arch().Width() {
case 8:
const itemLen = 16
@@ -137,8 +138,8 @@ func (t *Task) CopyOutIovecs(addr usermem.Addr, src usermem.AddrRangeSeq) error
b := t.CopyScratchBuffer(itemLen)
for ; !src.IsEmpty(); src = src.Tail() {
ar := src.Head()
- usermem.ByteOrder.PutUint64(b[0:8], uint64(ar.Start))
- usermem.ByteOrder.PutUint64(b[8:16], uint64(ar.Length()))
+ hostarch.ByteOrder.PutUint64(b[0:8], uint64(ar.Start))
+ hostarch.ByteOrder.PutUint64(b[8:16], uint64(ar.Length()))
if _, err := t.CopyOutBytes(addr, b); err != nil {
return err
}
@@ -153,8 +154,8 @@ func (t *Task) CopyOutIovecs(addr usermem.Addr, src usermem.AddrRangeSeq) error
}
// CopyInIovecs copies an array of numIovecs struct iovecs from the memory
-// mapped at addr, converts them to usermem.AddrRanges, and returns them as a
-// usermem.AddrRangeSeq.
+// mapped at addr, converts them to hostarch.AddrRanges, and returns them as a
+// hostarch.AddrRangeSeq.
//
// CopyInIovecs shares the following properties with Linux's
// lib/iov_iter.c:import_iovec() => fs/read_write.c:rw_copy_check_uvector():
@@ -175,42 +176,42 @@ func (t *Task) CopyOutIovecs(addr usermem.Addr, src usermem.AddrRangeSeq) error
// Preconditions: Same as usermem.IO.CopyIn, plus:
// * The caller must be running on the task goroutine.
// * t's AddressSpace must be active.
-func (t *Task) CopyInIovecs(addr usermem.Addr, numIovecs int) (usermem.AddrRangeSeq, error) {
+func (t *Task) CopyInIovecs(addr hostarch.Addr, numIovecs int) (hostarch.AddrRangeSeq, error) {
if numIovecs == 0 {
- return usermem.AddrRangeSeq{}, nil
+ return hostarch.AddrRangeSeq{}, nil
}
- var dst []usermem.AddrRange
+ var dst []hostarch.AddrRange
if numIovecs > 1 {
- dst = make([]usermem.AddrRange, 0, numIovecs)
+ dst = make([]hostarch.AddrRange, 0, numIovecs)
}
switch t.Arch().Width() {
case 8:
const itemLen = 16
if _, ok := addr.AddLength(uint64(numIovecs) * itemLen); !ok {
- return usermem.AddrRangeSeq{}, syserror.EFAULT
+ return hostarch.AddrRangeSeq{}, syserror.EFAULT
}
b := t.CopyScratchBuffer(itemLen)
for i := 0; i < numIovecs; i++ {
if _, err := t.CopyInBytes(addr, b); err != nil {
- return usermem.AddrRangeSeq{}, err
+ return hostarch.AddrRangeSeq{}, err
}
- base := usermem.Addr(usermem.ByteOrder.Uint64(b[0:8]))
- length := usermem.ByteOrder.Uint64(b[8:16])
+ base := hostarch.Addr(hostarch.ByteOrder.Uint64(b[0:8]))
+ length := hostarch.ByteOrder.Uint64(b[8:16])
if length > math.MaxInt64 {
- return usermem.AddrRangeSeq{}, syserror.EINVAL
+ return hostarch.AddrRangeSeq{}, syserror.EINVAL
}
ar, ok := t.MemoryManager().CheckIORange(base, int64(length))
if !ok {
- return usermem.AddrRangeSeq{}, syserror.EFAULT
+ return hostarch.AddrRangeSeq{}, syserror.EFAULT
}
if numIovecs == 1 {
// Special case to avoid allocating dst.
- return usermem.AddrRangeSeqOf(ar).TakeFirst(MAX_RW_COUNT), nil
+ return hostarch.AddrRangeSeqOf(ar).TakeFirst(MAX_RW_COUNT), nil
}
dst = append(dst, ar)
@@ -218,7 +219,7 @@ func (t *Task) CopyInIovecs(addr usermem.Addr, numIovecs int) (usermem.AddrRange
}
default:
- return usermem.AddrRangeSeq{}, syserror.ENOSYS
+ return hostarch.AddrRangeSeq{}, syserror.ENOSYS
}
// Truncate to MAX_RW_COUNT.
@@ -226,13 +227,13 @@ func (t *Task) CopyInIovecs(addr usermem.Addr, numIovecs int) (usermem.AddrRange
for i := range dst {
dstlen := uint64(dst[i].Length())
if rem := uint64(MAX_RW_COUNT) - total; rem < dstlen {
- dst[i].End -= usermem.Addr(dstlen - rem)
+ dst[i].End -= hostarch.Addr(dstlen - rem)
dstlen = rem
}
total += dstlen
}
- return usermem.AddrRangeSeqFromSlice(dst), nil
+ return hostarch.AddrRangeSeqFromSlice(dst), nil
}
// SingleIOSequence returns a usermem.IOSequence representing [addr,
@@ -245,7 +246,7 @@ func (t *Task) CopyInIovecs(addr usermem.Addr, numIovecs int) (usermem.AddrRange
// write syscalls in Linux do not use import_single_range(). However they check
// access_ok() in fs/read_write.c:vfs_read/vfs_write, and overflowing address
// ranges are truncated to MAX_RW_COUNT by fs/read_write.c:rw_verify_area().)
-func (t *Task) SingleIOSequence(addr usermem.Addr, length int, opts usermem.IOOpts) (usermem.IOSequence, error) {
+func (t *Task) SingleIOSequence(addr hostarch.Addr, length int, opts usermem.IOOpts) (usermem.IOSequence, error) {
if length > MAX_RW_COUNT {
length = MAX_RW_COUNT
}
@@ -255,7 +256,7 @@ func (t *Task) SingleIOSequence(addr usermem.Addr, length int, opts usermem.IOOp
}
return usermem.IOSequence{
IO: t.MemoryManager(),
- Addrs: usermem.AddrRangeSeqOf(ar),
+ Addrs: hostarch.AddrRangeSeqOf(ar),
Opts: opts,
}, nil
}
@@ -267,7 +268,7 @@ func (t *Task) SingleIOSequence(addr usermem.Addr, length int, opts usermem.IOOp
// IovecsIOSequence is analogous to Linux's lib/iov_iter.c:import_iovec().
//
// Preconditions: Same as Task.CopyInIovecs.
-func (t *Task) IovecsIOSequence(addr usermem.Addr, iovcnt int, opts usermem.IOOpts) (usermem.IOSequence, error) {
+func (t *Task) IovecsIOSequence(addr hostarch.Addr, iovcnt int, opts usermem.IOOpts) (usermem.IOSequence, error) {
if iovcnt < 0 || iovcnt > linux.UIO_MAXIOV {
return usermem.IOSequence{}, syserror.EINVAL
}
@@ -317,7 +318,7 @@ func (cc *taskCopyContext) getMemoryManager() (*mm.MemoryManager, error) {
}
// CopyInBytes implements marshal.CopyContext.CopyInBytes.
-func (cc *taskCopyContext) CopyInBytes(addr usermem.Addr, dst []byte) (int, error) {
+func (cc *taskCopyContext) CopyInBytes(addr hostarch.Addr, dst []byte) (int, error) {
tmm, err := cc.getMemoryManager()
if err != nil {
return 0, err
@@ -327,7 +328,7 @@ func (cc *taskCopyContext) CopyInBytes(addr usermem.Addr, dst []byte) (int, erro
}
// CopyOutBytes implements marshal.CopyContext.CopyOutBytes.
-func (cc *taskCopyContext) CopyOutBytes(addr usermem.Addr, src []byte) (int, error) {
+func (cc *taskCopyContext) CopyOutBytes(addr hostarch.Addr, src []byte) (int, error) {
tmm, err := cc.getMemoryManager()
if err != nil {
return 0, err
@@ -360,11 +361,11 @@ func (cc *ownTaskCopyContext) CopyScratchBuffer(size int) []byte {
}
// CopyInBytes implements marshal.CopyContext.CopyInBytes.
-func (cc *ownTaskCopyContext) CopyInBytes(addr usermem.Addr, dst []byte) (int, error) {
+func (cc *ownTaskCopyContext) CopyInBytes(addr hostarch.Addr, dst []byte) (int, error) {
return cc.t.MemoryManager().CopyIn(cc.t, addr, dst, cc.opts)
}
// CopyOutBytes implements marshal.CopyContext.CopyOutBytes.
-func (cc *ownTaskCopyContext) CopyOutBytes(addr usermem.Addr, src []byte) (int, error) {
+func (cc *ownTaskCopyContext) CopyOutBytes(addr hostarch.Addr, src []byte) (int, error) {
return cc.t.MemoryManager().CopyOut(cc.t, addr, src, cc.opts)
}
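
For context on the CopyInIovecs hunks above: on 64-bit targets each struct iovec is 16 bytes, an 8-byte base address followed by an 8-byte length, decoded in host byte order, and the decoded ranges are truncated so the whole vector covers at most MAX_RW_COUNT bytes. The following standalone sketch mirrors that decoding; it is not gVisor code, the helper names and the little-endian assumption (as on amd64/arm64) are illustrative only, and the EFAULT/EINVAL handling of the real code is reduced to plain errors.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math"
)

// iovec mirrors the 64-bit userspace layout: an 8-byte base address
// followed by an 8-byte length.
type iovec struct {
	base uint64
	len  uint64
}

// maxRWCount plays the role of MAX_RW_COUNT (MaxInt32 rounded down to a page).
const maxRWCount = math.MaxInt32 &^ 0xfff

// decodeIovecs decodes numIovecs entries from a raw byte buffer and truncates
// the vector so it covers at most maxRWCount bytes in total, roughly as
// CopyInIovecs does after CopyInBytes has fetched each 16-byte entry.
func decodeIovecs(raw []byte, numIovecs int) ([]iovec, error) {
	if len(raw) < numIovecs*16 {
		return nil, errors.New("short buffer")
	}
	iovs := make([]iovec, 0, numIovecs)
	var total uint64
	for i := 0; i < numIovecs; i++ {
		b := raw[i*16 : (i+1)*16]
		iov := iovec{
			base: binary.LittleEndian.Uint64(b[0:8]),
			len:  binary.LittleEndian.Uint64(b[8:16]),
		}
		if iov.len > math.MaxInt64 {
			return nil, errors.New("iovec length overflows int64") // EINVAL in the real code
		}
		if rem := uint64(maxRWCount) - total; rem < iov.len {
			iov.len = rem // truncate the whole vector to maxRWCount bytes
		}
		total += iov.len
		iovs = append(iovs, iov)
	}
	return iovs, nil
}

func main() {
	raw := make([]byte, 32)
	binary.LittleEndian.PutUint64(raw[0:8], 0x1000) // base of first entry
	binary.LittleEndian.PutUint64(raw[8:16], 512)   // length of first entry
	binary.LittleEndian.PutUint64(raw[16:24], 0x3000)
	binary.LittleEndian.PutUint64(raw[24:32], 1024)
	iovs, err := decodeIovecs(raw, 2)
	fmt.Println(iovs, err) // [{4096 512} {12288 1024}] <nil>
}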
diff --git a/pkg/sentry/kernel/vdso.go b/pkg/sentry/kernel/vdso.go
index 9e5c2d26f..cc0917504 100644
--- a/pkg/sentry/kernel/vdso.go
+++ b/pkg/sentry/kernel/vdso.go
@@ -17,10 +17,10 @@ package kernel
import (
"fmt"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
- "gvisor.dev/gvisor/pkg/usermem"
)
// vdsoParams are the parameters exposed to the VDSO.
@@ -96,7 +96,7 @@ func NewVDSOParamPage(mfp pgalloc.MemoryFileProvider, fr memmap.FileRange) *VDSO
// access returns a mapping of the param page.
func (v *VDSOParamPage) access() (safemem.Block, error) {
- bs, err := v.mfp.MemoryFile().MapInternal(v.fr, usermem.ReadWrite)
+ bs, err := v.mfp.MemoryFile().MapInternal(v.fr, hostarch.ReadWrite)
if err != nil {
return safemem.Block{}, err
}
diff --git a/pkg/sentry/loader/elf.go b/pkg/sentry/loader/elf.go
index cd9fa4031..e92d9fdc3 100644
--- a/pkg/sentry/loader/elf.go
+++ b/pkg/sentry/loader/elf.go
@@ -25,6 +25,7 @@ import (
"gvisor.dev/gvisor/pkg/binary"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsbridge"
@@ -41,7 +42,7 @@ const (
// maxTotalPhdrSize is the maximum combined size of all program
// headers. Linux limits this to one page.
- maxTotalPhdrSize = usermem.PageSize
+ maxTotalPhdrSize = hostarch.PageSize
)
var (
@@ -52,8 +53,8 @@ var (
prog64Size = int(binary.Size(elf.Prog64{}))
)
-func progFlagsAsPerms(f elf.ProgFlag) usermem.AccessType {
- var p usermem.AccessType
+func progFlagsAsPerms(f elf.ProgFlag) hostarch.AccessType {
+ var p hostarch.AccessType
if f&elf.PF_R == elf.PF_R {
p.Read = true
}
@@ -75,7 +76,7 @@ type elfInfo struct {
arch arch.Arch
// entry is the program entry point.
- entry usermem.Addr
+ entry hostarch.Addr
// phdrs are the program headers.
phdrs []elf.ProgHeader
@@ -230,7 +231,7 @@ func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) {
return elfInfo{
os: os,
arch: a,
- entry: usermem.Addr(hdr.Entry),
+ entry: hostarch.Addr(hdr.Entry),
phdrs: phdrs,
phdrOff: hdr.Phoff,
phdrSize: prog64Size,
@@ -240,9 +241,9 @@ func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) {
// mapSegment maps a phdr into the Task. offset is the offset to apply to
// phdr.Vaddr.
-func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr *elf.ProgHeader, offset usermem.Addr) error {
+func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr *elf.ProgHeader, offset hostarch.Addr) error {
// We must make a page-aligned mapping.
- adjust := usermem.Addr(phdr.Vaddr).PageOffset()
+ adjust := hostarch.Addr(phdr.Vaddr).PageOffset()
addr, ok := offset.AddLength(phdr.Vaddr)
if !ok {
@@ -250,14 +251,14 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
ctx.Warningf("Computed segment load address overflows: %#x + %#x", phdr.Vaddr, offset)
return syserror.ENOEXEC
}
- addr -= usermem.Addr(adjust)
+ addr -= hostarch.Addr(adjust)
fileSize := phdr.Filesz + adjust
if fileSize < phdr.Filesz {
ctx.Infof("Computed segment file size overflows: %#x + %#x", phdr.Filesz, adjust)
return syserror.ENOEXEC
}
- ms, ok := usermem.Addr(fileSize).RoundUp()
+ ms, ok := hostarch.Addr(fileSize).RoundUp()
if !ok {
ctx.Infof("fileSize %#x too large", fileSize)
return syserror.ENOEXEC
@@ -281,7 +282,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
Unmap: true,
Private: true,
Perms: prot,
- MaxPerms: usermem.AnyAccess,
+ MaxPerms: hostarch.AnyAccess,
}
defer func() {
if mopts.MappingIdentity != nil {
@@ -312,7 +313,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
panic(fmt.Sprintf("zeroSize too big? %#x", uint64(zeroSize)))
}
if _, err := m.ZeroOut(ctx, zeroAddr, zeroSize, usermem.IOOpts{IgnorePermissions: true}); err != nil {
- ctx.Warningf("Failed to zero end of page [%#x, %#x): %v", zeroAddr, zeroAddr+usermem.Addr(zeroSize), err)
+ ctx.Warningf("Failed to zero end of page [%#x, %#x): %v", zeroAddr, zeroAddr+hostarch.Addr(zeroSize), err)
return err
}
}
@@ -330,7 +331,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
if !ok {
panic(fmt.Sprintf("anonymous memory doesn't fit in pre-sized range? %#x + %#x", addr, mapSize))
}
- anonSize, ok := usermem.Addr(memSize - mapSize).RoundUp()
+ anonSize, ok := hostarch.Addr(memSize - mapSize).RoundUp()
if !ok {
ctx.Infof("extra anon pages too large: %#x", memSize-mapSize)
return syserror.ENOEXEC
@@ -339,7 +340,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
// N.B. Linux uses vm_brk_flags to map these pages, which only
// honors the X bit, always mapping at least RW (the other flag bits are
// ignored). These pages are not included in the final brk region.
- prot := usermem.ReadWrite
+ prot := hostarch.ReadWrite
if phdr.Flags&elf.PF_X == elf.PF_X {
prot.Execute = true
}
@@ -352,7 +353,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
Fixed: true,
Private: true,
Perms: prot,
- MaxPerms: usermem.AnyAccess,
+ MaxPerms: hostarch.AnyAccess,
}); err != nil {
ctx.Infof("Error mapping PT_LOAD segment %v anonymous memory: %v", phdr, err)
return err
@@ -371,19 +372,19 @@ type loadedELF struct {
arch arch.Arch
// entry is the entry point of the ELF.
- entry usermem.Addr
+ entry hostarch.Addr
// start is the start of the ELF.
- start usermem.Addr
+ start hostarch.Addr
// end is the end of the ELF.
- end usermem.Addr
+ end hostarch.Addr
// interpreter is the path to the ELF interpreter.
interpreter string
// phdrAddr is the address of the ELF program headers.
- phdrAddr usermem.Addr
+ phdrAddr hostarch.Addr
// phdrSize is the size of a single program header in the ELF.
phdrSize int
@@ -407,14 +408,14 @@ type loadedELF struct {
// It does not load the ELF interpreter, or return any auxv entries.
//
// Preconditions: f is an ELF file.
-func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, info elfInfo, sharedLoadOffset usermem.Addr) (loadedELF, error) {
+func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, info elfInfo, sharedLoadOffset hostarch.Addr) (loadedELF, error) {
first := true
- var start, end usermem.Addr
+ var start, end hostarch.Addr
var interpreter string
for _, phdr := range info.phdrs {
switch phdr.Type {
case elf.PT_LOAD:
- vaddr := usermem.Addr(phdr.Vaddr)
+ vaddr := hostarch.Addr(phdr.Vaddr)
if first {
first = false
start = vaddr
@@ -492,7 +493,7 @@ func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, in
// Note that the vaddr of the first PT_LOAD segment is ignored when
// choosing the load address (even if it is non-zero). The vaddr does
// become an offset from that load address.
- var offset usermem.Addr
+ var offset hostarch.Addr
if info.sharedObject {
totalSize := end - start
totalSize, ok := totalSize.RoundUp()
@@ -688,8 +689,8 @@ func loadELF(ctx context.Context, args LoadArgs) (loadedELF, arch.Context, error
// ELF-specific auxv entries.
bin.auxv = arch.Auxv{
arch.AuxEntry{linux.AT_PHDR, bin.phdrAddr},
- arch.AuxEntry{linux.AT_PHENT, usermem.Addr(bin.phdrSize)},
- arch.AuxEntry{linux.AT_PHNUM, usermem.Addr(bin.phdrNum)},
+ arch.AuxEntry{linux.AT_PHENT, hostarch.Addr(bin.phdrSize)},
+ arch.AuxEntry{linux.AT_PHNUM, hostarch.Addr(bin.phdrNum)},
arch.AuxEntry{linux.AT_ENTRY, bin.entry},
}
if bin.interpreter != "" {
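
The mapSegment hunks above are mostly page-alignment arithmetic: a PT_LOAD segment's virtual address is rarely page-aligned, so the mapping starts at the containing page boundary and the segment's in-page offset is added back to the file size before rounding up. A standalone sketch of that arithmetic, assuming 4 KiB pages and omitting the overflow checks of the real code (the helper names are illustrative, not the loader's API):

package main

import "fmt"

const pageSize = 0x1000 // assume 4 KiB pages

func pageOffset(addr uint64) uint64 { return addr & (pageSize - 1) }
func roundUp(v uint64) uint64       { return (v + pageSize - 1) &^ (pageSize - 1) }

// segmentMapping computes the page-aligned address and map size for a
// PT_LOAD segment loaded at the given offset, mirroring mapSegment.
func segmentMapping(offset, vaddr, filesz uint64) (addr, mapSize uint64) {
	adjust := pageOffset(vaddr)        // in-page offset of the segment
	addr = offset + vaddr - adjust     // page-aligned mapping start
	mapSize = roundUp(filesz + adjust) // file bytes plus the leading slack, rounded up
	return addr, mapSize
}

func main() {
	// A segment with Vaddr 0x401234 and Filesz 0x1000 loaded at offset 0:
	addr, size := segmentMapping(0, 0x401234, 0x1000)
	fmt.Printf("map at %#x, size %#x\n", addr, size) // map at 0x401000, size 0x2000
}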
diff --git a/pkg/sentry/loader/loader.go b/pkg/sentry/loader/loader.go
index c69b62db9..47e3775a3 100644
--- a/pkg/sentry/loader/loader.go
+++ b/pkg/sentry/loader/loader.go
@@ -25,6 +25,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/rand"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsbridge"
@@ -266,17 +267,17 @@ func Load(ctx context.Context, args LoadArgs, extraAuxv []arch.AuxEntry, vdso *V
// Add generic auxv entries.
auxv := append(loaded.auxv, arch.Auxv{
- arch.AuxEntry{linux.AT_UID, usermem.Addr(c.RealKUID.In(c.UserNamespace).OrOverflow())},
- arch.AuxEntry{linux.AT_EUID, usermem.Addr(c.EffectiveKUID.In(c.UserNamespace).OrOverflow())},
- arch.AuxEntry{linux.AT_GID, usermem.Addr(c.RealKGID.In(c.UserNamespace).OrOverflow())},
- arch.AuxEntry{linux.AT_EGID, usermem.Addr(c.EffectiveKGID.In(c.UserNamespace).OrOverflow())},
+ arch.AuxEntry{linux.AT_UID, hostarch.Addr(c.RealKUID.In(c.UserNamespace).OrOverflow())},
+ arch.AuxEntry{linux.AT_EUID, hostarch.Addr(c.EffectiveKUID.In(c.UserNamespace).OrOverflow())},
+ arch.AuxEntry{linux.AT_GID, hostarch.Addr(c.RealKGID.In(c.UserNamespace).OrOverflow())},
+ arch.AuxEntry{linux.AT_EGID, hostarch.Addr(c.EffectiveKGID.In(c.UserNamespace).OrOverflow())},
// The conditions that require AT_SECURE = 1 never arise. See
// kernel.Task.updateCredsForExecLocked.
arch.AuxEntry{linux.AT_SECURE, 0},
arch.AuxEntry{linux.AT_CLKTCK, linux.CLOCKS_PER_SEC},
arch.AuxEntry{linux.AT_EXECFN, execfn},
arch.AuxEntry{linux.AT_RANDOM, random},
- arch.AuxEntry{linux.AT_PAGESZ, usermem.PageSize},
+ arch.AuxEntry{linux.AT_PAGESZ, hostarch.PageSize},
arch.AuxEntry{linux.AT_SYSINFO_EHDR, vdsoAddr},
}...)
auxv = append(auxv, extraAuxv...)
diff --git a/pkg/sentry/loader/vdso.go b/pkg/sentry/loader/vdso.go
index a32d37d62..fd54261fd 100644
--- a/pkg/sentry/loader/vdso.go
+++ b/pkg/sentry/loader/vdso.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -90,7 +91,7 @@ func validateVDSO(ctx context.Context, f fullReader, size uint64) (elfInfo, erro
var first *elf.ProgHeader
var prev *elf.ProgHeader
- var prevEnd usermem.Addr
+ var prevEnd hostarch.Addr
for i, phdr := range info.phdrs {
if phdr.Type != elf.PT_LOAD {
continue
@@ -119,7 +120,7 @@ func validateVDSO(ctx context.Context, f fullReader, size uint64) (elfInfo, erro
return elfInfo{}, syserror.ENOEXEC
}
- start := usermem.Addr(memoryOffset)
+ start := hostarch.Addr(memoryOffset)
end, ok := start.AddLength(phdr.Memsz)
if !ok {
log.Warningf("PT_LOAD segment size overflows: %#x + %#x", start, end)
@@ -210,7 +211,7 @@ func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) {
}
// Then copy it into a VDSO mapping.
- size, ok := usermem.Addr(len(vdsodata.Binary)).RoundUp()
+ size, ok := hostarch.Addr(len(vdsodata.Binary)).RoundUp()
if !ok {
return nil, fmt.Errorf("VDSO size overflows? %#x", len(vdsodata.Binary))
}
@@ -221,7 +222,7 @@ func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) {
return nil, fmt.Errorf("unable to allocate VDSO memory: %v", err)
}
- ims, err := mf.MapInternal(vdso, usermem.ReadWrite)
+ ims, err := mf.MapInternal(vdso, hostarch.ReadWrite)
if err != nil {
mf.DecRef(vdso)
return nil, fmt.Errorf("unable to map VDSO memory: %v", err)
@@ -234,7 +235,7 @@ func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) {
}
// Finally, allocate a param page for this VDSO.
- paramPage, err := mf.Allocate(usermem.PageSize, usage.System)
+ paramPage, err := mf.Allocate(hostarch.PageSize, usage.System)
if err != nil {
mf.DecRef(vdso)
return nil, fmt.Errorf("unable to allocate VDSO param page: %v", err)
@@ -266,7 +267,7 @@ func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) {
// compatibility with such binaries, we load the VDSO much like Linux.
//
// loadVDSO takes a reference on the VDSO and parameter page FrameRegions.
-func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF) (usermem.Addr, error) {
+func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF) (hostarch.Addr, error) {
if v.os != bin.os {
ctx.Warningf("Binary ELF OS %v and VDSO ELF OS %v differ", bin.os, v.os)
return 0, syserror.ENOEXEC
@@ -297,8 +298,8 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF)
Fixed: true,
Unmap: true,
Private: true,
- Perms: usermem.Read,
- MaxPerms: usermem.Read,
+ Perms: hostarch.Read,
+ MaxPerms: hostarch.Read,
})
if err != nil {
ctx.Infof("Unable to map VDSO param page: %v", err)
@@ -318,8 +319,8 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF)
Fixed: true,
Unmap: true,
Private: true,
- Perms: usermem.Read,
- MaxPerms: usermem.AnyAccess,
+ Perms: hostarch.Read,
+ MaxPerms: hostarch.AnyAccess,
})
if err != nil {
ctx.Infof("Unable to map VDSO: %v", err)
@@ -349,7 +350,7 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF)
return 0, syserror.ENOEXEC
}
segPage := segAddr.RoundDown()
- segSize := usermem.Addr(phdr.Memsz)
+ segSize := hostarch.Addr(phdr.Memsz)
segSize, ok = segSize.AddLength(segAddr.PageOffset())
if !ok {
ctx.Warningf("PT_LOAD segment memsize %#x + offset %#x overflows", phdr.Memsz, segAddr.PageOffset())
@@ -371,7 +372,7 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF)
}
perms := progFlagsAsPerms(phdr.Flags)
- if perms != usermem.Read {
+ if perms != hostarch.Read {
if err := m.MProtect(segPage, uint64(segSize), perms, false); err != nil {
ctx.Warningf("Unable to set PT_LOAD segment protections %+v at [%#x, %#x): %v", perms, segAddr, segEnd, err)
return 0, syserror.ENOEXEC
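
In loadVDSO above, the whole VDSO image is first mapped read-only and each PT_LOAD segment is re-protected only when its flags ask for more than read access. The sketch below shows that per-segment decision with a local permission triple standing in for the real AccessType; the flag values are the standard ELF PF_X/PF_W/PF_R bits, everything else is illustrative:

package main

import "fmt"

// access mirrors an AccessType-style permission triple.
type access struct{ read, write, execute bool }

const (
	pfX = 0x1 // elf.PF_X
	pfW = 0x2 // elf.PF_W
	pfR = 0x4 // elf.PF_R
)

// permsFromFlags converts ELF segment flags into an access triple, in the
// spirit of progFlagsAsPerms.
func permsFromFlags(flags uint32) access {
	return access{
		read:    flags&pfR != 0,
		write:   flags&pfW != 0,
		execute: flags&pfX != 0,
	}
}

func main() {
	readOnly := access{read: true}
	for _, flags := range []uint32{pfR, pfR | pfX} {
		perms := permsFromFlags(flags)
		// Only segments needing more than read access get re-protected.
		fmt.Printf("flags %#x -> %+v, needs mprotect: %v\n", flags, perms, perms != readOnly)
	}
}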
diff --git a/pkg/sentry/memmap/mapping_set.go b/pkg/sentry/memmap/mapping_set.go
index 457ed87f8..32863bb5e 100644
--- a/pkg/sentry/memmap/mapping_set.go
+++ b/pkg/sentry/memmap/mapping_set.go
@@ -18,7 +18,7 @@ import (
"fmt"
"math"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// MappingSet maps offsets into a Mappable to mappings of those offsets. It is
@@ -39,7 +39,7 @@ type MappingsOfRange map[MappingOfRange]struct{}
// +stateify savable
type MappingOfRange struct {
MappingSpace MappingSpace
- AddrRange usermem.AddrRange
+ AddrRange hostarch.AddrRange
Writable bool
}
@@ -89,9 +89,9 @@ func (mappingSetFunctions) Merge(r1 MappableRange, val1 MappingsOfRange, r2 Mapp
// region with k1.
k2 := MappingOfRange{
MappingSpace: k1.MappingSpace,
- AddrRange: usermem.AddrRange{
+ AddrRange: hostarch.AddrRange{
Start: k1.AddrRange.End,
- End: k1.AddrRange.End + usermem.Addr(r2.Length()),
+ End: k1.AddrRange.End + hostarch.Addr(r2.Length()),
},
Writable: k1.Writable,
}
@@ -102,7 +102,7 @@ func (mappingSetFunctions) Merge(r1 MappableRange, val1 MappingsOfRange, r2 Mapp
// OK. Add it to the merged map.
merged[MappingOfRange{
MappingSpace: k1.MappingSpace,
- AddrRange: usermem.AddrRange{
+ AddrRange: hostarch.AddrRange{
Start: k1.AddrRange.Start,
End: k2.AddrRange.End,
},
@@ -124,11 +124,11 @@ func (mappingSetFunctions) Split(r MappableRange, val MappingsOfRange, split uin
// split is a value in MappableRange, we need the offset into the
// corresponding MappingsOfRange.
- offset := usermem.Addr(split - r.Start)
+ offset := hostarch.Addr(split - r.Start)
for k := range val {
k1 := MappingOfRange{
MappingSpace: k.MappingSpace,
- AddrRange: usermem.AddrRange{
+ AddrRange: hostarch.AddrRange{
Start: k.AddrRange.Start,
End: k.AddrRange.Start + offset,
},
@@ -138,7 +138,7 @@ func (mappingSetFunctions) Split(r MappableRange, val MappingsOfRange, split uin
k2 := MappingOfRange{
MappingSpace: k.MappingSpace,
- AddrRange: usermem.AddrRange{
+ AddrRange: hostarch.AddrRange{
Start: k.AddrRange.Start + offset,
End: k.AddrRange.End,
},
@@ -157,18 +157,18 @@ func (mappingSetFunctions) Split(r MappableRange, val MappingsOfRange, split uin
// indicating that ms maps addresses [0x4000, 0x6000) to MappableRange [0x0,
// 0x2000). Then for subsetRange = [0x1000, 0x2000), subsetMapping returns a
// MappingOfRange for which AddrRange = [0x5000, 0x6000).
-func subsetMapping(wholeRange, subsetRange MappableRange, ms MappingSpace, addr usermem.Addr, writable bool) MappingOfRange {
+func subsetMapping(wholeRange, subsetRange MappableRange, ms MappingSpace, addr hostarch.Addr, writable bool) MappingOfRange {
if !wholeRange.IsSupersetOf(subsetRange) {
panic(fmt.Sprintf("%v is not a superset of %v", wholeRange, subsetRange))
}
offset := subsetRange.Start - wholeRange.Start
- start := addr + usermem.Addr(offset)
+ start := addr + hostarch.Addr(offset)
return MappingOfRange{
MappingSpace: ms,
- AddrRange: usermem.AddrRange{
+ AddrRange: hostarch.AddrRange{
Start: start,
- End: start + usermem.Addr(subsetRange.Length()),
+ End: start + hostarch.Addr(subsetRange.Length()),
},
Writable: writable,
}
@@ -178,7 +178,7 @@ func subsetMapping(wholeRange, subsetRange MappableRange, ms MappingSpace, addr
// previously had no mappings.
//
// Preconditions: Same as Mappable.AddMapping.
-func (s *MappingSet) AddMapping(ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) []MappableRange {
+func (s *MappingSet) AddMapping(ms MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) []MappableRange {
mr := MappableRange{offset, offset + uint64(ar.Length())}
var mapped []MappableRange
seg, gap := s.Find(mr.Start)
@@ -205,7 +205,7 @@ func (s *MappingSet) AddMapping(ms MappingSpace, ar usermem.AddrRange, offset ui
// MappableRanges that now have no mappings.
//
// Preconditions: Same as Mappable.RemoveMapping.
-func (s *MappingSet) RemoveMapping(ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) []MappableRange {
+func (s *MappingSet) RemoveMapping(ms MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) []MappableRange {
mr := MappableRange{offset, offset + uint64(ar.Length())}
var unmapped []MappableRange
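
The subsetMapping comment above already works through one concrete case; the same offset arithmetic is small enough to state as a runnable standalone sketch (the types are local to the example, not the memmap package):

package main

import "fmt"

type rng struct{ start, end uint64 }

func (r rng) length() uint64 { return r.end - r.start }

// subsetAddrRange recomputes the address range covered by subsetRange, given
// that addr maps wholeRange, mirroring subsetMapping's arithmetic.
func subsetAddrRange(wholeRange, subsetRange rng, addr uint64) rng {
	offset := subsetRange.start - wholeRange.start
	start := addr + offset
	return rng{start, start + subsetRange.length()}
}

func main() {
	// ms maps addresses [0x4000, 0x6000) to MappableRange [0x0, 0x2000).
	r := subsetAddrRange(rng{0x0, 0x2000}, rng{0x1000, 0x2000}, 0x4000)
	fmt.Printf("[%#x, %#x)\n", r.start, r.end) // [0x5000, 0x6000)
}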
diff --git a/pkg/sentry/memmap/memmap.go b/pkg/sentry/memmap/memmap.go
index 49e21026e..72868646a 100644
--- a/pkg/sentry/memmap/memmap.go
+++ b/pkg/sentry/memmap/memmap.go
@@ -19,8 +19,8 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Mappable represents a memory-mappable object, a mutable mapping from uint64
@@ -29,8 +29,8 @@ import (
// See mm/mm.go for Mappable's place in the lock order.
//
// All Mappable methods have the following preconditions:
-// * usermem.AddrRanges and MappableRanges must be non-empty (Length() != 0).
-// * usermem.Addrs and Mappable offsets must be page-aligned.
+// * hostarch.AddrRanges and MappableRanges must be non-empty (Length() != 0).
+// * hostarch.Addrs and Mappable offsets must be page-aligned.
type Mappable interface {
// AddMapping notifies the Mappable of a mapping from addresses ar in ms to
// offsets [offset, offset+ar.Length()) in this Mappable.
@@ -42,7 +42,7 @@ type Mappable interface {
// lifetime of the mapping.
//
// Preconditions: offset+ar.Length() does not overflow.
- AddMapping(ctx context.Context, ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error
+ AddMapping(ctx context.Context, ms MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error
// RemoveMapping notifies the Mappable of the removal of a mapping from
// addresses ar in ms to offsets [offset, offset+ar.Length()) in this
@@ -52,7 +52,7 @@ type Mappable interface {
// * offset+ar.Length() does not overflow.
// * The removed mapping must exist. writable must match the
// corresponding call to AddMapping.
- RemoveMapping(ctx context.Context, ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool)
+ RemoveMapping(ctx context.Context, ms MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool)
// CopyMapping notifies the Mappable of an attempt to copy a mapping in ms
// from srcAR to dstAR. For most Mappables, this is equivalent to
@@ -66,7 +66,7 @@ type Mappable interface {
// * offset+srcAR.Length() and offset+dstAR.Length() do not overflow.
// * The mapping at srcAR must exist. writable must match the
// corresponding call to AddMapping.
- CopyMapping(ctx context.Context, ms MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error
+ CopyMapping(ctx context.Context, ms MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error
// Translate returns the Mappable's current mappings for at least the range
// of offsets specified by required, and at most the range of offsets
@@ -90,7 +90,7 @@ type Mappable interface {
// synchronize with invalidation.
//
// Postconditions: See CheckTranslateResult.
- Translate(ctx context.Context, required, optional MappableRange, at usermem.AccessType) ([]Translation, error)
+ Translate(ctx context.Context, required, optional MappableRange, at hostarch.AccessType) ([]Translation, error)
// InvalidateUnsavable requests that the Mappable invalidate Translations
// that cannot be preserved across save/restore.
@@ -113,7 +113,7 @@ type Translation struct {
// Perms is the set of permissions for which platform.AddressSpace.MapFile
// and platform.AddressSpace.MapInternal on this Translation is permitted.
- Perms usermem.AccessType
+ Perms hostarch.AccessType
}
// FileRange returns the FileRange represented by t.
@@ -125,18 +125,18 @@ func (t Translation) FileRange() FileRange {
// postconditions for Mappable.Translate(required, optional, at).
//
// Preconditions: Same as Mappable.Translate.
-func CheckTranslateResult(required, optional MappableRange, at usermem.AccessType, ts []Translation, terr error) error {
+func CheckTranslateResult(required, optional MappableRange, at hostarch.AccessType, ts []Translation, terr error) error {
// Verify that the inputs to Mappable.Translate were valid.
if !required.WellFormed() || required.Length() == 0 {
panic(fmt.Sprintf("invalid required range: %v", required))
}
- if !usermem.Addr(required.Start).IsPageAligned() || !usermem.Addr(required.End).IsPageAligned() {
+ if !hostarch.Addr(required.Start).IsPageAligned() || !hostarch.Addr(required.End).IsPageAligned() {
panic(fmt.Sprintf("unaligned required range: %v", required))
}
if !optional.IsSupersetOf(required) {
panic(fmt.Sprintf("optional range %v is not a superset of required range %v", optional, required))
}
- if !usermem.Addr(optional.Start).IsPageAligned() || !usermem.Addr(optional.End).IsPageAligned() {
+ if !hostarch.Addr(optional.Start).IsPageAligned() || !hostarch.Addr(optional.End).IsPageAligned() {
panic(fmt.Sprintf("unaligned optional range: %v", optional))
}
@@ -148,13 +148,13 @@ func CheckTranslateResult(required, optional MappableRange, at usermem.AccessTyp
if !t.Source.WellFormed() || t.Source.Length() == 0 {
return fmt.Errorf("Translation %+v has invalid Source", t)
}
- if !usermem.Addr(t.Source.Start).IsPageAligned() || !usermem.Addr(t.Source.End).IsPageAligned() {
+ if !hostarch.Addr(t.Source.Start).IsPageAligned() || !hostarch.Addr(t.Source.End).IsPageAligned() {
return fmt.Errorf("Translation %+v has unaligned Source", t)
}
if t.File == nil {
return fmt.Errorf("Translation %+v has nil File", t)
}
- if !usermem.Addr(t.Offset).IsPageAligned() {
+ if !hostarch.Addr(t.Offset).IsPageAligned() {
return fmt.Errorf("Translation %+v has unaligned Offset", t)
}
// Translations must be contiguous and in increasing order of
@@ -210,7 +210,7 @@ func (mr MappableRange) String() string {
return fmt.Sprintf("[%#x, %#x)", mr.Start, mr.End)
}
-// MappingSpace represents a mutable mapping from usermem.Addrs to (Mappable,
+// MappingSpace represents a mutable mapping from hostarch.Addrs to (Mappable,
// uint64 offset) pairs.
type MappingSpace interface {
// Invalidate is called to notify the MappingSpace that values returned by
@@ -223,7 +223,7 @@ type MappingSpace interface {
// Preconditions:
// * ar.Length() != 0.
// * ar must be page-aligned.
- Invalidate(ar usermem.AddrRange, opts InvalidateOpts)
+ Invalidate(ar hostarch.AddrRange, opts InvalidateOpts)
}
// InvalidateOpts holds options to MappingSpace.Invalidate.
@@ -321,7 +321,7 @@ type MMapOpts struct {
Offset uint64
// Addr is the suggested address for the mapping.
- Addr usermem.Addr
+ Addr hostarch.Addr
// Fixed specifies whether this is a fixed mapping (it must be located at
// Addr).
@@ -338,7 +338,7 @@ type MMapOpts struct {
Map32Bit bool
// Perms is the set of permissions to be applied to this mapping.
- Perms usermem.AccessType
+ Perms hostarch.AccessType
// MaxPerms limits the set of permissions that may ever apply to this
// mapping. If Mappable is not nil, all memmap.Translations returned by
@@ -346,7 +346,7 @@ type MMapOpts struct {
//
// Preconditions: MaxPerms should be an effective AccessType, as
// access cannot be limited beyond effective AccessTypes.
- MaxPerms usermem.AccessType
+ MaxPerms hostarch.AccessType
// Private is true if writes to the mapping should be propagated to a copy
// that is exclusive to the MemoryManager.
@@ -410,7 +410,7 @@ type File interface {
//
// Postconditions: The returned mapping is valid as long as at least one
// reference is held on the mapped pages.
- MapInternal(fr FileRange, at usermem.AccessType) (safemem.BlockSeq, error)
+ MapInternal(fr FileRange, at hostarch.AccessType) (safemem.BlockSeq, error)
// FD returns the file descriptor represented by the File.
//
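
CheckTranslateResult, in the hunks above, is a list of invariants over Translate's inputs and outputs; the input-side checks amount to a few well-formedness, alignment, and containment tests. A standalone sketch of just those input checks, assuming 4 KiB pages (names are local to the example):

package main

import "fmt"

const pageSize = 0x1000 // assume 4 KiB pages

type mappableRange struct{ start, end uint64 }

func (r mappableRange) wellFormed() bool { return r.start <= r.end }
func (r mappableRange) length() uint64   { return r.end - r.start }
func (r mappableRange) isSupersetOf(o mappableRange) bool {
	return r.start <= o.start && o.end <= r.end
}

func pageAligned(v uint64) bool { return v&(pageSize-1) == 0 }

// checkTranslateInputs mirrors the input-side invariants enforced by
// CheckTranslateResult: both ranges are non-empty, page-aligned, and the
// optional range contains the required one.
func checkTranslateInputs(required, optional mappableRange) error {
	if !required.wellFormed() || required.length() == 0 {
		return fmt.Errorf("invalid required range: %+v", required)
	}
	if !pageAligned(required.start) || !pageAligned(required.end) {
		return fmt.Errorf("unaligned required range: %+v", required)
	}
	if !optional.isSupersetOf(required) {
		return fmt.Errorf("optional %+v does not contain required %+v", optional, required)
	}
	if !pageAligned(optional.start) || !pageAligned(optional.end) {
		return fmt.Errorf("unaligned optional range: %+v", optional)
	}
	return nil
}

func main() {
	err := checkTranslateInputs(mappableRange{0x1000, 0x3000}, mappableRange{0x0, 0x4000})
	fmt.Println(err) // <nil>: aligned and properly nested
}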
diff --git a/pkg/sentry/mm/address_space.go b/pkg/sentry/mm/address_space.go
index a93e76c75..534e0e957 100644
--- a/pkg/sentry/mm/address_space.go
+++ b/pkg/sentry/mm/address_space.go
@@ -19,8 +19,8 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/usermem"
)
// AddressSpace returns the platform.AddressSpace bound to mm.
@@ -172,17 +172,17 @@ func (mm *MemoryManager) Deactivate() {
// * ar.Length() != 0.
// * ar must be page-aligned.
// * pseg == mm.pmas.LowerBoundSegment(ar.Start).
-func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar usermem.AddrRange, precommit bool) error {
+func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar hostarch.AddrRange, precommit bool) error {
// By default, map entire pmas at a time, under the assumption that there
// is no cost to mapping more of a pma than necessary.
- mapAR := usermem.AddrRange{0, ^usermem.Addr(usermem.PageSize - 1)}
+ mapAR := hostarch.AddrRange{0, ^hostarch.Addr(hostarch.PageSize - 1)}
if precommit {
// When explicitly precommitting, only map ar, since overmapping may
// incur unexpected resource usage.
mapAR = ar
} else if mapUnit := mm.p.MapUnit(); mapUnit != 0 {
// Limit the range we map to ar, aligned to mapUnit.
- mapMask := usermem.Addr(mapUnit - 1)
+ mapMask := hostarch.Addr(mapUnit - 1)
mapAR.Start = ar.Start &^ mapMask
// If rounding ar.End up overflows, just keep the existing mapAR.End.
if end := (ar.End + mapMask) &^ mapMask; end >= ar.End {
@@ -218,7 +218,7 @@ func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar usermem.AddrRange, pre
// unmapASLocked removes all AddressSpace mappings for addresses in ar.
//
// Preconditions: mm.activeMu must be locked.
-func (mm *MemoryManager) unmapASLocked(ar usermem.AddrRange) {
+func (mm *MemoryManager) unmapASLocked(ar hostarch.AddrRange) {
if mm.as == nil {
// No AddressSpace? Force all mappings to be unmapped on the next
// Activate.
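
mapASLocked above widens the range it maps out to the platform's MapUnit so that host mappings are amortized over nearby accesses; if rounding the end up would overflow, the original end is kept. The alignment arithmetic, as a standalone sketch assuming a power-of-two map unit (names are illustrative):

package main

import "fmt"

// expandToMapUnit widens [start, end) to mapUnit-aligned boundaries, keeping
// the original end if rounding up would overflow, as mapASLocked does.
func expandToMapUnit(start, end, mapUnit uint64) (uint64, uint64) {
	mapMask := mapUnit - 1
	alignedStart := start &^ mapMask
	alignedEnd := end
	if e := (end + mapMask) &^ mapMask; e >= end { // no overflow
		alignedEnd = e
	}
	return alignedStart, alignedEnd
}

func main() {
	s, e := expandToMapUnit(0x2345000, 0x2346800, 0x200000) // 2 MiB MapUnit
	fmt.Printf("[%#x, %#x)\n", s, e)                        // [0x2200000, 0x2400000)
}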
diff --git a/pkg/sentry/mm/aio_context.go b/pkg/sentry/mm/aio_context.go
index 5ab2ef79f..346866d3c 100644
--- a/pkg/sentry/mm/aio_context.go
+++ b/pkg/sentry/mm/aio_context.go
@@ -17,6 +17,7 @@ package mm
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
@@ -83,7 +84,7 @@ func (mm *MemoryManager) destroyAIOContextLocked(ctx context.Context, id uint64)
// the same address. Then it would be unmapping memory that it doesn't own.
// This is, however, the way Linux implements AIO; keep the same [weird]
// semantics in case anyone relies on it.
- mm.MUnmap(ctx, usermem.Addr(id), aioRingBufferSize)
+ mm.MUnmap(ctx, hostarch.Addr(id), aioRingBufferSize)
delete(mm.aioManager.contexts, id)
aioCtx.destroy()
@@ -259,7 +260,7 @@ type aioMappable struct {
fr memmap.FileRange
}
-var aioRingBufferSize = uint64(usermem.Addr(linux.AIORingSize).MustRoundUp())
+var aioRingBufferSize = uint64(hostarch.Addr(linux.AIORingSize).MustRoundUp())
func newAIOMappable(mfp pgalloc.MemoryFileProvider) (*aioMappable, error) {
fr, err := mfp.MemoryFile().Allocate(aioRingBufferSize, usage.Anonymous)
@@ -300,7 +301,7 @@ func (m *aioMappable) Msync(ctx context.Context, mr memmap.MappableRange) error
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (m *aioMappable) AddMapping(_ context.Context, _ memmap.MappingSpace, ar usermem.AddrRange, offset uint64, _ bool) error {
+func (m *aioMappable) AddMapping(_ context.Context, _ memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, _ bool) error {
// Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap()
// sets VM_DONTEXPAND).
if offset != 0 || uint64(ar.Length()) != aioRingBufferSize {
@@ -310,11 +311,11 @@ func (m *aioMappable) AddMapping(_ context.Context, _ memmap.MappingSpace, ar us
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (m *aioMappable) RemoveMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, uint64, bool) {
+func (m *aioMappable) RemoveMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, uint64, bool) {
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, _ bool) error {
+func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, _ bool) error {
// Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap()
// sets VM_DONTEXPAND).
if offset != 0 || uint64(dstAR.Length()) != aioRingBufferSize {
@@ -346,7 +347,7 @@ func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, s
}
// Translate implements memmap.Mappable.Translate.
-func (m *aioMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (m *aioMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
var err error
if required.End > m.fr.Length() {
err = &memmap.BusError{syserror.EFAULT}
@@ -357,7 +358,7 @@ func (m *aioMappable) Translate(ctx context.Context, required, optional memmap.M
Source: source,
File: m.mfp.MemoryFile(),
Offset: m.fr.Start + source.Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
},
}, err
}
@@ -389,8 +390,8 @@ func (mm *MemoryManager) NewAIOContext(ctx context.Context, events uint32) (uint
// Linux uses "do_mmap_pgoff(..., PROT_READ | PROT_WRITE, ...)" in
// fs/aio.c:aio_setup_ring(). Since we don't implement AIO_RING_MAGIC,
// user mode should not write to this page.
- Perms: usermem.Read,
- MaxPerms: usermem.Read,
+ Perms: hostarch.Read,
+ MaxPerms: hostarch.Read,
})
if err != nil {
return 0, err
@@ -435,6 +436,6 @@ func (mm *MemoryManager) LookupAIOContext(ctx context.Context, id uint64) (*AIOC
// bytes from id).
func (mm *MemoryManager) isValidAddr(ctx context.Context, id uint64) bool {
var buf [4]byte
- _, err := mm.CopyIn(ctx, usermem.Addr(id), buf[:], usermem.IOOpts{})
+ _, err := mm.CopyIn(ctx, hostarch.Addr(id), buf[:], usermem.IOOpts{})
return err == nil
}
diff --git a/pkg/sentry/mm/io.go b/pkg/sentry/mm/io.go
index a8ac48080..16f318ab3 100644
--- a/pkg/sentry/mm/io.go
+++ b/pkg/sentry/mm/io.go
@@ -16,6 +16,7 @@ package mm
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/syserror"
@@ -60,11 +61,11 @@ const (
rwMapMinBytes = 512
)
-// CheckIORange is similar to usermem.Addr.ToRange, but applies bounds checks
+// CheckIORange is similar to hostarch.Addr.ToRange, but applies bounds checks
// consistent with Linux's arch/x86/include/asm/uaccess.h:access_ok().
//
// Preconditions: length >= 0.
-func (mm *MemoryManager) CheckIORange(addr usermem.Addr, length int64) (usermem.AddrRange, bool) {
+func (mm *MemoryManager) CheckIORange(addr hostarch.Addr, length int64) (hostarch.AddrRange, bool) {
// Note that access_ok() constrains end even if length == 0.
ar, ok := addr.ToRange(uint64(length))
return ar, (ok && ar.End <= mm.layout.MaxAddr)
@@ -72,7 +73,7 @@ func (mm *MemoryManager) CheckIORange(addr usermem.Addr, length int64) (usermem.
// checkIOVec applies bound checks consistent with Linux's
// arch/x86/include/asm/uaccess.h:access_ok() to ars.
-func (mm *MemoryManager) checkIOVec(ars usermem.AddrRangeSeq) bool {
+func (mm *MemoryManager) checkIOVec(ars hostarch.AddrRangeSeq) bool {
for !ars.IsEmpty() {
ar := ars.Head()
if _, ok := mm.CheckIORange(ar.Start, int64(ar.Length())); !ok {
@@ -100,7 +101,7 @@ func translateIOError(ctx context.Context, err error) error {
}
// CopyOut implements usermem.IO.CopyOut.
-func (mm *MemoryManager) CopyOut(ctx context.Context, addr usermem.Addr, src []byte, opts usermem.IOOpts) (int, error) {
+func (mm *MemoryManager) CopyOut(ctx context.Context, addr hostarch.Addr, src []byte, opts usermem.IOOpts) (int, error) {
ar, ok := mm.CheckIORange(addr, int64(len(src)))
if !ok {
return 0, syserror.EFAULT
@@ -116,24 +117,24 @@ func (mm *MemoryManager) CopyOut(ctx context.Context, addr usermem.Addr, src []b
}
// Go through internal mappings.
- n64, err := mm.withInternalMappings(ctx, ar, usermem.Write, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
+ n64, err := mm.withInternalMappings(ctx, ar, hostarch.Write, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
n, err := safemem.CopySeq(ims, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(src)))
return n, translateIOError(ctx, err)
})
return int(n64), err
}
-func (mm *MemoryManager) asCopyOut(ctx context.Context, addr usermem.Addr, src []byte) (int, error) {
+func (mm *MemoryManager) asCopyOut(ctx context.Context, addr hostarch.Addr, src []byte) (int, error) {
var done int
for {
- n, err := mm.as.CopyOut(addr+usermem.Addr(done), src[done:])
+ n, err := mm.as.CopyOut(addr+hostarch.Addr(done), src[done:])
done += n
if err == nil {
return done, nil
}
if f, ok := err.(platform.SegmentationFault); ok {
ar, _ := addr.ToRange(uint64(len(src)))
- if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Write); err != nil {
+ if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Write); err != nil {
return done, err
}
continue
@@ -143,7 +144,7 @@ func (mm *MemoryManager) asCopyOut(ctx context.Context, addr usermem.Addr, src [
}
// CopyIn implements usermem.IO.CopyIn.
-func (mm *MemoryManager) CopyIn(ctx context.Context, addr usermem.Addr, dst []byte, opts usermem.IOOpts) (int, error) {
+func (mm *MemoryManager) CopyIn(ctx context.Context, addr hostarch.Addr, dst []byte, opts usermem.IOOpts) (int, error) {
ar, ok := mm.CheckIORange(addr, int64(len(dst)))
if !ok {
return 0, syserror.EFAULT
@@ -159,24 +160,24 @@ func (mm *MemoryManager) CopyIn(ctx context.Context, addr usermem.Addr, dst []by
}
// Go through internal mappings.
- n64, err := mm.withInternalMappings(ctx, ar, usermem.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
+ n64, err := mm.withInternalMappings(ctx, ar, hostarch.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
n, err := safemem.CopySeq(safemem.BlockSeqOf(safemem.BlockFromSafeSlice(dst)), ims)
return n, translateIOError(ctx, err)
})
return int(n64), err
}
-func (mm *MemoryManager) asCopyIn(ctx context.Context, addr usermem.Addr, dst []byte) (int, error) {
+func (mm *MemoryManager) asCopyIn(ctx context.Context, addr hostarch.Addr, dst []byte) (int, error) {
var done int
for {
- n, err := mm.as.CopyIn(addr+usermem.Addr(done), dst[done:])
+ n, err := mm.as.CopyIn(addr+hostarch.Addr(done), dst[done:])
done += n
if err == nil {
return done, nil
}
if f, ok := err.(platform.SegmentationFault); ok {
ar, _ := addr.ToRange(uint64(len(dst)))
- if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Read); err != nil {
+ if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Read); err != nil {
return done, err
}
continue
@@ -186,7 +187,7 @@ func (mm *MemoryManager) asCopyIn(ctx context.Context, addr usermem.Addr, dst []
}
// ZeroOut implements usermem.IO.ZeroOut.
-func (mm *MemoryManager) ZeroOut(ctx context.Context, addr usermem.Addr, toZero int64, opts usermem.IOOpts) (int64, error) {
+func (mm *MemoryManager) ZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64, opts usermem.IOOpts) (int64, error) {
ar, ok := mm.CheckIORange(addr, toZero)
if !ok {
return 0, syserror.EFAULT
@@ -202,23 +203,23 @@ func (mm *MemoryManager) ZeroOut(ctx context.Context, addr usermem.Addr, toZero
}
// Go through internal mappings.
- return mm.withInternalMappings(ctx, ar, usermem.Write, opts.IgnorePermissions, func(dsts safemem.BlockSeq) (uint64, error) {
+ return mm.withInternalMappings(ctx, ar, hostarch.Write, opts.IgnorePermissions, func(dsts safemem.BlockSeq) (uint64, error) {
n, err := safemem.ZeroSeq(dsts)
return n, translateIOError(ctx, err)
})
}
-func (mm *MemoryManager) asZeroOut(ctx context.Context, addr usermem.Addr, toZero int64) (int64, error) {
+func (mm *MemoryManager) asZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64) (int64, error) {
var done int64
for {
- n, err := mm.as.ZeroOut(addr+usermem.Addr(done), uintptr(toZero-done))
+ n, err := mm.as.ZeroOut(addr+hostarch.Addr(done), uintptr(toZero-done))
done += int64(n)
if err == nil {
return done, nil
}
if f, ok := err.(platform.SegmentationFault); ok {
ar, _ := addr.ToRange(uint64(toZero))
- if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Write); err != nil {
+ if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Write); err != nil {
return done, err
}
continue
@@ -228,7 +229,7 @@ func (mm *MemoryManager) asZeroOut(ctx context.Context, addr usermem.Addr, toZer
}
// CopyOutFrom implements usermem.IO.CopyOutFrom.
-func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars usermem.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) {
+func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars hostarch.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) {
if !mm.checkIOVec(ars) {
return 0, syserror.EFAULT
}
@@ -269,11 +270,11 @@ func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars usermem.AddrRangeS
}
// Go through internal mappings.
- return mm.withVecInternalMappings(ctx, ars, usermem.Write, opts.IgnorePermissions, src.ReadToBlocks)
+ return mm.withVecInternalMappings(ctx, ars, hostarch.Write, opts.IgnorePermissions, src.ReadToBlocks)
}
// CopyInTo implements usermem.IO.CopyInTo.
-func (mm *MemoryManager) CopyInTo(ctx context.Context, ars usermem.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) {
+func (mm *MemoryManager) CopyInTo(ctx context.Context, ars hostarch.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) {
if !mm.checkIOVec(ars) {
return 0, syserror.EFAULT
}
@@ -306,11 +307,11 @@ func (mm *MemoryManager) CopyInTo(ctx context.Context, ars usermem.AddrRangeSeq,
}
// Go through internal mappings.
- return mm.withVecInternalMappings(ctx, ars, usermem.Read, opts.IgnorePermissions, dst.WriteFromBlocks)
+ return mm.withVecInternalMappings(ctx, ars, hostarch.Read, opts.IgnorePermissions, dst.WriteFromBlocks)
}
// SwapUint32 implements usermem.IO.SwapUint32.
-func (mm *MemoryManager) SwapUint32(ctx context.Context, addr usermem.Addr, new uint32, opts usermem.IOOpts) (uint32, error) {
+func (mm *MemoryManager) SwapUint32(ctx context.Context, addr hostarch.Addr, new uint32, opts usermem.IOOpts) (uint32, error) {
ar, ok := mm.CheckIORange(addr, 4)
if !ok {
return 0, syserror.EFAULT
@@ -324,7 +325,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr usermem.Addr, new
return old, nil
}
if f, ok := err.(platform.SegmentationFault); ok {
- if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.ReadWrite); err != nil {
+ if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.ReadWrite); err != nil {
return 0, err
}
continue
@@ -335,7 +336,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr usermem.Addr, new
// Go through internal mappings.
var old uint32
- _, err := mm.withInternalMappings(ctx, ar, usermem.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
+ _, err := mm.withInternalMappings(ctx, ar, hostarch.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
// Atomicity is unachievable across mappings.
return 0, syserror.EFAULT
@@ -353,7 +354,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr usermem.Addr, new
}
// CompareAndSwapUint32 implements usermem.IO.CompareAndSwapUint32.
-func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr usermem.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) {
+func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr hostarch.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) {
ar, ok := mm.CheckIORange(addr, 4)
if !ok {
return 0, syserror.EFAULT
@@ -367,7 +368,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr usermem.
return prev, nil
}
if f, ok := err.(platform.SegmentationFault); ok {
- if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.ReadWrite); err != nil {
+ if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.ReadWrite); err != nil {
return 0, err
}
continue
@@ -378,7 +379,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr usermem.
// Go through internal mappings.
var prev uint32
- _, err := mm.withInternalMappings(ctx, ar, usermem.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
+ _, err := mm.withInternalMappings(ctx, ar, hostarch.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
// Atomicity is unachievable across mappings.
return 0, syserror.EFAULT
@@ -396,7 +397,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr usermem.
}
// LoadUint32 implements usermem.IO.LoadUint32.
-func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts usermem.IOOpts) (uint32, error) {
+func (mm *MemoryManager) LoadUint32(ctx context.Context, addr hostarch.Addr, opts usermem.IOOpts) (uint32, error) {
ar, ok := mm.CheckIORange(addr, 4)
if !ok {
return 0, syserror.EFAULT
@@ -410,7 +411,7 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts
return val, nil
}
if f, ok := err.(platform.SegmentationFault); ok {
- if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Read); err != nil {
+ if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Read); err != nil {
return 0, err
}
continue
@@ -421,7 +422,7 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts
// Go through internal mappings.
var val uint32
- _, err := mm.withInternalMappings(ctx, ar, usermem.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
+ _, err := mm.withInternalMappings(ctx, ar, hostarch.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
// Atomicity is unachievable across mappings.
return 0, syserror.EFAULT
@@ -445,11 +446,11 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts
// * mm.as != nil.
// * ioar.Length() != 0.
// * ioar.Contains(addr).
-func (mm *MemoryManager) handleASIOFault(ctx context.Context, addr usermem.Addr, ioar usermem.AddrRange, at usermem.AccessType) error {
+func (mm *MemoryManager) handleASIOFault(ctx context.Context, addr hostarch.Addr, ioar hostarch.AddrRange, at hostarch.AccessType) error {
// Try to map all remaining pages in the I/O operation. This RoundUp can't
// overflow because otherwise it would have been caught by CheckIORange.
end, _ := ioar.End.RoundUp()
- ar := usermem.AddrRange{addr.RoundDown(), end}
+ ar := hostarch.AddrRange{addr.RoundDown(), end}
// Don't bother trying existingPMAsLocked; in most cases, if we did have
// existing pmas, we wouldn't have faulted.
@@ -498,7 +499,7 @@ func (mm *MemoryManager) handleASIOFault(ctx context.Context, addr usermem.Addr,
// more useful for usermem.IO methods.
//
// Preconditions: 0 < ar.Length() <= math.MaxInt64.
-func (mm *MemoryManager) withInternalMappings(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {
+func (mm *MemoryManager) withInternalMappings(ctx context.Context, ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {
// If pmas are already available, we can do IO without touching mm.vmas or
// mm.mappingMu.
mm.activeMu.RLock()
@@ -567,7 +568,7 @@ func (mm *MemoryManager) withInternalMappings(ctx context.Context, ar usermem.Ad
// internal mappings for the subset of ars for which this property holds.
//
// Preconditions: !ars.IsEmpty().
-func (mm *MemoryManager) withVecInternalMappings(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {
+func (mm *MemoryManager) withVecInternalMappings(ctx context.Context, ars hostarch.AddrRangeSeq, at hostarch.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {
// withInternalMappings is faster than withVecInternalMappings because of
// iterator plumbing (this isn't generally practical in the vector case due
// to iterator invalidation between AddrRanges). Use it if possible.
@@ -630,12 +631,12 @@ func (mm *MemoryManager) withVecInternalMappings(ctx context.Context, ars userme
// truncatedAddrRangeSeq returns a copy of ars, but with the end truncated to
// at most address end on AddrRange arsit.Head(). It is used in vector I/O paths to
-// truncate usermem.AddrRangeSeq when errors occur.
+// truncate hostarch.AddrRangeSeq when errors occur.
//
// Preconditions:
// * !arsit.IsEmpty().
// * end <= arsit.Head().End.
-func truncatedAddrRangeSeq(ars, arsit usermem.AddrRangeSeq, end usermem.Addr) usermem.AddrRangeSeq {
+func truncatedAddrRangeSeq(ars, arsit hostarch.AddrRangeSeq, end hostarch.Addr) hostarch.AddrRangeSeq {
ar := arsit.Head()
if end <= ar.Start {
return ars.TakeFirst64(ars.NumBytes() - arsit.NumBytes())
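
CheckIORange, at the top of the io.go hunks, plays the role of access_ok(): it forms [addr, addr+length), rejects wrap-around, and rejects ranges ending past the task's maximum address, even when length is zero. A standalone sketch of that check (the address-space limit below is an arbitrary example value, not the real layout):

package main

import "fmt"

const maxAddr = uint64(1) << 47 // hypothetical user address-space limit

// checkIORange mirrors MemoryManager.CheckIORange: build [addr, addr+length)
// and require that it does not wrap and does not extend past maxAddr. Note
// that the end is constrained even when length == 0, matching access_ok().
func checkIORange(addr uint64, length int64) (start, end uint64, ok bool) {
	if length < 0 {
		return 0, 0, false
	}
	end = addr + uint64(length)
	if end < addr { // overflow
		return 0, 0, false
	}
	return addr, end, end <= maxAddr
}

func main() {
	_, _, ok := checkIORange(maxAddr-16, 64)
	fmt.Println(ok) // false: range extends past the address-space limit
}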
diff --git a/pkg/sentry/mm/lifecycle.go b/pkg/sentry/mm/lifecycle.go
index 120707429..a79ef9223 100644
--- a/pkg/sentry/mm/lifecycle.go
+++ b/pkg/sentry/mm/lifecycle.go
@@ -19,12 +19,12 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/usermem"
)
// NewMemoryManager returns a new MemoryManager with no mappings and 1 user.
@@ -139,7 +139,7 @@ func (mm *MemoryManager) Fork(ctx context.Context) (*MemoryManager, error) {
}
srcvseg := mm.vmas.FirstSegment()
dstpgap := mm2.pmas.FirstGap()
- var unmapAR usermem.AddrRange
+ var unmapAR hostarch.AddrRange
for srcpseg := mm.pmas.FirstSegment(); srcpseg.Ok(); srcpseg = srcpseg.NextSegment() {
pma := srcpseg.ValuePtr()
if !pma.private {
diff --git a/pkg/sentry/mm/metadata.go b/pkg/sentry/mm/metadata.go
index 0cfd60f6c..28c5fead9 100644
--- a/pkg/sentry/mm/metadata.go
+++ b/pkg/sentry/mm/metadata.go
@@ -16,9 +16,9 @@ package mm
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsbridge"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Dumpability describes if and how core dumps should be created.
@@ -54,14 +54,14 @@ func (mm *MemoryManager) SetDumpability(d Dumpability) {
// ArgvStart returns the start of the application argument vector.
//
// There is no guarantee that this value is sensible w.r.t. ArgvEnd.
-func (mm *MemoryManager) ArgvStart() usermem.Addr {
+func (mm *MemoryManager) ArgvStart() hostarch.Addr {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
return mm.argv.Start
}
// SetArgvStart sets the start of the application argument vector.
-func (mm *MemoryManager) SetArgvStart(a usermem.Addr) {
+func (mm *MemoryManager) SetArgvStart(a hostarch.Addr) {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
mm.argv.Start = a
@@ -70,14 +70,14 @@ func (mm *MemoryManager) SetArgvStart(a usermem.Addr) {
// ArgvEnd returns the end of the application argument vector.
//
// There is no guarantee that this value is sensible w.r.t. ArgvStart.
-func (mm *MemoryManager) ArgvEnd() usermem.Addr {
+func (mm *MemoryManager) ArgvEnd() hostarch.Addr {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
return mm.argv.End
}
// SetArgvEnd sets the end of the application argument vector.
-func (mm *MemoryManager) SetArgvEnd(a usermem.Addr) {
+func (mm *MemoryManager) SetArgvEnd(a hostarch.Addr) {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
mm.argv.End = a
@@ -86,14 +86,14 @@ func (mm *MemoryManager) SetArgvEnd(a usermem.Addr) {
// EnvvStart returns the start of the application environment vector.
//
// There is no guarantee that this value is sensible w.r.t. EnvvEnd.
-func (mm *MemoryManager) EnvvStart() usermem.Addr {
+func (mm *MemoryManager) EnvvStart() hostarch.Addr {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
return mm.envv.Start
}
// SetEnvvStart sets the start of the application environment vector.
-func (mm *MemoryManager) SetEnvvStart(a usermem.Addr) {
+func (mm *MemoryManager) SetEnvvStart(a hostarch.Addr) {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
mm.envv.Start = a
@@ -102,14 +102,14 @@ func (mm *MemoryManager) SetEnvvStart(a usermem.Addr) {
// EnvvEnd returns the end of the application environment vector.
//
// There is no guarantee that this value is sensible w.r.t. EnvvStart.
-func (mm *MemoryManager) EnvvEnd() usermem.Addr {
+func (mm *MemoryManager) EnvvEnd() hostarch.Addr {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
return mm.envv.End
}
// SetEnvvEnd sets the end of the application environment vector.
-func (mm *MemoryManager) SetEnvvEnd(a usermem.Addr) {
+func (mm *MemoryManager) SetEnvvEnd(a hostarch.Addr) {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
mm.envv.End = a
diff --git a/pkg/sentry/mm/mm.go b/pkg/sentry/mm/mm.go
index 92cc87d84..57969b26c 100644
--- a/pkg/sentry/mm/mm.go
+++ b/pkg/sentry/mm/mm.go
@@ -36,6 +36,7 @@ package mm
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsbridge"
@@ -43,7 +44,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
)
// MemoryManager implements a virtual address space.
@@ -97,7 +97,7 @@ type MemoryManager struct {
// binary into the mm.
//
// brk is protected by mappingMu.
- brk usermem.AddrRange
+ brk hostarch.AddrRange
// usageAS is vmas.Span(), cached to accelerate RLIMIT_AS checks.
//
@@ -198,14 +198,14 @@ type MemoryManager struct {
// requirements apply to argv; we do not require that argv.WellFormed().
//
// argv is protected by metadataMu.
- argv usermem.AddrRange
+ argv hostarch.AddrRange
// envv is the application envv. This is set up by the loader and may be
// modified by prctl(PR_SET_MM_ENV_START/PR_SET_MM_ENV_END). No
// requirements apply to envv; we do not require that envv.WellFormed().
//
// envv is protected by metadataMu.
- envv usermem.AddrRange
+ envv hostarch.AddrRange
// auxv is the ELF's auxiliary vector.
//
@@ -268,20 +268,20 @@ type vma struct {
// realPerms are the memory permissions on this vma, as defined by the
// application.
- realPerms usermem.AccessType `state:".(int)"`
+ realPerms hostarch.AccessType `state:".(int)"`
// effectivePerms are the memory permissions on this vma which are
// actually used to control access.
//
// Invariant: effectivePerms == realPerms.Effective().
- effectivePerms usermem.AccessType `state:"manual"`
+ effectivePerms hostarch.AccessType `state:"manual"`
// maxPerms limits the set of permissions that may ever apply to this
// memory, as well as accesses for which usermem.IOOpts.IgnorePermissions
// is true (e.g. ptrace(PTRACE_POKEDATA)).
//
// Invariant: maxPerms == maxPerms.Effective().
- maxPerms usermem.AccessType `state:"manual"`
+ maxPerms hostarch.AccessType `state:"manual"`
// private is true if this is a MAP_PRIVATE mapping, such that writes to
// the mapping are propagated to a copy.
@@ -421,8 +421,8 @@ type pma struct {
off uint64
// translatePerms is the permissions returned by memmap.Mappable.Translate.
- // If private is true, translatePerms is usermem.AnyAccess.
- translatePerms usermem.AccessType
+ // If private is true, translatePerms is hostarch.AnyAccess.
+ translatePerms hostarch.AccessType
// effectivePerms is the permissions allowed for non-ignorePermissions
// accesses. maxPerms is the permissions allowed for ignorePermissions
@@ -432,8 +432,8 @@ type pma struct {
//
// These are stored in the pma so that the IO implementation can avoid
// iterating mm.vmas when pmas already exist.
- effectivePerms usermem.AccessType
- maxPerms usermem.AccessType
+ effectivePerms hostarch.AccessType
+ maxPerms hostarch.AccessType
// needCOW is true if writes to the mapping must be propagated to a copy.
needCOW bool
@@ -465,7 +465,7 @@ type privateRefs struct {
}
type invalidateArgs struct {
- ar usermem.AddrRange
+ ar hostarch.AddrRange
opts memmap.InvalidateOpts
}
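
The vma fields above keep the invariant effectivePerms == realPerms.Effective(): the permissions the application asked for and the permissions actually used for access control can differ, because on the hosts gVisor targets, memory that is writable or executable is also readable. A standalone sketch of that derivation (the struct is local to the example, standing in for AccessType):

package main

import "fmt"

type accessType struct{ read, write, execute bool }

// effective mirrors the idea behind AccessType.Effective(): writable or
// executable memory is also readable, so read is implied by either bit.
func (a accessType) effective() accessType {
	if a.write || a.execute {
		a.read = true
	}
	return a
}

func main() {
	requested := accessType{write: true} // e.g. mmap(PROT_WRITE)
	fmt.Printf("real %+v -> effective %+v\n", requested, requested.effective())
}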
diff --git a/pkg/sentry/mm/pma.go b/pkg/sentry/mm/pma.go
index 7e5f7de64..5583f62b2 100644
--- a/pkg/sentry/mm/pma.go
+++ b/pkg/sentry/mm/pma.go
@@ -18,12 +18,12 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safecopy"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// existingPMAsLocked checks that pmas exist for all addresses in ar, and
@@ -34,7 +34,7 @@ import (
// Preconditions:
// * mm.activeMu must be locked.
// * ar.Length() != 0.
-func (mm *MemoryManager) existingPMAsLocked(ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool, needInternalMappings bool) pmaIterator {
+func (mm *MemoryManager) existingPMAsLocked(ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool, needInternalMappings bool) pmaIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -70,7 +70,7 @@ func (mm *MemoryManager) existingPMAsLocked(ar usermem.AddrRange, at usermem.Acc
// and support access of type (at, ignorePermissions).
//
// Preconditions: mm.activeMu must be locked.
-func (mm *MemoryManager) existingVecPMAsLocked(ars usermem.AddrRangeSeq, at usermem.AccessType, ignorePermissions bool, needInternalMappings bool) bool {
+func (mm *MemoryManager) existingVecPMAsLocked(ars hostarch.AddrRangeSeq, at hostarch.AccessType, ignorePermissions bool, needInternalMappings bool) bool {
for ; !ars.IsEmpty(); ars = ars.Tail() {
if ar := ars.Head(); ar.Length() != 0 && !mm.existingPMAsLocked(ar, at, ignorePermissions, needInternalMappings).Ok() {
return false
@@ -98,7 +98,7 @@ func (mm *MemoryManager) existingVecPMAsLocked(ars usermem.AddrRangeSeq, at user
// * vseg.Range().Contains(ar.Start).
// * vmas must exist for all addresses in ar, and support accesses of type at
// (i.e. permission checks must have been performed against vmas).
-func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, at usermem.AccessType) (pmaIterator, pmaGapIterator, error) {
+func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, at hostarch.AccessType) (pmaIterator, pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -118,7 +118,7 @@ func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar
end = ar.End.RoundDown()
alignerr = syserror.EFAULT
}
- ar = usermem.AddrRange{ar.Start.RoundDown(), end}
+ ar = hostarch.AddrRange{ar.Start.RoundDown(), end}
pstart, pend, perr := mm.getPMAsInternalLocked(ctx, vseg, ar, at)
if pend.Start() <= ar.Start {
@@ -145,7 +145,7 @@ func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar
// * mm.activeMu must be locked for writing.
// * vmas must exist for all addresses in ars, and support accesses of type at
// (i.e. permission checks must have been performed against vmas).
-func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType) (usermem.AddrRangeSeq, error) {
+func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars hostarch.AddrRangeSeq, at hostarch.AccessType) (hostarch.AddrRangeSeq, error) {
for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
ar := arsit.Head()
if ar.Length() == 0 {
@@ -164,7 +164,7 @@ func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrR
end = ar.End.RoundDown()
alignerr = syserror.EFAULT
}
- ar = usermem.AddrRange{ar.Start.RoundDown(), end}
+ ar = hostarch.AddrRange{ar.Start.RoundDown(), end}
_, pend, perr := mm.getPMAsInternalLocked(ctx, mm.vmas.FindSegment(ar.Start), ar, at)
if perr != nil {
@@ -191,7 +191,7 @@ func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrR
//
// getPMAsInternalLocked is an implementation helper for getPMAsLocked and
// getVecPMAsLocked; other clients should call one of those instead.
-func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, at usermem.AccessType) (pmaIterator, pmaGapIterator, error) {
+func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, at hostarch.AccessType) (pmaIterator, pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -245,7 +245,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
pseg, pgap = mm.pmas.Insert(pgap, allocAR, pma{
file: mf,
off: fr.Start,
- translatePerms: usermem.AnyAccess,
+ translatePerms: hostarch.AnyAccess,
effectivePerms: vma.effectivePerms,
maxPerms: vma.maxPerms,
// Since we just allocated this memory and have the
@@ -335,7 +335,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
// Neither of these cases has enough spatial locality to
// benefit from copying nearby pages, so if the vma is
// executable, only copy the pages required.
- var copyAR usermem.AddrRange
+ var copyAR hostarch.AddrRange
if vseg.ValuePtr().effectivePerms.Execute {
copyAR = pseg.Range().Intersect(ar)
} else {
@@ -366,7 +366,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
// Replace the pma with a copy in the part of the address
// range where copying was successful. This doesn't change
// RSS.
- copyAR.End = copyAR.Start + usermem.Addr(fr.Length())
+ copyAR.End = copyAR.Start + hostarch.Addr(fr.Length())
if copyAR != pseg.Range() {
pseg = mm.pmas.Isolate(pseg, copyAR)
pstart = pmaIterator{} // iterators invalidated
@@ -380,7 +380,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
mf.IncRef(fr)
oldpma.file = mf
oldpma.off = fr.Start
- oldpma.translatePerms = usermem.AnyAccess
+ oldpma.translatePerms = hostarch.AnyAccess
oldpma.effectivePerms = vma.effectivePerms
oldpma.maxPerms = vma.maxPerms
oldpma.needCOW = false
@@ -499,14 +499,14 @@ const (
// privateAllocUnit may reduce page faults by allowing fewer, larger pmas
// to be mapped, but may result in larger amounts of wasted memory in the
// presence of fragmentation. privateAllocUnit must be a power-of-2
- // multiple of usermem.PageSize.
- privateAllocUnit = usermem.HugePageSize
+ // multiple of hostarch.PageSize.
+ privateAllocUnit = hostarch.HugePageSize
privateAllocMask = privateAllocUnit - 1
)
-func privateAligned(ar usermem.AddrRange) usermem.AddrRange {
- aligned := usermem.AddrRange{ar.Start &^ privateAllocMask, ar.End}
+func privateAligned(ar hostarch.AddrRange) hostarch.AddrRange {
+ aligned := hostarch.AddrRange{ar.Start &^ privateAllocMask, ar.End}
if end := (ar.End + privateAllocMask) &^ privateAllocMask; end >= ar.End {
aligned.End = end
}
@@ -548,7 +548,7 @@ func (mm *MemoryManager) isPMACopyOnWriteLocked(vseg vmaIterator, pseg pmaIterat
rseg := mm.privateRefs.refs.FindSegment(fr.Start)
if rseg.Ok() && rseg.Value() == 1 && fr.End <= rseg.End() {
pma.needCOW = false
- // pma.private => pma.translatePerms == usermem.AnyAccess
+ // pma.private => pma.translatePerms == hostarch.AnyAccess
vma := vseg.ValuePtr()
pma.effectivePerms = vma.effectivePerms
pma.maxPerms = vma.maxPerms
@@ -558,7 +558,7 @@ func (mm *MemoryManager) isPMACopyOnWriteLocked(vseg vmaIterator, pseg pmaIterat
}
// Invalidate implements memmap.MappingSpace.Invalidate.
-func (mm *MemoryManager) Invalidate(ar usermem.AddrRange, opts memmap.InvalidateOpts) {
+func (mm *MemoryManager) Invalidate(ar hostarch.AddrRange, opts memmap.InvalidateOpts) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -581,7 +581,7 @@ func (mm *MemoryManager) Invalidate(ar usermem.AddrRange, opts memmap.Invalidate
// * mm.activeMu must be locked for writing.
// * ar.Length() != 0.
// * ar must be page-aligned.
-func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivate, invalidateShared bool) {
+func (mm *MemoryManager) invalidateLocked(ar hostarch.AddrRange, invalidatePrivate, invalidateShared bool) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -627,7 +627,7 @@ func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivat
// Preconditions:
// * ar.Length() != 0.
// * ar must be page-aligned.
-func (mm *MemoryManager) Pin(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool) ([]PinnedRange, error) {
+func (mm *MemoryManager) Pin(ctx context.Context, ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool) ([]PinnedRange, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -683,7 +683,7 @@ func (mm *MemoryManager) Pin(ctx context.Context, ar usermem.AddrRange, at userm
// PinnedRanges are returned by MemoryManager.Pin.
type PinnedRange struct {
// Source is the corresponding range of addresses.
- Source usermem.AddrRange
+ Source hostarch.AddrRange
// File is the mapped file.
File memmap.File
@@ -713,7 +713,7 @@ func Unpin(prs []PinnedRange) {
// * !oldAR.Overlaps(newAR).
// * mm.pmas.IsEmptyRange(newAR).
// * oldAR and newAR must be page-aligned.
-func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
+func (mm *MemoryManager) movePMAsLocked(oldAR, newAR hostarch.AddrRange) {
if checkInvariants {
if !oldAR.WellFormed() || oldAR.Length() == 0 || !oldAR.IsPageAligned() {
panic(fmt.Sprintf("invalid oldAR: %v", oldAR))
@@ -731,7 +731,7 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
}
type movedPMA struct {
- oldAR usermem.AddrRange
+ oldAR hostarch.AddrRange
pma pma
}
var movedPMAs []movedPMA
@@ -751,7 +751,7 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
pgap := mm.pmas.FindGap(newAR.Start)
for i := range movedPMAs {
mpma := &movedPMAs[i]
- pmaNewAR := usermem.AddrRange{mpma.oldAR.Start + off, mpma.oldAR.End + off}
+ pmaNewAR := hostarch.AddrRange{mpma.oldAR.Start + off, mpma.oldAR.End + off}
pgap = mm.pmas.Insert(pgap, pmaNewAR, mpma.pma).NextGap()
}
@@ -776,7 +776,7 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
//
// Postconditions: getPMAInternalMappingsLocked does not invalidate iterators
// into mm.pmas.
-func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar usermem.AddrRange) (pmaGapIterator, error) {
+func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar hostarch.AddrRange) (pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -808,7 +808,7 @@ func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar userm
//
// Postconditions: getVecPMAInternalMappingsLocked does not invalidate iterators
// into mm.pmas.
-func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars usermem.AddrRangeSeq) (usermem.AddrRangeSeq, error) {
+func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars hostarch.AddrRangeSeq) (hostarch.AddrRangeSeq, error) {
for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
ar := arsit.Head()
if ar.Length() == 0 {
@@ -829,7 +829,7 @@ func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars usermem.AddrRangeSe
// in ar.
// * ar.Length() != 0.
// * pseg.Range().Contains(ar.Start).
-func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar usermem.AddrRange) safemem.BlockSeq {
+func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar hostarch.AddrRange) safemem.BlockSeq {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -866,7 +866,7 @@ func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar usermem.Add
// * mm.activeMu must be locked.
// * Internal mappings must have been previously established for all addresses
// in ars.
-func (mm *MemoryManager) vecInternalMappingsLocked(ars usermem.AddrRangeSeq) safemem.BlockSeq {
+func (mm *MemoryManager) vecInternalMappingsLocked(ars hostarch.AddrRangeSeq) safemem.BlockSeq {
var ims []safemem.Block
for ; !ars.IsEmpty(); ars = ars.Tail() {
ar := ars.Head()
@@ -931,7 +931,7 @@ func (mm *MemoryManager) decPrivateRef(fr memmap.FileRange) {
// MemoryManager to reflect the insertion of a pma at ar.
//
// Preconditions: mm.activeMu must be locked for writing.
-func (mm *MemoryManager) addRSSLocked(ar usermem.AddrRange) {
+func (mm *MemoryManager) addRSSLocked(ar hostarch.AddrRange) {
mm.curRSS += uint64(ar.Length())
if mm.curRSS > mm.maxRSS {
mm.maxRSS = mm.curRSS
@@ -942,19 +942,19 @@ func (mm *MemoryManager) addRSSLocked(ar usermem.AddrRange) {
// reflect the removal of a pma at ar.
//
// Preconditions: mm.activeMu must be locked for writing.
-func (mm *MemoryManager) removeRSSLocked(ar usermem.AddrRange) {
+func (mm *MemoryManager) removeRSSLocked(ar hostarch.AddrRange) {
mm.curRSS -= uint64(ar.Length())
}
// pmaSetFunctions implements segment.Functions for pmaSet.
type pmaSetFunctions struct{}
-func (pmaSetFunctions) MinKey() usermem.Addr {
+func (pmaSetFunctions) MinKey() hostarch.Addr {
return 0
}
-func (pmaSetFunctions) MaxKey() usermem.Addr {
- return ^usermem.Addr(0)
+func (pmaSetFunctions) MaxKey() hostarch.Addr {
+ return ^hostarch.Addr(0)
}
func (pmaSetFunctions) ClearValue(pma *pma) {
@@ -962,7 +962,7 @@ func (pmaSetFunctions) ClearValue(pma *pma) {
pma.internalMappings = safemem.BlockSeq{}
}
-func (pmaSetFunctions) Merge(ar1 usermem.AddrRange, pma1 pma, ar2 usermem.AddrRange, pma2 pma) (pma, bool) {
+func (pmaSetFunctions) Merge(ar1 hostarch.AddrRange, pma1 pma, ar2 hostarch.AddrRange, pma2 pma) (pma, bool) {
if pma1.file != pma2.file ||
pma1.off+uint64(ar1.Length()) != pma2.off ||
pma1.translatePerms != pma2.translatePerms ||
@@ -980,7 +980,7 @@ func (pmaSetFunctions) Merge(ar1 usermem.AddrRange, pma1 pma, ar2 usermem.AddrRa
return pma1, true
}
-func (pmaSetFunctions) Split(ar usermem.AddrRange, p pma, split usermem.Addr) (pma, pma) {
+func (pmaSetFunctions) Split(ar hostarch.AddrRange, p pma, split hostarch.Addr) (pma, pma) {
newlen1 := uint64(split - ar.Start)
p2 := p
p2.off += newlen1
@@ -997,7 +997,7 @@ func (pmaSetFunctions) Split(ar usermem.AddrRange, p pma, split usermem.Addr) (p
// Preconditions:
// * mm.activeMu must be locked.
// * addr <= pgap.Start().
-func (mm *MemoryManager) findOrSeekPrevUpperBoundPMA(addr usermem.Addr, pgap pmaGapIterator) pmaIterator {
+func (mm *MemoryManager) findOrSeekPrevUpperBoundPMA(addr hostarch.Addr, pgap pmaGapIterator) pmaIterator {
if checkInvariants {
if !pgap.Ok() {
panic("terminal pma iterator")
@@ -1045,7 +1045,7 @@ func (pseg pmaIterator) fileRange() memmap.FileRange {
// Preconditions:
// * pseg.Range().IsSupersetOf(ar).
// * ar.Length != 0.
-func (pseg pmaIterator) fileRangeOf(ar usermem.AddrRange) memmap.FileRange {
+func (pseg pmaIterator) fileRangeOf(ar hostarch.AddrRange) memmap.FileRange {
if checkInvariants {
if !pseg.Ok() {
panic("terminal pma iterator")
diff --git a/pkg/sentry/mm/pma_set.go b/pkg/sentry/mm/pma_set.go
index dbcf2b053..14a7c83b9 100644
--- a/pkg/sentry/mm/pma_set.go
+++ b/pkg/sentry/mm/pma_set.go
@@ -1,7 +1,7 @@
package mm
import (
- __generics_imported0 "gvisor.dev/gvisor/pkg/usermem"
+ __generics_imported0 "gvisor.dev/gvisor/pkg/hostarch"
)
import (
diff --git a/pkg/sentry/mm/procfs.go b/pkg/sentry/mm/procfs.go
index 73bfbea49..f1440e884 100644
--- a/pkg/sentry/mm/procfs.go
+++ b/pkg/sentry/mm/procfs.go
@@ -19,9 +19,9 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/usermem"
)
const (
@@ -29,7 +29,7 @@ const (
// include/linux/kdev_t.h:MINORBITS
devMinorBits = 20
- vsyscallEnd = usermem.Addr(0xffffffffff601000)
+ vsyscallEnd = hostarch.Addr(0xffffffffff601000)
vsyscallMapsEntry = "ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n"
vsyscallSmapsEntry = vsyscallMapsEntry +
"Size: 4 kB\n" +
@@ -62,7 +62,7 @@ func (mm *MemoryManager) NeedsUpdate(generation int64) bool {
func (mm *MemoryManager) ReadMapsDataInto(ctx context.Context, buf *bytes.Buffer) {
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
- var start usermem.Addr
+ var start hostarch.Addr
for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() {
mm.appendVMAMapsEntryLocked(ctx, vseg, buf)
@@ -88,9 +88,9 @@ func (mm *MemoryManager) ReadMapsSeqFileData(ctx context.Context, handle seqfile
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
var data []seqfile.SeqData
- var start usermem.Addr
+ var start hostarch.Addr
if handle != nil {
- start = *handle.(*usermem.Addr)
+ start = *handle.(*hostarch.Addr)
}
for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() {
vmaAddr := vseg.End()
@@ -177,7 +177,7 @@ func (mm *MemoryManager) appendVMAMapsEntryLocked(ctx context.Context, vseg vmaI
func (mm *MemoryManager) ReadSmapsDataInto(ctx context.Context, buf *bytes.Buffer) {
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
- var start usermem.Addr
+ var start hostarch.Addr
for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() {
mm.vmaSmapsEntryIntoLocked(ctx, vseg, buf)
@@ -196,9 +196,9 @@ func (mm *MemoryManager) ReadSmapsSeqFileData(ctx context.Context, handle seqfil
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
var data []seqfile.SeqData
- var start usermem.Addr
+ var start hostarch.Addr
if handle != nil {
- start = *handle.(*usermem.Addr)
+ start = *handle.(*hostarch.Addr)
}
for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() {
vmaAddr := vseg.End()
@@ -279,8 +279,8 @@ func (mm *MemoryManager) vmaSmapsEntryIntoLocked(ctx context.Context, vseg vmaIt
// Swap is not implemented.
fmt.Fprintf(b, "Swap: %8d kB\n", 0)
fmt.Fprintf(b, "SwapPss: %8d kB\n", 0)
- fmt.Fprintf(b, "KernelPageSize: %8d kB\n", usermem.PageSize/1024)
- fmt.Fprintf(b, "MMUPageSize: %8d kB\n", usermem.PageSize/1024)
+ fmt.Fprintf(b, "KernelPageSize: %8d kB\n", hostarch.PageSize/1024)
+ fmt.Fprintf(b, "MMUPageSize: %8d kB\n", hostarch.PageSize/1024)
locked := rss
if vma.mlockMode == memmap.MLockNone {
locked = 0
diff --git a/pkg/sentry/mm/shm.go b/pkg/sentry/mm/shm.go
index 6432731d4..3130be80c 100644
--- a/pkg/sentry/mm/shm.go
+++ b/pkg/sentry/mm/shm.go
@@ -16,13 +16,13 @@ package mm
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/shm"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// DetachShm unmaps a sysv shared memory segment.
-func (mm *MemoryManager) DetachShm(ctx context.Context, addr usermem.Addr) error {
+func (mm *MemoryManager) DetachShm(ctx context.Context, addr hostarch.Addr) error {
if addr != addr.RoundDown() {
// "... shmaddr is not aligned on a page boundary." - man shmdt(2)
return syserror.EINVAL
@@ -52,7 +52,7 @@ func (mm *MemoryManager) DetachShm(ctx context.Context, addr usermem.Addr) error
}
// Remove all vmas that could have been created by the same attach.
- end := addr + usermem.Addr(detached.EffectiveSize())
+ end := addr + hostarch.Addr(detached.EffectiveSize())
for vseg.Ok() && vseg.End() <= end {
vma := vseg.ValuePtr()
if vma.mappable == detached && uint64(vseg.Start()-addr) == vma.off {
diff --git a/pkg/sentry/mm/special_mappable.go b/pkg/sentry/mm/special_mappable.go
index 48d8b6a2b..e748b7ff8 100644
--- a/pkg/sentry/mm/special_mappable.go
+++ b/pkg/sentry/mm/special_mappable.go
@@ -16,11 +16,11 @@ package mm
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// SpecialMappable implements memmap.MappingIdentity and memmap.Mappable with
@@ -77,21 +77,21 @@ func (m *SpecialMappable) Msync(ctx context.Context, mr memmap.MappableRange) er
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (*SpecialMappable) AddMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, uint64, bool) error {
+func (*SpecialMappable) AddMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, uint64, bool) error {
return nil
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (*SpecialMappable) RemoveMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, uint64, bool) {
+func (*SpecialMappable) RemoveMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, uint64, bool) {
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (*SpecialMappable) CopyMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, usermem.AddrRange, uint64, bool) error {
+func (*SpecialMappable) CopyMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, hostarch.AddrRange, uint64, bool) error {
return nil
}
// Translate implements memmap.Mappable.Translate.
-func (m *SpecialMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (m *SpecialMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
var err error
if required.End > m.fr.Length() {
err = &memmap.BusError{syserror.EFAULT}
@@ -102,7 +102,7 @@ func (m *SpecialMappable) Translate(ctx context.Context, required, optional memm
Source: source,
File: m.mfp.MemoryFile(),
Offset: m.fr.Start + source.Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
},
}, err
}
@@ -146,7 +146,7 @@ func NewSharedAnonMappable(length uint64, mfp pgalloc.MemoryFileProvider) (*Spec
if length == 0 {
return nil, syserror.EINVAL
}
- alignedLen, ok := usermem.Addr(length).RoundUp()
+ alignedLen, ok := hostarch.Addr(length).RoundUp()
if !ok {
return nil, syserror.EINVAL
}
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
index 69e37330b..7ad6b7c21 100644
--- a/pkg/sentry/mm/syscalls.go
+++ b/pkg/sentry/mm/syscalls.go
@@ -21,20 +21,20 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/kernel/futex"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// HandleUserFault handles an application page fault. sp is the faulting
// application thread's stack pointer.
//
// Preconditions: mm.as != nil.
-func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr usermem.Addr, at usermem.AccessType, sp usermem.Addr) error {
- ar, ok := addr.RoundDown().ToRange(usermem.PageSize)
+func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr hostarch.Addr, at hostarch.AccessType, sp hostarch.Addr) error {
+ ar, ok := addr.RoundDown().ToRange(hostarch.PageSize)
if !ok {
return syserror.EFAULT
}
@@ -72,11 +72,11 @@ func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr usermem.Addr,
}
// MMap establishes a memory mapping.
-func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (usermem.Addr, error) {
+func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (hostarch.Addr, error) {
if opts.Length == 0 {
return 0, syserror.EINVAL
}
- length, ok := usermem.Addr(opts.Length).RoundUp()
+ length, ok := hostarch.Addr(opts.Length).RoundUp()
if !ok {
return 0, syserror.ENOMEM
}
@@ -84,7 +84,7 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (userme
if opts.Mappable != nil {
// Offset must be aligned.
- if usermem.Addr(opts.Offset).RoundDown() != usermem.Addr(opts.Offset) {
+ if hostarch.Addr(opts.Offset).RoundDown() != hostarch.Addr(opts.Offset) {
return 0, syserror.EINVAL
}
// Offset + length must not overflow.
@@ -157,7 +157,7 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (userme
// Preconditions:
// * mm.mappingMu must be locked.
// * vseg.Range().IsSupersetOf(ar).
-func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, precommit bool) {
+func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, precommit bool) {
if !vseg.ValuePtr().effectivePerms.Any() {
// Linux doesn't populate inaccessible pages. See
// mm/gup.c:populate_vma_page_range.
@@ -175,7 +175,7 @@ func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar u
}
// Ensure that we have usable pmas.
- pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, usermem.NoAccess)
+ pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, hostarch.NoAccess)
if err != nil {
// mm/util.c:vm_mmap_pgoff() ignores the error, if any, from
// mm/gup.c:mm_populate(). If it matters, we'll get it again when
@@ -203,7 +203,7 @@ func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar u
// * vseg.Range().IsSupersetOf(ar).
//
// Postconditions: mm.mappingMu will be unlocked.
-func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, precommit bool) {
+func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, precommit bool) {
// See populateVMA above for commentary.
if !vseg.ValuePtr().effectivePerms.Any() {
mm.mappingMu.Unlock()
@@ -221,7 +221,7 @@ func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaItera
// mm.mappingMu doesn't need to be write-locked for getPMAsLocked, and it
// isn't needed at all for mapASLocked.
mm.mappingMu.DowngradeLock()
- pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, usermem.NoAccess)
+ pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, hostarch.NoAccess)
mm.mappingMu.RUnlock()
if err != nil {
mm.activeMu.Unlock()
@@ -234,7 +234,7 @@ func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaItera
}
// MapStack allocates the initial process stack.
-func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error) {
+func (mm *MemoryManager) MapStack(ctx context.Context) (hostarch.AddrRange, error) {
// maxStackSize is the maximum supported process stack size in bytes.
//
// This limit exists because stack growing isn't implemented, so the entire
@@ -242,7 +242,7 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error
const maxStackSize = 128 << 20
stackSize := limits.FromContext(ctx).Get(limits.Stack)
- r, ok := usermem.Addr(stackSize.Cur).RoundUp()
+ r, ok := hostarch.Addr(stackSize.Cur).RoundUp()
sz := uint64(r)
if !ok {
// RLIM_INFINITY rounds up to 0.
@@ -251,16 +251,16 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error
ctx.Warningf("Capping stack size from RLIMIT_STACK of %v down to %v.", sz, maxStackSize)
sz = maxStackSize
} else if sz == 0 {
- return usermem.AddrRange{}, syserror.ENOMEM
+ return hostarch.AddrRange{}, syserror.ENOMEM
}
- szaddr := usermem.Addr(sz)
+ szaddr := hostarch.Addr(sz)
ctx.Debugf("Allocating stack with size of %v bytes", sz)
// Determine the stack's desired location. Unlike Linux, address
// randomization can't be disabled.
- stackEnd := mm.layout.MaxAddr - usermem.Addr(mrand.Int63n(int64(mm.layout.MaxStackRand))).RoundDown()
+ stackEnd := mm.layout.MaxAddr - hostarch.Addr(mrand.Int63n(int64(mm.layout.MaxStackRand))).RoundDown()
if stackEnd < szaddr {
- return usermem.AddrRange{}, syserror.ENOMEM
+ return hostarch.AddrRange{}, syserror.ENOMEM
}
stackStart := stackEnd - szaddr
mm.mappingMu.Lock()
@@ -268,8 +268,8 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error
_, ar, err := mm.createVMALocked(ctx, memmap.MMapOpts{
Length: sz,
Addr: stackStart,
- Perms: usermem.ReadWrite,
- MaxPerms: usermem.AnyAccess,
+ Perms: hostarch.ReadWrite,
+ MaxPerms: hostarch.AnyAccess,
Private: true,
GrowsDown: true,
MLockMode: mm.defMLockMode,
@@ -279,14 +279,14 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error
}
// MUnmap implements the semantics of Linux's munmap(2).
-func (mm *MemoryManager) MUnmap(ctx context.Context, addr usermem.Addr, length uint64) error {
+func (mm *MemoryManager) MUnmap(ctx context.Context, addr hostarch.Addr, length uint64) error {
if addr != addr.RoundDown() {
return syserror.EINVAL
}
if length == 0 {
return syserror.EINVAL
}
- la, ok := usermem.Addr(length).RoundUp()
+ la, ok := hostarch.Addr(length).RoundUp()
if !ok {
return syserror.EINVAL
}
@@ -308,7 +308,7 @@ type MRemapOpts struct {
// NewAddr is the new address for the remapping. NewAddr is ignored unless
// Move is MMRemapMustMove.
- NewAddr usermem.Addr
+ NewAddr hostarch.Addr
}
// MRemapMoveMode controls MRemap's moving behavior.
@@ -328,7 +328,7 @@ const (
)
// MRemap implements the semantics of Linux's mremap(2).
-func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSize uint64, newSize uint64, opts MRemapOpts) (usermem.Addr, error) {
+func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldSize uint64, newSize uint64, opts MRemapOpts) (hostarch.Addr, error) {
// "Note that old_address has to be page aligned." - mremap(2)
if oldAddr.RoundDown() != oldAddr {
return 0, syserror.EINVAL
@@ -336,9 +336,9 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
// Linux treats an old_size that rounds up to 0 as 0, which is otherwise a
// valid size. However, new_size can't be 0 after rounding.
- oldSizeAddr, _ := usermem.Addr(oldSize).RoundUp()
+ oldSizeAddr, _ := hostarch.Addr(oldSize).RoundUp()
oldSize = uint64(oldSizeAddr)
- newSizeAddr, ok := usermem.Addr(newSize).RoundUp()
+ newSizeAddr, ok := hostarch.Addr(newSize).RoundUp()
if !ok || newSizeAddr == 0 {
return 0, syserror.EINVAL
}
@@ -392,8 +392,8 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
if newSize < oldSize {
// If oldAddr+oldSize didn't overflow, oldAddr+newSize can't
// either.
- newEnd := oldAddr + usermem.Addr(newSize)
- mm.unmapLocked(ctx, usermem.AddrRange{newEnd, oldEnd})
+ newEnd := oldAddr + hostarch.Addr(newSize)
+ mm.unmapLocked(ctx, hostarch.AddrRange{newEnd, oldEnd})
}
return oldAddr, nil
}
@@ -438,7 +438,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
}
// Find a location for the new mapping.
- var newAR usermem.AddrRange
+ var newAR hostarch.AddrRange
switch opts.Move {
case MRemapMayMove:
newAddr, err := mm.findAvailableLocked(newSize, findAvailableOpts{})
@@ -457,7 +457,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
if !ok {
return 0, syserror.EINVAL
}
- if (usermem.AddrRange{oldAddr, oldEnd}).Overlaps(newAR) {
+ if (hostarch.AddrRange{oldAddr, oldEnd}).Overlaps(newAR) {
return 0, syserror.EINVAL
}
@@ -479,8 +479,8 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
// correct: compare Linux's mm/mremap.c:mremap_to() => do_munmap(),
// vma_to_resize().
if newSize < oldSize {
- oldNewEnd := oldAddr + usermem.Addr(newSize)
- mm.unmapLocked(ctx, usermem.AddrRange{oldNewEnd, oldEnd})
+ oldNewEnd := oldAddr + hostarch.Addr(newSize)
+ mm.unmapLocked(ctx, hostarch.AddrRange{oldNewEnd, oldEnd})
oldEnd = oldNewEnd
}
@@ -488,7 +488,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
vseg = mm.vmas.FindSegment(oldAddr)
}
- oldAR := usermem.AddrRange{oldAddr, oldEnd}
+ oldAR := hostarch.AddrRange{oldAddr, oldEnd}
// Check that oldEnd maps to the same vma as oldAddr.
if vseg.End() < oldEnd {
@@ -588,14 +588,14 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
}
// MProtect implements the semantics of Linux's mprotect(2).
-func (mm *MemoryManager) MProtect(addr usermem.Addr, length uint64, realPerms usermem.AccessType, growsDown bool) error {
+func (mm *MemoryManager) MProtect(addr hostarch.Addr, length uint64, realPerms hostarch.AccessType, growsDown bool) error {
if addr.RoundDown() != addr {
return syserror.EINVAL
}
if length == 0 {
return nil
}
- rlength, ok := usermem.Addr(length).RoundUp()
+ rlength, ok := hostarch.Addr(length).RoundUp()
if !ok {
return syserror.ENOMEM
}
@@ -692,19 +692,19 @@ func (mm *MemoryManager) MProtect(addr usermem.Addr, length uint64, realPerms us
}
// BrkSetup sets mm's brk address to addr and its brk size to 0.
-func (mm *MemoryManager) BrkSetup(ctx context.Context, addr usermem.Addr) {
+func (mm *MemoryManager) BrkSetup(ctx context.Context, addr hostarch.Addr) {
mm.mappingMu.Lock()
defer mm.mappingMu.Unlock()
// Unmap the existing brk.
if mm.brk.Length() != 0 {
mm.unmapLocked(ctx, mm.brk)
}
- mm.brk = usermem.AddrRange{addr, addr}
+ mm.brk = hostarch.AddrRange{addr, addr}
}
// Brk implements the semantics of Linux's brk(2), except that it returns an
// error on failure.
-func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Addr, error) {
+func (mm *MemoryManager) Brk(ctx context.Context, addr hostarch.Addr) (hostarch.Addr, error) {
mm.mappingMu.Lock()
// Can't defer mm.mappingMu.Unlock(); see below.
@@ -741,8 +741,8 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Ad
Fixed: true,
// Compare Linux's
// arch/x86/include/asm/page_types.h:VM_DATA_DEFAULT_FLAGS.
- Perms: usermem.ReadWrite,
- MaxPerms: usermem.AnyAccess,
+ Perms: hostarch.ReadWrite,
+ MaxPerms: hostarch.AnyAccess,
Private: true,
// Linux: mm/mmap.c:sys_brk() => do_brk_flags() includes
// mm->def_flags.
@@ -762,7 +762,7 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Ad
}
case newbrkpg < oldbrkpg:
- mm.unmapLocked(ctx, usermem.AddrRange{newbrkpg, oldbrkpg})
+ mm.unmapLocked(ctx, hostarch.AddrRange{newbrkpg, oldbrkpg})
fallthrough
default:
@@ -775,9 +775,9 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Ad
// MLock implements the semantics of Linux's mlock()/mlock2()/munlock(),
// depending on mode.
-func (mm *MemoryManager) MLock(ctx context.Context, addr usermem.Addr, length uint64, mode memmap.MLockMode) error {
+func (mm *MemoryManager) MLock(ctx context.Context, addr hostarch.Addr, length uint64, mode memmap.MLockMode) error {
// Linux allows this to overflow.
- la, _ := usermem.Addr(length + addr.PageOffset()).RoundUp()
+ la, _ := hostarch.Addr(length + addr.PageOffset()).RoundUp()
ar, ok := addr.RoundDown().ToRange(uint64(la))
if !ok {
return syserror.EINVAL
@@ -850,7 +850,7 @@ func (mm *MemoryManager) MLock(ctx context.Context, addr usermem.Addr, length ui
mm.mappingMu.RUnlock()
return syserror.ENOMEM
}
- _, _, err := mm.getPMAsLocked(ctx, vseg, vseg.Range().Intersect(ar), usermem.NoAccess)
+ _, _, err := mm.getPMAsLocked(ctx, vseg, vseg.Range().Intersect(ar), hostarch.NoAccess)
if err != nil {
mm.activeMu.Unlock()
mm.mappingMu.RUnlock()
@@ -945,7 +945,7 @@ func (mm *MemoryManager) MLockAll(ctx context.Context, opts MLockAllOpts) error
mm.mappingMu.DowngradeLock()
for vseg := mm.vmas.FirstSegment(); vseg.Ok(); vseg = vseg.NextSegment() {
if vseg.ValuePtr().effectivePerms.Any() {
- mm.getPMAsLocked(ctx, vseg, vseg.Range(), usermem.NoAccess)
+ mm.getPMAsLocked(ctx, vseg, vseg.Range(), hostarch.NoAccess)
}
}
@@ -965,7 +965,7 @@ func (mm *MemoryManager) MLockAll(ctx context.Context, opts MLockAllOpts) error
}
// NumaPolicy implements the semantics of Linux's get_mempolicy(MPOL_F_ADDR).
-func (mm *MemoryManager) NumaPolicy(addr usermem.Addr) (linux.NumaPolicy, uint64, error) {
+func (mm *MemoryManager) NumaPolicy(addr hostarch.Addr) (linux.NumaPolicy, uint64, error) {
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
vseg := mm.vmas.FindSegment(addr)
@@ -977,12 +977,12 @@ func (mm *MemoryManager) NumaPolicy(addr usermem.Addr) (linux.NumaPolicy, uint64
}
// SetNumaPolicy implements the semantics of Linux's mbind().
-func (mm *MemoryManager) SetNumaPolicy(addr usermem.Addr, length uint64, policy linux.NumaPolicy, nodemask uint64) error {
+func (mm *MemoryManager) SetNumaPolicy(addr hostarch.Addr, length uint64, policy linux.NumaPolicy, nodemask uint64) error {
if !addr.IsPageAligned() {
return syserror.EINVAL
}
// Linux allows this to overflow.
- la, _ := usermem.Addr(length).RoundUp()
+ la, _ := hostarch.Addr(length).RoundUp()
ar, ok := addr.ToRange(uint64(la))
if !ok {
return syserror.EINVAL
@@ -1018,7 +1018,7 @@ func (mm *MemoryManager) SetNumaPolicy(addr usermem.Addr, length uint64, policy
}
// SetDontFork implements the semantics of madvise MADV_DONTFORK.
-func (mm *MemoryManager) SetDontFork(addr usermem.Addr, length uint64, dontfork bool) error {
+func (mm *MemoryManager) SetDontFork(addr hostarch.Addr, length uint64, dontfork bool) error {
ar, ok := addr.ToRange(length)
if !ok {
return syserror.EINVAL
@@ -1044,7 +1044,7 @@ func (mm *MemoryManager) SetDontFork(addr usermem.Addr, length uint64, dontfork
}
// Decommit implements the semantics of Linux's madvise(MADV_DONTNEED).
-func (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error {
+func (mm *MemoryManager) Decommit(addr hostarch.Addr, length uint64) error {
ar, ok := addr.ToRange(length)
if !ok {
return syserror.EINVAL
@@ -1112,14 +1112,14 @@ type MSyncOpts struct {
}
// MSync implements the semantics of Linux's msync().
-func (mm *MemoryManager) MSync(ctx context.Context, addr usermem.Addr, length uint64, opts MSyncOpts) error {
+func (mm *MemoryManager) MSync(ctx context.Context, addr hostarch.Addr, length uint64, opts MSyncOpts) error {
if addr != addr.RoundDown() {
return syserror.EINVAL
}
if length == 0 {
return nil
}
- la, ok := usermem.Addr(length).RoundUp()
+ la, ok := hostarch.Addr(length).RoundUp()
if !ok {
return syserror.ENOMEM
}
@@ -1188,7 +1188,7 @@ func (mm *MemoryManager) MSync(ctx context.Context, addr usermem.Addr, length ui
}
// GetSharedFutexKey is used by kernel.Task.GetSharedKey.
-func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr usermem.Addr) (futex.Key, error) {
+func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr hostarch.Addr) (futex.Key, error) {
ar, ok := addr.ToRange(4) // sizeof(int32).
if !ok {
return futex.Key{}, syserror.EFAULT
@@ -1196,7 +1196,7 @@ func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr usermem.Add
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
- vseg, _, err := mm.getVMAsLocked(ctx, ar, usermem.Read, false)
+ vseg, _, err := mm.getVMAsLocked(ctx, ar, hostarch.Read, false)
if err != nil {
return futex.Key{}, err
}
@@ -1230,7 +1230,7 @@ func (mm *MemoryManager) VirtualMemorySize() uint64 {
// VirtualMemorySizeRange returns the combined length in bytes of all mappings
// in ar in mm.
-func (mm *MemoryManager) VirtualMemorySizeRange(ar usermem.AddrRange) uint64 {
+func (mm *MemoryManager) VirtualMemorySizeRange(ar hostarch.AddrRange) uint64 {
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
return uint64(mm.vmas.SpanRange(ar))
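Note: the syscalls above repeatedly round user-supplied lengths with hostarch.Addr(...).RoundUp(), treating an overflow (ok == false) as ENOMEM or EINVAL, and build page-aligned ranges with RoundDown().ToRange(). A minimal sketch of that pattern, assuming the gvisor.dev/gvisor module is available:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	// Round a length up to a page boundary; ok reports whether the
	// addition overflowed.
	if la, ok := hostarch.Addr(0x1001).RoundUp(); ok {
		fmt.Printf("%#x\n", uint64(la)) // 0x2000
	}

	// The single-page range that HandleUserFault builds around a fault.
	addr := hostarch.Addr(0x7f0000001234)
	if ar, ok := addr.RoundDown().ToRange(hostarch.PageSize); ok {
		fmt.Println(ar.Length()) // 4096
	}
}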
diff --git a/pkg/sentry/mm/vma.go b/pkg/sentry/mm/vma.go
index b8df72813..0d019e41d 100644
--- a/pkg/sentry/mm/vma.go
+++ b/pkg/sentry/mm/vma.go
@@ -19,18 +19,18 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Preconditions:
// * mm.mappingMu must be locked for writing.
// * opts must be valid as defined by the checks in MMap.
-func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOpts) (vmaIterator, usermem.AddrRange, error) {
+func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOpts) (vmaIterator, hostarch.AddrRange, error) {
if opts.MaxPerms != opts.MaxPerms.Effective() {
panic(fmt.Sprintf("Non-effective MaxPerms %s cannot be enforced", opts.MaxPerms))
}
@@ -47,7 +47,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp
if opts.Force && opts.Unmap && opts.Fixed {
addr = opts.Addr
} else {
- return vmaIterator{}, usermem.AddrRange{}, err
+ return vmaIterator{}, hostarch.AddrRange{}, err
}
}
ar, _ := addr.ToRange(opts.Length)
@@ -58,7 +58,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp
newUsageAS -= uint64(mm.vmas.SpanRange(ar))
}
if limitAS := limits.FromContext(ctx).Get(limits.AS).Cur; newUsageAS > limitAS {
- return vmaIterator{}, usermem.AddrRange{}, syserror.ENOMEM
+ return vmaIterator{}, hostarch.AddrRange{}, syserror.ENOMEM
}
if opts.MLockMode != memmap.MLockNone {
@@ -66,14 +66,14 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp
if creds := auth.CredentialsFromContext(ctx); !creds.HasCapabilityIn(linux.CAP_IPC_LOCK, creds.UserNamespace.Root()) {
mlockLimit := limits.FromContext(ctx).Get(limits.MemoryLocked).Cur
if mlockLimit == 0 {
- return vmaIterator{}, usermem.AddrRange{}, syserror.EPERM
+ return vmaIterator{}, hostarch.AddrRange{}, syserror.EPERM
}
newLockedAS := mm.lockedAS + opts.Length
if opts.Unmap {
newLockedAS -= mm.mlockedBytesRangeLocked(ar)
}
if newLockedAS > mlockLimit {
- return vmaIterator{}, usermem.AddrRange{}, syserror.EAGAIN
+ return vmaIterator{}, hostarch.AddrRange{}, syserror.EAGAIN
}
}
}
@@ -93,7 +93,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp
// The expression for writable is vma.canWriteMappableLocked(), but we
// don't yet have a vma.
if err := opts.Mappable.AddMapping(ctx, mm, ar, opts.Offset, !opts.Private && opts.MaxPerms.Write); err != nil {
- return vmaIterator{}, usermem.AddrRange{}, err
+ return vmaIterator{}, hostarch.AddrRange{}, err
}
}
@@ -137,7 +137,7 @@ type findAvailableOpts struct {
//
// - Unmap allows existing guard pages in the returned range.
- Addr usermem.Addr
+ Addr hostarch.Addr
Fixed bool
Unmap bool
Map32Bit bool
@@ -153,13 +153,13 @@ const (
// findAvailableLocked finds an allocatable range.
//
// Preconditions: mm.mappingMu must be locked.
-func (mm *MemoryManager) findAvailableLocked(length uint64, opts findAvailableOpts) (usermem.Addr, error) {
+func (mm *MemoryManager) findAvailableLocked(length uint64, opts findAvailableOpts) (hostarch.Addr, error) {
if opts.Fixed {
opts.Map32Bit = false
}
allowedAR := mm.applicationAddrRange()
if opts.Map32Bit {
- allowedAR = allowedAR.Intersect(usermem.AddrRange{map32Start, map32End})
+ allowedAR = allowedAR.Intersect(hostarch.AddrRange{map32Start, map32End})
}
// Does the provided suggestion work?
@@ -181,33 +181,33 @@ func (mm *MemoryManager) findAvailableLocked(length uint64, opts findAvailableOp
}
// Prefer hugepage alignment if a hugepage or more is requested.
- alignment := uint64(usermem.PageSize)
- if length >= usermem.HugePageSize {
- alignment = usermem.HugePageSize
+ alignment := uint64(hostarch.PageSize)
+ if length >= hostarch.HugePageSize {
+ alignment = hostarch.HugePageSize
}
if opts.Map32Bit {
return mm.findLowestAvailableLocked(length, alignment, allowedAR)
}
if mm.layout.DefaultDirection == arch.MmapBottomUp {
- return mm.findLowestAvailableLocked(length, alignment, usermem.AddrRange{mm.layout.BottomUpBase, mm.layout.MaxAddr})
+ return mm.findLowestAvailableLocked(length, alignment, hostarch.AddrRange{mm.layout.BottomUpBase, mm.layout.MaxAddr})
}
- return mm.findHighestAvailableLocked(length, alignment, usermem.AddrRange{mm.layout.MinAddr, mm.layout.TopDownBase})
+ return mm.findHighestAvailableLocked(length, alignment, hostarch.AddrRange{mm.layout.MinAddr, mm.layout.TopDownBase})
}
-func (mm *MemoryManager) applicationAddrRange() usermem.AddrRange {
- return usermem.AddrRange{mm.layout.MinAddr, mm.layout.MaxAddr}
+func (mm *MemoryManager) applicationAddrRange() hostarch.AddrRange {
+ return hostarch.AddrRange{mm.layout.MinAddr, mm.layout.MaxAddr}
}
// Preconditions: mm.mappingMu must be locked.
-func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bounds usermem.AddrRange) (usermem.Addr, error) {
- for gap := mm.vmas.LowerBoundGap(bounds.Start); gap.Ok() && gap.Start() < bounds.End; gap = gap.NextLargeEnoughGap(usermem.Addr(length)) {
+func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bounds hostarch.AddrRange) (hostarch.Addr, error) {
+ for gap := mm.vmas.LowerBoundGap(bounds.Start); gap.Ok() && gap.Start() < bounds.End; gap = gap.NextLargeEnoughGap(hostarch.Addr(length)) {
if gr := gap.availableRange().Intersect(bounds); uint64(gr.Length()) >= length {
// Can we shift up to match the alignment?
if offset := uint64(gr.Start) % alignment; offset != 0 {
if uint64(gr.Length()) >= length+alignment-offset {
// Yes, we're aligned.
- return gr.Start + usermem.Addr(alignment-offset), nil
+ return gr.Start + hostarch.Addr(alignment-offset), nil
}
}
@@ -219,15 +219,15 @@ func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bou
}
// Preconditions: mm.mappingMu must be locked.
-func (mm *MemoryManager) findHighestAvailableLocked(length, alignment uint64, bounds usermem.AddrRange) (usermem.Addr, error) {
- for gap := mm.vmas.UpperBoundGap(bounds.End); gap.Ok() && gap.End() > bounds.Start; gap = gap.PrevLargeEnoughGap(usermem.Addr(length)) {
+func (mm *MemoryManager) findHighestAvailableLocked(length, alignment uint64, bounds hostarch.AddrRange) (hostarch.Addr, error) {
+ for gap := mm.vmas.UpperBoundGap(bounds.End); gap.Ok() && gap.End() > bounds.Start; gap = gap.PrevLargeEnoughGap(hostarch.Addr(length)) {
if gr := gap.availableRange().Intersect(bounds); uint64(gr.Length()) >= length {
// Can we shift down to match the alignment?
- start := gr.End - usermem.Addr(length)
+ start := gr.End - hostarch.Addr(length)
if offset := uint64(start) % alignment; offset != 0 {
- if gr.Start <= start-usermem.Addr(offset) {
+ if gr.Start <= start-hostarch.Addr(offset) {
// Yes, we're aligned.
- return start - usermem.Addr(offset), nil
+ return start - hostarch.Addr(offset), nil
}
}
@@ -239,7 +239,7 @@ func (mm *MemoryManager) findHighestAvailableLocked(length, alignment uint64, bo
}
// Preconditions: mm.mappingMu must be locked.
-func (mm *MemoryManager) mlockedBytesRangeLocked(ar usermem.AddrRange) uint64 {
+func (mm *MemoryManager) mlockedBytesRangeLocked(ar hostarch.AddrRange) uint64 {
var total uint64
for vseg := mm.vmas.LowerBoundSegment(ar.Start); vseg.Ok() && vseg.Start() < ar.End; vseg = vseg.NextSegment() {
if vseg.ValuePtr().mlockMode != memmap.MLockNone {
@@ -264,7 +264,7 @@ func (mm *MemoryManager) mlockedBytesRangeLocked(ar usermem.AddrRange) uint64 {
// Preconditions:
// * mm.mappingMu must be locked for reading; it may be temporarily unlocked.
// * ar.Length() != 0.
-func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool) (vmaIterator, vmaGapIterator, error) {
+func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool) (vmaIterator, vmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -320,7 +320,7 @@ func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar usermem.AddrRange
// temporarily unlocked.
//
// Postconditions: ars is not mutated.
-func (mm *MemoryManager) getVecVMAsLocked(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType, ignorePermissions bool) (usermem.AddrRangeSeq, error) {
+func (mm *MemoryManager) getVecVMAsLocked(ctx context.Context, ars hostarch.AddrRangeSeq, at hostarch.AccessType, ignorePermissions bool) (hostarch.AddrRangeSeq, error) {
for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
ar := arsit.Head()
if ar.Length() == 0 {
@@ -339,7 +339,7 @@ func (mm *MemoryManager) getVecVMAsLocked(ctx context.Context, ars usermem.AddrR
//
// guardBytes is equivalent to Linux's stack_guard_gap after upstream
// 1be7107fbe18 "mm: larger stack guard gap, between vmas".
-const guardBytes = 256 * usermem.PageSize
+const guardBytes = 256 * hostarch.PageSize
// unmapLocked unmaps all addresses in ar and returns the resulting gap in
// mm.vmas.
@@ -348,7 +348,7 @@ const guardBytes = 256 * usermem.PageSize
// * mm.mappingMu must be locked for writing.
// * ar.Length() != 0.
// * ar must be page-aligned.
-func (mm *MemoryManager) unmapLocked(ctx context.Context, ar usermem.AddrRange) vmaGapIterator {
+func (mm *MemoryManager) unmapLocked(ctx context.Context, ar hostarch.AddrRange) vmaGapIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -369,7 +369,7 @@ func (mm *MemoryManager) unmapLocked(ctx context.Context, ar usermem.AddrRange)
// * mm.mappingMu must be locked for writing.
// * ar.Length() != 0.
// * ar must be page-aligned.
-func (mm *MemoryManager) removeVMAsLocked(ctx context.Context, ar usermem.AddrRange) vmaGapIterator {
+func (mm *MemoryManager) removeVMAsLocked(ctx context.Context, ar hostarch.AddrRange) vmaGapIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -426,12 +426,12 @@ func (vma *vma) isPrivateDataLocked() bool {
// vmaSetFunctions implements segment.Functions for vmaSet.
type vmaSetFunctions struct{}
-func (vmaSetFunctions) MinKey() usermem.Addr {
+func (vmaSetFunctions) MinKey() hostarch.Addr {
return 0
}
-func (vmaSetFunctions) MaxKey() usermem.Addr {
- return ^usermem.Addr(0)
+func (vmaSetFunctions) MaxKey() hostarch.Addr {
+ return ^hostarch.Addr(0)
}
func (vmaSetFunctions) ClearValue(vma *vma) {
@@ -440,7 +440,7 @@ func (vmaSetFunctions) ClearValue(vma *vma) {
vma.hint = ""
}
-func (vmaSetFunctions) Merge(ar1 usermem.AddrRange, vma1 vma, ar2 usermem.AddrRange, vma2 vma) (vma, bool) {
+func (vmaSetFunctions) Merge(ar1 hostarch.AddrRange, vma1 vma, ar2 hostarch.AddrRange, vma2 vma) (vma, bool) {
if vma1.mappable != vma2.mappable ||
(vma1.mappable != nil && vma1.off+uint64(ar1.Length()) != vma2.off) ||
vma1.realPerms != vma2.realPerms ||
@@ -462,7 +462,7 @@ func (vmaSetFunctions) Merge(ar1 usermem.AddrRange, vma1 vma, ar2 usermem.AddrRa
return vma1, true
}
-func (vmaSetFunctions) Split(ar usermem.AddrRange, v vma, split usermem.Addr) (vma, vma) {
+func (vmaSetFunctions) Split(ar hostarch.AddrRange, v vma, split hostarch.Addr) (vma, vma) {
v2 := v
if v2.mappable != nil {
v2.off += uint64(split - ar.Start)
@@ -476,7 +476,7 @@ func (vmaSetFunctions) Split(ar usermem.AddrRange, v vma, split usermem.Addr) (v
// Preconditions:
// * vseg.ValuePtr().mappable != nil.
// * vseg.Range().Contains(addr).
-func (vseg vmaIterator) mappableOffsetAt(addr usermem.Addr) uint64 {
+func (vseg vmaIterator) mappableOffsetAt(addr hostarch.Addr) uint64 {
if checkInvariants {
if !vseg.Ok() {
panic("terminal vma iterator")
@@ -503,7 +503,7 @@ func (vseg vmaIterator) mappableRange() memmap.MappableRange {
// * vseg.ValuePtr().mappable != nil.
// * vseg.Range().IsSupersetOf(ar).
// * ar.Length() != 0.
-func (vseg vmaIterator) mappableRangeOf(ar usermem.AddrRange) memmap.MappableRange {
+func (vseg vmaIterator) mappableRangeOf(ar hostarch.AddrRange) memmap.MappableRange {
if checkInvariants {
if !vseg.Ok() {
panic("terminal vma iterator")
@@ -528,7 +528,7 @@ func (vseg vmaIterator) mappableRangeOf(ar usermem.AddrRange) memmap.MappableRan
// * vseg.ValuePtr().mappable != nil.
// * vseg.mappableRange().IsSupersetOf(mr).
// * mr.Length() != 0.
-func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange {
+func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) hostarch.AddrRange {
if checkInvariants {
if !vseg.Ok() {
panic("terminal vma iterator")
@@ -546,7 +546,7 @@ func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange {
vma := vseg.ValuePtr()
vstart := vseg.Start()
- return usermem.AddrRange{vstart + usermem.Addr(mr.Start-vma.off), vstart + usermem.Addr(mr.End-vma.off)}
+ return hostarch.AddrRange{vstart + hostarch.Addr(mr.Start-vma.off), vstart + hostarch.Addr(mr.End-vma.off)}
}
// seekNextLowerBound returns mm.vmas.LowerBoundSegment(addr), but does so by
@@ -555,7 +555,7 @@ func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange {
// Preconditions:
// * mm.mappingMu must be locked.
// * addr >= vseg.Start().
-func (vseg vmaIterator) seekNextLowerBound(addr usermem.Addr) vmaIterator {
+func (vseg vmaIterator) seekNextLowerBound(addr hostarch.Addr) vmaIterator {
if checkInvariants {
if !vseg.Ok() {
panic("terminal vma iterator")
@@ -572,7 +572,7 @@ func (vseg vmaIterator) seekNextLowerBound(addr usermem.Addr) vmaIterator {
// availableRange returns the subset of vgap.Range() in which new vmas may be
// created without MMapOpts.Unmap == true.
-func (vgap vmaGapIterator) availableRange() usermem.AddrRange {
+func (vgap vmaGapIterator) availableRange() hostarch.AddrRange {
ar := vgap.Range()
next := vgap.NextSegment()
if !next.Ok() || !next.ValuePtr().growsDown {
@@ -580,7 +580,7 @@ func (vgap vmaGapIterator) availableRange() usermem.AddrRange {
}
// Exclude guard pages.
if ar.Length() < guardBytes {
- return usermem.AddrRange{ar.Start, ar.Start}
+ return hostarch.AddrRange{ar.Start, ar.Start}
}
ar.End -= guardBytes
return ar
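Note: findLowestAvailableLocked above shifts the start of a candidate gap up to the requested alignment and accepts the gap only if the shifted range still fits. A self-contained sketch of that check on plain uint64 values (illustrative only; the real code walks mm.vmas gap iterators):

package main

import "fmt"

// alignedStart returns the lowest address >= start that is aligned to
// alignment and still leaves room for length bytes before end.
func alignedStart(start, end, length, alignment uint64) (uint64, bool) {
	if offset := start % alignment; offset != 0 {
		start += alignment - offset
	}
	if start <= end && end-start >= length {
		return start, true
	}
	return 0, false
}

func main() {
	const hugePage = 2 << 20 // assumed value of hostarch.HugePageSize
	s, ok := alignedStart(0x201000, 0x800000, hugePage, hugePage)
	fmt.Printf("%#x %v\n", s, ok) // 0x400000 true
}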
diff --git a/pkg/sentry/mm/vma_set.go b/pkg/sentry/mm/vma_set.go
index 333324640..298c86629 100644
--- a/pkg/sentry/mm/vma_set.go
+++ b/pkg/sentry/mm/vma_set.go
@@ -1,7 +1,7 @@
package mm
import (
- __generics_imported0 "gvisor.dev/gvisor/pkg/usermem"
+ __generics_imported0 "gvisor.dev/gvisor/pkg/hostarch"
)
import (
diff --git a/pkg/sentry/pgalloc/pgalloc.go b/pkg/sentry/pgalloc/pgalloc.go
index a4af3e21b..b81292c46 100644
--- a/pkg/sentry/pgalloc/pgalloc.go
+++ b/pkg/sentry/pgalloc/pgalloc.go
@@ -31,6 +31,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/hostmm"
@@ -38,7 +39,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// MemoryFile is a memmap.File whose pages may be allocated to arbitrary
@@ -283,7 +283,7 @@ const (
chunkMask = chunkSize - 1
// maxPage is the highest 64-bit page.
- maxPage = math.MaxUint64 &^ (usermem.PageSize - 1)
+ maxPage = math.MaxUint64 &^ (hostarch.PageSize - 1)
)
// NewMemoryFile creates a MemoryFile backed by the given file. If
@@ -344,7 +344,7 @@ func NewMemoryFile(file *os.File, opts MemoryFileOpts) (*MemoryFile, error) {
m, _, errno := unix.Syscall6(
unix.SYS_MMAP,
0,
- usermem.PageSize,
+ hostarch.PageSize,
unix.PROT_EXEC,
unix.MAP_SHARED,
file.Fd(),
@@ -357,7 +357,7 @@ func NewMemoryFile(file *os.File, opts MemoryFileOpts) (*MemoryFile, error) {
if _, _, errno := unix.Syscall(
unix.SYS_MUNMAP,
m,
- usermem.PageSize,
+ hostarch.PageSize,
0); errno != 0 {
panic(fmt.Sprintf("failed to unmap PROT_EXEC MemoryFile mapping: %v", errno))
}
@@ -386,7 +386,7 @@ func (f *MemoryFile) Destroy() {
//
// Preconditions: length must be page-aligned and non-zero.
func (f *MemoryFile) Allocate(length uint64, kind usage.MemoryKind) (memmap.FileRange, error) {
- if length == 0 || length%usermem.PageSize != 0 {
+ if length == 0 || length%hostarch.PageSize != 0 {
panic(fmt.Sprintf("invalid allocation length: %#x", length))
}
@@ -395,9 +395,9 @@ func (f *MemoryFile) Allocate(length uint64, kind usage.MemoryKind) (memmap.File
// Align hugepage-and-larger allocations on hugepage boundaries to try
// to take advantage of hugetmpfs.
- alignment := uint64(usermem.PageSize)
- if length >= usermem.HugePageSize {
- alignment = usermem.HugePageSize
+ alignment := uint64(hostarch.PageSize)
+ if length >= hostarch.HugePageSize {
+ alignment = hostarch.HugePageSize
}
// Find a range in the underlying file.
@@ -524,13 +524,13 @@ func (f *MemoryFile) AllocateAndFill(length uint64, kind usage.MemoryKind, r saf
if err != nil {
return memmap.FileRange{}, err
}
- dsts, err := f.MapInternal(fr, usermem.Write)
+ dsts, err := f.MapInternal(fr, hostarch.Write)
if err != nil {
f.DecRef(fr)
return memmap.FileRange{}, err
}
n, err := safemem.ReadFullToBlocks(r, dsts)
- un := uint64(usermem.Addr(n).RoundDown())
+ un := uint64(hostarch.Addr(n).RoundDown())
if un < length {
// Free unused memory and update fr to contain only the memory that is
// still allocated.
@@ -552,7 +552,7 @@ const (
//
// Preconditions: fr.Length() > 0.
func (f *MemoryFile) Decommit(fr memmap.FileRange) error {
- if !fr.WellFormed() || fr.Length() == 0 || fr.Start%usermem.PageSize != 0 || fr.End%usermem.PageSize != 0 {
+ if !fr.WellFormed() || fr.Length() == 0 || fr.Start%hostarch.PageSize != 0 || fr.End%hostarch.PageSize != 0 {
panic(fmt.Sprintf("invalid range: %v", fr))
}
@@ -614,7 +614,7 @@ func (f *MemoryFile) markDecommitted(fr memmap.FileRange) {
// IncRef implements memmap.File.IncRef.
func (f *MemoryFile) IncRef(fr memmap.FileRange) {
- if !fr.WellFormed() || fr.Length() == 0 || fr.Start%usermem.PageSize != 0 || fr.End%usermem.PageSize != 0 {
+ if !fr.WellFormed() || fr.Length() == 0 || fr.Start%hostarch.PageSize != 0 || fr.End%hostarch.PageSize != 0 {
panic(fmt.Sprintf("invalid range: %v", fr))
}
@@ -633,7 +633,7 @@ func (f *MemoryFile) IncRef(fr memmap.FileRange) {
// DecRef implements memmap.File.DecRef.
func (f *MemoryFile) DecRef(fr memmap.FileRange) {
- if !fr.WellFormed() || fr.Length() == 0 || fr.Start%usermem.PageSize != 0 || fr.End%usermem.PageSize != 0 {
+ if !fr.WellFormed() || fr.Length() == 0 || fr.Start%hostarch.PageSize != 0 || fr.End%hostarch.PageSize != 0 {
panic(fmt.Sprintf("invalid range: %v", fr))
}
@@ -669,7 +669,7 @@ func (f *MemoryFile) DecRef(fr memmap.FileRange) {
}
// MapInternal implements memmap.File.MapInternal.
-func (f *MemoryFile) MapInternal(fr memmap.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) {
+func (f *MemoryFile) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) {
if !fr.WellFormed() || fr.Length() == 0 {
panic(fmt.Sprintf("invalid range: %v", fr))
}
@@ -935,7 +935,7 @@ func (f *MemoryFile) updateUsageLocked(currentUsage uint64, checkCommitted func(
// Ensure that we have sufficient buffer for the call
// (one byte per page). The length of each slice must
// be page-aligned.
- bufLen := len(s) / usermem.PageSize
+ bufLen := len(s) / hostarch.PageSize
if len(buf) < bufLen {
buf = make([]byte, bufLen)
}
@@ -967,8 +967,8 @@ func (f *MemoryFile) updateUsageLocked(currentUsage uint64, checkCommitted func(
}
}
committedFR := memmap.FileRange{
- Start: r.Start + uint64(i*usermem.PageSize),
- End: r.Start + uint64(j*usermem.PageSize),
+ Start: r.Start + uint64(i*hostarch.PageSize),
+ End: r.Start + uint64(j*hostarch.PageSize),
}
// Advance seg to committedFR.Start.
for seg.Ok() && seg.End() < committedFR.Start {
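The pgalloc changes above are mechanical renames, but the page arithmetic they touch is easy to misread: Allocate aligns hugepage-and-larger requests on hugepage boundaries, and AllocateAndFill rounds the number of bytes actually filled down to a page boundary before trimming the range. A minimal standalone sketch of that arithmetic, assuming the conventional 4 KiB page and 2 MiB hugepage sizes that hostarch exposes as PageSize and HugePageSize:

package main

import "fmt"

const (
	pageSize     = 4096    // stands in for hostarch.PageSize
	hugePageSize = 2 << 20 // stands in for hostarch.HugePageSize (2 MiB)
)

// roundDown truncates v to the previous page boundary, like hostarch.Addr.RoundDown.
func roundDown(v uint64) uint64 { return v &^ (pageSize - 1) }

// alignmentFor mirrors the Allocate heuristic above: hugepage-and-larger
// requests are aligned on hugepage boundaries to take advantage of hugetmpfs.
func alignmentFor(length uint64) uint64 {
	if length >= hugePageSize {
		return hugePageSize
	}
	return pageSize
}

func main() {
	fmt.Println(alignmentFor(64 << 10)) // 4096
	fmt.Println(alignmentFor(4 << 20))  // 2097152
	fmt.Println(roundDown(8191))        // 4096
}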
diff --git a/pkg/sentry/pgalloc/save_restore.go b/pkg/sentry/pgalloc/save_restore.go
index e05c8d074..345cdde55 100644
--- a/pkg/sentry/pgalloc/save_restore.go
+++ b/pkg/sentry/pgalloc/save_restore.go
@@ -23,11 +23,11 @@ import (
"sync/atomic"
"golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/state"
"gvisor.dev/gvisor/pkg/state/wire"
- "gvisor.dev/gvisor/pkg/usermem"
)
// SaveTo writes f's state to the given stream.
@@ -49,11 +49,11 @@ func (f *MemoryFile) SaveTo(ctx context.Context, w wire.Writer) error {
// Ensure that all pages that contain data have knownCommitted set, since
// we only store knownCommitted pages below.
- zeroPage := make([]byte, usermem.PageSize)
+ zeroPage := make([]byte, hostarch.PageSize)
err := f.updateUsageLocked(0, func(bs []byte, committed []byte) error {
- for pgoff := 0; pgoff < len(bs); pgoff += usermem.PageSize {
- i := pgoff / usermem.PageSize
- pg := bs[pgoff : pgoff+usermem.PageSize]
+ for pgoff := 0; pgoff < len(bs); pgoff += hostarch.PageSize {
+ i := pgoff / hostarch.PageSize
+ pg := bs[pgoff : pgoff+hostarch.PageSize]
if !bytes.Equal(pg, zeroPage) {
committed[i] = 1
continue
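SaveTo only serializes pages that are known to be committed, so it first scans each mapping one page at a time and marks any page that is not entirely zero. A self-contained sketch of that scan, with pageSize standing in for hostarch.PageSize:

package main

import (
	"bytes"
	"fmt"
)

const pageSize = 4096 // stands in for hostarch.PageSize

// markCommitted sets committed[i] = 1 for every page of bs that is not all
// zero, mirroring the loop in MemoryFile.SaveTo.
func markCommitted(bs []byte, committed []byte) {
	zeroPage := make([]byte, pageSize)
	for pgoff := 0; pgoff < len(bs); pgoff += pageSize {
		i := pgoff / pageSize
		if !bytes.Equal(bs[pgoff:pgoff+pageSize], zeroPage) {
			committed[i] = 1
		}
	}
}

func main() {
	bs := make([]byte, 2*pageSize)
	bs[pageSize+100] = 0xff // dirty the second page only
	committed := make([]byte, 2)
	markCommitted(bs, committed)
	fmt.Println(committed) // [0 1]
}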
diff --git a/pkg/sentry/platform/kvm/address_space.go b/pkg/sentry/platform/kvm/address_space.go
index 25c21e843..5524e8727 100644
--- a/pkg/sentry/platform/kvm/address_space.go
+++ b/pkg/sentry/platform/kvm/address_space.go
@@ -18,11 +18,11 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/atomicbitops"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/ring0/pagetables"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
)
// dirtySet tracks vCPUs for invalidation.
@@ -118,7 +118,7 @@ type hostMapEntry struct {
// +checkescape:hard,stack
//
//go:nosplit
-func (as *addressSpace) mapLocked(addr usermem.Addr, m hostMapEntry, at usermem.AccessType) (inv bool) {
+func (as *addressSpace) mapLocked(addr hostarch.Addr, m hostMapEntry, at hostarch.AccessType) (inv bool) {
for m.length > 0 {
physical, length, ok := translateToPhysical(m.addr)
if !ok {
@@ -144,14 +144,14 @@ func (as *addressSpace) mapLocked(addr usermem.Addr, m hostMapEntry, at usermem.
}, physical) || inv
m.addr += length
m.length -= length
- addr += usermem.Addr(length)
+ addr += hostarch.Addr(length)
}
return inv
}
// MapFile implements platform.AddressSpace.MapFile.
-func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.FileRange, at usermem.AccessType, precommit bool) error {
+func (as *addressSpace) MapFile(addr hostarch.Addr, f memmap.File, fr memmap.FileRange, at hostarch.AccessType, precommit bool) error {
as.mu.Lock()
defer as.mu.Unlock()
@@ -165,7 +165,7 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.File
// We don't execute from application file-mapped memory, and guest page
// tables don't care if we have execute permission (but they do need pages
// to be readable).
- bs, err := f.MapInternal(fr, usermem.AccessType{
+ bs, err := f.MapInternal(fr, hostarch.AccessType{
Read: at.Read || at.Execute || precommit,
Write: at.Write,
})
@@ -187,7 +187,7 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.File
// lookup in our host page tables for this translation.
if precommit {
s := b.ToSlice()
- for i := 0; i < len(s); i += usermem.PageSize {
+ for i := 0; i < len(s); i += hostarch.PageSize {
_ = s[i] // Touch to commit.
}
}
@@ -201,7 +201,7 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.File
length: uintptr(b.Len()),
}, at)
inv = inv || prev
- addr += usermem.Addr(b.Len())
+ addr += hostarch.Addr(b.Len())
}
if inv {
as.invalidate()
@@ -215,12 +215,12 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.File
// +checkescape:hard,stack
//
//go:nosplit
-func (as *addressSpace) unmapLocked(addr usermem.Addr, length uint64) bool {
+func (as *addressSpace) unmapLocked(addr hostarch.Addr, length uint64) bool {
return as.pageTables.Unmap(addr, uintptr(length))
}
// Unmap unmaps the given range by calling pagetables.PageTables.Unmap.
-func (as *addressSpace) Unmap(addr usermem.Addr, length uint64) {
+func (as *addressSpace) Unmap(addr hostarch.Addr, length uint64) {
as.mu.Lock()
defer as.mu.Unlock()
diff --git a/pkg/sentry/platform/kvm/bluepill_fault.go b/pkg/sentry/platform/kvm/bluepill_fault.go
index 37c53fa02..28a613a54 100644
--- a/pkg/sentry/platform/kvm/bluepill_fault.go
+++ b/pkg/sentry/platform/kvm/bluepill_fault.go
@@ -18,7 +18,7 @@ import (
"sync/atomic"
"golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
const (
@@ -47,7 +47,7 @@ func yield() {
//
//go:nosplit
func calculateBluepillFault(physical uintptr, phyRegions []physicalRegion) (virtualStart, physicalStart, length uintptr, ok bool) {
- alignedPhysical := physical &^ uintptr(usermem.PageSize-1)
+ alignedPhysical := physical &^ uintptr(hostarch.PageSize-1)
for _, pr := range phyRegions {
end := pr.physical + pr.length
if physical < pr.physical || physical >= end {
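calculateBluepillFault aligns the faulting physical address down to a page boundary and then looks for the physical region that contains it. A standalone sketch of that lookup, using a hypothetical physicalRegion struct with the same two fields used above:

package main

import "fmt"

const pageSize = 4096 // stands in for hostarch.PageSize

type physicalRegion struct {
	physical, length uintptr
}

// alignAndFind mirrors calculateBluepillFault: align the faulting physical
// address down to a page boundary, then check that some region contains it.
func alignAndFind(physical uintptr, regions []physicalRegion) (aligned uintptr, ok bool) {
	aligned = physical &^ uintptr(pageSize-1)
	for _, pr := range regions {
		if physical >= pr.physical && physical < pr.physical+pr.length {
			return aligned, true
		}
	}
	return 0, false
}

func main() {
	regions := []physicalRegion{{physical: 0x10000, length: 0x8000}}
	fmt.Println(alignAndFind(0x12345, regions)) // 73728 (0x12000) true
}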
diff --git a/pkg/sentry/platform/kvm/context.go b/pkg/sentry/platform/kvm/context.go
index 706fa53dc..f4d4473a8 100644
--- a/pkg/sentry/platform/kvm/context.go
+++ b/pkg/sentry/platform/kvm/context.go
@@ -18,11 +18,11 @@ import (
"sync/atomic"
pkgcontext "gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/sentry/platform/interrupt"
- "gvisor.dev/gvisor/pkg/usermem"
)
// context is an implementation of the platform context.
@@ -40,7 +40,7 @@ type context struct {
}
// Switch runs the provided context in the given address space.
-func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac arch.Context, _ int32) (*arch.SignalInfo, usermem.AccessType, error) {
+func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac arch.Context, _ int32) (*arch.SignalInfo, hostarch.AccessType, error) {
as := mm.AddressSpace()
localAS := as.(*addressSpace)
@@ -50,7 +50,7 @@ func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac a
// Enable interrupts (i.e. calls to vCPU.Notify).
if !c.interrupt.Enable(cpu) {
c.machine.Put(cpu) // Already preempted.
- return nil, usermem.NoAccess, platform.ErrContextInterrupt
+ return nil, hostarch.NoAccess, platform.ErrContextInterrupt
}
// Set the active address space.
diff --git a/pkg/sentry/platform/kvm/kvm.go b/pkg/sentry/platform/kvm/kvm.go
index 92c05a9ad..aac0fdffe 100644
--- a/pkg/sentry/platform/kvm/kvm.go
+++ b/pkg/sentry/platform/kvm/kvm.go
@@ -20,11 +20,11 @@ import (
"os"
"golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/ring0/pagetables"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
)
// userMemoryRegion is a region of physical memory.
@@ -146,13 +146,13 @@ func (*KVM) MapUnit() uint64 {
}
// MinUserAddress returns the lowest available address.
-func (*KVM) MinUserAddress() usermem.Addr {
- return usermem.PageSize
+func (*KVM) MinUserAddress() hostarch.Addr {
+ return hostarch.PageSize
}
// MaxUserAddress returns the first address that may not be used.
-func (*KVM) MaxUserAddress() usermem.Addr {
- return usermem.Addr(ring0.MaximumUserAddress)
+func (*KVM) MaxUserAddress() hostarch.Addr {
+ return hostarch.Addr(ring0.MaximumUserAddress)
}
// NewAddressSpace returns a new pagetable root.
diff --git a/pkg/sentry/platform/kvm/machine.go b/pkg/sentry/platform/kvm/machine.go
index 5d586f257..b3d4188a3 100644
--- a/pkg/sentry/platform/kvm/machine.go
+++ b/pkg/sentry/platform/kvm/machine.go
@@ -21,13 +21,13 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/atomicbitops"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/procid"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/ring0/pagetables"
ktime "gvisor.dev/gvisor/pkg/sentry/time"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
)
// machine contains state associated with the VM as a whole.
@@ -227,9 +227,9 @@ func newMachine(vm int) (*machine, error) {
applyPhysicalRegions(func(pr physicalRegion) bool {
// Map everything in the lower half.
m.kernel.PageTables.Map(
- usermem.Addr(pr.virtual),
+ hostarch.Addr(pr.virtual),
pr.length,
- pagetables.MapOpts{AccessType: usermem.AnyAccess},
+ pagetables.MapOpts{AccessType: hostarch.AnyAccess},
pr.physical)
return true // Keep iterating.
diff --git a/pkg/sentry/platform/kvm/machine_amd64.go b/pkg/sentry/platform/kvm/machine_amd64.go
index 3af96c7e5..e8e209249 100644
--- a/pkg/sentry/platform/kvm/machine_amd64.go
+++ b/pkg/sentry/platform/kvm/machine_amd64.go
@@ -24,13 +24,13 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/ring0/pagetables"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
"gvisor.dev/gvisor/pkg/sentry/platform"
ktime "gvisor.dev/gvisor/pkg/sentry/time"
- "gvisor.dev/gvisor/pkg/usermem"
)
// initArchState initializes architecture-specific state.
@@ -41,7 +41,7 @@ func (m *machine) initArchState() error {
unix.SYS_IOCTL,
uintptr(m.fd),
_KVM_SET_TSS_ADDR,
- uintptr(reservedMemory-(3*usermem.PageSize))); errno != 0 {
+ uintptr(reservedMemory-(3*hostarch.PageSize))); errno != 0 {
return errno
}
@@ -256,19 +256,19 @@ func (c *vCPU) setSystemTime() error {
// nonCanonical generates the signal return for a non-canonical address.
//
//go:nosplit
-func nonCanonical(addr uint64, signal int32, info *arch.SignalInfo) (usermem.AccessType, error) {
+func nonCanonical(addr uint64, signal int32, info *arch.SignalInfo) (hostarch.AccessType, error) {
*info = arch.SignalInfo{
Signo: signal,
Code: arch.SignalInfoKernel,
}
info.SetAddr(addr) // Include address.
- return usermem.NoAccess, platform.ErrContextSignal
+ return hostarch.NoAccess, platform.ErrContextSignal
}
// fault generates an appropriate fault return.
//
//go:nosplit
-func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, error) {
+func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (hostarch.AccessType, error) {
bluepill(c) // Probably no-op, but may not be.
faultAddr := ring0.ReadCR2()
code, user := c.ErrorCode()
@@ -276,12 +276,12 @@ func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, e
// The last fault serviced by this CPU was not a user
// fault, so we can't reliably trust the faultAddr or
// the code provided here. We need to re-execute.
- return usermem.NoAccess, platform.ErrContextInterrupt
+ return hostarch.NoAccess, platform.ErrContextInterrupt
}
// Reset the pointed SignalInfo.
*info = arch.SignalInfo{Signo: signal}
info.SetAddr(uint64(faultAddr))
- accessType := usermem.AccessType{
+ accessType := hostarch.AccessType{
Read: code&(1<<1) == 0,
Write: code&(1<<1) != 0,
Execute: code&(1<<4) != 0,
@@ -310,14 +310,14 @@ func loadByte(ptr *byte) byte {
//go:nosplit
func prefaultFloatingPointState(data *fpu.State) {
size := len(*data)
- for i := 0; i < size; i += usermem.PageSize {
+ for i := 0; i < size; i += hostarch.PageSize {
loadByte(&(*data)[i])
}
loadByte(&(*data)[size-1])
}
// SwitchToUser unpacks architectural-details.
-func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (usermem.AccessType, error) {
+func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (hostarch.AccessType, error) {
// Check for canonical addresses.
if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Rip) {
return nonCanonical(regs.Rip, int32(unix.SIGSEGV), info)
@@ -353,7 +353,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
switch vector {
case ring0.Syscall, ring0.SyscallInt80:
// Fast path: system call executed.
- return usermem.NoAccess, nil
+ return hostarch.NoAccess, nil
case ring0.PageFault:
return c.fault(int32(unix.SIGSEGV), info)
@@ -364,7 +364,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
Code: 1, // TRAP_BRKPT (breakpoint).
}
info.SetAddr(switchOpts.Registers.Rip) // Include address.
- return usermem.AccessType{}, platform.ErrContextSignal
+ return hostarch.AccessType{}, platform.ErrContextSignal
case ring0.GeneralProtectionFault,
ring0.SegmentNotPresent,
@@ -380,9 +380,9 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
// When CPUID faulting is enabled, we will generate a #GP(0) when
// userspace executes a CPUID instruction. This is handled above,
// because we need to be able to map and read user memory.
- return usermem.AccessType{}, platform.ErrContextSignalCPUID
+ return hostarch.AccessType{}, platform.ErrContextSignalCPUID
}
- return usermem.AccessType{}, platform.ErrContextSignal
+ return hostarch.AccessType{}, platform.ErrContextSignal
case ring0.InvalidOpcode:
*info = arch.SignalInfo{
@@ -390,7 +390,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
Code: 1, // ILL_ILLOPC (illegal opcode).
}
info.SetAddr(switchOpts.Registers.Rip) // Include address.
- return usermem.AccessType{}, platform.ErrContextSignal
+ return hostarch.AccessType{}, platform.ErrContextSignal
case ring0.DivideByZero:
*info = arch.SignalInfo{
@@ -398,7 +398,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
Code: 1, // FPE_INTDIV (divide by zero).
}
info.SetAddr(switchOpts.Registers.Rip) // Include address.
- return usermem.AccessType{}, platform.ErrContextSignal
+ return hostarch.AccessType{}, platform.ErrContextSignal
case ring0.Overflow:
*info = arch.SignalInfo{
@@ -406,7 +406,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
Code: 2, // FPE_INTOVF (integer overflow).
}
info.SetAddr(switchOpts.Registers.Rip) // Include address.
- return usermem.AccessType{}, platform.ErrContextSignal
+ return hostarch.AccessType{}, platform.ErrContextSignal
case ring0.X87FloatingPointException,
ring0.SIMDFloatingPointException:
@@ -415,17 +415,17 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
Code: 7, // FPE_FLTINV (invalid operation).
}
info.SetAddr(switchOpts.Registers.Rip) // Include address.
- return usermem.AccessType{}, platform.ErrContextSignal
+ return hostarch.AccessType{}, platform.ErrContextSignal
case ring0.Vector(bounce): // ring0.VirtualizationException
- return usermem.NoAccess, platform.ErrContextInterrupt
+ return hostarch.NoAccess, platform.ErrContextInterrupt
case ring0.AlignmentCheck:
*info = arch.SignalInfo{
Signo: int32(unix.SIGBUS),
Code: 2, // BUS_ADRERR (physical address does not exist).
}
- return usermem.NoAccess, platform.ErrContextSignal
+ return hostarch.NoAccess, platform.ErrContextSignal
case ring0.NMI:
// An NMI is generated only when a fault is not serviceable by
@@ -471,9 +471,9 @@ func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {
panic("impossible translation")
}
pageTable.Map(
- usermem.Addr(ring0.KernelStartAddress|r.virtual),
+ hostarch.Addr(ring0.KernelStartAddress|r.virtual),
r.length,
- pagetables.MapOpts{AccessType: usermem.Execute},
+ pagetables.MapOpts{AccessType: hostarch.Execute},
physical)
}
})
@@ -484,9 +484,9 @@ func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {
panic("impossible translation")
}
pageTable.Map(
- usermem.Addr(ring0.KernelStartAddress|start),
+ hostarch.Addr(ring0.KernelStartAddress|start),
regionLen,
- pagetables.MapOpts{AccessType: usermem.ReadWrite},
+ pagetables.MapOpts{AccessType: hostarch.ReadWrite},
physical)
}
}
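The fault path above decodes the x86 page-fault error code pushed by the hardware: bit 1 distinguishes writes from reads and bit 4 marks an instruction fetch. A standalone sketch of that decode, with AccessType standing in for hostarch.AccessType:

package main

import "fmt"

// AccessType is a stand-in for hostarch.AccessType.
type AccessType struct {
	Read, Write, Execute bool
}

// decodePageFault converts an x86 #PF error code into an access type,
// mirroring vCPU.fault: bit 1 set means a write, bit 4 set means an
// instruction fetch.
func decodePageFault(code uint64) AccessType {
	return AccessType{
		Read:    code&(1<<1) == 0,
		Write:   code&(1<<1) != 0,
		Execute: code&(1<<4) != 0,
	}
}

func main() {
	fmt.Printf("%+v\n", decodePageFault(0x2))  // {Read:false Write:true Execute:false}
	fmt.Printf("%+v\n", decodePageFault(0x10)) // {Read:true Write:false Execute:true}
}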
diff --git a/pkg/sentry/platform/kvm/machine_arm64.go b/pkg/sentry/platform/kvm/machine_arm64.go
index 2edc9d1b2..03e84d804 100644
--- a/pkg/sentry/platform/kvm/machine_arm64.go
+++ b/pkg/sentry/platform/kvm/machine_arm64.go
@@ -17,12 +17,12 @@
package kvm
import (
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/ring0/pagetables"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
"gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/usermem"
)
type vCPUArchState struct {
@@ -53,9 +53,9 @@ const (
func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {
applyPhysicalRegions(func(pr physicalRegion) bool {
pageTable.Map(
- usermem.Addr(ring0.KernelStartAddress|pr.virtual),
+ hostarch.Addr(ring0.KernelStartAddress|pr.virtual),
pr.length,
- pagetables.MapOpts{AccessType: usermem.AnyAccess, Global: true},
+ pagetables.MapOpts{AccessType: hostarch.AnyAccess, Global: true},
pr.physical)
return true // Keep iterating.
@@ -117,13 +117,13 @@ func availableRegionsForSetMem() (phyRegions []physicalRegion) {
// nonCanonical generates the signal return for a non-canonical address.
//
//go:nosplit
-func nonCanonical(addr uint64, signal int32, info *arch.SignalInfo) (usermem.AccessType, error) {
+func nonCanonical(addr uint64, signal int32, info *arch.SignalInfo) (hostarch.AccessType, error) {
*info = arch.SignalInfo{
Signo: signal,
Code: arch.SignalInfoKernel,
}
info.SetAddr(addr) // Include address.
- return usermem.NoAccess, platform.ErrContextSignal
+ return hostarch.NoAccess, platform.ErrContextSignal
}
// isInstructionAbort returns true if it is an instruction abort.
@@ -148,7 +148,7 @@ func isWriteFault(code uint64) bool {
// fault generates an appropriate fault return.
//
//go:nosplit
-func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, error) {
+func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (hostarch.AccessType, error) {
bluepill(c) // Probably no-op, but may not be.
faultAddr := c.GetFaultAddr()
code, user := c.ErrorCode()
@@ -157,7 +157,7 @@ func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, e
// The last fault serviced by this CPU was not a user
// fault, so we can't reliably trust the faultAddr or
// the code provided here. We need to re-execute.
- return usermem.NoAccess, platform.ErrContextInterrupt
+ return hostarch.NoAccess, platform.ErrContextInterrupt
}
// Reset the pointed SignalInfo.
@@ -174,7 +174,7 @@ func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, e
info.Code = 2
}
- accessType := usermem.AccessType{
+ accessType := hostarch.AccessType{
Read: !isWriteFault(uint64(code)),
Write: isWriteFault(uint64(code)),
Execute: isInstructionAbort(uint64(code)),
diff --git a/pkg/sentry/platform/kvm/machine_arm64_unsafe.go b/pkg/sentry/platform/kvm/machine_arm64_unsafe.go
index e7d5f3193..634e55ec0 100644
--- a/pkg/sentry/platform/kvm/machine_arm64_unsafe.go
+++ b/pkg/sentry/platform/kvm/machine_arm64_unsafe.go
@@ -23,12 +23,12 @@ import (
"unsafe"
"golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/ring0/pagetables"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
"gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/usermem"
)
type kvmVcpuInit struct {
@@ -209,7 +209,7 @@ func (c *vCPU) getOneRegister(reg *kvmOneReg) error {
}
// SwitchToUser unpacks architectural-details.
-func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (usermem.AccessType, error) {
+func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (hostarch.AccessType, error) {
// Check for canonical addresses.
if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Pc) {
return nonCanonical(regs.Pc, int32(unix.SIGSEGV), info)
@@ -246,13 +246,13 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
switch vector {
case ring0.Syscall:
// Fast path: system call executed.
- return usermem.NoAccess, nil
+ return hostarch.NoAccess, nil
case ring0.PageFault:
return c.fault(int32(unix.SIGSEGV), info)
case ring0.El0ErrNMI:
return c.fault(int32(unix.SIGBUS), info)
case ring0.Vector(bounce): // ring0.VirtualizationException.
- return usermem.NoAccess, platform.ErrContextInterrupt
+ return hostarch.NoAccess, platform.ErrContextInterrupt
case ring0.El0SyncUndef:
return c.fault(int32(unix.SIGILL), info)
case ring0.El0SyncDbg:
@@ -261,16 +261,16 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
Code: 1, // TRAP_BRKPT (breakpoint).
}
info.SetAddr(switchOpts.Registers.Pc) // Include address.
- return usermem.AccessType{}, platform.ErrContextSignal
+ return hostarch.AccessType{}, platform.ErrContextSignal
case ring0.El0SyncSpPc:
*info = arch.SignalInfo{
Signo: int32(unix.SIGBUS),
Code: 2, // BUS_ADRERR (physical address does not exist).
}
- return usermem.NoAccess, platform.ErrContextSignal
+ return hostarch.NoAccess, platform.ErrContextSignal
case ring0.El0SyncSys,
ring0.El0SyncWfx:
- return usermem.NoAccess, nil // skip for now.
+ return hostarch.NoAccess, nil // skip for now.
default:
panic(fmt.Sprintf("unexpected vector: 0x%x", vector))
}
diff --git a/pkg/sentry/platform/kvm/physical_map.go b/pkg/sentry/platform/kvm/physical_map.go
index 7376d8b8d..d812e6c26 100644
--- a/pkg/sentry/platform/kvm/physical_map.go
+++ b/pkg/sentry/platform/kvm/physical_map.go
@@ -19,9 +19,9 @@ import (
"sort"
"golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/ring0"
- "gvisor.dev/gvisor/pkg/usermem"
)
type region struct {
@@ -81,7 +81,7 @@ func fillAddressSpace() (excludedRegions []region) {
// faultBlockSize, potentially causing up to faultBlockSize bytes in
// internal fragmentation for each physical region. So we need to
// account for this properly during allocation.
- requiredAddr, ok := usermem.Addr(vSize - pSize + faultBlockSize).RoundUp()
+ requiredAddr, ok := hostarch.Addr(vSize - pSize + faultBlockSize).RoundUp()
if !ok {
panic(fmt.Sprintf(
"overflow for vSize (%x) - pSize (%x) + faultBlockSize (%x)",
@@ -99,7 +99,7 @@ func fillAddressSpace() (excludedRegions []region) {
0, 0)
if errno != 0 {
// Attempt half the size; overflow not possible.
- currentAddr, _ := usermem.Addr(current >> 1).RoundUp()
+ currentAddr, _ := hostarch.Addr(current >> 1).RoundUp()
current = uintptr(currentAddr)
continue
}
@@ -134,8 +134,8 @@ func computePhysicalRegions(excludedRegions []region) (physicalRegions []physica
return
}
if virtual == 0 {
- virtual += usermem.PageSize
- length -= usermem.PageSize
+ virtual += hostarch.PageSize
+ length -= hostarch.PageSize
}
if end := virtual + length; end > ring0.MaximumUserAddress {
length -= (end - ring0.MaximumUserAddress)
diff --git a/pkg/sentry/platform/kvm/virtual_map.go b/pkg/sentry/platform/kvm/virtual_map.go
index 4dcdbf8a7..01d9eb39d 100644
--- a/pkg/sentry/platform/kvm/virtual_map.go
+++ b/pkg/sentry/platform/kvm/virtual_map.go
@@ -22,12 +22,12 @@ import (
"regexp"
"strconv"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
type virtualRegion struct {
region
- accessType usermem.AccessType
+ accessType hostarch.AccessType
shared bool
offset uintptr
filename string
@@ -92,7 +92,7 @@ func applyVirtualRegions(fn func(vr virtualRegion)) error {
virtual: uintptr(start),
length: uintptr(end - start),
},
- accessType: usermem.AccessType{
+ accessType: hostarch.AccessType{
Read: read,
Write: write,
Execute: execute,
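applyVirtualRegions builds the access type from the permission column of /proc/self/maps. A minimal sketch of that mapping (the real code matches the whole line with a regexp); AccessType again stands in for hostarch.AccessType:

package main

import "fmt"

// AccessType is a stand-in for hostarch.AccessType.
type AccessType struct {
	Read, Write, Execute bool
}

// parsePerms converts a /proc/self/maps permission field such as "r-xp"
// into an access type plus the shared flag.
func parsePerms(perms string) (at AccessType, shared bool) {
	if len(perms) < 4 {
		return at, false
	}
	at.Read = perms[0] == 'r'
	at.Write = perms[1] == 'w'
	at.Execute = perms[2] == 'x'
	return at, perms[3] == 's'
}

func main() {
	at, shared := parsePerms("r-xp")
	fmt.Printf("%+v shared=%v\n", at, shared) // {Read:true Write:false Execute:true} shared=false
}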
diff --git a/pkg/sentry/platform/mmap_min_addr.go b/pkg/sentry/platform/mmap_min_addr.go
index 091c2e365..7335bd802 100644
--- a/pkg/sentry/platform/mmap_min_addr.go
+++ b/pkg/sentry/platform/mmap_min_addr.go
@@ -20,7 +20,7 @@ import (
"strconv"
"strings"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// systemMMapMinAddrSource is the source file.
@@ -30,8 +30,8 @@ const systemMMapMinAddrSource = "/proc/sys/vm/mmap_min_addr"
var systemMMapMinAddr uint64
// SystemMMapMinAddr returns the minimum system address.
-func SystemMMapMinAddr() usermem.Addr {
- return usermem.Addr(systemMMapMinAddr)
+func SystemMMapMinAddr() hostarch.Addr {
+ return hostarch.Addr(systemMMapMinAddr)
}
// MMapMinAddr is a size zero struct that implements MinUserAddress based on
@@ -41,7 +41,7 @@ type MMapMinAddr struct {
}
// MinUserAddress implements platform.MinUserAddress.
-func (*MMapMinAddr) MinUserAddress() usermem.Addr {
+func (*MMapMinAddr) MinUserAddress() hostarch.Addr {
return SystemMMapMinAddr()
}
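SystemMMapMinAddr simply reports a value read once from /proc/sys/vm/mmap_min_addr. A simplified, self-contained sketch of that read, with error handling collapsed:

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readMMapMinAddr reads the host's minimum mappable address, mirroring the
// initialization behind SystemMMapMinAddr.
func readMMapMinAddr() (uint64, error) {
	b, err := os.ReadFile("/proc/sys/vm/mmap_min_addr")
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(string(b)), 10, 64)
}

func main() {
	addr, err := readMMapMinAddr()
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("mmap_min_addr: %#x\n", addr)
}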
diff --git a/pkg/sentry/platform/platform.go b/pkg/sentry/platform/platform.go
index dcfe839a7..ef7814a6f 100644
--- a/pkg/sentry/platform/platform.go
+++ b/pkg/sentry/platform/platform.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/seccomp"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/hostmm"
@@ -62,16 +63,16 @@ type Platform interface {
// for AddressSpace.MapFile. As a special case, a MapUnit of 0 indicates
// that the cost of AddressSpace.MapFile is effectively independent of the
// number of pages mapped. If MapUnit is non-zero, it must be a power-of-2
- // multiple of usermem.PageSize.
+ // multiple of hostarch.PageSize.
MapUnit() uint64
// MinUserAddress returns the minimum mappable address on this
// platform.
- MinUserAddress() usermem.Addr
+ MinUserAddress() hostarch.Addr
// MaxUserAddress returns the maximum mappable address on this
// platform.
- MaxUserAddress() usermem.Addr
+ MaxUserAddress() hostarch.Addr
// NewAddressSpace returns a new memory context for this platform.
//
@@ -172,7 +173,7 @@ type MemoryManager interface {
// usermem.IO provides access to the contents of a virtual memory space.
usermem.IO
// MMap establishes a memory mapping.
- MMap(ctx context.Context, opts memmap.MMapOpts) (usermem.Addr, error)
+ MMap(ctx context.Context, opts memmap.MMapOpts) (hostarch.Addr, error)
// AddressSpace returns the AddressSpace bound to mm.
AddressSpace() AddressSpace
}
@@ -195,7 +196,7 @@ type Context interface {
//
// - ErrContextSignal: The Context was interrupted by a signal. The
// returned *arch.SignalInfo contains information about the signal. If
- // arch.SignalInfo.Signo == SIGSEGV, the returned usermem.AccessType
+ // arch.SignalInfo.Signo == SIGSEGV, the returned hostarch.AccessType
// contains the access type of the triggering fault. The caller owns
// the returned SignalInfo.
//
@@ -206,7 +207,7 @@ type Context interface {
// concurrent call to Switch().
//
// - ErrContextCPUPreempted: See the definition of that error for details.
- Switch(ctx context.Context, mm MemoryManager, ac arch.Context, cpu int32) (*arch.SignalInfo, usermem.AccessType, error)
+ Switch(ctx context.Context, mm MemoryManager, ac arch.Context, cpu int32) (*arch.SignalInfo, hostarch.AccessType, error)
// PullFullState() pulls a full state of the application thread.
//
@@ -302,14 +303,14 @@ type AddressSpace interface {
// * at.Any() == true.
// * At least one reference must be held on all pages in fr, and must
// continue to be held as long as pages are mapped.
- MapFile(addr usermem.Addr, f memmap.File, fr memmap.FileRange, at usermem.AccessType, precommit bool) error
+ MapFile(addr hostarch.Addr, f memmap.File, fr memmap.FileRange, at hostarch.AccessType, precommit bool) error
// Unmap unmaps the given range.
//
// Preconditions:
// * addr is page-aligned.
// * length > 0.
- Unmap(addr usermem.Addr, length uint64)
+ Unmap(addr hostarch.Addr, length uint64)
// Release releases this address space. After releasing, a new AddressSpace
// must be acquired via platform.NewAddressSpace().
@@ -337,67 +338,67 @@ type AddressSpaceIO interface {
// CopyOut copies len(src) bytes from src to the memory mapped at addr. It
// returns the number of bytes copied. If the number of bytes copied is <
// len(src), it returns a non-nil error explaining why.
- CopyOut(addr usermem.Addr, src []byte) (int, error)
+ CopyOut(addr hostarch.Addr, src []byte) (int, error)
// CopyIn copies len(dst) bytes from the memory mapped at addr to dst.
// It returns the number of bytes copied. If the number of bytes copied is
// < len(dst), it returns a non-nil error explaining why.
- CopyIn(addr usermem.Addr, dst []byte) (int, error)
+ CopyIn(addr hostarch.Addr, dst []byte) (int, error)
// ZeroOut sets toZero bytes to 0, starting at addr. It returns the number
// of bytes zeroed. If the number of bytes zeroed is < toZero, it returns a
// non-nil error explaining why.
- ZeroOut(addr usermem.Addr, toZero uintptr) (uintptr, error)
+ ZeroOut(addr hostarch.Addr, toZero uintptr) (uintptr, error)
// SwapUint32 atomically sets the uint32 value at addr to new and returns
// the previous value.
//
// Preconditions: addr must be aligned to a 4-byte boundary.
- SwapUint32(addr usermem.Addr, new uint32) (uint32, error)
+ SwapUint32(addr hostarch.Addr, new uint32) (uint32, error)
// CompareAndSwapUint32 atomically compares the uint32 value at addr to
// old; if they are equal, the value in memory is replaced by new. In
// either case, the previous value stored in memory is returned.
//
// Preconditions: addr must be aligned to a 4-byte boundary.
- CompareAndSwapUint32(addr usermem.Addr, old, new uint32) (uint32, error)
+ CompareAndSwapUint32(addr hostarch.Addr, old, new uint32) (uint32, error)
// LoadUint32 atomically loads the uint32 value at addr and returns it.
//
// Preconditions: addr must be aligned to a 4-byte boundary.
- LoadUint32(addr usermem.Addr) (uint32, error)
+ LoadUint32(addr hostarch.Addr) (uint32, error)
}
// NoAddressSpaceIO implements AddressSpaceIO methods by panicking.
type NoAddressSpaceIO struct{}
// CopyOut implements AddressSpaceIO.CopyOut.
-func (NoAddressSpaceIO) CopyOut(addr usermem.Addr, src []byte) (int, error) {
+func (NoAddressSpaceIO) CopyOut(addr hostarch.Addr, src []byte) (int, error) {
panic("This platform does not support AddressSpaceIO")
}
// CopyIn implements AddressSpaceIO.CopyIn.
-func (NoAddressSpaceIO) CopyIn(addr usermem.Addr, dst []byte) (int, error) {
+func (NoAddressSpaceIO) CopyIn(addr hostarch.Addr, dst []byte) (int, error) {
panic("This platform does not support AddressSpaceIO")
}
// ZeroOut implements AddressSpaceIO.ZeroOut.
-func (NoAddressSpaceIO) ZeroOut(addr usermem.Addr, toZero uintptr) (uintptr, error) {
+func (NoAddressSpaceIO) ZeroOut(addr hostarch.Addr, toZero uintptr) (uintptr, error) {
panic("This platform does not support AddressSpaceIO")
}
// SwapUint32 implements AddressSpaceIO.SwapUint32.
-func (NoAddressSpaceIO) SwapUint32(addr usermem.Addr, new uint32) (uint32, error) {
+func (NoAddressSpaceIO) SwapUint32(addr hostarch.Addr, new uint32) (uint32, error) {
panic("This platform does not support AddressSpaceIO")
}
// CompareAndSwapUint32 implements AddressSpaceIO.CompareAndSwapUint32.
-func (NoAddressSpaceIO) CompareAndSwapUint32(addr usermem.Addr, old, new uint32) (uint32, error) {
+func (NoAddressSpaceIO) CompareAndSwapUint32(addr hostarch.Addr, old, new uint32) (uint32, error) {
panic("This platform does not support AddressSpaceIO")
}
// LoadUint32 implements AddressSpaceIO.LoadUint32.
-func (NoAddressSpaceIO) LoadUint32(addr usermem.Addr) (uint32, error) {
+func (NoAddressSpaceIO) LoadUint32(addr hostarch.Addr) (uint32, error) {
panic("This platform does not support AddressSpaceIO")
}
@@ -406,7 +407,7 @@ func (NoAddressSpaceIO) LoadUint32(addr usermem.Addr) (uint32, error) {
// permissions.
type SegmentationFault struct {
// Addr is the address at which the fault occurred.
- Addr usermem.Addr
+ Addr hostarch.Addr
}
// Error implements error.Error.
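NoAddressSpaceIO exists so that platforms which never access application memory directly (KVM-style platforms, for instance) can embed it and still satisfy AddressSpaceIO. A trimmed, self-contained sketch of that embedding pattern; the interface here is a two-method subset, and hypotheticalAddressSpace is illustrative only:

package main

import "fmt"

// Addr stands in for hostarch.Addr.
type Addr uintptr

// AddressSpaceIO is a trimmed copy of the interface above.
type AddressSpaceIO interface {
	CopyOut(addr Addr, src []byte) (int, error)
	CopyIn(addr Addr, dst []byte) (int, error)
}

// NoAddressSpaceIO panics from every method, like the platform helper.
type NoAddressSpaceIO struct{}

func (NoAddressSpaceIO) CopyOut(addr Addr, src []byte) (int, error) {
	panic("This platform does not support AddressSpaceIO")
}

func (NoAddressSpaceIO) CopyIn(addr Addr, dst []byte) (int, error) {
	panic("This platform does not support AddressSpaceIO")
}

// hypotheticalAddressSpace embeds NoAddressSpaceIO so it satisfies
// AddressSpaceIO without supporting direct memory access.
type hypotheticalAddressSpace struct {
	NoAddressSpaceIO
}

func main() {
	var as AddressSpaceIO = hypotheticalAddressSpace{}
	fmt.Printf("%T implements AddressSpaceIO\n", as)
}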
diff --git a/pkg/sentry/platform/ptrace/ptrace.go b/pkg/sentry/platform/ptrace/ptrace.go
index 571bfcc2e..828458ce2 100644
--- a/pkg/sentry/platform/ptrace/ptrace.go
+++ b/pkg/sentry/platform/ptrace/ptrace.go
@@ -49,11 +49,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
pkgcontext "gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/sentry/platform/interrupt"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
)
var (
@@ -88,28 +88,28 @@ type context struct {
// lastFaultAddr is the last faulting address; this is only meaningful if
// lastFaultSP is non-nil.
- lastFaultAddr usermem.Addr
+ lastFaultAddr hostarch.Addr
// lastFaultIP is the address of the last faulting instruction;
// this is also only meaningful if lastFaultSP is non-nil.
- lastFaultIP usermem.Addr
+ lastFaultIP hostarch.Addr
}
// Switch runs the provided context in the given address space.
-func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac arch.Context, cpu int32) (*arch.SignalInfo, usermem.AccessType, error) {
+func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac arch.Context, cpu int32) (*arch.SignalInfo, hostarch.AccessType, error) {
as := mm.AddressSpace()
s := as.(*subprocess)
isSyscall := s.switchToApp(c, ac)
var (
faultSP *subprocess
- faultAddr usermem.Addr
- faultIP usermem.Addr
+ faultAddr hostarch.Addr
+ faultIP hostarch.Addr
)
if !isSyscall && linux.Signal(c.signalInfo.Signo) == linux.SIGSEGV {
faultSP = s
- faultAddr = usermem.Addr(c.signalInfo.Addr())
- faultIP = usermem.Addr(ac.IP())
+ faultAddr = hostarch.Addr(c.signalInfo.Addr())
+ faultIP = hostarch.Addr(ac.IP())
}
// Update the context to reflect the outcome of this context switch.
@@ -140,14 +140,14 @@ func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac a
}
if isSyscall {
- return nil, usermem.NoAccess, nil
+ return nil, hostarch.NoAccess, nil
}
si := c.signalInfo
if faultSP == nil {
// Non-fault signal.
- return &si, usermem.NoAccess, platform.ErrContextSignal
+ return &si, hostarch.NoAccess, platform.ErrContextSignal
}
// Got a page fault. Ideally, we'd get real fault type here, but ptrace
@@ -157,7 +157,7 @@ func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac a
// pointer.
//
// It was a write fault if the fault is immediately repeated.
- at := usermem.Read
+ at := hostarch.Read
if faultAddr == faultIP {
at.Execute = true
}
@@ -235,8 +235,8 @@ func (*PTrace) MapUnit() uint64 {
// MaxUserAddress returns the first address that may not be used by user
// applications.
-func (*PTrace) MaxUserAddress() usermem.Addr {
- return usermem.Addr(stubStart)
+func (*PTrace) MaxUserAddress() hostarch.Addr {
+ return hostarch.Addr(stubStart)
}
// NewAddressSpace returns a new subprocess.
diff --git a/pkg/sentry/platform/ptrace/ptrace_unsafe.go b/pkg/sentry/platform/ptrace/ptrace_unsafe.go
index 01e73b019..facb96011 100644
--- a/pkg/sentry/platform/ptrace/ptrace_unsafe.go
+++ b/pkg/sentry/platform/ptrace/ptrace_unsafe.go
@@ -19,9 +19,9 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
- "gvisor.dev/gvisor/pkg/usermem"
)
// getRegs gets the general purpose register set.
@@ -122,7 +122,7 @@ func (t *thread) getSignalInfo(si *arch.SignalInfo) error {
//
// Precondition: the OS thread must be locked and own t.
func (t *thread) clone() (*thread, error) {
- r, ok := usermem.Addr(stackPointer(&t.initRegs)).RoundUp()
+ r, ok := hostarch.Addr(stackPointer(&t.initRegs)).RoundUp()
if !ok {
return nil, unix.EINVAL
}
diff --git a/pkg/sentry/platform/ptrace/stub_unsafe.go b/pkg/sentry/platform/ptrace/stub_unsafe.go
index 780227248..5c9b7784f 100644
--- a/pkg/sentry/platform/ptrace/stub_unsafe.go
+++ b/pkg/sentry/platform/ptrace/stub_unsafe.go
@@ -19,8 +19,8 @@ import (
"unsafe"
"golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
)
// stub is defined in arch-specific assembly.
@@ -45,8 +45,8 @@ func stubInit() {
stubLen := int(safecopy.FindEndAddress(stubBegin) - stubBegin)
stubSlice := unsafeSlice(stubBegin, stubLen)
mapLen := uintptr(stubLen)
- if offset := mapLen % usermem.PageSize; offset != 0 {
- mapLen += usermem.PageSize - offset
+ if offset := mapLen % hostarch.PageSize; offset != 0 {
+ mapLen += hostarch.PageSize - offset
}
for stubStart > 0 {
@@ -70,7 +70,7 @@ func stubInit() {
}
// Attempt to begin at a lower address.
- stubStart -= uintptr(usermem.PageSize)
+ stubStart -= uintptr(hostarch.PageSize)
continue
}
diff --git a/pkg/sentry/platform/ptrace/subprocess.go b/pkg/sentry/platform/ptrace/subprocess.go
index d2284487a..9c73a725a 100644
--- a/pkg/sentry/platform/ptrace/subprocess.go
+++ b/pkg/sentry/platform/ptrace/subprocess.go
@@ -20,13 +20,13 @@ import (
"runtime"
"golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/procid"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Linux kernel errnos which "should never be seen by user programs", but will
@@ -240,7 +240,7 @@ func newSubprocess(create func() (*thread, error)) (*subprocess, error) {
func (s *subprocess) unmap() {
s.Unmap(0, uint64(stubStart))
if maximumUserAddress != stubEnd {
- s.Unmap(usermem.Addr(stubEnd), uint64(maximumUserAddress-stubEnd))
+ s.Unmap(hostarch.Addr(stubEnd), uint64(maximumUserAddress-stubEnd))
}
}
@@ -627,7 +627,7 @@ func (s *subprocess) syscall(sysno uintptr, args ...arch.SyscallArgument) (uintp
}
// MapFile implements platform.AddressSpace.MapFile.
-func (s *subprocess) MapFile(addr usermem.Addr, f memmap.File, fr memmap.FileRange, at usermem.AccessType, precommit bool) error {
+func (s *subprocess) MapFile(addr hostarch.Addr, f memmap.File, fr memmap.FileRange, at hostarch.AccessType, precommit bool) error {
var flags int
if precommit {
flags |= unix.MAP_POPULATE
@@ -644,7 +644,7 @@ func (s *subprocess) MapFile(addr usermem.Addr, f memmap.File, fr memmap.FileRan
}
// Unmap implements platform.AddressSpace.Unmap.
-func (s *subprocess) Unmap(addr usermem.Addr, length uint64) {
+func (s *subprocess) Unmap(addr hostarch.Addr, length uint64) {
ar, ok := addr.ToRange(length)
if !ok {
panic(fmt.Sprintf("addr %#x + length %#x overflows", addr, length))
diff --git a/pkg/sentry/socket/control/control.go b/pkg/sentry/socket/control/control.go
index 65b556489..45a05cd63 100644
--- a/pkg/sentry/socket/control/control.go
+++ b/pkg/sentry/socket/control/control.go
@@ -20,13 +20,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/binary"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/socket"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
const maxInt = int(^uint(0) >> 1)
@@ -181,12 +181,12 @@ func (c *scmCredentials) Equals(oc transport.CredentialsControlMessage) bool {
}
func putUint64(buf []byte, n uint64) []byte {
- usermem.ByteOrder.PutUint64(buf[len(buf):len(buf)+8], n)
+ hostarch.ByteOrder.PutUint64(buf[len(buf):len(buf)+8], n)
return buf[:len(buf)+8]
}
func putUint32(buf []byte, n uint32) []byte {
- usermem.ByteOrder.PutUint32(buf[len(buf):len(buf)+4], n)
+ hostarch.ByteOrder.PutUint32(buf[len(buf):len(buf)+4], n)
return buf[:len(buf)+4]
}
@@ -242,7 +242,7 @@ func putCmsgStruct(buf []byte, msgLevel, msgType uint32, align uint, data interf
hdrBuf := buf
- buf = binary.Marshal(buf, usermem.ByteOrder, data)
+ buf = binary.Marshal(buf, hostarch.ByteOrder, data)
// If the control message data brought us over capacity, omit it.
if cap(buf) != cap(ob) {
@@ -475,7 +475,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
}
var h linux.ControlMessageHeader
- binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageHeader], usermem.ByteOrder, &h)
+ binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageHeader], hostarch.ByteOrder, &h)
if h.Length < uint64(linux.SizeOfControlMessageHeader) {
return socket.ControlMessages{}, syserror.EINVAL
@@ -499,7 +499,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
}
for j := i; j < i+rightsSize; j += linux.SizeOfControlMessageRight {
- fds = append(fds, int32(usermem.ByteOrder.Uint32(buf[j:j+linux.SizeOfControlMessageRight])))
+ fds = append(fds, int32(hostarch.ByteOrder.Uint32(buf[j:j+linux.SizeOfControlMessageRight])))
}
i += binary.AlignUp(length, width)
@@ -510,7 +510,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
}
var creds linux.ControlMessageCredentials
- binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageCredentials], usermem.ByteOrder, &creds)
+ binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageCredentials], hostarch.ByteOrder, &creds)
scmCreds, err := NewSCMCredentials(t, creds)
if err != nil {
return socket.ControlMessages{}, err
@@ -523,7 +523,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
return socket.ControlMessages{}, syserror.EINVAL
}
var ts linux.Timeval
- binary.Unmarshal(buf[i:i+linux.SizeOfTimeval], usermem.ByteOrder, &ts)
+ binary.Unmarshal(buf[i:i+linux.SizeOfTimeval], hostarch.ByteOrder, &ts)
cmsgs.IP.Timestamp = ts.ToNsecCapped()
cmsgs.IP.HasTimestamp = true
i += binary.AlignUp(length, width)
@@ -539,7 +539,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
return socket.ControlMessages{}, syserror.EINVAL
}
cmsgs.IP.HasTOS = true
- binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageTOS], usermem.ByteOrder, &cmsgs.IP.TOS)
+ binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageTOS], hostarch.ByteOrder, &cmsgs.IP.TOS)
i += binary.AlignUp(length, width)
case linux.IP_PKTINFO:
@@ -549,7 +549,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
cmsgs.IP.HasIPPacketInfo = true
var packetInfo linux.ControlMessageIPPacketInfo
- binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageIPPacketInfo], usermem.ByteOrder, &packetInfo)
+ binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageIPPacketInfo], hostarch.ByteOrder, &packetInfo)
cmsgs.IP.PacketInfo = packetInfo
i += binary.AlignUp(length, width)
@@ -559,7 +559,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
if length < addr.SizeBytes() {
return socket.ControlMessages{}, syserror.EINVAL
}
- binary.Unmarshal(buf[i:i+addr.SizeBytes()], usermem.ByteOrder, &addr)
+ binary.Unmarshal(buf[i:i+addr.SizeBytes()], hostarch.ByteOrder, &addr)
cmsgs.IP.OriginalDstAddress = &addr
i += binary.AlignUp(length, width)
@@ -583,7 +583,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
return socket.ControlMessages{}, syserror.EINVAL
}
cmsgs.IP.HasTClass = true
- binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageTClass], usermem.ByteOrder, &cmsgs.IP.TClass)
+ binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageTClass], hostarch.ByteOrder, &cmsgs.IP.TClass)
i += binary.AlignUp(length, width)
case linux.IPV6_RECVORIGDSTADDR:
@@ -591,7 +591,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
if length < addr.SizeBytes() {
return socket.ControlMessages{}, syserror.EINVAL
}
- binary.Unmarshal(buf[i:i+addr.SizeBytes()], usermem.ByteOrder, &addr)
+ binary.Unmarshal(buf[i:i+addr.SizeBytes()], hostarch.ByteOrder, &addr)
cmsgs.IP.OriginalDstAddress = &addr
i += binary.AlignUp(length, width)
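putUint64 and putUint32 rely on a subtle slicing idiom: they write into the slack between len(buf) and cap(buf) and then re-slice, so the caller must pre-allocate enough capacity. A standalone sketch of the same idiom; hostarch.ByteOrder is the host's native byte order, little-endian on the supported architectures:

package main

import (
	"encoding/binary"
	"fmt"
)

// putUint32 appends n to buf by writing into the slack between len(buf) and
// cap(buf) and then re-slicing, mirroring the helper above. The caller must
// have reserved enough capacity up front.
func putUint32(buf []byte, n uint32) []byte {
	binary.LittleEndian.PutUint32(buf[len(buf):len(buf)+4], n)
	return buf[:len(buf)+4]
}

func main() {
	buf := make([]byte, 0, 8)
	buf = putUint32(buf, 1)
	buf = putUint32(buf, 2)
	fmt.Println(buf) // [1 0 0 0 2 0 0 0]
}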
diff --git a/pkg/sentry/socket/hostinet/socket.go b/pkg/sentry/socket/hostinet/socket.go
index 2d9dbbdba..a784e23b5 100644
--- a/pkg/sentry/socket/hostinet/socket.go
+++ b/pkg/sentry/socket/hostinet/socket.go
@@ -22,6 +22,7 @@ import (
"gvisor.dev/gvisor/pkg/binary"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/fdnotifier"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
@@ -321,7 +322,7 @@ func (s *socketOpsCommon) Shutdown(t *kernel.Task, how int) *syserr.Error {
}
// GetSockOpt implements socket.Socket.GetSockOpt.
-func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
+func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
if outLen < 0 {
return nil, syserr.ErrInvalidArgument
}
@@ -527,24 +528,24 @@ func parseUnixControlMessages(unixControlMessages []unix.SocketControlMessage) s
switch unixCmsg.Header.Type {
case linux.SO_TIMESTAMP:
controlMessages.IP.HasTimestamp = true
- binary.Unmarshal(unixCmsg.Data[:linux.SizeOfTimeval], usermem.ByteOrder, &controlMessages.IP.Timestamp)
+ binary.Unmarshal(unixCmsg.Data[:linux.SizeOfTimeval], hostarch.ByteOrder, &controlMessages.IP.Timestamp)
}
case linux.SOL_IP:
switch unixCmsg.Header.Type {
case linux.IP_TOS:
controlMessages.IP.HasTOS = true
- binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageTOS], usermem.ByteOrder, &controlMessages.IP.TOS)
+ binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageTOS], hostarch.ByteOrder, &controlMessages.IP.TOS)
case linux.IP_PKTINFO:
controlMessages.IP.HasIPPacketInfo = true
var packetInfo linux.ControlMessageIPPacketInfo
- binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageIPPacketInfo], usermem.ByteOrder, &packetInfo)
+ binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageIPPacketInfo], hostarch.ByteOrder, &packetInfo)
controlMessages.IP.PacketInfo = packetInfo
case linux.IP_RECVORIGDSTADDR:
var addr linux.SockAddrInet
- binary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], usermem.ByteOrder, &addr)
+ binary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], hostarch.ByteOrder, &addr)
controlMessages.IP.OriginalDstAddress = &addr
case unix.IP_RECVERR:
@@ -557,11 +558,11 @@ func parseUnixControlMessages(unixControlMessages []unix.SocketControlMessage) s
switch unixCmsg.Header.Type {
case linux.IPV6_TCLASS:
controlMessages.IP.HasTClass = true
- binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageTClass], usermem.ByteOrder, &controlMessages.IP.TClass)
+ binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageTClass], hostarch.ByteOrder, &controlMessages.IP.TClass)
case linux.IPV6_RECVORIGDSTADDR:
var addr linux.SockAddrInet6
- binary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], usermem.ByteOrder, &addr)
+ binary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], hostarch.ByteOrder, &addr)
controlMessages.IP.OriginalDstAddress = &addr
case unix.IPV6_RECVERR:
@@ -574,7 +575,7 @@ func parseUnixControlMessages(unixControlMessages []unix.SocketControlMessage) s
switch unixCmsg.Header.Type {
case linux.TCP_INQ:
controlMessages.IP.HasInq = true
- binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageInq], usermem.ByteOrder, &controlMessages.IP.Inq)
+ binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageInq], hostarch.ByteOrder, &controlMessages.IP.Inq)
}
}
}
@@ -688,7 +689,7 @@ func (s *socketOpsCommon) State() uint32 {
return 0
}
- binary.Unmarshal(buf, usermem.ByteOrder, &info)
+ binary.Unmarshal(buf, hostarch.ByteOrder, &info)
return uint32(info.State)
}
diff --git a/pkg/sentry/socket/hostinet/socket_unsafe.go b/pkg/sentry/socket/hostinet/socket_unsafe.go
index 2890e640d..d3be2d825 100644
--- a/pkg/sentry/socket/hostinet/socket_unsafe.go
+++ b/pkg/sentry/socket/hostinet/socket_unsafe.go
@@ -20,6 +20,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/socket"
@@ -61,7 +62,7 @@ func ioctl(ctx context.Context, fd int, io usermem.IO, args arch.SyscallArgument
return 0, translateIOSyscallError(errno)
}
var buf [4]byte
- usermem.ByteOrder.PutUint32(buf[:], uint32(val))
+ hostarch.ByteOrder.PutUint32(buf[:], uint32(val))
_, err := io.CopyOut(ctx, args[2].Pointer(), buf[:], usermem.IOOpts{
AddressSpaceActive: true,
})
diff --git a/pkg/sentry/socket/hostinet/stack.go b/pkg/sentry/socket/hostinet/stack.go
index 5bcf92e14..26e8ae17a 100644
--- a/pkg/sentry/socket/hostinet/stack.go
+++ b/pkg/sentry/socket/hostinet/stack.go
@@ -22,11 +22,13 @@ import (
"reflect"
"strconv"
"strings"
+
"syscall"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/binary"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/syserr"
@@ -146,7 +148,7 @@ func ExtractHostInterfaces(links []syscall.NetlinkMessage, addrs []syscall.Netli
return fmt.Errorf("RTM_GETLINK returned RTM_NEWLINK message with invalid data length (%d bytes, expected at least %d bytes)", len(link.Data), unix.SizeofIfInfomsg)
}
var ifinfo unix.IfInfomsg
- binary.Unmarshal(link.Data[:unix.SizeofIfInfomsg], usermem.ByteOrder, &ifinfo)
+ binary.Unmarshal(link.Data[:unix.SizeofIfInfomsg], hostarch.ByteOrder, &ifinfo)
inetIF := inet.Interface{
DeviceType: ifinfo.Type,
Flags: ifinfo.Flags,
@@ -177,7 +179,7 @@ func ExtractHostInterfaces(links []syscall.NetlinkMessage, addrs []syscall.Netli
return fmt.Errorf("RTM_GETADDR returned RTM_NEWADDR message with invalid data length (%d bytes, expected at least %d bytes)", len(addr.Data), unix.SizeofIfAddrmsg)
}
var ifaddr unix.IfAddrmsg
- binary.Unmarshal(addr.Data[:unix.SizeofIfAddrmsg], usermem.ByteOrder, &ifaddr)
+ binary.Unmarshal(addr.Data[:unix.SizeofIfAddrmsg], hostarch.ByteOrder, &ifaddr)
inetAddr := inet.InterfaceAddr{
Family: ifaddr.Family,
PrefixLen: ifaddr.Prefixlen,
@@ -209,7 +211,7 @@ func ExtractHostRoutes(routeMsgs []syscall.NetlinkMessage) ([]inet.Route, error)
}
var ifRoute unix.RtMsg
- binary.Unmarshal(routeMsg.Data[:unix.SizeofRtMsg], usermem.ByteOrder, &ifRoute)
+ binary.Unmarshal(routeMsg.Data[:unix.SizeofRtMsg], hostarch.ByteOrder, &ifRoute)
inetRoute := inet.Route{
Family: ifRoute.Family,
DstLen: ifRoute.Dst_len,
@@ -243,7 +245,7 @@ func ExtractHostRoutes(routeMsgs []syscall.NetlinkMessage) ([]inet.Route, error)
if len(attr.Value) != expected {
return nil, fmt.Errorf("RTM_GETROUTE returned RTM_NEWROUTE message with invalid attribute data length (%d bytes, expected %d bytes)", len(attr.Value), expected)
}
- binary.Unmarshal(attr.Value, usermem.ByteOrder, &inetRoute.OutputInterface)
+ binary.Unmarshal(attr.Value, hostarch.ByteOrder, &inetRoute.OutputInterface)
}
}
diff --git a/pkg/sentry/socket/netfilter/extensions.go b/pkg/sentry/socket/netfilter/extensions.go
index e339f9bea..4bd305a44 100644
--- a/pkg/sentry/socket/netfilter/extensions.go
+++ b/pkg/sentry/socket/netfilter/extensions.go
@@ -19,10 +19,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/binary"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/usermem"
)
// TODO(gvisor.dev/issue/170): The following per-matcher params should be
@@ -89,7 +89,7 @@ func marshalEntryMatch(name string, data []byte) []byte {
copy(matcher.Name[:], name)
buf := make([]byte, 0, size)
- buf = binary.Marshal(buf, usermem.ByteOrder, matcher)
+ buf = binary.Marshal(buf, hostarch.ByteOrder, matcher)
return append(buf, make([]byte, size-len(buf))...)
}
diff --git a/pkg/sentry/socket/netfilter/ipv4.go b/pkg/sentry/socket/netfilter/ipv4.go
index 2f913787b..1fc4cb651 100644
--- a/pkg/sentry/socket/netfilter/ipv4.go
+++ b/pkg/sentry/socket/netfilter/ipv4.go
@@ -19,11 +19,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/binary"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/usermem"
)
// emptyIPv4Filter is for comparison with a rule's filters to determine whether
@@ -142,7 +142,7 @@ func modifyEntries4(stk *stack.Stack, optVal []byte, replace *linux.IPTReplace,
}
var entry linux.IPTEntry
buf := optVal[:linux.SizeOfIPTEntry]
- binary.Unmarshal(buf, usermem.ByteOrder, &entry)
+ binary.Unmarshal(buf, hostarch.ByteOrder, &entry)
initialOptValLen := len(optVal)
optVal = optVal[linux.SizeOfIPTEntry:]
diff --git a/pkg/sentry/socket/netfilter/ipv6.go b/pkg/sentry/socket/netfilter/ipv6.go
index 263d9d3b5..67a52b628 100644
--- a/pkg/sentry/socket/netfilter/ipv6.go
+++ b/pkg/sentry/socket/netfilter/ipv6.go
@@ -19,11 +19,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/binary"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/usermem"
)
// emptyIPv6Filter is for comparison with a rule's filters to determine whether
@@ -145,7 +145,7 @@ func modifyEntries6(stk *stack.Stack, optVal []byte, replace *linux.IPTReplace,
}
var entry linux.IP6TEntry
buf := optVal[:linux.SizeOfIP6TEntry]
- binary.Unmarshal(buf, usermem.ByteOrder, &entry)
+ binary.Unmarshal(buf, hostarch.ByteOrder, &entry)
initialOptValLen := len(optVal)
optVal = optVal[linux.SizeOfIP6TEntry:]
diff --git a/pkg/sentry/socket/netfilter/netfilter.go b/pkg/sentry/socket/netfilter/netfilter.go
index 7ae18b2a3..5200e08ed 100644
--- a/pkg/sentry/socket/netfilter/netfilter.go
+++ b/pkg/sentry/socket/netfilter/netfilter.go
@@ -23,12 +23,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/binary"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/usermem"
)
// enableLogging controls whether to log the (de)serialization of netfilter
@@ -83,7 +83,7 @@ func DefaultLinuxTables() *stack.IPTables {
}
// GetInfo returns information about iptables.
-func GetInfo(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, ipv6 bool) (linux.IPTGetinfo, *syserr.Error) {
+func GetInfo(t *kernel.Task, stack *stack.Stack, outPtr hostarch.Addr, ipv6 bool) (linux.IPTGetinfo, *syserr.Error) {
// Read in the struct and table name.
var info linux.IPTGetinfo
if _, err := info.CopyIn(t, outPtr); err != nil {
@@ -106,7 +106,7 @@ func GetInfo(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, ipv6 bool)
}
// GetEntries4 returns netstack's iptables rules.
-func GetEntries4(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, outLen int) (linux.KernelIPTGetEntries, *syserr.Error) {
+func GetEntries4(t *kernel.Task, stack *stack.Stack, outPtr hostarch.Addr, outLen int) (linux.KernelIPTGetEntries, *syserr.Error) {
// Read in the struct and table name.
var userEntries linux.IPTGetEntries
if _, err := userEntries.CopyIn(t, outPtr); err != nil {
@@ -130,7 +130,7 @@ func GetEntries4(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, outLen
}
// GetEntries6 returns netstack's ip6tables rules.
-func GetEntries6(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, outLen int) (linux.KernelIP6TGetEntries, *syserr.Error) {
+func GetEntries6(t *kernel.Task, stack *stack.Stack, outPtr hostarch.Addr, outLen int) (linux.KernelIP6TGetEntries, *syserr.Error) {
// Read in the struct and table name. IPv4 and IPv6 utilize structs
// with the same layout.
var userEntries linux.IPTGetEntries
@@ -179,7 +179,7 @@ func SetEntries(stk *stack.Stack, optVal []byte, ipv6 bool) *syserr.Error {
var replace linux.IPTReplace
replaceBuf := optVal[:linux.SizeOfIPTReplace]
optVal = optVal[linux.SizeOfIPTReplace:]
- binary.Unmarshal(replaceBuf, usermem.ByteOrder, &replace)
+ binary.Unmarshal(replaceBuf, hostarch.ByteOrder, &replace)
// TODO(gvisor.dev/issue/170): Support other tables.
var table stack.Table
@@ -310,7 +310,7 @@ func parseMatchers(filter stack.IPHeaderFilter, optVal []byte) ([]stack.Matcher,
}
var match linux.XTEntryMatch
buf := optVal[:linux.SizeOfXTEntryMatch]
- binary.Unmarshal(buf, usermem.ByteOrder, &match)
+ binary.Unmarshal(buf, hostarch.ByteOrder, &match)
nflog("set entries: parsed entry match %q: %+v", match.Name.String(), match)
// Check some invariants.
@@ -381,7 +381,7 @@ func hookFromLinux(hook int) stack.Hook {
// TargetRevision returns a linux.XTGetRevision for a given target. It sets
// Revision to the highest supported value, unless the provided revision number
// is larger.
-func TargetRevision(t *kernel.Task, revPtr usermem.Addr, netProto tcpip.NetworkProtocolNumber) (linux.XTGetRevision, *syserr.Error) {
+func TargetRevision(t *kernel.Task, revPtr hostarch.Addr, netProto tcpip.NetworkProtocolNumber) (linux.XTGetRevision, *syserr.Error) {
// Read in the target name and version.
var rev linux.XTGetRevision
if _, err := rev.CopyIn(t, revPtr); err != nil {
diff --git a/pkg/sentry/socket/netfilter/owner_matcher.go b/pkg/sentry/socket/netfilter/owner_matcher.go
index 5f80d82ea..b2cc6be20 100644
--- a/pkg/sentry/socket/netfilter/owner_matcher.go
+++ b/pkg/sentry/socket/netfilter/owner_matcher.go
@@ -19,8 +19,8 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/binary"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/usermem"
)
const matcherNameOwner = "owner"
@@ -60,7 +60,7 @@ func (ownerMarshaler) marshal(mr matcher) []byte {
}
buf := make([]byte, 0, linux.SizeOfIPTOwnerInfo)
- return marshalEntryMatch(matcherNameOwner, binary.Marshal(buf, usermem.ByteOrder, iptOwnerInfo))
+ return marshalEntryMatch(matcherNameOwner, binary.Marshal(buf, hostarch.ByteOrder, iptOwnerInfo))
}
// unmarshal implements matchMaker.unmarshal.
@@ -72,7 +72,7 @@ func (ownerMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.
// For alignment reasons, the match's total size may
// exceed what's strictly necessary to hold matchData.
var matchData linux.IPTOwnerInfo
- binary.Unmarshal(buf[:linux.SizeOfIPTOwnerInfo], usermem.ByteOrder, &matchData)
+ binary.Unmarshal(buf[:linux.SizeOfIPTOwnerInfo], hostarch.ByteOrder, &matchData)
nflog("parseMatchers: parsed IPTOwnerInfo: %+v", matchData)
var owner OwnerMatcher
diff --git a/pkg/sentry/socket/netfilter/targets.go b/pkg/sentry/socket/netfilter/targets.go
index f2653d523..80f8c6430 100644
--- a/pkg/sentry/socket/netfilter/targets.go
+++ b/pkg/sentry/socket/netfilter/targets.go
@@ -19,11 +19,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/binary"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/usermem"
)
// ErrorTargetName is used to mark targets as error targets. Error targets
@@ -167,7 +167,7 @@ func (*standardTargetMaker) marshal(target target) []byte {
}
ret := make([]byte, 0, linux.SizeOfXTStandardTarget)
- return binary.Marshal(ret, usermem.ByteOrder, xt)
+ return binary.Marshal(ret, hostarch.ByteOrder, xt)
}
func (*standardTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) {
@@ -177,7 +177,7 @@ func (*standardTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (
}
var standardTarget linux.XTStandardTarget
buf = buf[:linux.SizeOfXTStandardTarget]
- binary.Unmarshal(buf, usermem.ByteOrder, &standardTarget)
+ binary.Unmarshal(buf, hostarch.ByteOrder, &standardTarget)
if standardTarget.Verdict < 0 {
// A Verdict < 0 indicates a non-jump verdict.
@@ -223,7 +223,7 @@ func (*errorTargetMaker) marshal(target target) []byte {
copy(xt.Target.Name[:], ErrorTargetName)
ret := make([]byte, 0, linux.SizeOfXTErrorTarget)
- return binary.Marshal(ret, usermem.ByteOrder, xt)
+ return binary.Marshal(ret, hostarch.ByteOrder, xt)
}
func (*errorTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) {
@@ -233,7 +233,7 @@ func (*errorTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (tar
}
var errTgt linux.XTErrorTarget
buf = buf[:linux.SizeOfXTErrorTarget]
- binary.Unmarshal(buf, usermem.ByteOrder, &errTgt)
+ binary.Unmarshal(buf, hostarch.ByteOrder, &errTgt)
// Error targets are used in 2 cases:
// * An actual error case. These rules have an error named
@@ -281,7 +281,7 @@ func (*redirectTargetMaker) marshal(target target) []byte {
xt.NfRange.RangeIPV4.Flags |= linux.NF_NAT_RANGE_PROTO_SPECIFIED
xt.NfRange.RangeIPV4.MinPort = htons(rt.Port)
xt.NfRange.RangeIPV4.MaxPort = xt.NfRange.RangeIPV4.MinPort
- return binary.Marshal(ret, usermem.ByteOrder, xt)
+ return binary.Marshal(ret, hostarch.ByteOrder, xt)
}
func (*redirectTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) {
@@ -297,7 +297,7 @@ func (*redirectTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (
var rt linux.XTRedirectTarget
buf = buf[:linux.SizeOfXTRedirectTarget]
- binary.Unmarshal(buf, usermem.ByteOrder, &rt)
+ binary.Unmarshal(buf, hostarch.ByteOrder, &rt)
// Copy linux.XTRedirectTarget to stack.RedirectTarget.
target := redirectTarget{RedirectTarget: stack.RedirectTarget{
@@ -372,7 +372,7 @@ func (*nfNATTargetMaker) marshal(target target) []byte {
nt.Range.MaxProto = nt.Range.MinProto
ret := make([]byte, 0, nfNATMarhsalledSize)
- return binary.Marshal(ret, usermem.ByteOrder, nt)
+ return binary.Marshal(ret, hostarch.ByteOrder, nt)
}
func (*nfNATTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) {
@@ -388,7 +388,7 @@ func (*nfNATTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (tar
var natRange linux.NFNATRange
buf = buf[linux.SizeOfXTEntryTarget:nfNATMarhsalledSize]
- binary.Unmarshal(buf, usermem.ByteOrder, &natRange)
+ binary.Unmarshal(buf, hostarch.ByteOrder, &natRange)
// We don't support port or address ranges.
if natRange.MinAddr != natRange.MaxAddr {
@@ -454,7 +454,7 @@ func parseTarget(filter stack.IPHeaderFilter, optVal []byte, ipv6 bool) (stack.T
}
var target linux.XTEntryTarget
buf := optVal[:linux.SizeOfXTEntryTarget]
- binary.Unmarshal(buf, usermem.ByteOrder, &target)
+ binary.Unmarshal(buf, hostarch.ByteOrder, &target)
return unmarshalTarget(target, filter, optVal)
}
@@ -487,11 +487,11 @@ func (jt *JumpTarget) Action(*stack.PacketBuffer, *stack.ConnTrack, stack.Hook,
func ntohs(port uint16) uint16 {
buf := make([]byte, 2)
binary.BigEndian.PutUint16(buf, port)
- return usermem.ByteOrder.Uint16(buf)
+ return hostarch.ByteOrder.Uint16(buf)
}
func htons(port uint16) uint16 {
buf := make([]byte, 2)
- usermem.ByteOrder.PutUint16(buf, port)
+ hostarch.ByteOrder.PutUint16(buf, port)
return binary.BigEndian.Uint16(buf)
}
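The ntohs/htons helpers above convert between network (big-endian) and native byte order by writing a value with one order and reading it back with the other; after this change the native side is hostarch.ByteOrder rather than usermem.ByteOrder. A standalone sketch of the same round-trip, with a small nativeOrder helper standing in for hostarch.ByteOrder, looks like this:

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// nativeOrder plays the role of hostarch.ByteOrder: the byte order of the
// machine the code runs on. The detection trick is illustrative only.
func nativeOrder() binary.ByteOrder {
	x := uint16(1)
	if *(*byte)(unsafe.Pointer(&x)) == 1 {
		return binary.LittleEndian
	}
	return binary.BigEndian
}

// htons converts a port from native to network (big-endian) byte order by
// writing with one order and reading with the other, as in targets.go.
func htons(port uint16) uint16 {
	buf := make([]byte, 2)
	nativeOrder().PutUint16(buf, port)
	return binary.BigEndian.Uint16(buf)
}

// ntohs is the inverse conversion.
func ntohs(port uint16) uint16 {
	buf := make([]byte, 2)
	binary.BigEndian.PutUint16(buf, port)
	return nativeOrder().Uint16(buf)
}

func main() {
	fmt.Printf("htons(8080) = %#x\n", htons(8080)) // 0x901f on little-endian hosts
	fmt.Println(ntohs(htons(8080)))                // 8080
}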
diff --git a/pkg/sentry/socket/netfilter/tcp_matcher.go b/pkg/sentry/socket/netfilter/tcp_matcher.go
index 678d6b578..69557f515 100644
--- a/pkg/sentry/socket/netfilter/tcp_matcher.go
+++ b/pkg/sentry/socket/netfilter/tcp_matcher.go
@@ -19,9 +19,9 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/binary"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/usermem"
)
const matcherNameTCP = "tcp"
@@ -48,7 +48,7 @@ func (tcpMarshaler) marshal(mr matcher) []byte {
DestinationPortEnd: matcher.destinationPortEnd,
}
buf := make([]byte, 0, linux.SizeOfXTTCP)
- return marshalEntryMatch(matcherNameTCP, binary.Marshal(buf, usermem.ByteOrder, xttcp))
+ return marshalEntryMatch(matcherNameTCP, binary.Marshal(buf, hostarch.ByteOrder, xttcp))
}
// unmarshal implements matchMaker.unmarshal.
@@ -60,7 +60,7 @@ func (tcpMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.Ma
// For alignment reasons, the match's total size may
// exceed what's strictly necessary to hold matchData.
var matchData linux.XTTCP
- binary.Unmarshal(buf[:linux.SizeOfXTTCP], usermem.ByteOrder, &matchData)
+ binary.Unmarshal(buf[:linux.SizeOfXTTCP], hostarch.ByteOrder, &matchData)
nflog("parseMatchers: parsed XTTCP: %+v", matchData)
if matchData.Option != 0 ||
diff --git a/pkg/sentry/socket/netfilter/udp_matcher.go b/pkg/sentry/socket/netfilter/udp_matcher.go
index f8568873f..6a60e6bd6 100644
--- a/pkg/sentry/socket/netfilter/udp_matcher.go
+++ b/pkg/sentry/socket/netfilter/udp_matcher.go
@@ -19,9 +19,9 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/binary"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/usermem"
)
const matcherNameUDP = "udp"
@@ -48,7 +48,7 @@ func (udpMarshaler) marshal(mr matcher) []byte {
DestinationPortEnd: matcher.destinationPortEnd,
}
buf := make([]byte, 0, linux.SizeOfXTUDP)
- return marshalEntryMatch(matcherNameUDP, binary.Marshal(buf, usermem.ByteOrder, xtudp))
+ return marshalEntryMatch(matcherNameUDP, binary.Marshal(buf, hostarch.ByteOrder, xtudp))
}
// unmarshal implements matchMaker.unmarshal.
@@ -60,7 +60,7 @@ func (udpMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.Ma
// For alignment reasons, the match's total size may exceed what's
// strictly necessary to hold matchData.
var matchData linux.XTUDP
- binary.Unmarshal(buf[:linux.SizeOfXTUDP], usermem.ByteOrder, &matchData)
+ binary.Unmarshal(buf[:linux.SizeOfXTUDP], hostarch.ByteOrder, &matchData)
nflog("parseMatchers: parsed XTUDP: %+v", matchData)
if matchData.InverseFlags != 0 {
diff --git a/pkg/sentry/socket/netlink/message.go b/pkg/sentry/socket/netlink/message.go
index 0899c61d1..ab0e68af7 100644
--- a/pkg/sentry/socket/netlink/message.go
+++ b/pkg/sentry/socket/netlink/message.go
@@ -20,7 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/binary"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// alignPad returns the length of padding required for alignment.
@@ -42,7 +42,7 @@ type Message struct {
func NewMessage(hdr linux.NetlinkMessageHeader) *Message {
return &Message{
hdr: hdr,
- buf: binary.Marshal(nil, usermem.ByteOrder, hdr),
+ buf: binary.Marshal(nil, hostarch.ByteOrder, hdr),
}
}
@@ -58,7 +58,7 @@ func ParseMessage(buf []byte) (msg *Message, rest []byte, ok bool) {
return
}
var hdr linux.NetlinkMessageHeader
- binary.Unmarshal(hdrBytes, usermem.ByteOrder, &hdr)
+ binary.Unmarshal(hdrBytes, hostarch.ByteOrder, &hdr)
// Msg portion.
totalMsgLen := int(hdr.Length)
@@ -105,7 +105,7 @@ func (m *Message) GetData(msg interface{}) (AttrsView, bool) {
if !ok {
return nil, false
}
- binary.Unmarshal(msgBytes, usermem.ByteOrder, msg)
+ binary.Unmarshal(msgBytes, hostarch.ByteOrder, msg)
numPad := alignPad(linux.NetlinkMessageHeaderSize+size, linux.NLMSG_ALIGNTO)
// Linux permits the last message not being aligned, just consume all of it.
@@ -126,7 +126,7 @@ func (m *Message) GetData(msg interface{}) (AttrsView, bool) {
// calling Finalize.
func (m *Message) Finalize() []byte {
// Update length, which is the first 4 bytes of the header.
- usermem.ByteOrder.PutUint32(m.buf, uint32(len(m.buf)))
+ hostarch.ByteOrder.PutUint32(m.buf, uint32(len(m.buf)))
// Align the message. Note that the message length in the header (set
// above) is the useful length of the message, not the total aligned
@@ -146,7 +146,7 @@ func (m *Message) putZeros(n int) {
// Put serializes v into the message.
func (m *Message) Put(v interface{}) {
- m.buf = binary.Marshal(m.buf, usermem.ByteOrder, v)
+ m.buf = binary.Marshal(m.buf, hostarch.ByteOrder, v)
}
// PutAttr adds v to the message as a netlink attribute.
@@ -251,7 +251,7 @@ func (v AttrsView) ParseFirst() (hdr linux.NetlinkAttrHeader, value []byte, rest
if !ok {
return
}
- binary.Unmarshal(hdrBytes, usermem.ByteOrder, &hdr)
+ binary.Unmarshal(hdrBytes, hostarch.ByteOrder, &hdr)
value, ok = b.Extract(int(hdr.Length) - linux.NetlinkAttrHeaderSize)
if !ok {
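Message.Finalize above relies on the netlink header's length field being the first four bytes of the serialized buffer, so it can be patched after the payload has been appended. A rough standalone equivalent using encoding/binary is sketched below; the header struct mirrors the layout of linux.NetlinkMessageHeader and binary.LittleEndian stands in for hostarch.ByteOrder, both as assumptions for illustration.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// header mirrors the field layout of linux.NetlinkMessageHeader (16 bytes);
// the names and the little-endian choice are assumptions for this sketch.
type header struct {
	Length uint32
	Type   uint16
	Flags  uint16
	Seq    uint32
	PortID uint32
}

func main() {
	var buf bytes.Buffer

	// Serialize the header first, as NewMessage does, then append the payload.
	binary.Write(&buf, binary.LittleEndian, header{Type: 0x10, Flags: 0x1, Seq: 1})
	buf.Write([]byte{0xde, 0xad, 0xbe, 0xef})

	// Finalize: the total length lives in the first four bytes of the header.
	out := buf.Bytes()
	binary.LittleEndian.PutUint32(out[:4], uint32(len(out)))

	fmt.Printf("netlink message (%d bytes): % x\n", len(out), out)
}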
diff --git a/pkg/sentry/socket/netlink/socket.go b/pkg/sentry/socket/netlink/socket.go
index d5ffc75ce..30c297149 100644
--- a/pkg/sentry/socket/netlink/socket.go
+++ b/pkg/sentry/socket/netlink/socket.go
@@ -22,6 +22,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/binary"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -222,7 +223,7 @@ func ExtractSockAddr(b []byte) (*linux.SockAddrNetlink, *syserr.Error) {
}
var sa linux.SockAddrNetlink
- binary.Unmarshal(b[:linux.SockAddrNetlinkSize], usermem.ByteOrder, &sa)
+ binary.Unmarshal(b[:linux.SockAddrNetlinkSize], hostarch.ByteOrder, &sa)
if sa.Family != linux.AF_NETLINK {
return nil, syserr.ErrInvalidArgument
@@ -327,7 +328,7 @@ func (s *socketOpsCommon) Shutdown(t *kernel.Task, how int) *syserr.Error {
}
// GetSockOpt implements socket.Socket.GetSockOpt.
-func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
+func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
switch level {
case linux.SOL_SOCKET:
switch name {
@@ -388,7 +389,7 @@ func (s *socketOpsCommon) SetSockOpt(t *kernel.Task, level int, name int, opt []
if len(opt) < sizeOfInt32 {
return syserr.ErrInvalidArgument
}
- size := usermem.ByteOrder.Uint32(opt)
+ size := hostarch.ByteOrder.Uint32(opt)
if size < minSendBufferSize {
size = minSendBufferSize
} else if size > maxSendBufferSize {
@@ -411,7 +412,7 @@ func (s *socketOpsCommon) SetSockOpt(t *kernel.Task, level int, name int, opt []
if len(opt) < sizeOfInt32 {
return syserr.ErrInvalidArgument
}
- passcred := usermem.ByteOrder.Uint32(opt)
+ passcred := hostarch.ByteOrder.Uint32(opt)
s.ep.SocketOptions().SetPassCred(passcred != 0)
return nil
diff --git a/pkg/sentry/socket/netstack/netstack.go b/pkg/sentry/socket/netstack/netstack.go
index 64e70ab9d..ed6572bab 100644
--- a/pkg/sentry/socket/netstack/netstack.go
+++ b/pkg/sentry/socket/netstack/netstack.go
@@ -37,6 +37,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/binary"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
@@ -600,7 +601,7 @@ func (s *socketOpsCommon) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error {
return syserr.ErrInvalidArgument
}
- family := usermem.ByteOrder.Uint16(sockaddr)
+ family := hostarch.ByteOrder.Uint16(sockaddr)
var addr tcpip.FullAddress
// Bind for AF_PACKET requires only family, protocol and ifindex.
@@ -611,7 +612,7 @@ func (s *socketOpsCommon) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error {
if len(sockaddr) < sockAddrLinkSize {
return syserr.ErrInvalidArgument
}
- binary.Unmarshal(sockaddr[:sockAddrLinkSize], usermem.ByteOrder, &a)
+ binary.Unmarshal(sockaddr[:sockAddrLinkSize], hostarch.ByteOrder, &a)
if a.Protocol != uint16(s.protocol) {
return syserr.ErrInvalidArgument
@@ -757,7 +758,7 @@ func (s *socketOpsCommon) Shutdown(t *kernel.Task, how int) *syserr.Error {
// GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by
// tcpip.Endpoint.
-func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
+func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
// TODO(b/78348848): Unlike other socket options, SO_TIMESTAMP is
// implemented specifically for netstack.SocketOperations rather than
// commonEndpoint. commonEndpoint should be extended to support socket
@@ -793,7 +794,7 @@ func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr us
// GetSockOpt can be used to implement the linux syscall getsockopt(2) for
// sockets backed by a commonEndpoint.
-func GetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, family int, skType linux.SockType, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
+func GetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, family int, skType linux.SockType, level, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
switch level {
case linux.SOL_SOCKET:
return getSockOptSocket(t, s, ep, family, skType, name, outLen)
@@ -1244,7 +1245,7 @@ func getSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name,
}
// getSockOptIPv6 implements GetSockOpt when level is SOL_IPV6.
-func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
+func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
if _, ok := ep.(tcpip.Endpoint); !ok {
log.Warningf("SOL_IPV6 options not supported on endpoints other than tcpip.Endpoint: option = %d", name)
return nil, syserr.ErrUnknownProtocolOption
@@ -1392,7 +1393,7 @@ func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name
}
// getSockOptIP implements GetSockOpt when level is SOL_IP.
-func getSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, outPtr usermem.Addr, outLen int, family int) (marshal.Marshallable, *syserr.Error) {
+func getSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, outPtr hostarch.Addr, outLen int, family int) (marshal.Marshallable, *syserr.Error) {
if _, ok := ep.(tcpip.Endpoint); !ok {
log.Warningf("SOL_IP options not supported on endpoints other than tcpip.Endpoint: option = %d", name)
return nil, syserr.ErrUnknownProtocolOption
@@ -1602,7 +1603,7 @@ func (s *SocketOperations) SetSockOpt(t *kernel.Task, level int, name int, optVa
}
s.readMu.Lock()
defer s.readMu.Unlock()
- s.sockOptTimestamp = usermem.ByteOrder.Uint32(optVal) != 0
+ s.sockOptTimestamp = hostarch.ByteOrder.Uint32(optVal) != 0
return nil
}
if level == linux.SOL_TCP && name == linux.TCP_INQ {
@@ -1611,7 +1612,7 @@ func (s *SocketOperations) SetSockOpt(t *kernel.Task, level int, name int, optVa
}
s.readMu.Lock()
defer s.readMu.Unlock()
- s.sockOptInq = usermem.ByteOrder.Uint32(optVal) != 0
+ s.sockOptInq = hostarch.ByteOrder.Uint32(optVal) != 0
return nil
}
@@ -1659,7 +1660,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
ep.SocketOptions().SetSendBufferSize(int64(v), true)
return nil
@@ -1668,7 +1669,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
return syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.ReceiveBufferSizeOption, int(v)))
case linux.SO_REUSEADDR:
@@ -1676,7 +1677,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
ep.SocketOptions().SetReuseAddress(v != 0)
return nil
@@ -1685,7 +1686,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
ep.SocketOptions().SetReusePort(v != 0)
return nil
@@ -1714,7 +1715,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
ep.SocketOptions().SetBroadcast(v != 0)
return nil
@@ -1723,7 +1724,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
ep.SocketOptions().SetPassCred(v != 0)
return nil
@@ -1732,7 +1733,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
ep.SocketOptions().SetKeepAlive(v != 0)
return nil
@@ -1742,7 +1743,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
}
var v linux.Timeval
- binary.Unmarshal(optVal[:linux.SizeOfTimeval], usermem.ByteOrder, &v)
+ binary.Unmarshal(optVal[:linux.SizeOfTimeval], hostarch.ByteOrder, &v)
if v.Usec < 0 || v.Usec >= int64(time.Second/time.Microsecond) {
return syserr.ErrDomain
}
@@ -1755,7 +1756,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
}
var v linux.Timeval
- binary.Unmarshal(optVal[:linux.SizeOfTimeval], usermem.ByteOrder, &v)
+ binary.Unmarshal(optVal[:linux.SizeOfTimeval], hostarch.ByteOrder, &v)
if v.Usec < 0 || v.Usec >= int64(time.Second/time.Microsecond) {
return syserr.ErrDomain
}
@@ -1767,7 +1768,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
if v == 0 {
socket.SetSockOptEmitUnimplementedEvent(t, name)
@@ -1781,7 +1782,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
ep.SocketOptions().SetNoChecksum(v != 0)
return nil
@@ -1791,7 +1792,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
}
var v linux.Linger
- binary.Unmarshal(optVal[:linux.SizeOfLinger], usermem.ByteOrder, &v)
+ binary.Unmarshal(optVal[:linux.SizeOfLinger], hostarch.ByteOrder, &v)
ep.SocketOptions().SetLinger(tcpip.LingerOption{
Enabled: v.OnOff != 0,
@@ -1824,7 +1825,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
ep.SocketOptions().SetDelayOption(v == 0)
return nil
@@ -1833,7 +1834,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
ep.SocketOptions().SetCorkOption(v != 0)
return nil
@@ -1842,7 +1843,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
ep.SocketOptions().SetQuickAck(v != 0)
return nil
@@ -1851,7 +1852,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
return syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.MaxSegOption, int(v)))
case linux.TCP_KEEPIDLE:
@@ -1859,7 +1860,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
if v < 1 || v > linux.MAX_TCP_KEEPIDLE {
return syserr.ErrInvalidArgument
}
@@ -1871,7 +1872,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
if v < 1 || v > linux.MAX_TCP_KEEPINTVL {
return syserr.ErrInvalidArgument
}
@@ -1883,7 +1884,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
if v < 1 || v > linux.MAX_TCP_KEEPCNT {
return syserr.ErrInvalidArgument
}
@@ -1894,7 +1895,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i
return syserr.ErrInvalidArgument
}
- v := int32(usermem.ByteOrder.Uint32(optVal))
+ v := int32(hostarch.ByteOrder.Uint32(optVal))
if v < 0 {
return syserr.ErrInvalidArgument
}
@@ -1913,7 +1914,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i
return syserr.ErrInvalidArgument
}
- v := int32(usermem.ByteOrder.Uint32(optVal))
+ v := int32(hostarch.ByteOrder.Uint32(optVal))
opt := tcpip.TCPLingerTimeoutOption(time.Second * time.Duration(v))
return syserr.TranslateNetstackError(ep.SetSockOpt(&opt))
@@ -1921,7 +1922,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i
if len(optVal) < sizeOfInt32 {
return syserr.ErrInvalidArgument
}
- v := int32(usermem.ByteOrder.Uint32(optVal))
+ v := int32(hostarch.ByteOrder.Uint32(optVal))
if v < 0 {
v = 0
}
@@ -1932,7 +1933,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i
if len(optVal) < sizeOfInt32 {
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
return syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.TCPSynCountOption, int(v)))
@@ -1940,7 +1941,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i
if len(optVal) < sizeOfInt32 {
return syserr.ErrInvalidArgument
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
return syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.TCPWindowClampOption, int(v)))
@@ -1978,7 +1979,7 @@ func setSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name
return syserr.ErrInvalidEndpointState
}
- v := usermem.ByteOrder.Uint32(optVal)
+ v := hostarch.ByteOrder.Uint32(optVal)
ep.SocketOptions().SetV6Only(v != 0)
return nil
@@ -2024,7 +2025,7 @@ func setSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name
if len(optVal) < sizeOfInt32 {
return syserr.ErrInvalidArgument
}
- v := int32(usermem.ByteOrder.Uint32(optVal))
+ v := int32(hostarch.ByteOrder.Uint32(optVal))
ep.SocketOptions().SetReceiveOriginalDstAddress(v != 0)
return nil
@@ -2033,7 +2034,7 @@ func setSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name
if len(optVal) < sizeOfInt32 {
return syserr.ErrInvalidArgument
}
- v := int32(usermem.ByteOrder.Uint32(optVal))
+ v := int32(hostarch.ByteOrder.Uint32(optVal))
if v < -1 || v > 255 {
return syserr.ErrInvalidArgument
}
@@ -2117,12 +2118,12 @@ func copyInMulticastRequest(optVal []byte, allowAddr bool) (linux.InetMulticastR
if len(optVal) >= inetMulticastRequestWithNICSize {
var req linux.InetMulticastRequestWithNIC
- binary.Unmarshal(optVal[:inetMulticastRequestWithNICSize], usermem.ByteOrder, &req)
+ binary.Unmarshal(optVal[:inetMulticastRequestWithNICSize], hostarch.ByteOrder, &req)
return req, nil
}
var req linux.InetMulticastRequestWithNIC
- binary.Unmarshal(optVal[:inetMulticastRequestSize], usermem.ByteOrder, &req.InetMulticastRequest)
+ binary.Unmarshal(optVal[:inetMulticastRequestSize], hostarch.ByteOrder, &req.InetMulticastRequest)
return req, nil
}
@@ -2132,7 +2133,7 @@ func copyInMulticastV6Request(optVal []byte) (linux.Inet6MulticastRequest, *syse
}
var req linux.Inet6MulticastRequest
- binary.Unmarshal(optVal[:inet6MulticastRequestSize], usermem.ByteOrder, &req)
+ binary.Unmarshal(optVal[:inet6MulticastRequestSize], hostarch.ByteOrder, &req)
return req, nil
}
@@ -2145,7 +2146,7 @@ func parseIntOrChar(buf []byte) (int32, *syserr.Error) {
}
if len(buf) >= sizeOfInt32 {
- return int32(usermem.ByteOrder.Uint32(buf)), nil
+ return int32(hostarch.ByteOrder.Uint32(buf)), nil
}
return int32(buf[0]), nil
@@ -3007,7 +3008,7 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe
if arg == linux.SIOCGIFNAME {
// Gets the name of the interface given the interface index
// stored in ifr_ifindex.
- index = int32(usermem.ByteOrder.Uint32(ifr.Data[:4]))
+ index = int32(hostarch.ByteOrder.Uint32(ifr.Data[:4]))
if iface, ok := stack.Interfaces()[index]; ok {
ifr.SetName(iface.Name)
return nil
@@ -3029,7 +3030,7 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe
switch arg {
case linux.SIOCGIFINDEX:
// Copy out the index to the data.
- usermem.ByteOrder.PutUint32(ifr.Data[:], uint32(index))
+ hostarch.ByteOrder.PutUint32(ifr.Data[:], uint32(index))
case linux.SIOCGIFHWADDR:
// Copy the hardware address out.
@@ -3042,7 +3043,7 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe
// sockaddr. sa_family contains the ARPHRD_* device type,
// sa_data the L2 hardware address starting from byte 0. Setting
// the hardware address is a privileged operation.
- usermem.ByteOrder.PutUint16(ifr.Data[:], iface.DeviceType)
+ hostarch.ByteOrder.PutUint16(ifr.Data[:], iface.DeviceType)
n := copy(ifr.Data[2:], iface.Addr)
for i := 2 + n; i < len(ifr.Data); i++ {
ifr.Data[i] = 0 // Clear padding.
@@ -3055,7 +3056,7 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe
}
// Drop the flags that don't fit in the size that we need to return. This
// matches Linux behavior.
- usermem.ByteOrder.PutUint16(ifr.Data[:2], uint16(f))
+ hostarch.ByteOrder.PutUint16(ifr.Data[:2], uint16(f))
case linux.SIOCGIFADDR:
// Copy the IPv4 address out.
@@ -3071,11 +3072,11 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe
case linux.SIOCGIFMETRIC:
// Gets the metric of the device. As per netdevice(7), this
// always just sets ifr_metric to 0.
- usermem.ByteOrder.PutUint32(ifr.Data[:4], 0)
+ hostarch.ByteOrder.PutUint32(ifr.Data[:4], 0)
case linux.SIOCGIFMTU:
// Gets the MTU of the device.
- usermem.ByteOrder.PutUint32(ifr.Data[:4], iface.MTU)
+ hostarch.ByteOrder.PutUint32(ifr.Data[:4], iface.MTU)
case linux.SIOCGIFMAP:
// Gets the hardware parameters of the device.
@@ -3101,8 +3102,8 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe
continue
}
// Populate ifr.ifr_netmask (type sockaddr).
- usermem.ByteOrder.PutUint16(ifr.Data[0:2], uint16(linux.AF_INET))
- usermem.ByteOrder.PutUint16(ifr.Data[2:4], 0)
+ hostarch.ByteOrder.PutUint16(ifr.Data[0:2], uint16(linux.AF_INET))
+ hostarch.ByteOrder.PutUint16(ifr.Data[2:4], 0)
var mask uint32 = 0xffffffff << (32 - addr.PrefixLen)
// Netmask is expected to be returned as a big endian
// value.
@@ -3157,14 +3158,14 @@ func ifconfIoctl(ctx context.Context, t *kernel.Task, io usermem.IO, ifc *linux.
// Populate ifr.ifr_addr.
ifr := linux.IFReq{}
ifr.SetName(iface.Name)
- usermem.ByteOrder.PutUint16(ifr.Data[0:2], uint16(ifaceAddr.Family))
- usermem.ByteOrder.PutUint16(ifr.Data[2:4], 0)
+ hostarch.ByteOrder.PutUint16(ifr.Data[0:2], uint16(ifaceAddr.Family))
+ hostarch.ByteOrder.PutUint16(ifr.Data[2:4], 0)
copy(ifr.Data[4:8], ifaceAddr.Addr[:4])
// Copy the ifr to userspace.
dst := uintptr(ifc.Ptr) + uintptr(ifc.Len)
ifc.Len += int32(linux.SizeOfIFReq)
- if _, err := ifr.CopyOut(t, usermem.Addr(dst)); err != nil {
+ if _, err := ifr.CopyOut(t, hostarch.Addr(dst)); err != nil {
return err
}
}
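Most of the setsockopt paths above share one pattern: reject buffers shorter than sizeOfInt32, then decode the leading four bytes with hostarch.ByteOrder. A minimal sketch of that pattern (built inside the gVisor module so the hostarch import resolves) is:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

const sizeOfInt32 = 4

// parseUint32Opt follows the option-parsing pattern used throughout the
// setSockOpt* helpers: reject short buffers, then decode the first four
// bytes in native byte order via hostarch.ByteOrder.
func parseUint32Opt(optVal []byte) (uint32, error) {
	if len(optVal) < sizeOfInt32 {
		return 0, fmt.Errorf("option value too short: %d bytes", len(optVal))
	}
	return hostarch.ByteOrder.Uint32(optVal), nil
}

func main() {
	v, err := parseUint32Opt([]byte{1, 0, 0, 0})
	fmt.Println(v, err) // 1 <nil> on little-endian hosts
}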
diff --git a/pkg/sentry/socket/netstack/netstack_vfs2.go b/pkg/sentry/socket/netstack/netstack_vfs2.go
index fc29f8f13..30f3ad153 100644
--- a/pkg/sentry/socket/netstack/netstack_vfs2.go
+++ b/pkg/sentry/socket/netstack/netstack_vfs2.go
@@ -17,6 +17,7 @@ package netstack
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -197,7 +198,7 @@ func (s *SocketVFS2) Ioctl(ctx context.Context, uio usermem.IO, args arch.Syscal
// GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by
// tcpip.Endpoint.
-func (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
+func (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
// TODO(b/78348848): Unlike other socket options, SO_TIMESTAMP is
// implemented specifically for netstack.SocketVFS2 rather than
// commonEndpoint. commonEndpoint should be extended to support socket
@@ -245,7 +246,7 @@ func (s *SocketVFS2) SetSockOpt(t *kernel.Task, level int, name int, optVal []by
}
s.readMu.Lock()
defer s.readMu.Unlock()
- s.sockOptTimestamp = usermem.ByteOrder.Uint32(optVal) != 0
+ s.sockOptTimestamp = hostarch.ByteOrder.Uint32(optVal) != 0
return nil
}
if level == linux.SOL_TCP && name == linux.TCP_INQ {
@@ -254,7 +255,7 @@ func (s *SocketVFS2) SetSockOpt(t *kernel.Task, level int, name int, optVal []by
}
s.readMu.Lock()
defer s.readMu.Unlock()
- s.sockOptInq = usermem.ByteOrder.Uint32(optVal) != 0
+ s.sockOptInq = hostarch.ByteOrder.Uint32(optVal) != 0
return nil
}
diff --git a/pkg/sentry/socket/socket.go b/pkg/sentry/socket/socket.go
index 909341dcf..4c3d48096 100644
--- a/pkg/sentry/socket/socket.go
+++ b/pkg/sentry/socket/socket.go
@@ -26,6 +26,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/binary"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/sentry/device"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -216,7 +217,7 @@ type SocketOps interface {
Shutdown(t *kernel.Task, how int) *syserr.Error
// GetSockOpt implements the getsockopt(2) linux unix.
- GetSockOpt(t *kernel.Task, level int, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error)
+ GetSockOpt(t *kernel.Task, level int, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error)
// SetSockOpt implements the setsockopt(2) linux unix.
SetSockOpt(t *kernel.Task, level int, name int, opt []byte) *syserr.Error
@@ -356,7 +357,7 @@ func NewDirent(ctx context.Context, d *device.Device) *fs.Dirent {
Type: fs.Socket,
DeviceID: d.DeviceID(),
InodeID: ino,
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
})
// Dirent name matches net/socket.c:sockfs_dname.
@@ -571,19 +572,19 @@ func UnmarshalSockAddr(family int, data []byte) linux.SockAddr {
switch family {
case unix.AF_INET:
var addr linux.SockAddrInet
- binary.Unmarshal(data[:unix.SizeofSockaddrInet4], usermem.ByteOrder, &addr)
+ binary.Unmarshal(data[:unix.SizeofSockaddrInet4], hostarch.ByteOrder, &addr)
return &addr
case unix.AF_INET6:
var addr linux.SockAddrInet6
- binary.Unmarshal(data[:unix.SizeofSockaddrInet6], usermem.ByteOrder, &addr)
+ binary.Unmarshal(data[:unix.SizeofSockaddrInet6], hostarch.ByteOrder, &addr)
return &addr
case unix.AF_UNIX:
var addr linux.SockAddrUnix
- binary.Unmarshal(data[:unix.SizeofSockaddrUnix], usermem.ByteOrder, &addr)
+ binary.Unmarshal(data[:unix.SizeofSockaddrUnix], hostarch.ByteOrder, &addr)
return &addr
case unix.AF_NETLINK:
var addr linux.SockAddrNetlink
- binary.Unmarshal(data[:unix.SizeofSockaddrNetlink], usermem.ByteOrder, &addr)
+ binary.Unmarshal(data[:unix.SizeofSockaddrNetlink], hostarch.ByteOrder, &addr)
return &addr
default:
panic(fmt.Sprintf("Unsupported socket family %v", family))
@@ -693,7 +694,7 @@ func AddressAndFamily(addr []byte) (tcpip.FullAddress, uint16, *syserr.Error) {
}
// Get the rest of the fields based on the address family.
- switch family := usermem.ByteOrder.Uint16(addr); family {
+ switch family := hostarch.ByteOrder.Uint16(addr); family {
case linux.AF_UNIX:
path := addr[2:]
if len(path) > linux.UnixPathMax {
@@ -715,7 +716,7 @@ func AddressAndFamily(addr []byte) (tcpip.FullAddress, uint16, *syserr.Error) {
if len(addr) < sockAddrInetSize {
return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument
}
- binary.Unmarshal(addr[:sockAddrInetSize], usermem.ByteOrder, &a)
+ binary.Unmarshal(addr[:sockAddrInetSize], hostarch.ByteOrder, &a)
out := tcpip.FullAddress{
Addr: BytesToIPAddress(a.Addr[:]),
@@ -728,7 +729,7 @@ func AddressAndFamily(addr []byte) (tcpip.FullAddress, uint16, *syserr.Error) {
if len(addr) < sockAddrInet6Size {
return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument
}
- binary.Unmarshal(addr[:sockAddrInet6Size], usermem.ByteOrder, &a)
+ binary.Unmarshal(addr[:sockAddrInet6Size], hostarch.ByteOrder, &a)
out := tcpip.FullAddress{
Addr: BytesToIPAddress(a.Addr[:]),
@@ -744,7 +745,7 @@ func AddressAndFamily(addr []byte) (tcpip.FullAddress, uint16, *syserr.Error) {
if len(addr) < sockAddrLinkSize {
return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument
}
- binary.Unmarshal(addr[:sockAddrLinkSize], usermem.ByteOrder, &a)
+ binary.Unmarshal(addr[:sockAddrLinkSize], hostarch.ByteOrder, &a)
if a.Family != linux.AF_PACKET || a.HardwareAddrLen != header.EthernetAddressSize {
return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument
}
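AddressAndFamily above starts by pulling the leading sa_family field out of the raw sockaddr bytes before switching on it. The sketch below shows just that first step; binary.LittleEndian is an assumption standing in for hostarch.ByteOrder, and the example bytes only approximate a sockaddr_in for illustration.

package main

import (
	"encoding/binary"
	"fmt"
)

// family extracts the leading sa_family field from a raw sockaddr buffer,
// the first step AddressAndFamily performs. binary.LittleEndian stands in
// for hostarch.ByteOrder here.
func family(addr []byte) (uint16, error) {
	if len(addr) < 2 {
		return 0, fmt.Errorf("sockaddr too short: %d bytes", len(addr))
	}
	return binary.LittleEndian.Uint16(addr[:2]), nil
}

func main() {
	// Roughly a sockaddr_in: family AF_INET (2), port 8080, address 127.0.0.1.
	raw := []byte{2, 0, 0x1f, 0x90, 127, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}
	fmt.Println(family(raw)) // 2 <nil>
}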
diff --git a/pkg/sentry/socket/unix/unix.go b/pkg/sentry/socket/unix/unix.go
index b22f7973a..db7b1affe 100644
--- a/pkg/sentry/socket/unix/unix.go
+++ b/pkg/sentry/socket/unix/unix.go
@@ -24,6 +24,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -192,7 +193,7 @@ func (s *SocketOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO,
// GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by
// a transport.Endpoint.
-func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
+func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
return netstack.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outPtr, outLen)
}
diff --git a/pkg/sentry/socket/unix/unix_vfs2.go b/pkg/sentry/socket/unix/unix_vfs2.go
index 7890d1048..c39e317ff 100644
--- a/pkg/sentry/socket/unix/unix_vfs2.go
+++ b/pkg/sentry/socket/unix/unix_vfs2.go
@@ -18,6 +18,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/sockfs"
@@ -112,7 +113,7 @@ func (s *SocketVFS2) Release(ctx context.Context) {
// GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by
// a transport.Endpoint.
-func (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
+func (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) {
return netstack.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outPtr, outLen)
}
diff --git a/pkg/sentry/strace/epoll.go b/pkg/sentry/strace/epoll.go
index ae3b998c8..48650e3f9 100644
--- a/pkg/sentry/strace/epoll.go
+++ b/pkg/sentry/strace/epoll.go
@@ -21,10 +21,11 @@ import (
"gvisor.dev/gvisor/pkg/abi"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
-func epollEvent(t *kernel.Task, eventAddr usermem.Addr) string {
+func epollEvent(t *kernel.Task, eventAddr hostarch.Addr) string {
var e linux.EpollEvent
if _, err := e.CopyIn(t, eventAddr); err != nil {
return fmt.Sprintf("%#x {error reading event: %v}", eventAddr, err)
@@ -35,7 +36,7 @@ func epollEvent(t *kernel.Task, eventAddr usermem.Addr) string {
return sb.String()
}
-func epollEvents(t *kernel.Task, eventsAddr usermem.Addr, numEvents, maxBytes uint64) string {
+func epollEvents(t *kernel.Task, eventsAddr hostarch.Addr, numEvents, maxBytes uint64) string {
var sb strings.Builder
fmt.Fprintf(&sb, "%#x {", eventsAddr)
addr := eventsAddr
diff --git a/pkg/sentry/strace/poll.go b/pkg/sentry/strace/poll.go
index 074e80f9b..572a8b50b 100644
--- a/pkg/sentry/strace/poll.go
+++ b/pkg/sentry/strace/poll.go
@@ -22,7 +22,8 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/sentry/kernel"
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// PollEventSet is the set of poll(2) event flags.
@@ -52,7 +53,7 @@ func pollFD(t *kernel.Task, pfd *linux.PollFD, post bool) string {
return fmt.Sprintf("{FD: %s, Events: %s, REvents: %s}", fd(t, pfd.FD), PollEventSet.Parse(uint64(pfd.Events)), revents)
}
-func pollFDs(t *kernel.Task, addr usermem.Addr, nfds uint, post bool) string {
+func pollFDs(t *kernel.Task, addr hostarch.Addr, nfds uint, post bool) string {
if addr == 0 {
return "null"
}
diff --git a/pkg/sentry/strace/select.go b/pkg/sentry/strace/select.go
index 3a4c32aa0..e6e928157 100644
--- a/pkg/sentry/strace/select.go
+++ b/pkg/sentry/strace/select.go
@@ -19,7 +19,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
func fdsFromSet(t *kernel.Task, set []byte) []int {
@@ -35,7 +36,7 @@ func fdsFromSet(t *kernel.Task, set []byte) []int {
return fds
}
-func fdSet(t *kernel.Task, nfds int, addr usermem.Addr) string {
+func fdSet(t *kernel.Task, nfds int, addr hostarch.Addr) string {
if nfds < 0 {
return fmt.Sprintf("%#x (negative nfds)", addr)
}
diff --git a/pkg/sentry/strace/signal.go b/pkg/sentry/strace/signal.go
index c41f36e3f..e5b379a20 100644
--- a/pkg/sentry/strace/signal.go
+++ b/pkg/sentry/strace/signal.go
@@ -21,7 +21,8 @@ import (
"gvisor.dev/gvisor/pkg/abi"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// signalNames contains the names of all named signals.
@@ -100,7 +101,7 @@ var sigActionFlags = abi.FlagSet{
},
}
-func sigSet(t *kernel.Task, addr usermem.Addr) string {
+func sigSet(t *kernel.Task, addr hostarch.Addr) string {
if addr == 0 {
return "null"
}
@@ -110,7 +111,7 @@ func sigSet(t *kernel.Task, addr usermem.Addr) string {
return fmt.Sprintf("%#x (error copying sigset: %v)", addr, err)
}
- set := linux.SignalSet(usermem.ByteOrder.Uint64(b[:]))
+ set := linux.SignalSet(hostarch.ByteOrder.Uint64(b[:]))
return fmt.Sprintf("%#x %s", addr, formatSigSet(set))
}
@@ -124,7 +125,7 @@ func formatSigSet(set linux.SignalSet) string {
return fmt.Sprintf("[%v]", strings.Join(signals, " "))
}
-func sigAction(t *kernel.Task, addr usermem.Addr) string {
+func sigAction(t *kernel.Task, addr hostarch.Addr) string {
if addr == 0 {
return "null"
}
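sigSet above decodes a sigset_t copied from user memory as a 64-bit mask in native byte order, and formatSigSet then names the set bits, where bit N-1 corresponds to signal N. A small sketch of that bit-to-name expansion, with only a few signal names filled in for illustration, is:

package main

import (
	"fmt"
	"strings"
)

// describe expands a sigset_t-style 64-bit mask into signal names, where bit
// N-1 being set means signal N is in the set. Only a few names are listed
// here for illustration.
func describe(mask uint64) string {
	names := map[int]string{2: "SIGINT", 9: "SIGKILL", 15: "SIGTERM"}
	var out []string
	for sig := 1; sig <= 64; sig++ {
		if mask&(1<<uint(sig-1)) == 0 {
			continue
		}
		if n, ok := names[sig]; ok {
			out = append(out, n)
		} else {
			out = append(out, fmt.Sprintf("signal %d", sig))
		}
	}
	return "[" + strings.Join(out, " ") + "]"
}

func main() {
	mask := uint64(1<<(2-1) | 1<<(15-1)) // SIGINT and SIGTERM
	fmt.Println(describe(mask))          // [SIGINT SIGTERM]
}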
diff --git a/pkg/sentry/strace/socket.go b/pkg/sentry/strace/socket.go
index d943a7cb1..e5b7f9b96 100644
--- a/pkg/sentry/strace/socket.go
+++ b/pkg/sentry/strace/socket.go
@@ -26,7 +26,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket"
"gvisor.dev/gvisor/pkg/sentry/socket/netlink"
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// SocketFamily are the possible socket(2) families.
@@ -161,7 +162,7 @@ var controlMessageType = map[int32]string{
linux.SO_TIMESTAMP: "SO_TIMESTAMP",
}
-func cmsghdr(t *kernel.Task, addr usermem.Addr, length uint64, maxBytes uint64) string {
+func cmsghdr(t *kernel.Task, addr hostarch.Addr, length uint64, maxBytes uint64) string {
if length > maxBytes {
return fmt.Sprintf("%#x (error decoding control: invalid length (%d))", addr, length)
}
@@ -180,7 +181,7 @@ func cmsghdr(t *kernel.Task, addr usermem.Addr, length uint64, maxBytes uint64)
}
var h linux.ControlMessageHeader
- binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageHeader], usermem.ByteOrder, &h)
+ binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageHeader], hostarch.ByteOrder, &h)
var skipData bool
level := "SOL_SOCKET"
@@ -230,7 +231,7 @@ func cmsghdr(t *kernel.Task, addr usermem.Addr, length uint64, maxBytes uint64)
numRights := rightsSize / linux.SizeOfControlMessageRight
fds := make(linux.ControlMessageRights, numRights)
- binary.Unmarshal(buf[i:i+rightsSize], usermem.ByteOrder, &fds)
+ binary.Unmarshal(buf[i:i+rightsSize], hostarch.ByteOrder, &fds)
rights := make([]string, 0, len(fds))
for _, fd := range fds {
@@ -257,7 +258,7 @@ func cmsghdr(t *kernel.Task, addr usermem.Addr, length uint64, maxBytes uint64)
}
var creds linux.ControlMessageCredentials
- binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageCredentials], usermem.ByteOrder, &creds)
+ binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageCredentials], hostarch.ByteOrder, &creds)
strs = append(strs, fmt.Sprintf(
"{level=%s, type=%s, length=%d, pid: %d, uid: %d, gid: %d}",
@@ -281,7 +282,7 @@ func cmsghdr(t *kernel.Task, addr usermem.Addr, length uint64, maxBytes uint64)
}
var tv linux.Timeval
- binary.Unmarshal(buf[i:i+linux.SizeOfTimeval], usermem.ByteOrder, &tv)
+ binary.Unmarshal(buf[i:i+linux.SizeOfTimeval], hostarch.ByteOrder, &tv)
strs = append(strs, fmt.Sprintf(
"{level=%s, type=%s, length=%d, Sec: %d, Usec: %d}",
@@ -301,7 +302,7 @@ func cmsghdr(t *kernel.Task, addr usermem.Addr, length uint64, maxBytes uint64)
return fmt.Sprintf("%#x %s", addr, strings.Join(strs, ", "))
}
-func msghdr(t *kernel.Task, addr usermem.Addr, printContent bool, maxBytes uint64) string {
+func msghdr(t *kernel.Task, addr hostarch.Addr, printContent bool, maxBytes uint64) string {
var msg slinux.MessageHeader64
if _, err := msg.CopyIn(t, addr); err != nil {
return fmt.Sprintf("%#x (error decoding msghdr: %v)", addr, err)
@@ -311,17 +312,17 @@ func msghdr(t *kernel.Task, addr usermem.Addr, printContent bool, maxBytes uint6
addr,
msg.Name,
msg.NameLen,
- iovecs(t, usermem.Addr(msg.Iov), int(msg.IovLen), printContent, maxBytes),
+ iovecs(t, hostarch.Addr(msg.Iov), int(msg.IovLen), printContent, maxBytes),
)
if printContent {
- s = fmt.Sprintf("%s, control={%s}", s, cmsghdr(t, usermem.Addr(msg.Control), msg.ControlLen, maxBytes))
+ s = fmt.Sprintf("%s, control={%s}", s, cmsghdr(t, hostarch.Addr(msg.Control), msg.ControlLen, maxBytes))
} else {
s = fmt.Sprintf("%s, control=%#x, control_len=%d", s, msg.Control, msg.ControlLen)
}
return fmt.Sprintf("%s, flags=%d}", s, msg.Flags)
}
-func sockAddr(t *kernel.Task, addr usermem.Addr, length uint32) string {
+func sockAddr(t *kernel.Task, addr hostarch.Addr, length uint32) string {
if addr == 0 {
return "null"
}
@@ -335,7 +336,7 @@ func sockAddr(t *kernel.Task, addr usermem.Addr, length uint32) string {
if len(b) < 2 {
return fmt.Sprintf("%#x {address too short: %d bytes}", addr, len(b))
}
- family := usermem.ByteOrder.Uint16(b)
+ family := hostarch.ByteOrder.Uint16(b)
familyStr := SocketFamily.Parse(uint64(family))
@@ -362,7 +363,7 @@ func sockAddr(t *kernel.Task, addr usermem.Addr, length uint32) string {
}
}
-func postSockAddr(t *kernel.Task, addr usermem.Addr, lengthPtr usermem.Addr) string {
+func postSockAddr(t *kernel.Task, addr hostarch.Addr, lengthPtr hostarch.Addr) string {
if addr == 0 {
return "null"
}
@@ -379,14 +380,14 @@ func postSockAddr(t *kernel.Task, addr usermem.Addr, lengthPtr usermem.Addr) str
return sockAddr(t, addr, l)
}
-func copySockLen(t *kernel.Task, addr usermem.Addr) (uint32, error) {
+func copySockLen(t *kernel.Task, addr hostarch.Addr) (uint32, error) {
// socklen_t is 32-bits.
var l primitive.Uint32
_, err := l.CopyIn(t, addr)
return uint32(l), err
}
-func sockLenPointer(t *kernel.Task, addr usermem.Addr) string {
+func sockLenPointer(t *kernel.Task, addr hostarch.Addr) string {
if addr == 0 {
return "null"
}
@@ -420,7 +421,7 @@ func sockFlags(flags int32) string {
return SocketFlagSet.Parse(uint64(flags))
}
-func getSockOptVal(t *kernel.Task, level, optname uint64, optVal usermem.Addr, optLen usermem.Addr, maximumBlobSize uint, rval uintptr) string {
+func getSockOptVal(t *kernel.Task, level, optname uint64, optVal hostarch.Addr, optLen hostarch.Addr, maximumBlobSize uint, rval uintptr) string {
if int(rval) < 0 {
return hexNum(uint64(optVal))
}
@@ -434,7 +435,7 @@ func getSockOptVal(t *kernel.Task, level, optname uint64, optVal usermem.Addr, o
return sockOptVal(t, level, optname, optVal, uint64(l), maximumBlobSize)
}
-func sockOptVal(t *kernel.Task, level, optname uint64, optVal usermem.Addr, optLen uint64, maximumBlobSize uint) string {
+func sockOptVal(t *kernel.Task, level, optname uint64, optVal hostarch.Addr, optLen uint64, maximumBlobSize uint) string {
switch optLen {
case 1:
var v primitive.Uint8
diff --git a/pkg/sentry/strace/strace.go b/pkg/sentry/strace/strace.go
index 396744597..ec5d5f846 100644
--- a/pkg/sentry/strace/strace.go
+++ b/pkg/sentry/strace/strace.go
@@ -32,7 +32,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
pb "gvisor.dev/gvisor/pkg/sentry/strace/strace_go_proto"
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// DefaultLogMaximumSize is the default LogMaximumSize.
@@ -62,7 +63,7 @@ func hexArg(arg arch.SyscallArgument) string {
return hexNum(arg.Uint64())
}
-func iovecs(t *kernel.Task, addr usermem.Addr, iovcnt int, printContent bool, maxBytes uint64) string {
+func iovecs(t *kernel.Task, addr hostarch.Addr, iovcnt int, printContent bool, maxBytes uint64) string {
if iovcnt < 0 || iovcnt > linux.UIO_MAXIOV {
return fmt.Sprintf("%#x (error decoding iovecs: invalid iovcnt)", addr)
}
@@ -107,7 +108,7 @@ func iovecs(t *kernel.Task, addr usermem.Addr, iovcnt int, printContent bool, ma
return fmt.Sprintf("%#x %s", addr, strings.Join(iovs, ", "))
}
-func dump(t *kernel.Task, addr usermem.Addr, size uint, maximumBlobSize uint) string {
+func dump(t *kernel.Task, addr hostarch.Addr, size uint, maximumBlobSize uint) string {
origSize := size
if size > maximumBlobSize {
size = maximumBlobSize
@@ -131,7 +132,7 @@ func dump(t *kernel.Task, addr usermem.Addr, size uint, maximumBlobSize uint) st
return fmt.Sprintf("%#x %q%s", addr, b[:amt], dot)
}
-func path(t *kernel.Task, addr usermem.Addr) string {
+func path(t *kernel.Task, addr hostarch.Addr) string {
path, err := t.CopyInString(addr, linux.PATH_MAX)
if err != nil {
return fmt.Sprintf("%#x (error decoding path: %s)", addr, err)
@@ -196,7 +197,7 @@ func fdVFS2(t *kernel.Task, fd int32) string {
return fmt.Sprintf("%#x %s", fd, name)
}
-func fdpair(t *kernel.Task, addr usermem.Addr) string {
+func fdpair(t *kernel.Task, addr hostarch.Addr) string {
var fds [2]int32
_, err := primitive.CopyInt32SliceIn(t, addr, fds[:])
if err != nil {
@@ -206,7 +207,7 @@ func fdpair(t *kernel.Task, addr usermem.Addr) string {
return fmt.Sprintf("%#x [%d %d]", addr, fds[0], fds[1])
}
-func uname(t *kernel.Task, addr usermem.Addr) string {
+func uname(t *kernel.Task, addr hostarch.Addr) string {
var u linux.UtsName
if _, err := u.CopyIn(t, addr); err != nil {
return fmt.Sprintf("%#x (error decoding utsname: %s)", addr, err)
@@ -215,7 +216,7 @@ func uname(t *kernel.Task, addr usermem.Addr) string {
return fmt.Sprintf("%#x %s", addr, u)
}
-func utimensTimespec(t *kernel.Task, addr usermem.Addr) string {
+func utimensTimespec(t *kernel.Task, addr hostarch.Addr) string {
if addr == 0 {
return "null"
}
@@ -237,7 +238,7 @@ func utimensTimespec(t *kernel.Task, addr usermem.Addr) string {
return fmt.Sprintf("%#x {sec=%v nsec=%s}", addr, tim.Sec, ns)
}
-func timespec(t *kernel.Task, addr usermem.Addr) string {
+func timespec(t *kernel.Task, addr hostarch.Addr) string {
if addr == 0 {
return "null"
}
@@ -249,7 +250,7 @@ func timespec(t *kernel.Task, addr usermem.Addr) string {
return fmt.Sprintf("%#x {sec=%v nsec=%v}", addr, tim.Sec, tim.Nsec)
}
-func timeval(t *kernel.Task, addr usermem.Addr) string {
+func timeval(t *kernel.Task, addr hostarch.Addr) string {
if addr == 0 {
return "null"
}
@@ -262,7 +263,7 @@ func timeval(t *kernel.Task, addr usermem.Addr) string {
return fmt.Sprintf("%#x {sec=%v usec=%v}", addr, tim.Sec, tim.Usec)
}
-func utimbuf(t *kernel.Task, addr usermem.Addr) string {
+func utimbuf(t *kernel.Task, addr hostarch.Addr) string {
if addr == 0 {
return "null"
}
@@ -275,7 +276,7 @@ func utimbuf(t *kernel.Task, addr usermem.Addr) string {
return fmt.Sprintf("%#x {actime=%v, modtime=%v}", addr, utim.Actime, utim.Modtime)
}
-func stat(t *kernel.Task, addr usermem.Addr) string {
+func stat(t *kernel.Task, addr hostarch.Addr) string {
if addr == 0 {
return "null"
}
@@ -287,27 +288,27 @@ func stat(t *kernel.Task, addr usermem.Addr) string {
return fmt.Sprintf("%#x {dev=%d, ino=%d, mode=%s, nlink=%d, uid=%d, gid=%d, rdev=%d, size=%d, blksize=%d, blocks=%d, atime=%s, mtime=%s, ctime=%s}", addr, stat.Dev, stat.Ino, linux.FileMode(stat.Mode), stat.Nlink, stat.UID, stat.GID, stat.Rdev, stat.Size, stat.Blksize, stat.Blocks, time.Unix(stat.ATime.Sec, stat.ATime.Nsec), time.Unix(stat.MTime.Sec, stat.MTime.Nsec), time.Unix(stat.CTime.Sec, stat.CTime.Nsec))
}
-func itimerval(t *kernel.Task, addr usermem.Addr) string {
+func itimerval(t *kernel.Task, addr hostarch.Addr) string {
if addr == 0 {
return "null"
}
interval := timeval(t, addr)
- value := timeval(t, addr+usermem.Addr((*linux.Timeval)(nil).SizeBytes()))
+ value := timeval(t, addr+hostarch.Addr((*linux.Timeval)(nil).SizeBytes()))
return fmt.Sprintf("%#x {interval=%s, value=%s}", addr, interval, value)
}
-func itimerspec(t *kernel.Task, addr usermem.Addr) string {
+func itimerspec(t *kernel.Task, addr hostarch.Addr) string {
if addr == 0 {
return "null"
}
interval := timespec(t, addr)
- value := timespec(t, addr+usermem.Addr((*linux.Timespec)(nil).SizeBytes()))
+ value := timespec(t, addr+hostarch.Addr((*linux.Timespec)(nil).SizeBytes()))
return fmt.Sprintf("%#x {interval=%s, value=%s}", addr, interval, value)
}
-func stringVector(t *kernel.Task, addr usermem.Addr) string {
+func stringVector(t *kernel.Task, addr hostarch.Addr) string {
vec, err := t.CopyInVector(addr, slinux.ExecMaxElemSize, slinux.ExecMaxTotalSize)
if err != nil {
return fmt.Sprintf("%#x {error copying vector: %v}", addr, err)
@@ -323,7 +324,7 @@ func stringVector(t *kernel.Task, addr usermem.Addr) string {
return s
}
-func rusage(t *kernel.Task, addr usermem.Addr) string {
+func rusage(t *kernel.Task, addr hostarch.Addr) string {
if addr == 0 {
return "null"
}
@@ -335,7 +336,7 @@ func rusage(t *kernel.Task, addr usermem.Addr) string {
return fmt.Sprintf("%#x %+v", addr, ru)
}
-func capHeader(t *kernel.Task, addr usermem.Addr) string {
+func capHeader(t *kernel.Task, addr hostarch.Addr) string {
if addr == 0 {
return "null"
}
@@ -360,7 +361,7 @@ func capHeader(t *kernel.Task, addr usermem.Addr) string {
return fmt.Sprintf("%#x {Version: %s, Pid: %d}", addr, version, hdr.Pid)
}
-func capData(t *kernel.Task, hdrAddr, dataAddr usermem.Addr) string {
+func capData(t *kernel.Task, hdrAddr, dataAddr hostarch.Addr) string {
if dataAddr == 0 {
return "null"
}
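itimerval and itimerspec above locate the second struct by plain address arithmetic: the given address plus the size of the first struct, now expressed as hostarch.Addr instead of usermem.Addr. A minimal sketch (built inside the gVisor module; the 16-byte timeval size is an assumption for linux/amd64) is:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	// sizeof(struct timeval) on linux/amd64; an assumption for this sketch.
	const sizeOfTimeval = 16

	// strace prints an itimerval by decoding two timevals back to back: the
	// interval at the given address and the value one struct further on.
	// hostarch.Addr is an integer type, so plain addition is all that's needed.
	base := hostarch.Addr(0x7f0000001000)
	interval := base
	value := base + hostarch.Addr(sizeOfTimeval)
	fmt.Printf("interval at %#x, value at %#x\n", interval, value)
}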
diff --git a/pkg/sentry/syscalls/linux/linux64.go b/pkg/sentry/syscalls/linux/linux64.go
index ac53a0c0e..2d2212605 100644
--- a/pkg/sentry/syscalls/linux/linux64.go
+++ b/pkg/sentry/syscalls/linux/linux64.go
@@ -18,11 +18,11 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/syscalls"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
const (
@@ -405,7 +405,7 @@ var AMD64 = &kernel.SyscallTable{
434: syscalls.ErrorWithEvent("pidfd_open", syserror.ENOSYS, "", nil),
435: syscalls.ErrorWithEvent("clone3", syserror.ENOSYS, "", nil),
},
- Emulate: map[usermem.Addr]uintptr{
+ Emulate: map[hostarch.Addr]uintptr{
0xffffffffff600000: 96, // vsyscall gettimeofday(2)
0xffffffffff600400: 201, // vsyscall time(2)
0xffffffffff600800: 309, // vsyscall getcpu(2)
@@ -723,7 +723,7 @@ var ARM64 = &kernel.SyscallTable{
434: syscalls.ErrorWithEvent("pidfd_open", syserror.ENOSYS, "", nil),
435: syscalls.ErrorWithEvent("clone3", syserror.ENOSYS, "", nil),
},
- Emulate: map[usermem.Addr]uintptr{},
+ Emulate: map[hostarch.Addr]uintptr{},
Missing: func(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, error) {
t.Kernel().EmitUnimplementedEvent(t)
return 0, syserror.ENOSYS
diff --git a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
index bd13e39f2..0eb2e184b 100644
--- a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
@@ -10,9 +10,9 @@ package linux
import (
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -70,7 +70,7 @@ func (d *direntHdr) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (d *direntHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (d *direntHdr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
d.MarshalBytes(buf) // escapes: fallback.
@@ -79,13 +79,13 @@ func (d *direntHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (d *direntHdr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (d *direntHdr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return d.CopyOutN(cc, addr, d.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (d *direntHdr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (d *direntHdr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Type direntHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -111,21 +111,21 @@ func (o *oldDirentHdr) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (o *oldDirentHdr) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(o.Ino))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(o.Ino))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(o.Off))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(o.Off))
dst = dst[8:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(o.Reclen))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(o.Reclen))
dst = dst[2:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (o *oldDirentHdr) UnmarshalBytes(src []byte) {
- o.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ o.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- o.Off = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ o.Off = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- o.Reclen = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ o.Reclen = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
}
@@ -149,7 +149,7 @@ func (o *oldDirentHdr) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (o *oldDirentHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (o *oldDirentHdr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
o.MarshalBytes(buf) // escapes: fallback.
@@ -158,13 +158,13 @@ func (o *oldDirentHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (o *oldDirentHdr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (o *oldDirentHdr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return o.CopyOutN(cc, addr, o.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (o *oldDirentHdr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (o *oldDirentHdr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Type oldDirentHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -190,17 +190,17 @@ func (r *rlimit64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (r *rlimit64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.Cur))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.Cur))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(r.Max))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.Max))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (r *rlimit64) UnmarshalBytes(src []byte) {
- r.Cur = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ r.Cur = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- r.Max = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ r.Max = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -222,7 +222,7 @@ func (r *rlimit64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (r *rlimit64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (r *rlimit64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -239,13 +239,13 @@ func (r *rlimit64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (r *rlimit64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (r *rlimit64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return r.CopyOutN(cc, addr, r.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (r *rlimit64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (r *rlimit64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -283,13 +283,13 @@ func (s *SchedParam) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SchedParam) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.schedPriority))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.schedPriority))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SchedParam) UnmarshalBytes(src []byte) {
- s.schedPriority = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.schedPriority = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -311,7 +311,7 @@ func (s *SchedParam) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SchedParam) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SchedParam) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -328,13 +328,13 @@ func (s *SchedParam) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SchedParam) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SchedParam) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SchedParam) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SchedParam) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -373,21 +373,21 @@ func (u *userSockFprog) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (u *userSockFprog) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(u.Len))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(u.Len))
dst = dst[2:]
// Padding: dst[:sizeof(byte)*6] ~= [6]byte{0}
dst = dst[1*(6):]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Filter))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Filter))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (u *userSockFprog) UnmarshalBytes(src []byte) {
- u.Len = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ u.Len = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
// Padding: ~ copy([6]byte(u._), src[:sizeof(byte)*6])
src = src[1*(6):]
- u.Filter = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ u.Filter = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -409,7 +409,7 @@ func (u *userSockFprog) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *userSockFprog) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *userSockFprog) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -426,13 +426,13 @@ func (u *userSockFprog) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *userSockFprog) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *userSockFprog) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *userSockFprog) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *userSockFprog) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -470,21 +470,21 @@ func (m *MessageHeader64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (m *MessageHeader64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
@@ -492,21 +492,21 @@ func (m *MessageHeader64) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (m *MessageHeader64) UnmarshalBytes(src []byte) {
- m.Name = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.Name = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ m.NameLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.Iov = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.IovLen = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.Control = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.Control = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.ControlLen = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.Flags = int32(usermem.ByteOrder.Uint32(src[:4]))
+ m.Flags = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -530,7 +530,7 @@ func (m *MessageHeader64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -547,13 +547,13 @@ func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -594,7 +594,7 @@ func (m *multipleMessageHeader64) SizeBytes() int {
func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()])
dst = dst[m.msgHdr.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
@@ -604,7 +604,7 @@ func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()])
src = src[m.msgHdr.SizeBytes():]
- m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ m.msgLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -638,7 +638,7 @@ func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !m.msgHdr.Packed() {
// Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
@@ -662,13 +662,13 @@ func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !m.msgHdr.Packed() {
// Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
diff --git a/pkg/sentry/syscalls/linux/sigset.go b/pkg/sentry/syscalls/linux/sigset.go
index 434559b80..e8c2d8f9e 100644
--- a/pkg/sentry/syscalls/linux/sigset.go
+++ b/pkg/sentry/syscalls/linux/sigset.go
@@ -16,9 +16,9 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// CopyInSigSet copies in a sigset_t, checks its size, and ensures that KILL and
@@ -27,7 +27,7 @@ import (
// TODO(gvisor.dev/issue/1624): This is only exported because
// syscalls/vfs2/signal.go depends on it. Once vfs1 is deleted and the vfs2
// syscalls are moved into this package, then they can be unexported.
-func CopyInSigSet(t *kernel.Task, sigSetAddr usermem.Addr, size uint) (linux.SignalSet, error) {
+func CopyInSigSet(t *kernel.Task, sigSetAddr hostarch.Addr, size uint) (linux.SignalSet, error) {
if size != linux.SignalSetSize {
return 0, syserror.EINVAL
}
@@ -35,14 +35,14 @@ func CopyInSigSet(t *kernel.Task, sigSetAddr usermem.Addr, size uint) (linux.Sig
if _, err := t.CopyInBytes(sigSetAddr, b); err != nil {
return 0, err
}
- mask := usermem.ByteOrder.Uint64(b[:])
+ mask := hostarch.ByteOrder.Uint64(b[:])
return linux.SignalSet(mask) &^ kernel.UnblockableSignals, nil
}
// copyOutSigSet copies out a sigset_t.
-func copyOutSigSet(t *kernel.Task, sigSetAddr usermem.Addr, mask linux.SignalSet) error {
+func copyOutSigSet(t *kernel.Task, sigSetAddr hostarch.Addr, mask linux.SignalSet) error {
b := t.CopyScratchBuffer(8)
- usermem.ByteOrder.PutUint64(b, uint64(mask))
+ hostarch.ByteOrder.PutUint64(b, uint64(mask))
_, err := t.CopyOutBytes(sigSetAddr, b)
return err
}
@@ -55,15 +55,15 @@ func copyOutSigSet(t *kernel.Task, sigSetAddr usermem.Addr, mask linux.SignalSet
// };
//
// and returns sigset_addr and size.
-func copyInSigSetWithSize(t *kernel.Task, addr usermem.Addr) (usermem.Addr, uint, error) {
+func copyInSigSetWithSize(t *kernel.Task, addr hostarch.Addr) (hostarch.Addr, uint, error) {
switch t.Arch().Width() {
case 8:
in := t.CopyScratchBuffer(16)
if _, err := t.CopyInBytes(addr, in); err != nil {
return 0, 0, err
}
- maskAddr := usermem.Addr(usermem.ByteOrder.Uint64(in[0:]))
- maskSize := uint(usermem.ByteOrder.Uint64(in[8:]))
+ maskAddr := hostarch.Addr(hostarch.ByteOrder.Uint64(in[0:]))
+ maskSize := uint(hostarch.ByteOrder.Uint64(in[8:]))
return maskAddr, maskSize, nil
default:
return 0, 0, syserror.ENOSYS
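
Note: copyInSigSetWithSize above reads a 16-byte struct { const sigset_t *sigset_addr; size_t sizeof_sigset; } and returns the address and size. A standalone sketch of that decoding, assuming the little-endian byte order that hostarch.ByteOrder resolves to on the supported architectures; the Addr alias and decodeSigSetWithSize are illustrative names, not gVisor APIs.

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // Addr stands in for hostarch.Addr in this standalone sketch.
    type Addr uintptr

    // decodeSigSetWithSize mirrors the layout handled by copyInSigSetWithSize:
    // a 16-byte struct holding a pointer to the sigset followed by its size.
    func decodeSigSetWithSize(in []byte) (Addr, uint, error) {
        if len(in) != 16 {
            return 0, 0, fmt.Errorf("expected 16 bytes, got %d", len(in))
        }
        maskAddr := Addr(binary.LittleEndian.Uint64(in[0:]))
        maskSize := uint(binary.LittleEndian.Uint64(in[8:]))
        return maskAddr, maskSize, nil
    }

    func main() {
        // A fake 16-byte buffer as it might be copied in from user memory.
        buf := make([]byte, 16)
        binary.LittleEndian.PutUint64(buf[0:], 0x7f0000001000) // sigset_addr
        binary.LittleEndian.PutUint64(buf[8:], 8)              // sizeof(sigset_t)
        addr, size, _ := decodeSigSetWithSize(buf)
        fmt.Printf("sigset at %#x, %d bytes\n", addr, size)
    }
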
diff --git a/pkg/sentry/syscalls/linux/sys_aio.go b/pkg/sentry/syscalls/linux/sys_aio.go
index c2285f796..70e8569a8 100644
--- a/pkg/sentry/syscalls/linux/sys_aio.go
+++ b/pkg/sentry/syscalls/linux/sys_aio.go
@@ -17,6 +17,7 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -152,7 +153,7 @@ func IoGetevents(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
}
// Keep rolling.
- eventsAddr += usermem.Addr(linux.IOEventSize)
+ eventsAddr += hostarch.Addr(linux.IOEventSize)
}
// Everything finished.
@@ -191,12 +192,12 @@ func memoryFor(t *kernel.Task, cb *linux.IOCallback) (usermem.IOSequence, error)
// I/O.
switch cb.OpCode {
case linux.IOCB_CMD_PREAD, linux.IOCB_CMD_PWRITE:
- return t.SingleIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{
+ return t.SingleIOSequence(hostarch.Addr(cb.Buf), bytes, usermem.IOOpts{
AddressSpaceActive: false,
})
case linux.IOCB_CMD_PREADV, linux.IOCB_CMD_PWRITEV:
- return t.IovecsIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{
+ return t.IovecsIOSequence(hostarch.Addr(cb.Buf), bytes, usermem.IOOpts{
AddressSpaceActive: false,
})
@@ -219,7 +220,7 @@ func IoCancel(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// LINT.IfChange
-func getAIOCallback(t *kernel.Task, file *fs.File, cbAddr usermem.Addr, cb *linux.IOCallback, ioseq usermem.IOSequence, actx *mm.AIOContext, eventFile *fs.File) kernel.AIOCallback {
+func getAIOCallback(t *kernel.Task, file *fs.File, cbAddr hostarch.Addr, cb *linux.IOCallback, ioseq usermem.IOSequence, actx *mm.AIOContext, eventFile *fs.File) kernel.AIOCallback {
return func(ctx context.Context) {
if actx.Dead() {
actx.CancelPendingRequest()
@@ -264,7 +265,7 @@ func getAIOCallback(t *kernel.Task, file *fs.File, cbAddr usermem.Addr, cb *linu
}
// submitCallback processes a single callback.
-func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr usermem.Addr) error {
+func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr hostarch.Addr) error {
file := t.GetFile(cb.FD)
if file == nil {
// File not found.
@@ -339,7 +340,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
for i := int32(0); i < nrEvents; i++ {
// Copy in the callback address.
- var cbAddr usermem.Addr
+ var cbAddr hostarch.Addr
switch t.Arch().Width() {
case 8:
var cbAddrP primitive.Uint64
@@ -351,7 +352,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Nothing done.
return 0, nil, err
}
- cbAddr = usermem.Addr(cbAddrP)
+ cbAddr = hostarch.Addr(cbAddrP)
default:
return 0, nil, syserror.ENOSYS
}
@@ -379,7 +380,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
}
// Advance to the next one.
- addr += usermem.Addr(t.Arch().Width())
+ addr += hostarch.Addr(t.Arch().Width())
}
return uintptr(nrEvents), nil, nil
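
Note: the IoSubmit loop above walks a user array of pointers to struct iocb, advancing the address by the task's pointer width after each entry. A standalone sketch of that iteration, with "user memory" replaced by a byte slice and the width fixed to 8 bytes (a 64-bit task); the Addr alias and nextCallbackAddrs are illustrative only.

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // Addr stands in for hostarch.Addr in this standalone sketch.
    type Addr uintptr

    // nextCallbackAddrs reads nrEvents consecutive iocb pointers starting at
    // base, advancing by the pointer width after each one, as in IoSubmit.
    func nextCallbackAddrs(mem []byte, base Addr, nrEvents int) []Addr {
        const width = 8 // t.Arch().Width() on a 64-bit task
        addrs := make([]Addr, 0, nrEvents)
        addr := base
        for i := 0; i < nrEvents; i++ {
            off := int(addr - base)
            cbAddr := Addr(binary.LittleEndian.Uint64(mem[off : off+width]))
            addrs = append(addrs, cbAddr)
            addr += Addr(width) // advance to the next one
        }
        return addrs
    }

    func main() {
        mem := make([]byte, 16)
        binary.LittleEndian.PutUint64(mem[0:], 0x1000) // first iocb pointer
        binary.LittleEndian.PutUint64(mem[8:], 0x2000) // second iocb pointer
        fmt.Println(nextCallbackAddrs(mem, 0, 2)) // [4096 8192]
    }
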
diff --git a/pkg/sentry/syscalls/linux/sys_file.go b/pkg/sentry/syscalls/linux/sys_file.go
index fd9649340..9cd238efd 100644
--- a/pkg/sentry/syscalls/linux/sys_file.go
+++ b/pkg/sentry/syscalls/linux/sys_file.go
@@ -18,6 +18,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -29,7 +30,6 @@ import (
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// fileOpAt performs an operation on the second last component in the path.
@@ -115,7 +115,7 @@ func fileOpOn(t *kernel.Task, dirFD int32, path string, resolve bool, fn func(ro
}
// copyInPath copies a path in.
-func copyInPath(t *kernel.Task, addr usermem.Addr, allowEmpty bool) (path string, dirPath bool, err error) {
+func copyInPath(t *kernel.Task, addr hostarch.Addr, allowEmpty bool) (path string, dirPath bool, err error) {
path, err = t.CopyInString(addr, linux.PATH_MAX)
if err != nil {
return "", false, err
@@ -133,7 +133,7 @@ func copyInPath(t *kernel.Task, addr usermem.Addr, allowEmpty bool) (path string
// LINT.IfChange
-func openAt(t *kernel.Task, dirFD int32, addr usermem.Addr, flags uint) (fd uintptr, err error) {
+func openAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint) (fd uintptr, err error) {
path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return 0, err
@@ -208,7 +208,7 @@ func openAt(t *kernel.Task, dirFD int32, addr usermem.Addr, flags uint) (fd uint
return fd, err // Use result in frame.
}
-func mknodAt(t *kernel.Task, dirFD int32, addr usermem.Addr, mode linux.FileMode) error {
+func mknodAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode linux.FileMode) error {
path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return err
@@ -301,7 +301,7 @@ func Mknodat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
return 0, nil, mknodAt(t, dirFD, path, mode)
}
-func createAt(t *kernel.Task, dirFD int32, addr usermem.Addr, flags uint, mode linux.FileMode) (fd uintptr, err error) {
+func createAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint, mode linux.FileMode) (fd uintptr, err error) {
path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return 0, err
@@ -515,7 +515,7 @@ func (ac accessContext) Value(key interface{}) interface{} {
}
}
-func accessAt(t *kernel.Task, dirFD int32, addr usermem.Addr, mode uint) error {
+func accessAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode uint) error {
const rOK = 4
const wOK = 2
const xOK = 1
@@ -694,7 +694,7 @@ func Getcwd(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
}
// Top it off with a terminator.
- _, err = t.CopyOutBytes(addr+usermem.Addr(bytes), []byte("\x00"))
+ _, err = t.CopyOutBytes(addr+hostarch.Addr(bytes), []byte("\x00"))
return uintptr(bytes + 1), nil, err
}
@@ -1164,7 +1164,7 @@ func Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
// LINT.IfChange
-func mkdirAt(t *kernel.Task, dirFD int32, addr usermem.Addr, mode linux.FileMode) error {
+func mkdirAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode linux.FileMode) error {
path, _, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return err
@@ -1216,7 +1216,7 @@ func Mkdirat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
return 0, nil, mkdirAt(t, dirFD, addr, mode)
}
-func rmdirAt(t *kernel.Task, dirFD int32, addr usermem.Addr) error {
+func rmdirAt(t *kernel.Task, dirFD int32, addr hostarch.Addr) error {
path, _, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return err
@@ -1256,7 +1256,7 @@ func Rmdir(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, rmdirAt(t, linux.AT_FDCWD, addr)
}
-func symlinkAt(t *kernel.Task, dirFD int32, newAddr usermem.Addr, oldAddr usermem.Addr) error {
+func symlinkAt(t *kernel.Task, dirFD int32, newAddr hostarch.Addr, oldAddr hostarch.Addr) error {
newPath, dirPath, err := copyInPath(t, newAddr, false /* allowEmpty */)
if err != nil {
return err
@@ -1341,7 +1341,7 @@ func mayLinkAt(t *kernel.Task, target *fs.Inode) error {
// linkAt creates a hard link to the target specified by oldDirFD and oldAddr,
// at the path specified by newDirFD and newAddr. If resolve is true, then the symlinks
// will be followed when evaluating the target.
-func linkAt(t *kernel.Task, oldDirFD int32, oldAddr usermem.Addr, newDirFD int32, newAddr usermem.Addr, resolve, allowEmpty bool) error {
+func linkAt(t *kernel.Task, oldDirFD int32, oldAddr hostarch.Addr, newDirFD int32, newAddr hostarch.Addr, resolve, allowEmpty bool) error {
oldPath, _, err := copyInPath(t, oldAddr, allowEmpty)
if err != nil {
return err
@@ -1448,7 +1448,7 @@ func Linkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
// LINT.IfChange
-func readlinkAt(t *kernel.Task, dirFD int32, addr usermem.Addr, bufAddr usermem.Addr, size uint) (copied uintptr, err error) {
+func readlinkAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, bufAddr hostarch.Addr, size uint) (copied uintptr, err error) {
path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return 0, err
@@ -1511,7 +1511,7 @@ func Readlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// LINT.IfChange
-func unlinkAt(t *kernel.Task, dirFD int32, addr usermem.Addr) error {
+func unlinkAt(t *kernel.Task, dirFD int32, addr hostarch.Addr) error {
path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return err
@@ -1728,7 +1728,7 @@ func chown(t *kernel.Task, d *fs.Dirent, uid auth.UID, gid auth.GID) error {
return nil
}
-func chownAt(t *kernel.Task, fd int32, addr usermem.Addr, resolve, allowEmpty bool, uid auth.UID, gid auth.GID) error {
+func chownAt(t *kernel.Task, fd int32, addr hostarch.Addr, resolve, allowEmpty bool, uid auth.UID, gid auth.GID) error {
path, _, err := copyInPath(t, addr, allowEmpty)
if err != nil {
return err
@@ -1815,7 +1815,7 @@ func chmod(t *kernel.Task, d *fs.Dirent, mode linux.FileMode) error {
return nil
}
-func chmodAt(t *kernel.Task, fd int32, addr usermem.Addr, mode linux.FileMode) error {
+func chmodAt(t *kernel.Task, fd int32, addr hostarch.Addr, mode linux.FileMode) error {
path, _, err := copyInPath(t, addr, false /* allowEmpty */)
if err != nil {
return err
@@ -1866,7 +1866,7 @@ func defaultSetToSystemTimeSpec() fs.TimeSpec {
}
}
-func utimes(t *kernel.Task, dirFD int32, addr usermem.Addr, ts fs.TimeSpec, resolve bool) error {
+func utimes(t *kernel.Task, dirFD int32, addr hostarch.Addr, ts fs.TimeSpec, resolve bool) error {
setTimestamp := func(root *fs.Dirent, d *fs.Dirent, _ uint) error {
// Does the task own the file?
if !d.Inode.CheckOwnership(t) {
@@ -2030,7 +2030,7 @@ func Futimesat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
// LINT.IfChange
-func renameAt(t *kernel.Task, oldDirFD int32, oldAddr usermem.Addr, newDirFD int32, newAddr usermem.Addr) error {
+func renameAt(t *kernel.Task, oldDirFD int32, oldAddr hostarch.Addr, newDirFD int32, newAddr hostarch.Addr) error {
newPath, _, err := copyInPath(t, newAddr, false /* allowEmpty */)
if err != nil {
return err
diff --git a/pkg/sentry/syscalls/linux/sys_futex.go b/pkg/sentry/syscalls/linux/sys_futex.go
index f39ce0639..eeea1613b 100644
--- a/pkg/sentry/syscalls/linux/sys_futex.go
+++ b/pkg/sentry/syscalls/linux/sys_futex.go
@@ -18,11 +18,11 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// futexWaitRestartBlock encapsulates the state required to restart futex(2)
@@ -41,7 +41,7 @@ type futexWaitRestartBlock struct {
// Restart implements kernel.SyscallRestartBlock.Restart.
func (f *futexWaitRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
- return futexWaitDuration(t, f.duration, false, usermem.Addr(f.addr), f.private, f.val, f.mask)
+ return futexWaitDuration(t, f.duration, false, hostarch.Addr(f.addr), f.private, f.val, f.mask)
}
// futexWaitAbsolute performs a FUTEX_WAIT_BITSET, blocking until the wait is
@@ -51,7 +51,7 @@ func (f *futexWaitRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
//
// If blocking is interrupted, the syscall is restarted with the original
// arguments.
-func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts linux.Timespec, forever bool, addr usermem.Addr, private bool, val, mask uint32) (uintptr, error) {
+func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts linux.Timespec, forever bool, addr hostarch.Addr, private bool, val, mask uint32) (uintptr, error) {
w := t.FutexWaiter()
err := t.Futex().WaitPrepare(w, t, addr, private, val, mask)
if err != nil {
@@ -87,7 +87,7 @@ func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts linux.Timespec, fo
// syscall. If forever is true, the syscall is restarted with the original
// arguments. If forever is false, duration is a relative timeout and the
// syscall is restarted with the remaining timeout.
-func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, addr usermem.Addr, private bool, val, mask uint32) (uintptr, error) {
+func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, addr hostarch.Addr, private bool, val, mask uint32) (uintptr, error) {
w := t.FutexWaiter()
err := t.Futex().WaitPrepare(w, t, addr, private, val, mask)
if err != nil {
@@ -124,7 +124,7 @@ func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, add
return 0, syserror.ERESTART_RESTARTBLOCK
}
-func futexLockPI(t *kernel.Task, ts linux.Timespec, forever bool, addr usermem.Addr, private bool) error {
+func futexLockPI(t *kernel.Task, ts linux.Timespec, forever bool, addr hostarch.Addr, private bool) error {
w := t.FutexWaiter()
locked, err := t.Futex().LockPI(w, t, addr, uint32(t.ThreadID()), private, false)
if err != nil {
@@ -152,7 +152,7 @@ func futexLockPI(t *kernel.Task, ts linux.Timespec, forever bool, addr usermem.A
return syserror.ConvertIntr(err, syserror.ERESTARTSYS)
}
-func tryLockPI(t *kernel.Task, addr usermem.Addr, private bool) error {
+func tryLockPI(t *kernel.Task, addr hostarch.Addr, private bool) error {
w := t.FutexWaiter()
locked, err := t.Futex().LockPI(w, t, addr, uint32(t.ThreadID()), private, true)
if err != nil {
diff --git a/pkg/sentry/syscalls/linux/sys_getdents.go b/pkg/sentry/syscalls/linux/sys_getdents.go
index b25f7d881..bbba71d8f 100644
--- a/pkg/sentry/syscalls/linux/sys_getdents.go
+++ b/pkg/sentry/syscalls/linux/sys_getdents.go
@@ -19,6 +19,7 @@ import (
"io"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
@@ -62,7 +63,7 @@ func Getdents64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// getdents implements the core of getdents(2)/getdents64(2).
// f is the syscall implementation dirent serialization function.
-func getdents(t *kernel.Task, fd int32, addr usermem.Addr, size int, f func(*dirent, io.Writer) (int, error)) (uintptr, error) {
+func getdents(t *kernel.Task, fd int32, addr hostarch.Addr, size int, f func(*dirent, io.Writer) (int, error)) (uintptr, error) {
dir := t.GetFile(fd)
if dir == nil {
return 0, syserror.EBADF
diff --git a/pkg/sentry/syscalls/linux/sys_mempolicy.go b/pkg/sentry/syscalls/linux/sys_mempolicy.go
index 9b4a5c3f1..6d27f4292 100644
--- a/pkg/sentry/syscalls/linux/sys_mempolicy.go
+++ b/pkg/sentry/syscalls/linux/sys_mempolicy.go
@@ -18,6 +18,7 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
@@ -31,7 +32,7 @@ const (
allowedNodemask = (1 << maxNodes) - 1
)
-func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64, error) {
+func copyInNodemask(t *kernel.Task, addr hostarch.Addr, maxnode uint32) (uint64, error) {
// "nodemask points to a bit mask of node IDs that contains up to maxnode
// bits. The bit mask size is rounded to the next multiple of
// sizeof(unsigned long), but the kernel will use bits only up to maxnode.
@@ -41,7 +42,7 @@ func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64,
// because of what appears to be a bug: mm/mempolicy.c:get_nodes() uses
// maxnode-1, not maxnode, as the number of bits.
bits := maxnode - 1
- if bits > usermem.PageSize*8 { // also handles overflow from maxnode == 0
+ if bits > hostarch.PageSize*8 { // also handles overflow from maxnode == 0
return 0, syserror.EINVAL
}
if bits == 0 {
@@ -53,7 +54,7 @@ func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64,
if _, err := t.CopyInBytes(addr, buf); err != nil {
return 0, err
}
- val := usermem.ByteOrder.Uint64(buf)
+ val := hostarch.ByteOrder.Uint64(buf)
// Check that only allowed bits in the first unsigned long in the nodemask
// are set.
if val&^allowedNodemask != 0 {
@@ -68,11 +69,11 @@ func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64,
return val, nil
}
-func copyOutNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32, val uint64) error {
+func copyOutNodemask(t *kernel.Task, addr hostarch.Addr, maxnode uint32, val uint64) error {
// mm/mempolicy.c:copy_nodes_to_user() also uses maxnode-1 as the number of
// bits.
bits := maxnode - 1
- if bits > usermem.PageSize*8 { // also handles overflow from maxnode == 0
+ if bits > hostarch.PageSize*8 { // also handles overflow from maxnode == 0
return syserror.EINVAL
}
if bits == 0 {
@@ -80,7 +81,7 @@ func copyOutNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32, val uint
}
// Copy out the first unsigned long in the nodemask.
buf := t.CopyScratchBuffer(8)
- usermem.ByteOrder.PutUint64(buf, val)
+ hostarch.ByteOrder.PutUint64(buf, val)
if _, err := t.CopyOutBytes(addr, buf); err != nil {
return err
}
@@ -258,7 +259,7 @@ func Mbind(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, err
}
-func copyInMempolicyNodemask(t *kernel.Task, modeWithFlags linux.NumaPolicy, nodemask usermem.Addr, maxnode uint32) (linux.NumaPolicy, uint64, error) {
+func copyInMempolicyNodemask(t *kernel.Task, modeWithFlags linux.NumaPolicy, nodemask hostarch.Addr, maxnode uint32) (linux.NumaPolicy, uint64, error) {
flags := linux.NumaPolicy(modeWithFlags & linux.MPOL_MODE_FLAGS)
mode := linux.NumaPolicy(modeWithFlags &^ linux.MPOL_MODE_FLAGS)
if flags == linux.MPOL_MODE_FLAGS {
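
Note: copyInNodemask and copyOutNodemask above size the mask from maxnode-1 bits, capped at one page worth of bits (now hostarch.PageSize*8). A standalone sketch that roughly mirrors that size logic; nodemaskBytes is an illustrative helper, not a gVisor function, and error handling is reduced to a placeholder.

    package main

    import "fmt"

    // pageSize stands in for hostarch.PageSize in this standalone sketch.
    const pageSize = 4096

    // nodemaskBytes roughly mirrors the size logic in copyInNodemask: use
    // maxnode-1 bits (which also rejects maxnode == 0 via overflow), cap the
    // mask at one page worth of bits, and round the byte count up to a
    // multiple of sizeof(unsigned long).
    func nodemaskBytes(maxnode uint32) (int, error) {
        bits := maxnode - 1
        if bits > pageSize*8 { // also handles overflow from maxnode == 0
            return 0, fmt.Errorf("EINVAL")
        }
        if bits == 0 {
            return 0, nil
        }
        return int((bits + 63) / 64 * 8), nil
    }

    func main() {
        n, _ := nodemaskBytes(65) // 64 bits -> one unsigned long
        fmt.Println(n)            // 8
    }
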
diff --git a/pkg/sentry/syscalls/linux/sys_mmap.go b/pkg/sentry/syscalls/linux/sys_mmap.go
index cd8dfdfa4..70da0707d 100644
--- a/pkg/sentry/syscalls/linux/sys_mmap.go
+++ b/pkg/sentry/syscalls/linux/sys_mmap.go
@@ -23,7 +23,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/mm"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Brk implements linux syscall brk(2).
@@ -61,12 +62,12 @@ func Mmap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
Unmap: fixed,
Map32Bit: map32bit,
Private: private,
- Perms: usermem.AccessType{
+ Perms: hostarch.AccessType{
Read: linux.PROT_READ&prot != 0,
Write: linux.PROT_WRITE&prot != 0,
Execute: linux.PROT_EXEC&prot != 0,
},
- MaxPerms: usermem.AnyAccess,
+ MaxPerms: hostarch.AnyAccess,
GrowsDown: linux.MAP_GROWSDOWN&flags != 0,
Precommit: linux.MAP_POPULATE&flags != 0,
}
@@ -160,7 +161,7 @@ func Mremap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
func Mprotect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
length := args[1].Uint64()
prot := args[2].Int()
- err := t.MemoryManager().MProtect(args[0].Pointer(), length, usermem.AccessType{
+ err := t.MemoryManager().MProtect(args[0].Pointer(), length, hostarch.AccessType{
Read: linux.PROT_READ&prot != 0,
Write: linux.PROT_WRITE&prot != 0,
Execute: linux.PROT_EXEC&prot != 0,
@@ -183,7 +184,7 @@ func Madvise(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
return 0, nil, nil
}
// Not explicitly stated: length need not be page-aligned.
- lenAddr, ok := usermem.Addr(length).RoundUp()
+ lenAddr, ok := hostarch.Addr(length).RoundUp()
if !ok {
return 0, nil, syserror.EINVAL
}
@@ -232,7 +233,7 @@ func Mincore(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// "The length argument need not be a multiple of the page size, but since
// residency information is returned for whole pages, length is effectively
// rounded up to the next multiple of the page size." - mincore(2)
- la, ok := usermem.Addr(length).RoundUp()
+ la, ok := hostarch.Addr(length).RoundUp()
if !ok {
return 0, nil, syserror.ENOMEM
}
@@ -247,7 +248,7 @@ func Mincore(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
if mapped != uint64(la) {
return 0, nil, syserror.ENOMEM
}
- resident := bytes.Repeat([]byte{1}, int(mapped/usermem.PageSize))
+ resident := bytes.Repeat([]byte{1}, int(mapped/hostarch.PageSize))
_, err := t.CopyOutBytes(vec, resident)
return 0, nil, err
}
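
Note: Mmap and Mprotect above translate the PROT_ bitmask into a hostarch.AccessType. A standalone sketch of that translation, with a local AccessType struct and PROT_ constants standing in for the hostarch and linux packages; accessTypeFromProt is an illustrative name.

    package main

    import "fmt"

    // AccessType stands in for hostarch.AccessType in this standalone sketch.
    type AccessType struct {
        Read, Write, Execute bool
    }

    // Linux protection flag values (see linux.PROT_*).
    const (
        protRead  = 0x1
        protWrite = 0x2
        protExec  = 0x4
    )

    // accessTypeFromProt mirrors how Mmap and Mprotect build the access type
    // from the prot argument.
    func accessTypeFromProt(prot int32) AccessType {
        return AccessType{
            Read:    prot&protRead != 0,
            Write:   prot&protWrite != 0,
            Execute: prot&protExec != 0,
        }
    }

    func main() {
        fmt.Printf("%+v\n", accessTypeFromProt(protRead|protWrite)) // {Read:true Write:true Execute:false}
    }
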
diff --git a/pkg/sentry/syscalls/linux/sys_mount.go b/pkg/sentry/syscalls/linux/sys_mount.go
index bd0633564..864d2138c 100644
--- a/pkg/sentry/syscalls/linux/sys_mount.go
+++ b/pkg/sentry/syscalls/linux/sys_mount.go
@@ -20,7 +20,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Mount implements Linux syscall mount(2).
@@ -31,7 +32,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
flags := args[3].Uint64()
dataAddr := args[4].Pointer()
- fsType, err := t.CopyInString(typeAddr, usermem.PageSize)
+ fsType, err := t.CopyInString(typeAddr, hostarch.PageSize)
if err != nil {
return 0, nil, err
}
@@ -52,7 +53,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// character placement, and the address is passed to each file system.
// Most file systems always treat this data as a string, though, and so
// do all of the ones we implement.
- data, err = t.CopyInString(dataAddr, usermem.PageSize)
+ data, err = t.CopyInString(dataAddr, hostarch.PageSize)
if err != nil {
return 0, nil, err
}
diff --git a/pkg/sentry/syscalls/linux/sys_pipe.go b/pkg/sentry/syscalls/linux/sys_pipe.go
index f7135ea46..d95034347 100644
--- a/pkg/sentry/syscalls/linux/sys_pipe.go
+++ b/pkg/sentry/syscalls/linux/sys_pipe.go
@@ -16,19 +16,19 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/pipe"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// LINT.IfChange
// pipe2 implements the actual system call with flags.
-func pipe2(t *kernel.Task, addr usermem.Addr, flags uint) (uintptr, error) {
+func pipe2(t *kernel.Task, addr hostarch.Addr, flags uint) (uintptr, error) {
if flags&^(linux.O_NONBLOCK|linux.O_CLOEXEC) != 0 {
return 0, syserror.EINVAL
}
diff --git a/pkg/sentry/syscalls/linux/sys_poll.go b/pkg/sentry/syscalls/linux/sys_poll.go
index 254f4c9f9..da548a14a 100644
--- a/pkg/sentry/syscalls/linux/sys_poll.go
+++ b/pkg/sentry/syscalls/linux/sys_poll.go
@@ -18,13 +18,13 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -155,7 +155,7 @@ func pollBlock(t *kernel.Task, pfd []linux.PollFD, timeout time.Duration) (time.
}
// CopyInPollFDs copies an array of struct pollfd unless nfds exceeds the max.
-func CopyInPollFDs(t *kernel.Task, addr usermem.Addr, nfds uint) ([]linux.PollFD, error) {
+func CopyInPollFDs(t *kernel.Task, addr hostarch.Addr, nfds uint) ([]linux.PollFD, error) {
if uint64(nfds) > t.ThreadGroup().Limits().GetCapped(limits.NumberOfFiles, fileCap) {
return nil, syserror.EINVAL
}
@@ -170,7 +170,7 @@ func CopyInPollFDs(t *kernel.Task, addr usermem.Addr, nfds uint) ([]linux.PollFD
return pfd, nil
}
-func doPoll(t *kernel.Task, addr usermem.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) {
+func doPoll(t *kernel.Task, addr hostarch.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) {
pfd, err := CopyInPollFDs(t, addr, nfds)
if err != nil {
return timeout, 0, err
@@ -198,7 +198,7 @@ func doPoll(t *kernel.Task, addr usermem.Addr, nfds uint, timeout time.Duration)
}
// CopyInFDSet copies an fd set from select(2)/pselect(2).
-func CopyInFDSet(t *kernel.Task, addr usermem.Addr, nBytes, nBitsInLastPartialByte int) ([]byte, error) {
+func CopyInFDSet(t *kernel.Task, addr hostarch.Addr, nBytes, nBitsInLastPartialByte int) ([]byte, error) {
set := make([]byte, nBytes)
if addr != 0 {
@@ -215,7 +215,7 @@ func CopyInFDSet(t *kernel.Task, addr usermem.Addr, nBytes, nBitsInLastPartialBy
return set, nil
}
-func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs usermem.Addr, timeout time.Duration) (uintptr, error) {
+func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs hostarch.Addr, timeout time.Duration) (uintptr, error) {
if nfds < 0 || nfds > fileCap {
return 0, syserror.EINVAL
}
@@ -365,7 +365,7 @@ func timeoutRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration)
// copyOutTimespecRemaining copies the time remaining in timeout to timespecAddr.
//
// startNs must be from CLOCK_MONOTONIC.
-func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timespecAddr usermem.Addr) error {
+func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timespecAddr hostarch.Addr) error {
if timeout <= 0 {
return nil
}
@@ -377,7 +377,7 @@ func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.D
// copyOutTimevalRemaining copies the time remaining in timeout to timevalAddr.
//
// startNs must be from CLOCK_MONOTONIC.
-func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timevalAddr usermem.Addr) error {
+func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timevalAddr hostarch.Addr) error {
if timeout <= 0 {
return nil
}
@@ -391,7 +391,7 @@ func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Du
//
// +stateify savable
type pollRestartBlock struct {
- pfdAddr usermem.Addr
+ pfdAddr hostarch.Addr
nfds uint
timeout time.Duration
}
@@ -401,7 +401,7 @@ func (p *pollRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
return poll(t, p.pfdAddr, p.nfds, p.timeout)
}
-func poll(t *kernel.Task, pfdAddr usermem.Addr, nfds uint, timeout time.Duration) (uintptr, error) {
+func poll(t *kernel.Task, pfdAddr hostarch.Addr, nfds uint, timeout time.Duration) (uintptr, error) {
remainingTimeout, n, err := doPoll(t, pfdAddr, nfds, timeout)
// On an interrupt poll(2) is restarted with the remaining timeout.
if err == syserror.EINTR {
diff --git a/pkg/sentry/syscalls/linux/sys_random.go b/pkg/sentry/syscalls/linux/sys_random.go
index c0aa0fd60..ae545f80f 100644
--- a/pkg/sentry/syscalls/linux/sys_random.go
+++ b/pkg/sentry/syscalls/linux/sys_random.go
@@ -24,6 +24,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
const (
@@ -64,7 +66,7 @@ func GetRandom(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
if min > 256 {
min = 256
}
- n, err := t.MemoryManager().CopyOutFrom(t, usermem.AddrRangeSeqOf(ar), safemem.FromIOReader{&randReader{-1, min}}, usermem.IOOpts{
+ n, err := t.MemoryManager().CopyOutFrom(t, hostarch.AddrRangeSeqOf(ar), safemem.FromIOReader{&randReader{-1, min}}, usermem.IOOpts{
AddressSpaceActive: true,
})
if n >= int64(min) {
diff --git a/pkg/sentry/syscalls/linux/sys_rlimit.go b/pkg/sentry/syscalls/linux/sys_rlimit.go
index 88cd234d1..e64246d57 100644
--- a/pkg/sentry/syscalls/linux/sys_rlimit.go
+++ b/pkg/sentry/syscalls/linux/sys_rlimit.go
@@ -16,12 +16,12 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// rlimit describes an implementation of 'struct rlimit', which may vary from
@@ -67,12 +67,12 @@ func (r *rlimit64) fromLimit(lim limits.Limit) {
}
}
-func (r *rlimit64) copyIn(t *kernel.Task, addr usermem.Addr) error {
+func (r *rlimit64) copyIn(t *kernel.Task, addr hostarch.Addr) error {
_, err := r.CopyIn(t, addr)
return err
}
-func (r *rlimit64) copyOut(t *kernel.Task, addr usermem.Addr) error {
+func (r *rlimit64) copyOut(t *kernel.Task, addr hostarch.Addr) error {
_, err := r.CopyOut(t, addr)
return err
}
diff --git a/pkg/sentry/syscalls/linux/sys_seccomp.go b/pkg/sentry/syscalls/linux/sys_seccomp.go
index 4fdb4463c..e16d6ff3f 100644
--- a/pkg/sentry/syscalls/linux/sys_seccomp.go
+++ b/pkg/sentry/syscalls/linux/sys_seccomp.go
@@ -17,10 +17,10 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bpf"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// userSockFprog is equivalent to Linux's struct sock_fprog on amd64.
@@ -33,14 +33,14 @@ type userSockFprog struct {
_ [6]byte // padding for alignment
// Filter is a user pointer to the struct sock_filter array that makes up
- // the filter program. Filter is a uint64 rather than a usermem.Addr
- // because usermem.Addr is actually uintptr, which is not a fixed-size
+ // the filter program. Filter is a uint64 rather than a hostarch.Addr
+ // because hostarch.Addr is actually uintptr, which is not a fixed-size
// type.
Filter uint64
}
// seccomp applies a seccomp policy to the current task.
-func seccomp(t *kernel.Task, mode, flags uint64, addr usermem.Addr) error {
+func seccomp(t *kernel.Task, mode, flags uint64, addr hostarch.Addr) error {
// We only support SECCOMP_SET_MODE_FILTER at the moment.
if mode != linux.SECCOMP_SET_MODE_FILTER {
// Unsupported mode.
@@ -60,7 +60,7 @@ func seccomp(t *kernel.Task, mode, flags uint64, addr usermem.Addr) error {
return err
}
filter := make([]linux.BPFInstruction, int(fprog.Len))
- if _, err := linux.CopyBPFInstructionSliceIn(t, usermem.Addr(fprog.Filter), filter); err != nil {
+ if _, err := linux.CopyBPFInstructionSliceIn(t, hostarch.Addr(fprog.Filter), filter); err != nil {
return err
}
compiledFilter, err := bpf.Compile(filter)
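
Note: the userSockFprog comment above explains that Filter is declared as uint64 rather than hostarch.Addr because Addr is a uintptr and therefore not a fixed-size field for marshalling; the value only becomes an address again at the point of use. A standalone sketch of that round trip; the Addr alias and the struct here are illustrative stand-ins for the gVisor types.

    package main

    import "fmt"

    // Addr stands in for hostarch.Addr in this standalone sketch; like the real
    // type it is defined as uintptr, so its width is platform-dependent and it
    // cannot be marshalled as a fixed-size field.
    type Addr uintptr

    // userSockFprog mirrors the struct above: a length plus a 64-bit Filter
    // pointer that stays a uint64 until the BPF instructions are copied in.
    type userSockFprog struct {
        Len    uint16
        Filter uint64
    }

    func main() {
        fprog := userSockFprog{Len: 2, Filter: 0x7f0000002000}
        // At the point of use the fixed-width field becomes an address again,
        // as in CopyBPFInstructionSliceIn(t, hostarch.Addr(fprog.Filter), filter).
        filterAddr := Addr(fprog.Filter)
        fmt.Printf("copy %d instructions from %#x\n", fprog.Len, filterAddr)
    }
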
diff --git a/pkg/sentry/syscalls/linux/sys_sem.go b/pkg/sentry/syscalls/linux/sys_sem.go
index f0570d927..c84260080 100644
--- a/pkg/sentry/syscalls/linux/sys_sem.go
+++ b/pkg/sentry/syscalls/linux/sys_sem.go
@@ -19,13 +19,13 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
const opsMax = 500 // SEMOPM
@@ -310,7 +310,7 @@ func setVal(t *kernel.Task, id int32, num int32, val int16) error {
return set.SetVal(t, num, val, creds, int32(pid))
}
-func setValAll(t *kernel.Task, id int32, array usermem.Addr) error {
+func setValAll(t *kernel.Task, id int32, array hostarch.Addr) error {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByID(id)
if set == nil {
@@ -335,7 +335,7 @@ func getVal(t *kernel.Task, id int32, num int32) (int16, error) {
return set.GetVal(num, creds)
}
-func getValAll(t *kernel.Task, id int32, array usermem.Addr) error {
+func getValAll(t *kernel.Task, id int32, array hostarch.Addr) error {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByID(id)
if set == nil {
diff --git a/pkg/sentry/syscalls/linux/sys_signal.go b/pkg/sentry/syscalls/linux/sys_signal.go
index d639c9bf7..53b12dc41 100644
--- a/pkg/sentry/syscalls/linux/sys_signal.go
+++ b/pkg/sentry/syscalls/linux/sys_signal.go
@@ -19,12 +19,12 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/signalfd"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// "For a process to have permission to send a signal it must
@@ -516,7 +516,7 @@ func RestartSyscall(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne
}
// sharedSignalfd is shared between the two calls.
-func sharedSignalfd(t *kernel.Task, fd int32, sigset usermem.Addr, sigsetsize uint, flags int32) (uintptr, *kernel.SyscallControl, error) {
+func sharedSignalfd(t *kernel.Task, fd int32, sigset hostarch.Addr, sigsetsize uint, flags int32) (uintptr, *kernel.SyscallControl, error) {
// Copy in the signal mask.
mask, err := CopyInSigSet(t, sigset, sigsetsize)
if err != nil {
diff --git a/pkg/sentry/syscalls/linux/sys_socket.go b/pkg/sentry/syscalls/linux/sys_socket.go
index c6adfe06b..9bdf6d3d8 100644
--- a/pkg/sentry/syscalls/linux/sys_socket.go
+++ b/pkg/sentry/syscalls/linux/sys_socket.go
@@ -18,6 +18,7 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -117,7 +118,7 @@ type multipleMessageHeader64 struct {
// CaptureAddress allocates memory for and copies a socket address structure
// from the untrusted address space range.
-func CaptureAddress(t *kernel.Task, addr usermem.Addr, addrlen uint32) ([]byte, error) {
+func CaptureAddress(t *kernel.Task, addr hostarch.Addr, addrlen uint32) ([]byte, error) {
if addrlen > maxAddrLen {
return nil, syserror.EINVAL
}
@@ -133,7 +134,7 @@ func CaptureAddress(t *kernel.Task, addr usermem.Addr, addrlen uint32) ([]byte,
// writeAddress writes a sockaddr structure and its length to an output buffer
// in the untrusted address space range. If the address is bigger than the
// buffer, it is truncated.
-func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr usermem.Addr, addrLenPtr usermem.Addr) error {
+func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr hostarch.Addr, addrLenPtr hostarch.Addr) error {
// Get the buffer length.
var bufLen uint32
if _, err := primitive.CopyUint32In(t, addrLenPtr, &bufLen); err != nil {
@@ -276,7 +277,7 @@ func Connect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// accept is the implementation of the accept syscall. It is called by accept
// and accept4 syscall handlers.
-func accept(t *kernel.Task, fd int32, addr usermem.Addr, addrLen usermem.Addr, flags int) (uintptr, error) {
+func accept(t *kernel.Task, fd int32, addr hostarch.Addr, addrLen hostarch.Addr, flags int) (uintptr, error) {
// Check that no unsupported flags are passed in.
if flags & ^(linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
return 0, syserror.EINVAL
@@ -472,7 +473,7 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// getSockOpt tries to handle common socket options, or dispatches to a specific
// socket implementation.
-func getSockOpt(t *kernel.Task, s socket.Socket, level, name int, optValAddr usermem.Addr, len int) (marshal.Marshallable, *syserr.Error) {
+func getSockOpt(t *kernel.Task, s socket.Socket, level, name int, optValAddr hostarch.Addr, len int) (marshal.Marshallable, *syserr.Error) {
if level == linux.SOL_SOCKET {
switch name {
case linux.SO_TYPE, linux.SO_DOMAIN, linux.SO_PROTOCOL:
@@ -735,7 +736,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return uintptr(count), nil, nil
}
-func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) {
+func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr hostarch.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) {
// Capture the message header and io vectors.
var msg MessageHeader64
if _, err := msg.CopyIn(t, msgPtr); err != nil {
@@ -745,7 +746,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i
if msg.IovLen > linux.UIO_MAXIOV {
return 0, syserror.EMSGSIZE
}
- dst, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
+ dst, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
AddressSpaceActive: true,
})
if err != nil {
@@ -796,7 +797,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i
// Copy the address to the caller.
if msg.NameLen != 0 {
- if err := writeAddress(t, sender, senderLen, usermem.Addr(msg.Name), usermem.Addr(msgPtr+nameLenOffset)); err != nil {
+ if err := writeAddress(t, sender, senderLen, hostarch.Addr(msg.Name), hostarch.Addr(msgPtr+nameLenOffset)); err != nil {
return 0, err
}
}
@@ -806,7 +807,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i
return 0, err
}
if len(controlData) > 0 {
- if _, err := t.CopyOutBytes(usermem.Addr(msg.Control), controlData); err != nil {
+ if _, err := t.CopyOutBytes(hostarch.Addr(msg.Control), controlData); err != nil {
return 0, err
}
}
@@ -821,7 +822,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i
// recvFrom is the implementation of the recvfrom syscall. It is called by
// recvfrom and recv syscall handlers.
-func recvFrom(t *kernel.Task, fd int32, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLenPtr usermem.Addr) (uintptr, error) {
+func recvFrom(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLenPtr hostarch.Addr) (uintptr, error) {
if int(bufLen) < 0 {
return 0, syserror.EINVAL
}
@@ -997,7 +998,7 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return uintptr(count), nil, nil
}
-func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr usermem.Addr, flags int32) (uintptr, error) {
+func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr hostarch.Addr, flags int32) (uintptr, error) {
// Capture the message header.
var msg MessageHeader64
if _, err := msg.CopyIn(t, msgPtr); err != nil {
@@ -1011,7 +1012,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme
return 0, syserror.ENOBUFS
}
controlData = make([]byte, msg.ControlLen)
- if _, err := t.CopyInBytes(usermem.Addr(msg.Control), controlData); err != nil {
+ if _, err := t.CopyInBytes(hostarch.Addr(msg.Control), controlData); err != nil {
return 0, err
}
}
@@ -1020,7 +1021,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme
var to []byte
if msg.NameLen != 0 {
var err error
- to, err = CaptureAddress(t, usermem.Addr(msg.Name), msg.NameLen)
+ to, err = CaptureAddress(t, hostarch.Addr(msg.Name), msg.NameLen)
if err != nil {
return 0, err
}
@@ -1030,7 +1031,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme
if msg.IovLen > linux.UIO_MAXIOV {
return 0, syserror.EMSGSIZE
}
- src, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
+ src, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
AddressSpaceActive: true,
})
if err != nil {
@@ -1064,7 +1065,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme
// sendTo is the implementation of the sendto syscall. It is called by sendto
// and send syscall handlers.
-func sendTo(t *kernel.Task, fd int32, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLen uint32) (uintptr, error) {
+func sendTo(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLen uint32) (uintptr, error) {
bl := int(bufLen)
if bl < 0 {
return 0, syserror.EINVAL
diff --git a/pkg/sentry/syscalls/linux/sys_stat.go b/pkg/sentry/syscalls/linux/sys_stat.go
index cda29a8b5..2338ba44b 100644
--- a/pkg/sentry/syscalls/linux/sys_stat.go
+++ b/pkg/sentry/syscalls/linux/sys_stat.go
@@ -16,11 +16,11 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// LINT.IfChange
@@ -106,7 +106,7 @@ func Fstat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
}
// stat implements stat from the given *fs.Dirent.
-func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr usermem.Addr) error {
+func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr hostarch.Addr) error {
if dirPath && !fs.IsDir(d.Inode.StableAttr) {
return syserror.ENOTDIR
}
@@ -120,7 +120,7 @@ func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr usermem.Addr) err
}
// fstat implements fstat for the given *fs.File.
-func fstat(t *kernel.Task, f *fs.File, statAddr usermem.Addr) error {
+func fstat(t *kernel.Task, f *fs.File, statAddr hostarch.Addr) error {
uattr, err := f.UnstableAttr(t)
if err != nil {
return err
@@ -180,7 +180,7 @@ func Statx(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
})
}
-func statx(t *kernel.Task, sattr fs.StableAttr, uattr fs.UnstableAttr, statxAddr usermem.Addr) error {
+func statx(t *kernel.Task, sattr fs.StableAttr, uattr fs.UnstableAttr, statxAddr hostarch.Addr) error {
// "[T]he kernel may return fields that weren't requested and may fail to
// return fields that were requested, depending on what the backing
// filesystem supports.
@@ -257,7 +257,7 @@ func Fstatfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// statfsImpl implements the linux syscall statfs and fstatfs based on a Dirent,
// copying the statfs structure out to addr on success, otherwise an error is
// returned.
-func statfsImpl(t *kernel.Task, d *fs.Dirent, addr usermem.Addr) error {
+func statfsImpl(t *kernel.Task, d *fs.Dirent, addr hostarch.Addr) error {
info, err := d.Inode.StatFS(t)
if err != nil {
return err
diff --git a/pkg/sentry/syscalls/linux/sys_thread.go b/pkg/sentry/syscalls/linux/sys_thread.go
index b5f920949..3185ea527 100644
--- a/pkg/sentry/syscalls/linux/sys_thread.go
+++ b/pkg/sentry/syscalls/linux/sys_thread.go
@@ -19,6 +19,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -46,7 +47,7 @@ var (
ExecMaxTotalSize = 2 * 1024 * 1024
// ExecMaxElemSize is the maximum length of a single argv or envv entry.
- ExecMaxElemSize = 32 * usermem.PageSize
+ ExecMaxElemSize = 32 * hostarch.PageSize
)
// Getppid implements linux syscall getppid(2).
@@ -88,7 +89,7 @@ func Execveat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return execveat(t, dirFD, pathnameAddr, argvAddr, envvAddr, flags)
}
-func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr usermem.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) {
+func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr hostarch.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) {
pathname, err := t.CopyInString(pathnameAddr, linux.PATH_MAX)
if err != nil {
return 0, nil, err
@@ -199,7 +200,7 @@ func ExitGroup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
}
// clone is used by Clone, Fork, and VFork.
-func clone(t *kernel.Task, flags int, stack usermem.Addr, parentTID usermem.Addr, childTID usermem.Addr, tls usermem.Addr) (uintptr, *kernel.SyscallControl, error) {
+func clone(t *kernel.Task, flags int, stack hostarch.Addr, parentTID hostarch.Addr, childTID hostarch.Addr, tls hostarch.Addr) (uintptr, *kernel.SyscallControl, error) {
opts := kernel.CloneOptions{
SharingOptions: kernel.SharingOptions{
NewAddressSpace: flags&linux.CLONE_VM == 0,
@@ -274,7 +275,7 @@ func parseCommonWaitOptions(wopts *kernel.WaitOptions, options int) error {
}
// wait4 waits for the given child process to exit.
-func wait4(t *kernel.Task, pid int, statusAddr usermem.Addr, options int, rusageAddr usermem.Addr) (uintptr, error) {
+func wait4(t *kernel.Task, pid int, statusAddr hostarch.Addr, options int, rusageAddr hostarch.Addr) (uintptr, error) {
if options&^(linux.WNOHANG|linux.WUNTRACED|linux.WCONTINUED|linux.WNOTHREAD|linux.WALL|linux.WCLONE) != 0 {
return 0, syserror.EINVAL
}
diff --git a/pkg/sentry/syscalls/linux/sys_time.go b/pkg/sentry/syscalls/linux/sys_time.go
index c5054d2f1..83b777bbd 100644
--- a/pkg/sentry/syscalls/linux/sys_time.go
+++ b/pkg/sentry/syscalls/linux/sys_time.go
@@ -19,12 +19,12 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// The most significant 29 bits hold either a pid or a file descriptor.
@@ -165,7 +165,7 @@ func Time(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
addr := args[0].Pointer()
r := t.Kernel().RealtimeClock().Now().TimeT()
- if addr == usermem.Addr(0) {
+ if addr == hostarch.Addr(0) {
return uintptr(r), nil, nil
}
@@ -182,7 +182,7 @@ func Time(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
type clockNanosleepRestartBlock struct {
c ktime.Clock
duration time.Duration
- rem usermem.Addr
+ rem hostarch.Addr
}
// Restart implements kernel.SyscallRestartBlock.Restart.
@@ -221,7 +221,7 @@ func clockNanosleepUntil(t *kernel.Task, c ktime.Clock, ts linux.Timespec) error
//
// If blocking is interrupted, the syscall is restarted with the remaining
// duration timeout.
-func clockNanosleepFor(t *kernel.Task, c ktime.Clock, dur time.Duration, rem usermem.Addr) error {
+func clockNanosleepFor(t *kernel.Task, c ktime.Clock, dur time.Duration, rem hostarch.Addr) error {
timer, start, tchan := ktime.After(c, dur)
err := t.BlockWithTimer(nil, tchan)
@@ -324,14 +324,14 @@ func Gettimeofday(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
tv := args[0].Pointer()
tz := args[1].Pointer()
- if tv != usermem.Addr(0) {
+ if tv != hostarch.Addr(0) {
nowTv := t.Kernel().RealtimeClock().Now().Timeval()
if err := copyTimevalOut(t, tv, &nowTv); err != nil {
return 0, nil, err
}
}
- if tz != usermem.Addr(0) {
+ if tz != hostarch.Addr(0) {
// Ask the time package for the timezone.
_, offset := time.Now().Zone()
// This int32 array mimics linux's struct timezone.
diff --git a/pkg/sentry/syscalls/linux/sys_xattr.go b/pkg/sentry/syscalls/linux/sys_xattr.go
index 97474fd3c..28ad6a60e 100644
--- a/pkg/sentry/syscalls/linux/sys_xattr.go
+++ b/pkg/sentry/syscalls/linux/sys_xattr.go
@@ -18,11 +18,11 @@ import (
"strings"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// LINT.IfChange
@@ -87,7 +87,7 @@ func getXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSymlink
}
// getXattr implements getxattr(2) from the given *fs.Dirent.
-func getXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr usermem.Addr, size uint64) (int, error) {
+func getXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr hostarch.Addr, size uint64) (int, error) {
name, err := copyInXattrName(t, nameAddr)
if err != nil {
return 0, err
@@ -180,7 +180,7 @@ func setXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSymlink
}
// setXattr implements setxattr(2) from the given *fs.Dirent.
-func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr usermem.Addr, size uint64, flags uint32) error {
+func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr hostarch.Addr, size uint64, flags uint32) error {
if flags&^(linux.XATTR_CREATE|linux.XATTR_REPLACE) != 0 {
return syserror.EINVAL
}
@@ -214,7 +214,7 @@ func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr usermem.Addr, si
return nil
}
-func copyInXattrName(t *kernel.Task, nameAddr usermem.Addr) (string, error) {
+func copyInXattrName(t *kernel.Task, nameAddr hostarch.Addr) (string, error) {
name, err := t.CopyInString(nameAddr, linux.XATTR_NAME_MAX+1)
if err != nil {
if err == syserror.ENAMETOOLONG {
@@ -306,7 +306,7 @@ func listXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSymlin
return uintptr(n), nil, nil
}
-func listXattr(t *kernel.Task, d *fs.Dirent, addr usermem.Addr, size uint64) (int, error) {
+func listXattr(t *kernel.Task, d *fs.Dirent, addr hostarch.Addr, size uint64) (int, error) {
if !xattrFileTypeOk(d.Inode) {
return 0, nil
}
@@ -408,7 +408,7 @@ func removeXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSyml
}
// removeXattr implements removexattr(2) from the given *fs.Dirent.
-func removeXattr(t *kernel.Task, d *fs.Dirent, nameAddr usermem.Addr) error {
+func removeXattr(t *kernel.Task, d *fs.Dirent, nameAddr hostarch.Addr) error {
name, err := copyInXattrName(t, nameAddr)
if err != nil {
return err
diff --git a/pkg/sentry/syscalls/linux/timespec.go b/pkg/sentry/syscalls/linux/timespec.go
index ddc3ee26e..3edc922eb 100644
--- a/pkg/sentry/syscalls/linux/timespec.go
+++ b/pkg/sentry/syscalls/linux/timespec.go
@@ -18,13 +18,13 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// copyTimespecIn copies a Timespec from the untrusted app range to the kernel.
-func copyTimespecIn(t *kernel.Task, addr usermem.Addr) (linux.Timespec, error) {
+func copyTimespecIn(t *kernel.Task, addr hostarch.Addr) (linux.Timespec, error) {
switch t.Arch().Width() {
case 8:
ts := linux.Timespec{}
@@ -33,8 +33,8 @@ func copyTimespecIn(t *kernel.Task, addr usermem.Addr) (linux.Timespec, error) {
if err != nil {
return ts, err
}
- ts.Sec = int64(usermem.ByteOrder.Uint64(in[0:]))
- ts.Nsec = int64(usermem.ByteOrder.Uint64(in[8:]))
+ ts.Sec = int64(hostarch.ByteOrder.Uint64(in[0:]))
+ ts.Nsec = int64(hostarch.ByteOrder.Uint64(in[8:]))
return ts, nil
default:
return linux.Timespec{}, syserror.ENOSYS
@@ -42,12 +42,12 @@ func copyTimespecIn(t *kernel.Task, addr usermem.Addr) (linux.Timespec, error) {
}
// copyTimespecOut copies a Timespec to the untrusted app range.
-func copyTimespecOut(t *kernel.Task, addr usermem.Addr, ts *linux.Timespec) error {
+func copyTimespecOut(t *kernel.Task, addr hostarch.Addr, ts *linux.Timespec) error {
switch t.Arch().Width() {
case 8:
out := t.CopyScratchBuffer(16)
- usermem.ByteOrder.PutUint64(out[0:], uint64(ts.Sec))
- usermem.ByteOrder.PutUint64(out[8:], uint64(ts.Nsec))
+ hostarch.ByteOrder.PutUint64(out[0:], uint64(ts.Sec))
+ hostarch.ByteOrder.PutUint64(out[8:], uint64(ts.Nsec))
_, err := t.CopyOutBytes(addr, out)
return err
default:
@@ -56,7 +56,7 @@ func copyTimespecOut(t *kernel.Task, addr usermem.Addr, ts *linux.Timespec) erro
}
// copyTimevalIn copies a Timeval from the untrusted app range to the kernel.
-func copyTimevalIn(t *kernel.Task, addr usermem.Addr) (linux.Timeval, error) {
+func copyTimevalIn(t *kernel.Task, addr hostarch.Addr) (linux.Timeval, error) {
switch t.Arch().Width() {
case 8:
tv := linux.Timeval{}
@@ -65,8 +65,8 @@ func copyTimevalIn(t *kernel.Task, addr usermem.Addr) (linux.Timeval, error) {
if err != nil {
return tv, err
}
- tv.Sec = int64(usermem.ByteOrder.Uint64(in[0:]))
- tv.Usec = int64(usermem.ByteOrder.Uint64(in[8:]))
+ tv.Sec = int64(hostarch.ByteOrder.Uint64(in[0:]))
+ tv.Usec = int64(hostarch.ByteOrder.Uint64(in[8:]))
return tv, nil
default:
return linux.Timeval{}, syserror.ENOSYS
@@ -74,12 +74,12 @@ func copyTimevalIn(t *kernel.Task, addr usermem.Addr) (linux.Timeval, error) {
}
// copyTimevalOut copies a Timeval to the untrusted app range.
-func copyTimevalOut(t *kernel.Task, addr usermem.Addr, tv *linux.Timeval) error {
+func copyTimevalOut(t *kernel.Task, addr hostarch.Addr, tv *linux.Timeval) error {
switch t.Arch().Width() {
case 8:
out := t.CopyScratchBuffer(16)
- usermem.ByteOrder.PutUint64(out[0:], uint64(tv.Sec))
- usermem.ByteOrder.PutUint64(out[8:], uint64(tv.Usec))
+ hostarch.ByteOrder.PutUint64(out[0:], uint64(tv.Sec))
+ hostarch.ByteOrder.PutUint64(out[8:], uint64(tv.Usec))
_, err := t.CopyOutBytes(addr, out)
return err
default:
@@ -94,7 +94,7 @@ func copyTimevalOut(t *kernel.Task, addr usermem.Addr, tv *linux.Timeval) error
// returned value is the maximum that Duration will allow.
//
// If timespecAddr is NULL, the returned value is negative.
-func copyTimespecInToDuration(t *kernel.Task, timespecAddr usermem.Addr) (time.Duration, error) {
+func copyTimespecInToDuration(t *kernel.Task, timespecAddr hostarch.Addr) (time.Duration, error) {
// Use a negative Duration to indicate "no timeout".
timeout := time.Duration(-1)
if timespecAddr != 0 {
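(Editorial illustration, not part of the diff: the copyTimespecIn/copyTimespecOut hunks above write a Timespec as two 64-bit words into a 16-byte scratch buffer. The sketch below mirrors that layout with the standard library; the package and type names are illustrative, and it assumes the ByteOrder used here is little-endian, as on the amd64/arm64 targets this code supports.)

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // timespec64 mirrors the 16-byte layout used by copyTimespecOut above:
    // eight bytes of seconds followed by eight bytes of nanoseconds.
    type timespec64 struct {
    	Sec  int64
    	Nsec int64
    }

    // marshalTimespec encodes ts the same way the diff's code does, assuming
    // a little-endian byte order.
    func marshalTimespec(ts timespec64) []byte {
    	out := make([]byte, 16)
    	binary.LittleEndian.PutUint64(out[0:], uint64(ts.Sec))
    	binary.LittleEndian.PutUint64(out[8:], uint64(ts.Nsec))
    	return out
    }

    // unmarshalTimespec is the inverse, mirroring copyTimespecIn.
    func unmarshalTimespec(in []byte) timespec64 {
    	return timespec64{
    		Sec:  int64(binary.LittleEndian.Uint64(in[0:])),
    		Nsec: int64(binary.LittleEndian.Uint64(in[8:])),
    	}
    }

    func main() {
    	buf := marshalTimespec(timespec64{Sec: 1, Nsec: 500})
    	fmt.Println(unmarshalTimespec(buf)) // {1 500}
    }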
diff --git a/pkg/sentry/syscalls/linux/vfs2/aio.go b/pkg/sentry/syscalls/linux/vfs2/aio.go
index de6789a65..fd1863ef3 100644
--- a/pkg/sentry/syscalls/linux/vfs2/aio.go
+++ b/pkg/sentry/syscalls/linux/vfs2/aio.go
@@ -26,6 +26,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// IoSubmit implements linux syscall io_submit(2).
@@ -40,7 +42,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
for i := int32(0); i < nrEvents; i++ {
// Copy in the callback address.
- var cbAddr usermem.Addr
+ var cbAddr hostarch.Addr
switch t.Arch().Width() {
case 8:
var cbAddrP primitive.Uint64
@@ -52,7 +54,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Nothing done.
return 0, nil, err
}
- cbAddr = usermem.Addr(cbAddrP)
+ cbAddr = hostarch.Addr(cbAddrP)
default:
return 0, nil, syserror.ENOSYS
}
@@ -79,14 +81,14 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
}
// Advance to the next one.
- addr += usermem.Addr(t.Arch().Width())
+ addr += hostarch.Addr(t.Arch().Width())
}
return uintptr(nrEvents), nil, nil
}
// submitCallback processes a single callback.
-func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr usermem.Addr) error {
+func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr hostarch.Addr) error {
if cb.Reserved2 != 0 {
return syserror.EINVAL
}
@@ -148,7 +150,7 @@ func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr user
return nil
}
-func getAIOCallback(t *kernel.Task, fd, eventFD *vfs.FileDescription, cbAddr usermem.Addr, cb *linux.IOCallback, ioseq usermem.IOSequence, aioCtx *mm.AIOContext) kernel.AIOCallback {
+func getAIOCallback(t *kernel.Task, fd, eventFD *vfs.FileDescription, cbAddr hostarch.Addr, cb *linux.IOCallback, ioseq usermem.IOSequence, aioCtx *mm.AIOContext) kernel.AIOCallback {
return func(ctx context.Context) {
// Release references after completing the callback.
defer fd.DecRef(ctx)
@@ -206,12 +208,12 @@ func memoryFor(t *kernel.Task, cb *linux.IOCallback) (usermem.IOSequence, error)
// I/O.
switch cb.OpCode {
case linux.IOCB_CMD_PREAD, linux.IOCB_CMD_PWRITE:
- return t.SingleIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{
+ return t.SingleIOSequence(hostarch.Addr(cb.Buf), bytes, usermem.IOOpts{
AddressSpaceActive: false,
})
case linux.IOCB_CMD_PREADV, linux.IOCB_CMD_PWRITEV:
- return t.IovecsIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{
+ return t.IovecsIOSequence(hostarch.Addr(cb.Buf), bytes, usermem.IOOpts{
AddressSpaceActive: false,
})
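(Editorial illustration, not part of the diff: the IoSubmit hunk above reads one callback pointer per iteration and advances the user address by the architecture's pointer width. A minimal standalone sketch of that walk for the 64-bit case, assuming little-endian encoding; the helper name is invented for illustration.)

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // readCallbackAddrs mirrors the IoSubmit loop above for the 64-bit case:
    // the iocb pointers sit back to back, and the cursor advances by the
    // pointer width (t.Arch().Width(), 8 here) after each entry.
    func readCallbackAddrs(buf []byte, nrEvents int) []uint64 {
    	const ptrWidth = 8
    	addrs := make([]uint64, 0, nrEvents)
    	for i := 0; i < nrEvents; i++ {
    		off := i * ptrWidth
    		addrs = append(addrs, binary.LittleEndian.Uint64(buf[off:off+ptrWidth]))
    	}
    	return addrs
    }

    func main() {
    	buf := make([]byte, 16)
    	binary.LittleEndian.PutUint64(buf[0:], 0x1000)
    	binary.LittleEndian.PutUint64(buf[8:], 0x2000)
    	fmt.Println(readCallbackAddrs(buf, 2)) // [4096 8192]
    }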
diff --git a/pkg/sentry/syscalls/linux/vfs2/execve.go b/pkg/sentry/syscalls/linux/vfs2/execve.go
index 7a409620d..3315398a4 100644
--- a/pkg/sentry/syscalls/linux/vfs2/execve.go
+++ b/pkg/sentry/syscalls/linux/vfs2/execve.go
@@ -24,7 +24,8 @@ import (
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Execve implements linux syscall execve(2).
@@ -45,7 +46,7 @@ func Execveat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return execveat(t, dirfd, pathnameAddr, argvAddr, envvAddr, flags)
}
-func execveat(t *kernel.Task, dirfd int32, pathnameAddr, argvAddr, envvAddr usermem.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) {
+func execveat(t *kernel.Task, dirfd int32, pathnameAddr, argvAddr, envvAddr hostarch.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) {
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 {
return 0, nil, syserror.EINVAL
}
diff --git a/pkg/sentry/syscalls/linux/vfs2/filesystem.go b/pkg/sentry/syscalls/linux/vfs2/filesystem.go
index 01e0f9010..36aa1d3ae 100644
--- a/pkg/sentry/syscalls/linux/vfs2/filesystem.go
+++ b/pkg/sentry/syscalls/linux/vfs2/filesystem.go
@@ -20,7 +20,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Link implements Linux syscall link(2).
@@ -40,7 +41,7 @@ func Linkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
return 0, nil, linkat(t, olddirfd, oldpathAddr, newdirfd, newpathAddr, flags)
}
-func linkat(t *kernel.Task, olddirfd int32, oldpathAddr usermem.Addr, newdirfd int32, newpathAddr usermem.Addr, flags int32) error {
+func linkat(t *kernel.Task, olddirfd int32, oldpathAddr hostarch.Addr, newdirfd int32, newpathAddr hostarch.Addr, flags int32) error {
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_FOLLOW) != 0 {
return syserror.EINVAL
}
@@ -86,7 +87,7 @@ func Mkdirat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
return 0, nil, mkdirat(t, dirfd, addr, mode)
}
-func mkdirat(t *kernel.Task, dirfd int32, addr usermem.Addr, mode uint) error {
+func mkdirat(t *kernel.Task, dirfd int32, addr hostarch.Addr, mode uint) error {
path, err := copyInPath(t, addr)
if err != nil {
return err
@@ -118,7 +119,7 @@ func Mknodat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
return 0, nil, mknodat(t, dirfd, addr, linux.FileMode(mode), dev)
}
-func mknodat(t *kernel.Task, dirfd int32, addr usermem.Addr, mode linux.FileMode, dev uint32) error {
+func mknodat(t *kernel.Task, dirfd int32, addr hostarch.Addr, mode linux.FileMode, dev uint32) error {
path, err := copyInPath(t, addr)
if err != nil {
return err
@@ -165,7 +166,7 @@ func Creat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return openat(t, linux.AT_FDCWD, addr, linux.O_WRONLY|linux.O_CREAT|linux.O_TRUNC, mode)
}
-func openat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr, flags uint32, mode uint) (uintptr, *kernel.SyscallControl, error) {
+func openat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, flags uint32, mode uint) (uintptr, *kernel.SyscallControl, error) {
path, err := copyInPath(t, pathAddr)
if err != nil {
return 0, nil, err
@@ -217,7 +218,7 @@ func Renameat2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return 0, nil, renameat(t, olddirfd, oldpathAddr, newdirfd, newpathAddr, flags)
}
-func renameat(t *kernel.Task, olddirfd int32, oldpathAddr usermem.Addr, newdirfd int32, newpathAddr usermem.Addr, flags uint32) error {
+func renameat(t *kernel.Task, olddirfd int32, oldpathAddr hostarch.Addr, newdirfd int32, newpathAddr hostarch.Addr, flags uint32) error {
oldpath, err := copyInPath(t, oldpathAddr)
if err != nil {
return err
@@ -250,7 +251,7 @@ func Rmdir(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, rmdirat(t, linux.AT_FDCWD, pathAddr)
}
-func rmdirat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr) error {
+func rmdirat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr) error {
path, err := copyInPath(t, pathAddr)
if err != nil {
return err
@@ -269,7 +270,7 @@ func Unlink(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
return 0, nil, unlinkat(t, linux.AT_FDCWD, pathAddr)
}
-func unlinkat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr) error {
+func unlinkat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr) error {
path, err := copyInPath(t, pathAddr)
if err != nil {
return err
@@ -313,7 +314,7 @@ func Symlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return 0, nil, symlinkat(t, targetAddr, newdirfd, linkpathAddr)
}
-func symlinkat(t *kernel.Task, targetAddr usermem.Addr, newdirfd int32, linkpathAddr usermem.Addr) error {
+func symlinkat(t *kernel.Task, targetAddr hostarch.Addr, newdirfd int32, linkpathAddr hostarch.Addr) error {
target, err := t.CopyInString(targetAddr, linux.PATH_MAX)
if err != nil {
return err
diff --git a/pkg/sentry/syscalls/linux/vfs2/getdents.go b/pkg/sentry/syscalls/linux/vfs2/getdents.go
index 5517595b5..b41a3056a 100644
--- a/pkg/sentry/syscalls/linux/vfs2/getdents.go
+++ b/pkg/sentry/syscalls/linux/vfs2/getdents.go
@@ -22,7 +22,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Getdents implements Linux syscall getdents(2).
@@ -58,7 +59,7 @@ func getdents(t *kernel.Task, args arch.SyscallArguments, isGetdents64 bool) (ui
type getdentsCallback struct {
t *kernel.Task
- addr usermem.Addr
+ addr hostarch.Addr
remaining int
isGetdents64 bool
}
@@ -69,7 +70,7 @@ var getdentsCallbackPool = sync.Pool{
},
}
-func getGetdentsCallback(t *kernel.Task, addr usermem.Addr, size int, isGetdents64 bool) *getdentsCallback {
+func getGetdentsCallback(t *kernel.Task, addr hostarch.Addr, size int, isGetdents64 bool) *getdentsCallback {
cb := getdentsCallbackPool.Get().(*getdentsCallback)
*cb = getdentsCallback{
t: t,
@@ -102,9 +103,9 @@ func (cb *getdentsCallback) Handle(dirent vfs.Dirent) error {
return syserror.EINVAL
}
buf = cb.t.CopyScratchBuffer(size)
- usermem.ByteOrder.PutUint64(buf[0:8], dirent.Ino)
- usermem.ByteOrder.PutUint64(buf[8:16], uint64(dirent.NextOff))
- usermem.ByteOrder.PutUint16(buf[16:18], uint16(size))
+ hostarch.ByteOrder.PutUint64(buf[0:8], dirent.Ino)
+ hostarch.ByteOrder.PutUint64(buf[8:16], uint64(dirent.NextOff))
+ hostarch.ByteOrder.PutUint16(buf[16:18], uint16(size))
buf[18] = dirent.Type
copy(buf[19:], dirent.Name)
// Zero out all remaining bytes in buf, including the NUL terminator
@@ -136,9 +137,9 @@ func (cb *getdentsCallback) Handle(dirent vfs.Dirent) error {
return syserror.EINVAL
}
buf = cb.t.CopyScratchBuffer(size)
- usermem.ByteOrder.PutUint64(buf[0:8], dirent.Ino)
- usermem.ByteOrder.PutUint64(buf[8:16], uint64(dirent.NextOff))
- usermem.ByteOrder.PutUint16(buf[16:18], uint16(size))
+ hostarch.ByteOrder.PutUint64(buf[0:8], dirent.Ino)
+ hostarch.ByteOrder.PutUint64(buf[8:16], uint64(dirent.NextOff))
+ hostarch.ByteOrder.PutUint16(buf[16:18], uint16(size))
copy(buf[18:], dirent.Name)
// Zero out all remaining bytes in buf, including the NUL terminator
// after dirent.Name and the zero padding byte between the name and
@@ -155,7 +156,7 @@ func (cb *getdentsCallback) Handle(dirent vfs.Dirent) error {
// cb.remaining.
return err
}
- cb.addr += usermem.Addr(n)
+ cb.addr += hostarch.Addr(n)
cb.remaining -= n
return nil
}
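(Editorial illustration, not part of the diff: the getdents64 branch of Handle above lays a linux_dirent64 record out as ino, next offset, record length, type byte, then the NUL-terminated name. The sketch below reproduces that layout with the standard library; it skips the alignment/padding the real code applies and uses illustrative names.)

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // encodeDirent64 mirrors the getdents64 record layout written above:
    // ino (8), next offset (8), record length (2), type (1), then the name
    // followed by a NUL terminator.
    func encodeDirent64(ino uint64, nextOff int64, typ byte, name string) []byte {
    	size := 8 + 8 + 2 + 1 + len(name) + 1 // +1 for the NUL terminator
    	buf := make([]byte, size)
    	binary.LittleEndian.PutUint64(buf[0:8], ino)
    	binary.LittleEndian.PutUint64(buf[8:16], uint64(nextOff))
    	binary.LittleEndian.PutUint16(buf[16:18], uint16(size))
    	buf[18] = typ
    	copy(buf[19:], name)
    	// buf[19+len(name)] is already zero: the NUL terminator.
    	return buf
    }

    func main() {
    	rec := encodeDirent64(42, 1, 4 /* DT_DIR */, "subdir")
    	fmt.Println(len(rec), rec[16:18]) // record length echoed at bytes 16..18
    }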
diff --git a/pkg/sentry/syscalls/linux/vfs2/mmap.go b/pkg/sentry/syscalls/linux/vfs2/mmap.go
index 9d9dbf775..c961545f6 100644
--- a/pkg/sentry/syscalls/linux/vfs2/mmap.go
+++ b/pkg/sentry/syscalls/linux/vfs2/mmap.go
@@ -21,7 +21,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Mmap implements Linux syscall mmap(2).
@@ -48,12 +49,12 @@ func Mmap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
Unmap: fixed,
Map32Bit: map32bit,
Private: private,
- Perms: usermem.AccessType{
+ Perms: hostarch.AccessType{
Read: linux.PROT_READ&prot != 0,
Write: linux.PROT_WRITE&prot != 0,
Execute: linux.PROT_EXEC&prot != 0,
},
- MaxPerms: usermem.AnyAccess,
+ MaxPerms: hostarch.AnyAccess,
GrowsDown: linux.MAP_GROWSDOWN&flags != 0,
Precommit: linux.MAP_POPULATE&flags != 0,
}
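(Editorial illustration, not part of the diff: the Mmap hunk above derives the mapping permissions from the PROT_* bits of the prot argument. A self-contained sketch of that mapping; the struct here stands in for hostarch.AccessType and the constants carry the standard <sys/mman.h> values.)

    package main

    import "fmt"

    // accessType is a stand-in for hostarch.AccessType from the hunk above;
    // the field names match what Mmap populates.
    type accessType struct {
    	Read, Write, Execute bool
    }

    // Linux prot bits (values from <sys/mman.h>).
    const (
    	protRead  = 0x1
    	protWrite = 0x2
    	protExec  = 0x4
    )

    // permsFromProt mirrors how Mmap derives Perms from the prot argument.
    func permsFromProt(prot int64) accessType {
    	return accessType{
    		Read:    prot&protRead != 0,
    		Write:   prot&protWrite != 0,
    		Execute: prot&protExec != 0,
    	}
    }

    func main() {
    	fmt.Printf("%+v\n", permsFromProt(protRead|protWrite)) // {Read:true Write:true Execute:false}
    }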
diff --git a/pkg/sentry/syscalls/linux/vfs2/mount.go b/pkg/sentry/syscalls/linux/vfs2/mount.go
index 769c9b92f..dd93430e2 100644
--- a/pkg/sentry/syscalls/linux/vfs2/mount.go
+++ b/pkg/sentry/syscalls/linux/vfs2/mount.go
@@ -20,7 +20,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Mount implements Linux syscall mount(2).
@@ -33,11 +34,11 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// For null-terminated strings related to mount(2), Linux copies in at most
// a page worth of data. See fs/namespace.c:copy_mount_string().
- fsType, err := t.CopyInString(typeAddr, usermem.PageSize)
+ fsType, err := t.CopyInString(typeAddr, hostarch.PageSize)
if err != nil {
return 0, nil, err
}
- source, err := t.CopyInString(sourceAddr, usermem.PageSize)
+ source, err := t.CopyInString(sourceAddr, hostarch.PageSize)
if err != nil {
return 0, nil, err
}
@@ -53,7 +54,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// character placement, and the address is passed to each file system.
// Most file systems always treat this data as a string, though, and so
// do all of the ones we implement.
- data, err = t.CopyInString(dataAddr, usermem.PageSize)
+ data, err = t.CopyInString(dataAddr, hostarch.PageSize)
if err != nil {
return 0, nil, err
}
diff --git a/pkg/sentry/syscalls/linux/vfs2/path.go b/pkg/sentry/syscalls/linux/vfs2/path.go
index 90a511d9a..2aaf1ed74 100644
--- a/pkg/sentry/syscalls/linux/vfs2/path.go
+++ b/pkg/sentry/syscalls/linux/vfs2/path.go
@@ -20,10 +20,11 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
-func copyInPath(t *kernel.Task, addr usermem.Addr) (fspath.Path, error) {
+func copyInPath(t *kernel.Task, addr hostarch.Addr) (fspath.Path, error) {
pathname, err := t.CopyInString(addr, linux.PATH_MAX)
if err != nil {
return fspath.Path{}, err
diff --git a/pkg/sentry/syscalls/linux/vfs2/pipe.go b/pkg/sentry/syscalls/linux/vfs2/pipe.go
index 6986e39fe..c6fc1954c 100644
--- a/pkg/sentry/syscalls/linux/vfs2/pipe.go
+++ b/pkg/sentry/syscalls/linux/vfs2/pipe.go
@@ -22,7 +22,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Pipe implements Linux syscall pipe(2).
@@ -38,7 +39,7 @@ func Pipe2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, pipe2(t, addr, flags)
}
-func pipe2(t *kernel.Task, addr usermem.Addr, flags int32) error {
+func pipe2(t *kernel.Task, addr hostarch.Addr, flags int32) error {
if flags&^(linux.O_NONBLOCK|linux.O_CLOEXEC) != 0 {
return syserror.EINVAL
}
diff --git a/pkg/sentry/syscalls/linux/vfs2/poll.go b/pkg/sentry/syscalls/linux/vfs2/poll.go
index c22e4ce54..a69c80edd 100644
--- a/pkg/sentry/syscalls/linux/vfs2/poll.go
+++ b/pkg/sentry/syscalls/linux/vfs2/poll.go
@@ -25,8 +25,9 @@ import (
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// fileCap is the maximum allowable files for poll & select. This has no
@@ -158,7 +159,7 @@ func pollBlock(t *kernel.Task, pfd []linux.PollFD, timeout time.Duration) (time.
}
// copyInPollFDs copies an array of struct pollfd unless nfds exceeds the max.
-func copyInPollFDs(t *kernel.Task, addr usermem.Addr, nfds uint) ([]linux.PollFD, error) {
+func copyInPollFDs(t *kernel.Task, addr hostarch.Addr, nfds uint) ([]linux.PollFD, error) {
if uint64(nfds) > t.ThreadGroup().Limits().GetCapped(limits.NumberOfFiles, fileCap) {
return nil, syserror.EINVAL
}
@@ -173,7 +174,7 @@ func copyInPollFDs(t *kernel.Task, addr usermem.Addr, nfds uint) ([]linux.PollFD
return pfd, nil
}
-func doPoll(t *kernel.Task, addr usermem.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) {
+func doPoll(t *kernel.Task, addr hostarch.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) {
pfd, err := copyInPollFDs(t, addr, nfds)
if err != nil {
return timeout, 0, err
@@ -201,7 +202,7 @@ func doPoll(t *kernel.Task, addr usermem.Addr, nfds uint, timeout time.Duration)
}
// CopyInFDSet copies an fd set from select(2)/pselect(2).
-func CopyInFDSet(t *kernel.Task, addr usermem.Addr, nBytes, nBitsInLastPartialByte int) ([]byte, error) {
+func CopyInFDSet(t *kernel.Task, addr hostarch.Addr, nBytes, nBitsInLastPartialByte int) ([]byte, error) {
set := make([]byte, nBytes)
if addr != 0 {
@@ -218,7 +219,7 @@ func CopyInFDSet(t *kernel.Task, addr usermem.Addr, nBytes, nBitsInLastPartialBy
return set, nil
}
-func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs usermem.Addr, timeout time.Duration) (uintptr, error) {
+func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs hostarch.Addr, timeout time.Duration) (uintptr, error) {
if nfds < 0 || nfds > fileCap {
return 0, syserror.EINVAL
}
@@ -368,7 +369,7 @@ func timeoutRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration)
// copyOutTimespecRemaining copies the time remaining in timeout to timespecAddr.
//
// startNs must be from CLOCK_MONOTONIC.
-func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timespecAddr usermem.Addr) error {
+func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timespecAddr hostarch.Addr) error {
if timeout <= 0 {
return nil
}
@@ -381,7 +382,7 @@ func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.D
// copyOutTimevalRemaining copies the time remaining in timeout to timevalAddr.
//
// startNs must be from CLOCK_MONOTONIC.
-func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timevalAddr usermem.Addr) error {
+func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timevalAddr hostarch.Addr) error {
if timeout <= 0 {
return nil
}
@@ -396,7 +397,7 @@ func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Du
//
// +stateify savable
type pollRestartBlock struct {
- pfdAddr usermem.Addr
+ pfdAddr hostarch.Addr
nfds uint
timeout time.Duration
}
@@ -406,7 +407,7 @@ func (p *pollRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
return poll(t, p.pfdAddr, p.nfds, p.timeout)
}
-func poll(t *kernel.Task, pfdAddr usermem.Addr, nfds uint, timeout time.Duration) (uintptr, error) {
+func poll(t *kernel.Task, pfdAddr hostarch.Addr, nfds uint, timeout time.Duration) (uintptr, error) {
remainingTimeout, n, err := doPoll(t, pfdAddr, nfds, timeout)
// On an interrupt poll(2) is restarted with the remaining timeout.
if err == syserror.EINTR {
@@ -530,7 +531,7 @@ func Pselect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
if _, err := maskStruct.CopyIn(t, maskWithSizeAddr); err != nil {
return 0, nil, err
}
- if err := setTempSignalSet(t, usermem.Addr(maskStruct.sigsetAddr), uint(maskStruct.sizeofSigset)); err != nil {
+ if err := setTempSignalSet(t, hostarch.Addr(maskStruct.sigsetAddr), uint(maskStruct.sizeofSigset)); err != nil {
return 0, nil, err
}
}
@@ -551,7 +552,7 @@ func Pselect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// returned value is the maximum that Duration will allow.
//
// If timespecAddr is NULL, the returned value is negative.
-func copyTimespecInToDuration(t *kernel.Task, timespecAddr usermem.Addr) (time.Duration, error) {
+func copyTimespecInToDuration(t *kernel.Task, timespecAddr hostarch.Addr) (time.Duration, error) {
// Use a negative Duration to indicate "no timeout".
timeout := time.Duration(-1)
if timespecAddr != 0 {
@@ -567,7 +568,7 @@ func copyTimespecInToDuration(t *kernel.Task, timespecAddr usermem.Addr) (time.D
return timeout, nil
}
-func setTempSignalSet(t *kernel.Task, maskAddr usermem.Addr, maskSize uint) error {
+func setTempSignalSet(t *kernel.Task, maskAddr hostarch.Addr, maskSize uint) error {
if maskAddr == 0 {
return nil
}
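(Editorial illustration, not part of the diff: CopyInFDSet above takes a byte count and the number of significant bits in the last partial byte. The sketch below shows the generic select(2)-style arithmetic behind those parameters; it is a simplified sketch of the usual fd_set handling, not a copy of doSelect.)

    package main

    import "fmt"

    // fdSetSize computes the sizes a caller like doSelect needs: nfds bits
    // span (nfds+7)/8 bytes, and the last byte may be only partially used.
    func fdSetSize(nfds int) (nBytes, nBitsInLastPartialByte int) {
    	nBytes = (nfds + 7) / 8
    	nBitsInLastPartialByte = nfds % 8
    	return
    }

    // maskPartialByte clears the bits above nfds in the final byte, so stray
    // caller bits cannot refer to file descriptors past nfds.
    func maskPartialByte(set []byte, nBitsInLastPartialByte int) {
    	if len(set) == 0 || nBitsInLastPartialByte == 0 {
    		return
    	}
    	mask := byte((1 << uint(nBitsInLastPartialByte)) - 1)
    	set[len(set)-1] &= mask
    }

    func main() {
    	nBytes, nBits := fdSetSize(10) // 10 fds -> 2 bytes, 2 bits used in the last
    	set := []byte{0xff, 0xff}
    	maskPartialByte(set, nBits)
    	fmt.Println(nBytes, nBits, set) // 2 2 [255 3]
    }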
diff --git a/pkg/sentry/syscalls/linux/vfs2/setstat.go b/pkg/sentry/syscalls/linux/vfs2/setstat.go
index 903169dc2..c6330c21a 100644
--- a/pkg/sentry/syscalls/linux/vfs2/setstat.go
+++ b/pkg/sentry/syscalls/linux/vfs2/setstat.go
@@ -23,7 +23,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
const chmodMask = 0777 | linux.S_ISUID | linux.S_ISGID | linux.S_ISVTX
@@ -43,7 +44,7 @@ func Fchmodat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return 0, nil, fchmodat(t, dirfd, pathAddr, mode)
}
-func fchmodat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr, mode uint) error {
+func fchmodat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, mode uint) error {
path, err := copyInPath(t, pathAddr)
if err != nil {
return err
@@ -102,7 +103,7 @@ func Fchownat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return 0, nil, fchownat(t, dirfd, pathAddr, owner, group, flags)
}
-func fchownat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr, owner, group, flags int32) error {
+func fchownat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, owner, group, flags int32) error {
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 {
return syserror.EINVAL
}
@@ -327,7 +328,7 @@ func Futimesat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return 0, nil, setstatat(t, dirfd, path, shouldAllowEmptyPath, followFinalSymlink, &opts)
}
-func populateSetStatOptionsForUtimes(t *kernel.Task, timesAddr usermem.Addr, opts *vfs.SetStatOptions) error {
+func populateSetStatOptionsForUtimes(t *kernel.Task, timesAddr hostarch.Addr, opts *vfs.SetStatOptions) error {
if timesAddr == 0 {
opts.Stat.Mask = linux.STATX_ATIME | linux.STATX_MTIME
opts.Stat.Atime.Nsec = linux.UTIME_NOW
@@ -391,7 +392,7 @@ func Utimensat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return 0, nil, setstatat(t, dirfd, path, shouldAllowEmptyPath, shouldFollowFinalSymlink(flags&linux.AT_SYMLINK_NOFOLLOW == 0), &opts)
}
-func populateSetStatOptionsForUtimens(t *kernel.Task, timesAddr usermem.Addr, opts *vfs.SetStatOptions) error {
+func populateSetStatOptionsForUtimens(t *kernel.Task, timesAddr hostarch.Addr, opts *vfs.SetStatOptions) error {
if timesAddr == 0 {
opts.Stat.Mask = linux.STATX_ATIME | linux.STATX_MTIME
opts.Stat.Atime.Nsec = linux.UTIME_NOW
diff --git a/pkg/sentry/syscalls/linux/vfs2/signal.go b/pkg/sentry/syscalls/linux/vfs2/signal.go
index b89f34cdb..6163da103 100644
--- a/pkg/sentry/syscalls/linux/vfs2/signal.go
+++ b/pkg/sentry/syscalls/linux/vfs2/signal.go
@@ -21,11 +21,12 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// sharedSignalfd is shared between the two calls.
-func sharedSignalfd(t *kernel.Task, fd int32, sigset usermem.Addr, sigsetsize uint, flags int32) (uintptr, *kernel.SyscallControl, error) {
+func sharedSignalfd(t *kernel.Task, fd int32, sigset hostarch.Addr, sigsetsize uint, flags int32) (uintptr, *kernel.SyscallControl, error) {
// Copy in the signal mask.
mask, err := slinux.CopyInSigSet(t, sigset, sigsetsize)
if err != nil {
diff --git a/pkg/sentry/syscalls/linux/vfs2/socket.go b/pkg/sentry/syscalls/linux/vfs2/socket.go
index 346fd1cea..a87a66146 100644
--- a/pkg/sentry/syscalls/linux/vfs2/socket.go
+++ b/pkg/sentry/syscalls/linux/vfs2/socket.go
@@ -31,6 +31,8 @@ import (
"gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// minListenBacklog is the minimum reasonable backlog for listening sockets.
@@ -116,7 +118,7 @@ type multipleMessageHeader64 struct {
// CaptureAddress allocates memory for and copies a socket address structure
// from the untrusted address space range.
-func CaptureAddress(t *kernel.Task, addr usermem.Addr, addrlen uint32) ([]byte, error) {
+func CaptureAddress(t *kernel.Task, addr hostarch.Addr, addrlen uint32) ([]byte, error) {
if addrlen > maxAddrLen {
return nil, syserror.EINVAL
}
@@ -132,7 +134,7 @@ func CaptureAddress(t *kernel.Task, addr usermem.Addr, addrlen uint32) ([]byte,
// writeAddress writes a sockaddr structure and its length to an output buffer
// in the untrusted address space range. If the address is bigger than the
// buffer, it is truncated.
-func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr usermem.Addr, addrLenPtr usermem.Addr) error {
+func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr hostarch.Addr, addrLenPtr hostarch.Addr) error {
// Get the buffer length.
var bufLen uint32
if _, err := primitive.CopyUint32In(t, addrLenPtr, &bufLen); err != nil {
@@ -279,7 +281,7 @@ func Connect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// accept is the implementation of the accept syscall. It is called by accept
// and accept4 syscall handlers.
-func accept(t *kernel.Task, fd int32, addr usermem.Addr, addrLen usermem.Addr, flags int) (uintptr, error) {
+func accept(t *kernel.Task, fd int32, addr hostarch.Addr, addrLen hostarch.Addr, flags int) (uintptr, error) {
// Check that no unsupported flags are passed in.
if flags & ^(linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
return 0, syserror.EINVAL
@@ -475,7 +477,7 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// getSockOpt tries to handle common socket options, or dispatches to a specific
// socket implementation.
-func getSockOpt(t *kernel.Task, s socket.SocketVFS2, level, name int, optValAddr usermem.Addr, len int) (marshal.Marshallable, *syserr.Error) {
+func getSockOpt(t *kernel.Task, s socket.SocketVFS2, level, name int, optValAddr hostarch.Addr, len int) (marshal.Marshallable, *syserr.Error) {
if level == linux.SOL_SOCKET {
switch name {
case linux.SO_TYPE, linux.SO_DOMAIN, linux.SO_PROTOCOL:
@@ -738,7 +740,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return uintptr(count), nil, nil
}
-func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) {
+func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr hostarch.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) {
// Capture the message header and io vectors.
var msg MessageHeader64
if _, err := msg.CopyIn(t, msgPtr); err != nil {
@@ -748,7 +750,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, fla
if msg.IovLen > linux.UIO_MAXIOV {
return 0, syserror.EMSGSIZE
}
- dst, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
+ dst, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
AddressSpaceActive: true,
})
if err != nil {
@@ -799,7 +801,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, fla
// Copy the address to the caller.
if msg.NameLen != 0 {
- if err := writeAddress(t, sender, senderLen, usermem.Addr(msg.Name), usermem.Addr(msgPtr+nameLenOffset)); err != nil {
+ if err := writeAddress(t, sender, senderLen, hostarch.Addr(msg.Name), hostarch.Addr(msgPtr+nameLenOffset)); err != nil {
return 0, err
}
}
@@ -809,7 +811,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, fla
return 0, err
}
if len(controlData) > 0 {
- if _, err := t.CopyOutBytes(usermem.Addr(msg.Control), controlData); err != nil {
+ if _, err := t.CopyOutBytes(hostarch.Addr(msg.Control), controlData); err != nil {
return 0, err
}
}
@@ -824,7 +826,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, fla
// recvFrom is the implementation of the recvfrom syscall. It is called by
// recvfrom and recv syscall handlers.
-func recvFrom(t *kernel.Task, fd int32, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLenPtr usermem.Addr) (uintptr, error) {
+func recvFrom(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLenPtr hostarch.Addr) (uintptr, error) {
if int(bufLen) < 0 {
return 0, syserror.EINVAL
}
@@ -1000,7 +1002,7 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return uintptr(count), nil, nil
}
-func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescription, msgPtr usermem.Addr, flags int32) (uintptr, error) {
+func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescription, msgPtr hostarch.Addr, flags int32) (uintptr, error) {
// Capture the message header.
var msg MessageHeader64
if _, err := msg.CopyIn(t, msgPtr); err != nil {
@@ -1014,7 +1016,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio
return 0, syserror.ENOBUFS
}
controlData = make([]byte, msg.ControlLen)
- if _, err := t.CopyInBytes(usermem.Addr(msg.Control), controlData); err != nil {
+ if _, err := t.CopyInBytes(hostarch.Addr(msg.Control), controlData); err != nil {
return 0, err
}
}
@@ -1023,7 +1025,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio
var to []byte
if msg.NameLen != 0 {
var err error
- to, err = CaptureAddress(t, usermem.Addr(msg.Name), msg.NameLen)
+ to, err = CaptureAddress(t, hostarch.Addr(msg.Name), msg.NameLen)
if err != nil {
return 0, err
}
@@ -1033,7 +1035,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio
if msg.IovLen > linux.UIO_MAXIOV {
return 0, syserror.EMSGSIZE
}
- src, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
+ src, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
AddressSpaceActive: true,
})
if err != nil {
@@ -1067,7 +1069,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio
// sendTo is the implementation of the sendto syscall. It is called by sendto
// and send syscall handlers.
-func sendTo(t *kernel.Task, fd int32, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLen uint32) (uintptr, error) {
+func sendTo(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLen uint32) (uintptr, error) {
bl := int(bufLen)
if bl < 0 {
return 0, syserror.EINVAL
diff --git a/pkg/sentry/syscalls/linux/vfs2/stat.go b/pkg/sentry/syscalls/linux/vfs2/stat.go
index 0f5d5189c..69e77fa99 100644
--- a/pkg/sentry/syscalls/linux/vfs2/stat.go
+++ b/pkg/sentry/syscalls/linux/vfs2/stat.go
@@ -24,7 +24,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// Stat implements Linux syscall stat(2).
@@ -50,7 +51,7 @@ func Newfstatat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
return 0, nil, fstatat(t, dirfd, pathAddr, statAddr, flags)
}
-func fstatat(t *kernel.Task, dirfd int32, pathAddr, statAddr usermem.Addr, flags int32) error {
+func fstatat(t *kernel.Task, dirfd int32, pathAddr, statAddr hostarch.Addr, flags int32) error {
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 {
return syserror.EINVAL
}
@@ -264,7 +265,7 @@ func Faccessat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return 0, nil, accessAt(t, dirfd, addr, mode)
}
-func accessAt(t *kernel.Task, dirfd int32, pathAddr usermem.Addr, mode uint) error {
+func accessAt(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, mode uint) error {
const rOK = 4
const wOK = 2
const xOK = 1
@@ -312,7 +313,7 @@ func Readlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
return readlinkat(t, dirfd, pathAddr, bufAddr, size)
}
-func readlinkat(t *kernel.Task, dirfd int32, pathAddr, bufAddr usermem.Addr, size uint) (uintptr, *kernel.SyscallControl, error) {
+func readlinkat(t *kernel.Task, dirfd int32, pathAddr, bufAddr hostarch.Addr, size uint) (uintptr, *kernel.SyscallControl, error) {
if int(size) <= 0 {
return 0, nil, syserror.EINVAL
}
diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
index 8f217bc36..d9e2dd3d9 100644
--- a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
+++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
@@ -10,9 +10,9 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -31,17 +31,17 @@ func (s *sigSetWithSize) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *sigSetWithSize) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.sigsetAddr))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.sigsetAddr))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.sizeofSigset))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.sizeofSigset))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *sigSetWithSize) UnmarshalBytes(src []byte) {
- s.sigsetAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.sigsetAddr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.sizeofSigset = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.sizeofSigset = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -63,7 +63,7 @@ func (s *sigSetWithSize) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *sigSetWithSize) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *sigSetWithSize) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -80,13 +80,13 @@ func (s *sigSetWithSize) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *sigSetWithSize) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *sigSetWithSize) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *sigSetWithSize) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *sigSetWithSize) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -124,21 +124,21 @@ func (m *MessageHeader64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (m *MessageHeader64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
@@ -146,21 +146,21 @@ func (m *MessageHeader64) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (m *MessageHeader64) UnmarshalBytes(src []byte) {
- m.Name = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.Name = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ m.NameLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.Iov = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.IovLen = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.Control = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.Control = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ m.ControlLen = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- m.Flags = int32(usermem.ByteOrder.Uint32(src[:4]))
+ m.Flags = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -184,7 +184,7 @@ func (m *MessageHeader64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -201,13 +201,13 @@ func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -248,7 +248,7 @@ func (m *multipleMessageHeader64) SizeBytes() int {
func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()])
dst = dst[m.msgHdr.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
@@ -258,7 +258,7 @@ func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()])
src = src[m.msgHdr.SizeBytes():]
- m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ m.msgLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -292,7 +292,7 @@ func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !m.msgHdr.Packed() {
// Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
@@ -316,13 +316,13 @@ func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !m.msgHdr.Packed() {
// Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
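(Editorial illustration, not part of the diff: the MessageHeader64 MarshalBytes hunk above writes the struct as Name (8), NameLen (4), 4 bytes of padding, Iov (8), IovLen (8), Control (8), ControlLen (8), Flags (4), 4 bytes of padding, 56 bytes in total. The sketch below reproduces that wire layout with the standard library, assuming little-endian encoding; the type name here is lower-cased to mark it as illustrative.)

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // messageHeader64 mirrors the field order that MarshalBytes above writes.
    type messageHeader64 struct {
    	Name, Iov, IovLen, Control, ControlLen uint64
    	NameLen                                uint32
    	Flags                                  int32
    }

    // marshal lays the struct out exactly as the autogenerated code does.
    func (m *messageHeader64) marshal() []byte {
    	dst := make([]byte, 56)
    	le := binary.LittleEndian
    	le.PutUint64(dst[0:], m.Name)
    	le.PutUint32(dst[8:], m.NameLen)
    	// dst[12:16] is padding and stays zero.
    	le.PutUint64(dst[16:], m.Iov)
    	le.PutUint64(dst[24:], m.IovLen)
    	le.PutUint64(dst[32:], m.Control)
    	le.PutUint64(dst[40:], m.ControlLen)
    	le.PutUint32(dst[48:], uint32(m.Flags))
    	// dst[52:56] is padding and stays zero.
    	return dst
    }

    func main() {
    	h := messageHeader64{Name: 0x1000, NameLen: 16, Iov: 0x2000, IovLen: 2}
    	fmt.Println(len(h.marshal())) // 56
    }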
diff --git a/pkg/sentry/syscalls/linux/vfs2/xattr.go b/pkg/sentry/syscalls/linux/vfs2/xattr.go
index e05723ef9..c261050c6 100644
--- a/pkg/sentry/syscalls/linux/vfs2/xattr.go
+++ b/pkg/sentry/syscalls/linux/vfs2/xattr.go
@@ -23,7 +23,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// ListXattr implements Linux syscall listxattr(2).
@@ -291,7 +292,7 @@ func Fremovexattr(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
return 0, nil, file.RemoveXattr(t, name)
}
-func copyInXattrName(t *kernel.Task, nameAddr usermem.Addr) (string, error) {
+func copyInXattrName(t *kernel.Task, nameAddr hostarch.Addr) (string, error) {
name, err := t.CopyInString(nameAddr, linux.XATTR_NAME_MAX+1)
if err != nil {
if err == syserror.ENAMETOOLONG {
@@ -305,7 +306,7 @@ func copyInXattrName(t *kernel.Task, nameAddr usermem.Addr) (string, error) {
return name, nil
}
-func copyOutXattrNameList(t *kernel.Task, listAddr usermem.Addr, size uint, names []string) (int, error) {
+func copyOutXattrNameList(t *kernel.Task, listAddr hostarch.Addr, size uint, names []string) (int, error) {
if size > linux.XATTR_LIST_MAX {
size = linux.XATTR_LIST_MAX
}
@@ -327,7 +328,7 @@ func copyOutXattrNameList(t *kernel.Task, listAddr usermem.Addr, size uint, name
return t.CopyOutBytes(listAddr, buf.Bytes())
}
-func copyInXattrValue(t *kernel.Task, valueAddr usermem.Addr, size uint) (string, error) {
+func copyInXattrValue(t *kernel.Task, valueAddr hostarch.Addr, size uint) (string, error) {
if size > linux.XATTR_SIZE_MAX {
return "", syserror.E2BIG
}
@@ -338,7 +339,7 @@ func copyInXattrValue(t *kernel.Task, valueAddr usermem.Addr, size uint) (string
return gohacks.StringFromImmutableBytes(buf), nil
}
-func copyOutXattrValue(t *kernel.Task, valueAddr usermem.Addr, size uint, value string) (int, error) {
+func copyOutXattrValue(t *kernel.Task, valueAddr hostarch.Addr, size uint, value string) (int, error) {
if size > linux.XATTR_SIZE_MAX {
size = linux.XATTR_SIZE_MAX
}
diff --git a/pkg/sentry/vfs/anonfs.go b/pkg/sentry/vfs/anonfs.go
index 3caf417ca..f48817132 100644
--- a/pkg/sentry/vfs/anonfs.go
+++ b/pkg/sentry/vfs/anonfs.go
@@ -20,10 +20,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// NewAnonVirtualDentry returns a VirtualDentry with the given synthetic name,
@@ -43,7 +43,7 @@ func (vfs *VirtualFilesystem) NewAnonVirtualDentry(name string) VirtualDentry {
}
const (
- anonfsBlockSize = usermem.PageSize // via fs/libfs.c:pseudo_fs_fill_super()
+ anonfsBlockSize = hostarch.PageSize // via fs/libfs.c:pseudo_fs_fill_super()
// Mode, UID, and GID for a generic anonfs file.
anonFileMode = 0600 // no type is correct
diff --git a/pkg/sentry/vfs/filesystem_impl_util.go b/pkg/sentry/vfs/filesystem_impl_util.go
index 2620cf975..15b234d61 100644
--- a/pkg/sentry/vfs/filesystem_impl_util.go
+++ b/pkg/sentry/vfs/filesystem_impl_util.go
@@ -18,7 +18,7 @@ import (
"strings"
"gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// GenericParseMountOptions parses a comma-separated list of options of the
@@ -50,7 +50,7 @@ func GenericParseMountOptions(str string) map[string]string {
func GenericStatFS(fsMagic uint64) linux.Statfs {
return linux.Statfs{
Type: fsMagic,
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
NameLength: linux.NAME_MAX,
}
}
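Note on the hunk above: GenericStatFS now reports hostarch.PageSize as the filesystem block size. A minimal sketch of a hypothetical caller (the package name and the literal tmpfs magic 0x01021994 are illustrative, not part of this change):

package example

import (
	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/sentry/vfs"
)

// statFS returns statfs(2) data. GenericStatFS fills in Type, BlockSize
// (hostarch.PageSize), and NameLength; a real filesystem would override or
// extend the remaining fields as needed.
func statFS() linux.Statfs {
	return vfs.GenericStatFS(0x01021994) // Linux TMPFS_MAGIC, as a literal for illustration
}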
diff --git a/pkg/sentry/vfs/inotify.go b/pkg/sentry/vfs/inotify.go
index 32fa01578..49d29e20b 100644
--- a/pkg/sentry/vfs/inotify.go
+++ b/pkg/sentry/vfs/inotify.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/uniqueid"
"gvisor.dev/gvisor/pkg/sync"
@@ -256,7 +257,7 @@ func (i *Inotify) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallAr
n += uint32(e.sizeOf())
}
var buf [4]byte
- usermem.ByteOrder.PutUint32(buf[:], n)
+ hostarch.ByteOrder.PutUint32(buf[:], n)
_, err := uio.CopyOut(ctx, args[2].Pointer(), buf[:], usermem.IOOpts{})
return 0, err
@@ -683,10 +684,10 @@ func (e *Event) sizeOf() int {
// construct the output. We use a buffer allocated ahead of time for
// performance. buf must be at least inotifyEventBaseSize bytes.
func (e *Event) CopyTo(ctx context.Context, buf []byte, dst usermem.IOSequence) (int64, error) {
- usermem.ByteOrder.PutUint32(buf[0:], uint32(e.wd))
- usermem.ByteOrder.PutUint32(buf[4:], e.mask)
- usermem.ByteOrder.PutUint32(buf[8:], e.cookie)
- usermem.ByteOrder.PutUint32(buf[12:], e.len)
+ hostarch.ByteOrder.PutUint32(buf[0:], uint32(e.wd))
+ hostarch.ByteOrder.PutUint32(buf[4:], e.mask)
+ hostarch.ByteOrder.PutUint32(buf[8:], e.cookie)
+ hostarch.ByteOrder.PutUint32(buf[12:], e.len)
writeLen := 0
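The inotify hunks above switch the event encoder to hostarch.ByteOrder. A standalone sketch of the same wire layout (wd, mask, cookie, len as four little-endian uint32 fields followed by the padded name); the field values and padding here are invented for illustration:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	// struct inotify_event header: wd, mask, cookie, len (16 bytes), then name.
	name := []byte("file.txt\x00\x00\x00\x00") // NUL-padded to a 4-byte multiple
	buf := make([]byte, 16+len(name))
	hostarch.ByteOrder.PutUint32(buf[0:], 1)                  // wd
	hostarch.ByteOrder.PutUint32(buf[4:], 0x00000100)         // mask: IN_CREATE
	hostarch.ByteOrder.PutUint32(buf[8:], 0)                  // cookie
	hostarch.ByteOrder.PutUint32(buf[12:], uint32(len(name))) // len
	copy(buf[16:], name)
	fmt.Printf("% x\n", buf[:16])
}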
diff --git a/pkg/usermem/bytes_io.go b/pkg/usermem/bytes_io.go
index e177d30eb..3da3c0294 100644
--- a/pkg/usermem/bytes_io.go
+++ b/pkg/usermem/bytes_io.go
@@ -16,6 +16,7 @@ package usermem
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/syserror"
)
@@ -30,7 +31,7 @@ type BytesIO struct {
}
// CopyOut implements IO.CopyOut.
-func (b *BytesIO) CopyOut(ctx context.Context, addr Addr, src []byte, opts IOOpts) (int, error) {
+func (b *BytesIO) CopyOut(ctx context.Context, addr hostarch.Addr, src []byte, opts IOOpts) (int, error) {
rngN, rngErr := b.rangeCheck(addr, len(src))
if rngN == 0 {
return 0, rngErr
@@ -39,7 +40,7 @@ func (b *BytesIO) CopyOut(ctx context.Context, addr Addr, src []byte, opts IOOpt
}
// CopyIn implements IO.CopyIn.
-func (b *BytesIO) CopyIn(ctx context.Context, addr Addr, dst []byte, opts IOOpts) (int, error) {
+func (b *BytesIO) CopyIn(ctx context.Context, addr hostarch.Addr, dst []byte, opts IOOpts) (int, error) {
rngN, rngErr := b.rangeCheck(addr, len(dst))
if rngN == 0 {
return 0, rngErr
@@ -48,7 +49,7 @@ func (b *BytesIO) CopyIn(ctx context.Context, addr Addr, dst []byte, opts IOOpts
}
// ZeroOut implements IO.ZeroOut.
-func (b *BytesIO) ZeroOut(ctx context.Context, addr Addr, toZero int64, opts IOOpts) (int64, error) {
+func (b *BytesIO) ZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64, opts IOOpts) (int64, error) {
if toZero > int64(maxInt) {
return 0, syserror.EINVAL
}
@@ -64,7 +65,7 @@ func (b *BytesIO) ZeroOut(ctx context.Context, addr Addr, toZero int64, opts IOO
}
// CopyOutFrom implements IO.CopyOutFrom.
-func (b *BytesIO) CopyOutFrom(ctx context.Context, ars AddrRangeSeq, src safemem.Reader, opts IOOpts) (int64, error) {
+func (b *BytesIO) CopyOutFrom(ctx context.Context, ars hostarch.AddrRangeSeq, src safemem.Reader, opts IOOpts) (int64, error) {
dsts, rngErr := b.blocksFromAddrRanges(ars)
n, err := src.ReadToBlocks(dsts)
if err != nil {
@@ -74,7 +75,7 @@ func (b *BytesIO) CopyOutFrom(ctx context.Context, ars AddrRangeSeq, src safemem
}
// CopyInTo implements IO.CopyInTo.
-func (b *BytesIO) CopyInTo(ctx context.Context, ars AddrRangeSeq, dst safemem.Writer, opts IOOpts) (int64, error) {
+func (b *BytesIO) CopyInTo(ctx context.Context, ars hostarch.AddrRangeSeq, dst safemem.Writer, opts IOOpts) (int64, error) {
srcs, rngErr := b.blocksFromAddrRanges(ars)
n, err := dst.WriteFromBlocks(srcs)
if err != nil {
@@ -83,14 +84,14 @@ func (b *BytesIO) CopyInTo(ctx context.Context, ars AddrRangeSeq, dst safemem.Wr
return int64(n), rngErr
}
-func (b *BytesIO) rangeCheck(addr Addr, length int) (int, error) {
+func (b *BytesIO) rangeCheck(addr hostarch.Addr, length int) (int, error) {
if length == 0 {
return 0, nil
}
if length < 0 {
return 0, syserror.EINVAL
}
- max := Addr(len(b.Bytes))
+ max := hostarch.Addr(len(b.Bytes))
if addr >= max {
return 0, syserror.EFAULT
}
@@ -101,7 +102,7 @@ func (b *BytesIO) rangeCheck(addr Addr, length int) (int, error) {
return length, nil
}
-func (b *BytesIO) blocksFromAddrRanges(ars AddrRangeSeq) (safemem.BlockSeq, error) {
+func (b *BytesIO) blocksFromAddrRanges(ars hostarch.AddrRangeSeq) (safemem.BlockSeq, error) {
switch ars.NumRanges() {
case 0:
return safemem.BlockSeq{}, nil
@@ -124,7 +125,7 @@ func (b *BytesIO) blocksFromAddrRanges(ars AddrRangeSeq) (safemem.BlockSeq, erro
}
}
-func (b *BytesIO) blockFromAddrRange(ar AddrRange) (safemem.Block, error) {
+func (b *BytesIO) blockFromAddrRange(ar hostarch.AddrRange) (safemem.Block, error) {
n, err := b.rangeCheck(ar.Start, int(ar.Length()))
if n == 0 {
return safemem.Block{}, err
@@ -136,6 +137,6 @@ func (b *BytesIO) blockFromAddrRange(ar AddrRange) (safemem.Block, error) {
func BytesIOSequence(buf []byte) IOSequence {
return IOSequence{
IO: &BytesIO{buf},
- Addrs: AddrRangeSeqOf(AddrRange{0, Addr(len(buf))}),
+ Addrs: hostarch.AddrRangeSeqOf(hostarch.AddrRange{0, hostarch.Addr(len(buf))}),
}
}
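A minimal sketch of the BytesIO API after the rename: address-typed arguments are now hostarch.Addr, while the IO implementation itself stays in usermem. It assumes pkg/context.Background() is available for a no-op context, as used elsewhere in the tree; buffer contents are invented:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/usermem"
)

func main() {
	ctx := context.Background()
	b := &usermem.BytesIO{Bytes: make([]byte, 16)}

	// Copy into the backing slice at offset 4, then read it back.
	if _, err := b.CopyOut(ctx, hostarch.Addr(4), []byte("hello"), usermem.IOOpts{}); err != nil {
		panic(err)
	}
	dst := make([]byte, 5)
	if _, err := b.CopyIn(ctx, hostarch.Addr(4), dst, usermem.IOOpts{}); err != nil {
		panic(err)
	}
	fmt.Println(string(dst)) // "hello"

	// BytesIOSequence wraps the whole buffer as a single-range IOSequence.
	s := usermem.BytesIOSequence(make([]byte, 8))
	fmt.Println(s.NumBytes()) // 8
}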
diff --git a/pkg/usermem/bytes_io_unsafe.go b/pkg/usermem/bytes_io_unsafe.go
index 20de5037d..dcd5c81d1 100644
--- a/pkg/usermem/bytes_io_unsafe.go
+++ b/pkg/usermem/bytes_io_unsafe.go
@@ -20,10 +20,11 @@ import (
"gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// SwapUint32 implements IO.SwapUint32.
-func (b *BytesIO) SwapUint32(ctx context.Context, addr Addr, new uint32, opts IOOpts) (uint32, error) {
+func (b *BytesIO) SwapUint32(ctx context.Context, addr hostarch.Addr, new uint32, opts IOOpts) (uint32, error) {
if _, rngErr := b.rangeCheck(addr, 4); rngErr != nil {
return 0, rngErr
}
@@ -31,7 +32,7 @@ func (b *BytesIO) SwapUint32(ctx context.Context, addr Addr, new uint32, opts IO
}
// CompareAndSwapUint32 implements IO.CompareAndSwapUint32.
-func (b *BytesIO) CompareAndSwapUint32(ctx context.Context, addr Addr, old, new uint32, opts IOOpts) (uint32, error) {
+func (b *BytesIO) CompareAndSwapUint32(ctx context.Context, addr hostarch.Addr, old, new uint32, opts IOOpts) (uint32, error) {
if _, rngErr := b.rangeCheck(addr, 4); rngErr != nil {
return 0, rngErr
}
@@ -39,7 +40,7 @@ func (b *BytesIO) CompareAndSwapUint32(ctx context.Context, addr Addr, old, new
}
// LoadUint32 implements IO.LoadUint32.
-func (b *BytesIO) LoadUint32(ctx context.Context, addr Addr, opts IOOpts) (uint32, error) {
+func (b *BytesIO) LoadUint32(ctx context.Context, addr hostarch.Addr, opts IOOpts) (uint32, error) {
if _, err := b.rangeCheck(addr, 4); err != nil {
return 0, err
}
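The atomic helpers likewise take hostarch.Addr now. A hedged sketch of the three operations on a BytesIO (again assuming pkg/context.Background(); note the 4-byte alignment precondition from the IO interface):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/usermem"
)

func main() {
	ctx := context.Background()
	b := &usermem.BytesIO{Bytes: make([]byte, 8)}

	// Store 7 at offset 0 and get the previous value back.
	old, err := b.SwapUint32(ctx, hostarch.Addr(0), 7, usermem.IOOpts{})
	fmt.Println(old, err) // 0 <nil>

	// Compare-and-swap 7 -> 9; returns the value previously stored.
	prev, err := b.CompareAndSwapUint32(ctx, hostarch.Addr(0), 7, 9, usermem.IOOpts{})
	fmt.Println(prev, err) // 7 <nil>

	v, err := b.LoadUint32(ctx, hostarch.Addr(0), usermem.IOOpts{})
	fmt.Println(v, err) // 9 <nil>
}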
diff --git a/pkg/usermem/usermem.go b/pkg/usermem/usermem.go
index dc2571154..0d6d25e50 100644
--- a/pkg/usermem/usermem.go
+++ b/pkg/usermem/usermem.go
@@ -25,6 +25,8 @@ import (
"gvisor.dev/gvisor/pkg/gohacks"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/syserror"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// IO provides access to the contents of a virtual memory space.
@@ -37,7 +39,7 @@ type IO interface {
// any following locks in the lock order.
//
// Postconditions: CopyOut does not retain src.
- CopyOut(ctx context.Context, addr Addr, src []byte, opts IOOpts) (int, error)
+ CopyOut(ctx context.Context, addr hostarch.Addr, src []byte, opts IOOpts) (int, error)
// CopyIn copies len(dst) bytes from the memory mapped at addr to dst.
// It returns the number of bytes copied. If the number of bytes copied is
@@ -47,7 +49,7 @@ type IO interface {
// any following locks in the lock order.
//
// Postconditions: CopyIn does not retain dst.
- CopyIn(ctx context.Context, addr Addr, dst []byte, opts IOOpts) (int, error)
+ CopyIn(ctx context.Context, addr hostarch.Addr, dst []byte, opts IOOpts) (int, error)
// ZeroOut sets toZero bytes to 0, starting at addr. It returns the number
// of bytes zeroed. If the number of bytes zeroed is < toZero, it returns a
@@ -57,7 +59,7 @@ type IO interface {
// * The caller must not hold mm.MemoryManager.mappingMu or any
// following locks in the lock order.
// * toZero >= 0.
- ZeroOut(ctx context.Context, addr Addr, toZero int64, opts IOOpts) (int64, error)
+ ZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64, opts IOOpts) (int64, error)
// CopyOutFrom copies ars.NumBytes() bytes from src to the memory mapped at
// ars. It returns the number of bytes copied, which may be less than the
@@ -72,7 +74,7 @@ type IO interface {
// following locks in the lock order.
// * src.ReadToBlocks must not block on mm.MemoryManager.activeMu or
// any preceding locks in the lock order.
- CopyOutFrom(ctx context.Context, ars AddrRangeSeq, src safemem.Reader, opts IOOpts) (int64, error)
+ CopyOutFrom(ctx context.Context, ars hostarch.AddrRangeSeq, src safemem.Reader, opts IOOpts) (int64, error)
// CopyInTo copies ars.NumBytes() bytes from the memory mapped at ars to
// dst. It returns the number of bytes copied. CopyInTo may return a
@@ -86,7 +88,7 @@ type IO interface {
// following locks in the lock order.
// * dst.WriteFromBlocks must not block on mm.MemoryManager.activeMu or
// any preceding locks in the lock order.
- CopyInTo(ctx context.Context, ars AddrRangeSeq, dst safemem.Writer, opts IOOpts) (int64, error)
+ CopyInTo(ctx context.Context, ars hostarch.AddrRangeSeq, dst safemem.Writer, opts IOOpts) (int64, error)
// TODO(jamieliu): The requirement that CopyOutFrom/CopyInTo call src/dst
// at most once, which is unnecessary in most cases, forces implementations
@@ -101,7 +103,7 @@ type IO interface {
// * The caller must not hold mm.MemoryManager.mappingMu or any
// following locks in the lock order.
// * addr must be aligned to a 4-byte boundary.
- SwapUint32(ctx context.Context, addr Addr, new uint32, opts IOOpts) (uint32, error)
+ SwapUint32(ctx context.Context, addr hostarch.Addr, new uint32, opts IOOpts) (uint32, error)
// CompareAndSwapUint32 atomically compares the uint32 value at addr to
// old; if they are equal, the value in memory is replaced by new. In
@@ -111,7 +113,7 @@ type IO interface {
// * The caller must not hold mm.MemoryManager.mappingMu or any
// following locks in the lock order.
// * addr must be aligned to a 4-byte boundary.
- CompareAndSwapUint32(ctx context.Context, addr Addr, old, new uint32, opts IOOpts) (uint32, error)
+ CompareAndSwapUint32(ctx context.Context, addr hostarch.Addr, old, new uint32, opts IOOpts) (uint32, error)
// LoadUint32 atomically loads the uint32 value at addr and returns it.
//
@@ -119,7 +121,7 @@ type IO interface {
// * The caller must not hold mm.MemoryManager.mappingMu or any
// following locks in the lock order.
// * addr must be aligned to a 4-byte boundary.
- LoadUint32(ctx context.Context, addr Addr, opts IOOpts) (uint32, error)
+ LoadUint32(ctx context.Context, addr hostarch.Addr, opts IOOpts) (uint32, error)
}
// IOOpts contains options applicable to all IO methods.
@@ -142,7 +144,7 @@ type IOOpts struct {
type IOReadWriter struct {
Ctx context.Context
IO IO
- Addr Addr
+ Addr hostarch.Addr
Opts IOOpts
}
@@ -159,7 +161,7 @@ func (rw *IOReadWriter) Read(dst []byte) (int, error) {
rw.Addr = end
} else {
// Disallow wraparound.
- rw.Addr = ^Addr(0)
+ rw.Addr = ^hostarch.Addr(0)
if err != nil {
err = syserror.EFAULT
}
@@ -175,7 +177,7 @@ func (rw *IOReadWriter) Write(src []byte) (int, error) {
rw.Addr = end
} else {
// Disallow wraparound.
- rw.Addr = ^Addr(0)
+ rw.Addr = ^hostarch.Addr(0)
if err != nil {
err = syserror.EFAULT
}
@@ -197,7 +199,7 @@ const (
//
// Preconditions: Same as IO.CopyFromUser, plus:
// * maxlen >= 0.
-func CopyStringIn(ctx context.Context, uio IO, addr Addr, maxlen int, opts IOOpts) (string, error) {
+func CopyStringIn(ctx context.Context, uio IO, addr hostarch.Addr, maxlen int, opts IOOpts) (string, error) {
initLen := maxlen
if initLen > copyStringMaxInitBufLen {
initLen = copyStringMaxInitBufLen
@@ -251,12 +253,12 @@ func CopyStringIn(ctx context.Context, uio IO, addr Addr, maxlen int, opts IOOpt
// the maximum, it returns a non-nil error explaining why.
//
// Preconditions: Same as IO.CopyOut.
-func CopyOutVec(ctx context.Context, uio IO, ars AddrRangeSeq, src []byte, opts IOOpts) (int, error) {
+func CopyOutVec(ctx context.Context, uio IO, ars hostarch.AddrRangeSeq, src []byte, opts IOOpts) (int, error) {
var done int
for !ars.IsEmpty() && done < len(src) {
ar := ars.Head()
cplen := len(src) - done
- if Addr(cplen) >= ar.Length() {
+ if hostarch.Addr(cplen) >= ar.Length() {
cplen = int(ar.Length())
}
n, err := uio.CopyOut(ctx, ar.Start, src[done:done+cplen], opts)
@@ -275,12 +277,12 @@ func CopyOutVec(ctx context.Context, uio IO, ars AddrRangeSeq, src []byte, opts
// maximum, it returns a non-nil error explaining why.
//
// Preconditions: Same as IO.CopyIn.
-func CopyInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dst []byte, opts IOOpts) (int, error) {
+func CopyInVec(ctx context.Context, uio IO, ars hostarch.AddrRangeSeq, dst []byte, opts IOOpts) (int, error) {
var done int
for !ars.IsEmpty() && done < len(dst) {
ar := ars.Head()
cplen := len(dst) - done
- if Addr(cplen) >= ar.Length() {
+ if hostarch.Addr(cplen) >= ar.Length() {
cplen = int(ar.Length())
}
n, err := uio.CopyIn(ctx, ar.Start, dst[done:done+cplen], opts)
@@ -299,12 +301,12 @@ func CopyInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dst []byte, opts I
// maximum, it returns a non-nil error explaining why.
//
// Preconditions: Same as IO.ZeroOut.
-func ZeroOutVec(ctx context.Context, uio IO, ars AddrRangeSeq, toZero int64, opts IOOpts) (int64, error) {
+func ZeroOutVec(ctx context.Context, uio IO, ars hostarch.AddrRangeSeq, toZero int64, opts IOOpts) (int64, error) {
var done int64
for !ars.IsEmpty() && done < toZero {
ar := ars.Head()
cplen := toZero - done
- if Addr(cplen) >= ar.Length() {
+ if hostarch.Addr(cplen) >= ar.Length() {
cplen = int64(ar.Length())
}
n, err := uio.ZeroOut(ctx, ar.Start, cplen, opts)
@@ -352,7 +354,7 @@ func isASCIIWhitespace(b byte) bool {
// - CopyInt32StringsInVec returns EINVAL if ars.NumBytes() == 0.
//
// Preconditions: Same as CopyInVec.
-func CopyInt32StringsInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dsts []int32, opts IOOpts) (int64, error) {
+func CopyInt32StringsInVec(ctx context.Context, uio IO, ars hostarch.AddrRangeSeq, dsts []int32, opts IOOpts) (int64, error) {
if len(dsts) == 0 {
return 0, nil
}
@@ -403,7 +405,7 @@ func CopyInt32StringsInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dsts [
// CopyInt32StringInVec is equivalent to CopyInt32StringsInVec, but copies at
// most one int32.
-func CopyInt32StringInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dst *int32, opts IOOpts) (int64, error) {
+func CopyInt32StringInVec(ctx context.Context, uio IO, ars hostarch.AddrRangeSeq, dst *int32, opts IOOpts) (int64, error) {
dsts := [1]int32{*dst}
n, err := CopyInt32StringsInVec(ctx, uio, ars, dsts[:], opts)
*dst = dsts[0]
@@ -413,7 +415,7 @@ func CopyInt32StringInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dst *in
// IOSequence holds arguments to IO methods.
type IOSequence struct {
IO IO
- Addrs AddrRangeSeq
+ Addrs hostarch.AddrRangeSeq
Opts IOOpts
}
@@ -444,28 +446,28 @@ func (s IOSequence) NumBytes() int64 {
// DropFirst returns a copy of s with s.Addrs.DropFirst(n).
//
-// Preconditions: Same as AddrRangeSeq.DropFirst.
+// Preconditions: Same as hostarch.AddrRangeSeq.DropFirst.
func (s IOSequence) DropFirst(n int) IOSequence {
return IOSequence{s.IO, s.Addrs.DropFirst(n), s.Opts}
}
// DropFirst64 returns a copy of s with s.Addrs.DropFirst64(n).
//
-// Preconditions: Same as AddrRangeSeq.DropFirst64.
+// Preconditions: Same as hostarch.AddrRangeSeq.DropFirst64.
func (s IOSequence) DropFirst64(n int64) IOSequence {
return IOSequence{s.IO, s.Addrs.DropFirst64(n), s.Opts}
}
// TakeFirst returns a copy of s with s.Addrs.TakeFirst(n).
//
-// Preconditions: Same as AddrRangeSeq.TakeFirst.
+// Preconditions: Same as hostarch.AddrRangeSeq.TakeFirst.
func (s IOSequence) TakeFirst(n int) IOSequence {
return IOSequence{s.IO, s.Addrs.TakeFirst(n), s.Opts}
}
// TakeFirst64 returns a copy of s with s.Addrs.TakeFirst64(n).
//
-// Preconditions: Same as AddrRangeSeq.TakeFirst64.
+// Preconditions: Same as hostarch.AddrRangeSeq.TakeFirst64.
func (s IOSequence) TakeFirst64(n int64) IOSequence {
return IOSequence{s.IO, s.Addrs.TakeFirst64(n), s.Opts}
}
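Tying the usermem.go changes together: callers still go through the usermem helpers, but every address and range argument is now a hostarch type. A small sketch of CopyStringIn and IOSequence trimming over a BytesIO backing store (buffer contents and offsets invented for illustration, and pkg/context.Background() assumed as above):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/usermem"
)

func main() {
	ctx := context.Background()

	// A fake "user" memory space holding a NUL-terminated string at offset 2.
	mem := []byte{0, 0, 'x', 'a', 't', 't', 'r', 0, 0}
	uio := &usermem.BytesIO{Bytes: mem}

	name, err := usermem.CopyStringIn(ctx, uio, hostarch.Addr(2), 64, usermem.IOOpts{})
	fmt.Println(name, err) // "xattr" <nil>

	// IOSequence trimming operates on a hostarch.AddrRangeSeq under the hood.
	s := usermem.BytesIOSequence(mem).DropFirst(2).TakeFirst(5)
	fmt.Println(s.NumBytes()) // 5
}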
diff --git a/pkg/usermem/usermem_state_autogen.go b/pkg/usermem/usermem_state_autogen.go
index 48714d65c..62f8af4c9 100644
--- a/pkg/usermem/usermem_state_autogen.go
+++ b/pkg/usermem/usermem_state_autogen.go
@@ -1,80 +1,3 @@
// automatically generated by stateify.
package usermem
-
-import (
- "gvisor.dev/gvisor/pkg/state"
-)
-
-func (a *AccessType) StateTypeName() string {
- return "pkg/usermem.AccessType"
-}
-
-func (a *AccessType) StateFields() []string {
- return []string{
- "Read",
- "Write",
- "Execute",
- }
-}
-
-func (a *AccessType) beforeSave() {}
-
-// +checklocksignore
-func (a *AccessType) StateSave(stateSinkObject state.Sink) {
- a.beforeSave()
- stateSinkObject.Save(0, &a.Read)
- stateSinkObject.Save(1, &a.Write)
- stateSinkObject.Save(2, &a.Execute)
-}
-
-func (a *AccessType) afterLoad() {}
-
-// +checklocksignore
-func (a *AccessType) StateLoad(stateSourceObject state.Source) {
- stateSourceObject.Load(0, &a.Read)
- stateSourceObject.Load(1, &a.Write)
- stateSourceObject.Load(2, &a.Execute)
-}
-
-func (v *Addr) StateTypeName() string {
- return "pkg/usermem.Addr"
-}
-
-func (v *Addr) StateFields() []string {
- return nil
-}
-
-func (r *AddrRange) StateTypeName() string {
- return "pkg/usermem.AddrRange"
-}
-
-func (r *AddrRange) StateFields() []string {
- return []string{
- "Start",
- "End",
- }
-}
-
-func (r *AddrRange) beforeSave() {}
-
-// +checklocksignore
-func (r *AddrRange) StateSave(stateSinkObject state.Sink) {
- r.beforeSave()
- stateSinkObject.Save(0, &r.Start)
- stateSinkObject.Save(1, &r.End)
-}
-
-func (r *AddrRange) afterLoad() {}
-
-// +checklocksignore
-func (r *AddrRange) StateLoad(stateSourceObject state.Source) {
- stateSourceObject.Load(0, &r.Start)
- stateSourceObject.Load(1, &r.End)
-}
-
-func init() {
- state.Register((*AccessType)(nil))
- state.Register((*Addr)(nil))
- state.Register((*AddrRange)(nil))
-}