Diffstat (limited to 'pkg/sentry/syscalls/linux')
 pkg/sentry/syscalls/linux/BUILD                                  | 108 -
 pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go            | 718 +
 pkg/sentry/syscalls/linux/linux_amd64_abi_autogen_unsafe.go      |  17 +
 pkg/sentry/syscalls/linux/linux_amd64_state_autogen.go           |   7 +
 pkg/sentry/syscalls/linux/linux_arm64_abi_autogen_unsafe.go      |  17 +
 pkg/sentry/syscalls/linux/linux_arm64_state_autogen.go           |   7 +
 pkg/sentry/syscalls/linux/linux_state_autogen.go                 | 112 +
 pkg/sentry/syscalls/linux/vfs2/BUILD                             |  79 -
 pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go        | 372 +
 pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_abi_autogen_unsafe.go  |  15 +
 pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_state_autogen.go       |   5 +
 pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_abi_autogen_unsafe.go  |  15 +
 pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_state_autogen.go       |   5 +
 pkg/sentry/syscalls/linux/vfs2/vfs2_state_autogen.go             |  42 +
14 files changed, 1332 insertions, 187 deletions
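
The two BUILD files removed below set marshal = True on these packages, and the files added in their place are the corresponding generated outputs: tools/go_marshal emits the *_abi_autogen_unsafe.go marshalling code and stateify emits the *_state_autogen.go save/restore code. For context on what the generator consumes, here is a rough sketch of an annotated input type; the field layout matches the rlimit64 methods generated below, but the directive comment is recalled from go_marshal's conventions rather than taken from this diff, so treat it as illustrative.

// Illustrative go_marshal input, not part of this change. The exact
// annotation on the real type in sys_rlimit.go may differ.
//
// +marshal
type rlimit64 struct {
	Cur uint64
	Max uint64
}

From such a type, go_marshal generates the SizeBytes, MarshalBytes/UnmarshalBytes, MarshalUnsafe/UnmarshalUnsafe, Packed, CopyIn/CopyOut/CopyOutN, and WriteTo implementations that appear in linux_abi_autogen_unsafe.go below.
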
diff --git a/pkg/sentry/syscalls/linux/BUILD b/pkg/sentry/syscalls/linux/BUILD deleted file mode 100644 index 408a6c422..000000000 --- a/pkg/sentry/syscalls/linux/BUILD +++ /dev/null @@ -1,108 +0,0 @@ -load("//tools:defs.bzl", "go_library") - -package(licenses = ["notice"]) - -go_library( - name = "linux", - srcs = [ - "error.go", - "flags.go", - "linux64.go", - "sigset.go", - "sys_aio.go", - "sys_capability.go", - "sys_clone_amd64.go", - "sys_clone_arm64.go", - "sys_epoll.go", - "sys_eventfd.go", - "sys_file.go", - "sys_futex.go", - "sys_getdents.go", - "sys_identity.go", - "sys_inotify.go", - "sys_lseek.go", - "sys_membarrier.go", - "sys_mempolicy.go", - "sys_mmap.go", - "sys_mount.go", - "sys_pipe.go", - "sys_poll.go", - "sys_prctl.go", - "sys_random.go", - "sys_read.go", - "sys_rlimit.go", - "sys_rseq.go", - "sys_rusage.go", - "sys_sched.go", - "sys_seccomp.go", - "sys_sem.go", - "sys_shm.go", - "sys_signal.go", - "sys_socket.go", - "sys_splice.go", - "sys_stat.go", - "sys_stat_amd64.go", - "sys_stat_arm64.go", - "sys_sync.go", - "sys_sysinfo.go", - "sys_syslog.go", - "sys_thread.go", - "sys_time.go", - "sys_timer.go", - "sys_timerfd.go", - "sys_tls_amd64.go", - "sys_tls_arm64.go", - "sys_utsname.go", - "sys_write.go", - "sys_xattr.go", - "timespec.go", - ], - marshal = True, - visibility = ["//:sandbox"], - deps = [ - "//pkg/abi", - "//pkg/abi/linux", - "//pkg/bpf", - "//pkg/context", - "//pkg/hostarch", - "//pkg/log", - "//pkg/marshal", - "//pkg/marshal/primitive", - "//pkg/metric", - "//pkg/rand", - "//pkg/safemem", - "//pkg/sentry/arch", - "//pkg/sentry/fs", - "//pkg/sentry/fs/anon", - "//pkg/sentry/fs/lock", - "//pkg/sentry/fs/timerfd", - "//pkg/sentry/fs/tmpfs", - "//pkg/sentry/fsbridge", - "//pkg/sentry/kernel", - "//pkg/sentry/kernel/auth", - "//pkg/sentry/kernel/epoll", - "//pkg/sentry/kernel/eventfd", - "//pkg/sentry/kernel/fasync", - "//pkg/sentry/kernel/pipe", - "//pkg/sentry/kernel/sched", - "//pkg/sentry/kernel/shm", - "//pkg/sentry/kernel/signalfd", - "//pkg/sentry/kernel/time", - "//pkg/sentry/limits", - "//pkg/sentry/loader", - "//pkg/sentry/memmap", - "//pkg/sentry/mm", - "//pkg/sentry/socket", - "//pkg/sentry/socket/control", - "//pkg/sentry/socket/unix/transport", - "//pkg/sentry/syscalls", - "//pkg/sentry/usage", - "//pkg/sentry/vfs", - "//pkg/sync", - "//pkg/syserr", - "//pkg/syserror", - "//pkg/usermem", - "//pkg/waiter", - "@org_golang_x_sys//unix:go_default_library", - ], -) diff --git a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go new file mode 100644 index 000000000..8539f13c3 --- /dev/null +++ b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go @@ -0,0 +1,718 @@ +// Automatically generated marshal implementation. See tools/go_marshal. + +// If there are issues with build tag aggregation, see +// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here +// come from the input set of files used to generate this file. This input set +// is filtered based on pre-defined file suffixes related to build tags, see +// tools/defs.bzl:calculate_sets(). + +package linux + +import ( + "gvisor.dev/gvisor/pkg/gohacks" + "gvisor.dev/gvisor/pkg/hostarch" + "gvisor.dev/gvisor/pkg/marshal" + "io" + "reflect" + "runtime" + "unsafe" +) + +// Marshallable types used by this file. 
+var _ marshal.Marshallable = (*MessageHeader64)(nil) +var _ marshal.Marshallable = (*SchedParam)(nil) +var _ marshal.Marshallable = (*direntHdr)(nil) +var _ marshal.Marshallable = (*multipleMessageHeader64)(nil) +var _ marshal.Marshallable = (*oldDirentHdr)(nil) +var _ marshal.Marshallable = (*rlimit64)(nil) +var _ marshal.Marshallable = (*userSockFprog)(nil) + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (d *direntHdr) SizeBytes() int { + return 1 + + (*oldDirentHdr)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (d *direntHdr) MarshalBytes(dst []byte) { + d.OldHdr.MarshalBytes(dst[:d.OldHdr.SizeBytes()]) + dst = dst[d.OldHdr.SizeBytes():] + dst[0] = byte(d.Typ) + dst = dst[1:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (d *direntHdr) UnmarshalBytes(src []byte) { + d.OldHdr.UnmarshalBytes(src[:d.OldHdr.SizeBytes()]) + src = src[d.OldHdr.SizeBytes():] + d.Typ = uint8(src[0]) + src = src[1:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (d *direntHdr) Packed() bool { + return false +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (d *direntHdr) MarshalUnsafe(dst []byte) { + // Type direntHdr doesn't have a packed layout in memory, fallback to MarshalBytes. + d.MarshalBytes(dst) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (d *direntHdr) UnmarshalUnsafe(src []byte) { + // Type direntHdr doesn't have a packed layout in memory, fallback to UnmarshalBytes. + d.UnmarshalBytes(src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (d *direntHdr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + // Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay. + d.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (d *direntHdr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return d.CopyOutN(cc, addr, d.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (d *direntHdr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + // Type direntHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + d.UnmarshalBytes(buf) // escapes: fallback. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (d *direntHdr) WriteTo(writer io.Writer) (int64, error) { + // Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, d.SizeBytes()) + d.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (o *oldDirentHdr) SizeBytes() int { + return 18 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. 
+func (o *oldDirentHdr) MarshalBytes(dst []byte) { + hostarch.ByteOrder.PutUint64(dst[:8], uint64(o.Ino)) + dst = dst[8:] + hostarch.ByteOrder.PutUint64(dst[:8], uint64(o.Off)) + dst = dst[8:] + hostarch.ByteOrder.PutUint16(dst[:2], uint16(o.Reclen)) + dst = dst[2:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (o *oldDirentHdr) UnmarshalBytes(src []byte) { + o.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + o.Off = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + o.Reclen = uint16(hostarch.ByteOrder.Uint16(src[:2])) + src = src[2:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (o *oldDirentHdr) Packed() bool { + return false +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (o *oldDirentHdr) MarshalUnsafe(dst []byte) { + // Type oldDirentHdr doesn't have a packed layout in memory, fallback to MarshalBytes. + o.MarshalBytes(dst) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (o *oldDirentHdr) UnmarshalUnsafe(src []byte) { + // Type oldDirentHdr doesn't have a packed layout in memory, fallback to UnmarshalBytes. + o.UnmarshalBytes(src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (o *oldDirentHdr) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + // Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay. + o.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (o *oldDirentHdr) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return o.CopyOutN(cc, addr, o.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (o *oldDirentHdr) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + // Type oldDirentHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + o.UnmarshalBytes(buf) // escapes: fallback. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (o *oldDirentHdr) WriteTo(writer io.Writer) (int64, error) { + // Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, o.SizeBytes()) + o.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (r *rlimit64) SizeBytes() int { + return 16 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (r *rlimit64) MarshalBytes(dst []byte) { + hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.Cur)) + dst = dst[8:] + hostarch.ByteOrder.PutUint64(dst[:8], uint64(r.Max)) + dst = dst[8:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (r *rlimit64) UnmarshalBytes(src []byte) { + r.Cur = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + r.Max = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (r *rlimit64) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. 
+func (r *rlimit64) MarshalUnsafe(dst []byte) { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(r), uintptr(r.SizeBytes())) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (r *rlimit64) UnmarshalUnsafe(src []byte) { + gohacks.Memmove(unsafe.Pointer(r), unsafe.Pointer(&src[0]), uintptr(r.SizeBytes())) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (r *rlimit64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r))) + hdr.Len = r.SizeBytes() + hdr.Cap = r.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that r + // must live until the use above. + runtime.KeepAlive(r) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (r *rlimit64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return r.CopyOutN(cc, addr, r.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (r *rlimit64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r))) + hdr.Len = r.SizeBytes() + hdr.Cap = r.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that r + // must live until the use above. + runtime.KeepAlive(r) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (r *rlimit64) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r))) + hdr.Len = r.SizeBytes() + hdr.Cap = r.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that r + // must live until the use above. + runtime.KeepAlive(r) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (s *SchedParam) SizeBytes() int { + return 4 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (s *SchedParam) MarshalBytes(dst []byte) { + hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.schedPriority)) + dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (s *SchedParam) UnmarshalBytes(src []byte) { + s.schedPriority = int32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (s *SchedParam) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (s *SchedParam) MarshalUnsafe(dst []byte) { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes())) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (s *SchedParam) UnmarshalUnsafe(src []byte) { + gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes())) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. 
+//go:nosplit +func (s *SchedParam) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (s *SchedParam) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return s.CopyOutN(cc, addr, s.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (s *SchedParam) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (s *SchedParam) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (u *userSockFprog) SizeBytes() int { + return 10 + + 1*6 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (u *userSockFprog) MarshalBytes(dst []byte) { + hostarch.ByteOrder.PutUint16(dst[:2], uint16(u.Len)) + dst = dst[2:] + // Padding: dst[:sizeof(byte)*6] ~= [6]byte{0} + dst = dst[1*(6):] + hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Filter)) + dst = dst[8:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (u *userSockFprog) UnmarshalBytes(src []byte) { + u.Len = uint16(hostarch.ByteOrder.Uint16(src[:2])) + src = src[2:] + // Padding: ~ copy([6]byte(u._), src[:sizeof(byte)*6]) + src = src[1*(6):] + u.Filter = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (u *userSockFprog) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (u *userSockFprog) MarshalUnsafe(dst []byte) { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(u), uintptr(u.SizeBytes())) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (u *userSockFprog) UnmarshalUnsafe(src []byte) { + gohacks.Memmove(unsafe.Pointer(u), unsafe.Pointer(&src[0]), uintptr(u.SizeBytes())) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. 
+//go:nosplit +func (u *userSockFprog) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (u *userSockFprog) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return u.CopyOutN(cc, addr, u.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (u *userSockFprog) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (u *userSockFprog) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (m *MessageHeader64) SizeBytes() int { + return 56 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (m *MessageHeader64) MarshalBytes(dst []byte) { + hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Name)) + dst = dst[8:] + hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Iov)) + dst = dst[8:] + hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen)) + dst = dst[8:] + hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Control)) + dst = dst[8:] + hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen)) + dst = dst[8:] + hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.Flags)) + dst = dst[4:] + // Padding: dst[:sizeof(int32)] ~= int32(0) + dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
+func (m *MessageHeader64) UnmarshalBytes(src []byte) { + m.Name = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.NameLen = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + m.Iov = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.IovLen = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.Control = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.ControlLen = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.Flags = int32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ int32 ~= src[:sizeof(int32)] + src = src[4:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (m *MessageHeader64) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (m *MessageHeader64) MarshalUnsafe(dst []byte) { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(m), uintptr(m.SizeBytes())) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (m *MessageHeader64) UnmarshalUnsafe(src []byte) { + gohacks.Memmove(unsafe.Pointer(m), unsafe.Pointer(&src[0]), uintptr(m.SizeBytes())) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m))) + hdr.Len = m.SizeBytes() + hdr.Cap = m.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that m + // must live until the use above. + runtime.KeepAlive(m) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return m.CopyOutN(cc, addr, m.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m))) + hdr.Len = m.SizeBytes() + hdr.Cap = m.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that m + // must live until the use above. + runtime.KeepAlive(m) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m))) + hdr.Len = m.SizeBytes() + hdr.Cap = m.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that m + // must live until the use above. + runtime.KeepAlive(m) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. 
+func (m *multipleMessageHeader64) SizeBytes() int { + return 8 + + (*MessageHeader64)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (m *multipleMessageHeader64) MarshalBytes(dst []byte) { + m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()]) + dst = dst[m.msgHdr.SizeBytes():] + hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen)) + dst = dst[4:] + // Padding: dst[:sizeof(int32)] ~= int32(0) + dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) { + m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()]) + src = src[m.msgHdr.SizeBytes():] + m.msgLen = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ int32 ~= src[:sizeof(int32)] + src = src[4:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (m *multipleMessageHeader64) Packed() bool { + return m.msgHdr.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) { + if m.msgHdr.Packed() { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(m), uintptr(m.SizeBytes())) + } else { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes. + m.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) { + if m.msgHdr.Packed() { + gohacks.Memmove(unsafe.Pointer(m), unsafe.Pointer(&src[0]), uintptr(m.SizeBytes())) + } else { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. + m.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. + m.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m))) + hdr.Len = m.SizeBytes() + hdr.Cap = m.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that m + // must live until the use above. + runtime.KeepAlive(m) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return m.CopyOutN(cc, addr, m.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + m.UnmarshalBytes(buf) // escapes: fallback. 
+ return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m))) + hdr.Len = m.SizeBytes() + hdr.Cap = m.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that m + // must live until the use above. + runtime.KeepAlive(m) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, m.SizeBytes()) + m.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m))) + hdr.Len = m.SizeBytes() + hdr.Cap = m.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that m + // must live until the use above. + runtime.KeepAlive(m) // escapes: replaced by intrinsic. + return int64(length), err +} + diff --git a/pkg/sentry/syscalls/linux/linux_amd64_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/linux_amd64_abi_autogen_unsafe.go new file mode 100644 index 000000000..8238cf9c8 --- /dev/null +++ b/pkg/sentry/syscalls/linux/linux_amd64_abi_autogen_unsafe.go @@ -0,0 +1,17 @@ +// Automatically generated marshal implementation. See tools/go_marshal. + +// If there are issues with build tag aggregation, see +// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here +// come from the input set of files used to generate this file. This input set +// is filtered based on pre-defined file suffixes related to build tags, see +// tools/defs.bzl:calculate_sets(). + +// +build amd64 +// +build amd64 +// +build amd64 + +package linux + +import ( +) + diff --git a/pkg/sentry/syscalls/linux/linux_amd64_state_autogen.go b/pkg/sentry/syscalls/linux/linux_amd64_state_autogen.go new file mode 100644 index 000000000..b3e3be0b6 --- /dev/null +++ b/pkg/sentry/syscalls/linux/linux_amd64_state_autogen.go @@ -0,0 +1,7 @@ +// automatically generated by stateify. + +// +build amd64 +// +build amd64 +// +build amd64 + +package linux diff --git a/pkg/sentry/syscalls/linux/linux_arm64_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/linux_arm64_abi_autogen_unsafe.go new file mode 100644 index 000000000..6f0f546ea --- /dev/null +++ b/pkg/sentry/syscalls/linux/linux_arm64_abi_autogen_unsafe.go @@ -0,0 +1,17 @@ +// Automatically generated marshal implementation. See tools/go_marshal. + +// If there are issues with build tag aggregation, see +// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here +// come from the input set of files used to generate this file. This input set +// is filtered based on pre-defined file suffixes related to build tags, see +// tools/defs.bzl:calculate_sets(). 
+ +// +build arm64 +// +build arm64 +// +build arm64 + +package linux + +import ( +) + diff --git a/pkg/sentry/syscalls/linux/linux_arm64_state_autogen.go b/pkg/sentry/syscalls/linux/linux_arm64_state_autogen.go new file mode 100644 index 000000000..f03e36ddc --- /dev/null +++ b/pkg/sentry/syscalls/linux/linux_arm64_state_autogen.go @@ -0,0 +1,7 @@ +// automatically generated by stateify. + +// +build arm64 +// +build arm64 +// +build arm64 + +package linux diff --git a/pkg/sentry/syscalls/linux/linux_state_autogen.go b/pkg/sentry/syscalls/linux/linux_state_autogen.go new file mode 100644 index 000000000..1f55db893 --- /dev/null +++ b/pkg/sentry/syscalls/linux/linux_state_autogen.go @@ -0,0 +1,112 @@ +// automatically generated by stateify. + +package linux + +import ( + "gvisor.dev/gvisor/pkg/state" +) + +func (f *futexWaitRestartBlock) StateTypeName() string { + return "pkg/sentry/syscalls/linux.futexWaitRestartBlock" +} + +func (f *futexWaitRestartBlock) StateFields() []string { + return []string{ + "duration", + "addr", + "private", + "val", + "mask", + } +} + +func (f *futexWaitRestartBlock) beforeSave() {} + +// +checklocksignore +func (f *futexWaitRestartBlock) StateSave(stateSinkObject state.Sink) { + f.beforeSave() + stateSinkObject.Save(0, &f.duration) + stateSinkObject.Save(1, &f.addr) + stateSinkObject.Save(2, &f.private) + stateSinkObject.Save(3, &f.val) + stateSinkObject.Save(4, &f.mask) +} + +func (f *futexWaitRestartBlock) afterLoad() {} + +// +checklocksignore +func (f *futexWaitRestartBlock) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, &f.duration) + stateSourceObject.Load(1, &f.addr) + stateSourceObject.Load(2, &f.private) + stateSourceObject.Load(3, &f.val) + stateSourceObject.Load(4, &f.mask) +} + +func (p *pollRestartBlock) StateTypeName() string { + return "pkg/sentry/syscalls/linux.pollRestartBlock" +} + +func (p *pollRestartBlock) StateFields() []string { + return []string{ + "pfdAddr", + "nfds", + "timeout", + } +} + +func (p *pollRestartBlock) beforeSave() {} + +// +checklocksignore +func (p *pollRestartBlock) StateSave(stateSinkObject state.Sink) { + p.beforeSave() + stateSinkObject.Save(0, &p.pfdAddr) + stateSinkObject.Save(1, &p.nfds) + stateSinkObject.Save(2, &p.timeout) +} + +func (p *pollRestartBlock) afterLoad() {} + +// +checklocksignore +func (p *pollRestartBlock) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, &p.pfdAddr) + stateSourceObject.Load(1, &p.nfds) + stateSourceObject.Load(2, &p.timeout) +} + +func (n *clockNanosleepRestartBlock) StateTypeName() string { + return "pkg/sentry/syscalls/linux.clockNanosleepRestartBlock" +} + +func (n *clockNanosleepRestartBlock) StateFields() []string { + return []string{ + "c", + "duration", + "rem", + } +} + +func (n *clockNanosleepRestartBlock) beforeSave() {} + +// +checklocksignore +func (n *clockNanosleepRestartBlock) StateSave(stateSinkObject state.Sink) { + n.beforeSave() + stateSinkObject.Save(0, &n.c) + stateSinkObject.Save(1, &n.duration) + stateSinkObject.Save(2, &n.rem) +} + +func (n *clockNanosleepRestartBlock) afterLoad() {} + +// +checklocksignore +func (n *clockNanosleepRestartBlock) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, &n.c) + stateSourceObject.Load(1, &n.duration) + stateSourceObject.Load(2, &n.rem) +} + +func init() { + state.Register((*futexWaitRestartBlock)(nil)) + state.Register((*pollRestartBlock)(nil)) + state.Register((*clockNanosleepRestartBlock)(nil)) +} diff --git 
a/pkg/sentry/syscalls/linux/vfs2/BUILD b/pkg/sentry/syscalls/linux/vfs2/BUILD deleted file mode 100644 index 5ce0bc714..000000000 --- a/pkg/sentry/syscalls/linux/vfs2/BUILD +++ /dev/null @@ -1,79 +0,0 @@ -load("//tools:defs.bzl", "go_library") - -package(licenses = ["notice"]) - -go_library( - name = "vfs2", - srcs = [ - "aio.go", - "epoll.go", - "eventfd.go", - "execve.go", - "fd.go", - "filesystem.go", - "fscontext.go", - "getdents.go", - "inotify.go", - "ioctl.go", - "lock.go", - "memfd.go", - "mmap.go", - "mount.go", - "path.go", - "pipe.go", - "poll.go", - "read_write.go", - "setstat.go", - "signal.go", - "socket.go", - "splice.go", - "stat.go", - "stat_amd64.go", - "stat_arm64.go", - "sync.go", - "timerfd.go", - "vfs2.go", - "xattr.go", - ], - marshal = True, - visibility = ["//:sandbox"], - deps = [ - "//pkg/abi/linux", - "//pkg/bits", - "//pkg/context", - "//pkg/fspath", - "//pkg/gohacks", - "//pkg/hostarch", - "//pkg/log", - "//pkg/marshal", - "//pkg/marshal/primitive", - "//pkg/sentry/arch", - "//pkg/sentry/fs/lock", - "//pkg/sentry/fsbridge", - "//pkg/sentry/fsimpl/eventfd", - "//pkg/sentry/fsimpl/pipefs", - "//pkg/sentry/fsimpl/signalfd", - "//pkg/sentry/fsimpl/timerfd", - "//pkg/sentry/fsimpl/tmpfs", - "//pkg/sentry/kernel", - "//pkg/sentry/kernel/auth", - "//pkg/sentry/kernel/fasync", - "//pkg/sentry/kernel/pipe", - "//pkg/sentry/kernel/time", - "//pkg/sentry/limits", - "//pkg/sentry/loader", - "//pkg/sentry/memmap", - "//pkg/sentry/mm", - "//pkg/sentry/socket", - "//pkg/sentry/socket/control", - "//pkg/sentry/socket/unix/transport", - "//pkg/sentry/syscalls", - "//pkg/sentry/syscalls/linux", - "//pkg/sentry/vfs", - "//pkg/sync", - "//pkg/syserr", - "//pkg/syserror", - "//pkg/usermem", - "//pkg/waiter", - ], -) diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go new file mode 100644 index 000000000..0525fce4c --- /dev/null +++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go @@ -0,0 +1,372 @@ +// Automatically generated marshal implementation. See tools/go_marshal. + +// If there are issues with build tag aggregation, see +// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here +// come from the input set of files used to generate this file. This input set +// is filtered based on pre-defined file suffixes related to build tags, see +// tools/defs.bzl:calculate_sets(). + +package vfs2 + +import ( + "gvisor.dev/gvisor/pkg/gohacks" + "gvisor.dev/gvisor/pkg/hostarch" + "gvisor.dev/gvisor/pkg/marshal" + "io" + "reflect" + "runtime" + "unsafe" +) + +// Marshallable types used by this file. +var _ marshal.Marshallable = (*MessageHeader64)(nil) +var _ marshal.Marshallable = (*multipleMessageHeader64)(nil) +var _ marshal.Marshallable = (*sigSetWithSize)(nil) + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (s *sigSetWithSize) SizeBytes() int { + return 16 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (s *sigSetWithSize) MarshalBytes(dst []byte) { + hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.sigsetAddr)) + dst = dst[8:] + hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.sizeofSigset)) + dst = dst[8:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
+func (s *sigSetWithSize) UnmarshalBytes(src []byte) { + s.sigsetAddr = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + s.sizeofSigset = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (s *sigSetWithSize) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (s *sigSetWithSize) MarshalUnsafe(dst []byte) { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes())) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (s *sigSetWithSize) UnmarshalUnsafe(src []byte) { + gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes())) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (s *sigSetWithSize) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (s *sigSetWithSize) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return s.CopyOutN(cc, addr, s.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (s *sigSetWithSize) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (s *sigSetWithSize) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (m *MessageHeader64) SizeBytes() int { + return 56 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. 
+func (m *MessageHeader64) MarshalBytes(dst []byte) { + hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Name)) + dst = dst[8:] + hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Iov)) + dst = dst[8:] + hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen)) + dst = dst[8:] + hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.Control)) + dst = dst[8:] + hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen)) + dst = dst[8:] + hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.Flags)) + dst = dst[4:] + // Padding: dst[:sizeof(int32)] ~= int32(0) + dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (m *MessageHeader64) UnmarshalBytes(src []byte) { + m.Name = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.NameLen = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + m.Iov = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.IovLen = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.Control = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.ControlLen = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.Flags = int32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ int32 ~= src[:sizeof(int32)] + src = src[4:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (m *MessageHeader64) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (m *MessageHeader64) MarshalUnsafe(dst []byte) { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(m), uintptr(m.SizeBytes())) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (m *MessageHeader64) UnmarshalUnsafe(src []byte) { + gohacks.Memmove(unsafe.Pointer(m), unsafe.Pointer(&src[0]), uintptr(m.SizeBytes())) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m))) + hdr.Len = m.SizeBytes() + hdr.Cap = m.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that m + // must live until the use above. + runtime.KeepAlive(m) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return m.CopyOutN(cc, addr, m.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m))) + hdr.Len = m.SizeBytes() + hdr.Cap = m.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that m + // must live until the use above. + runtime.KeepAlive(m) // escapes: replaced by intrinsic. 
+ return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m))) + hdr.Len = m.SizeBytes() + hdr.Cap = m.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that m + // must live until the use above. + runtime.KeepAlive(m) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (m *multipleMessageHeader64) SizeBytes() int { + return 8 + + (*MessageHeader64)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (m *multipleMessageHeader64) MarshalBytes(dst []byte) { + m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()]) + dst = dst[m.msgHdr.SizeBytes():] + hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen)) + dst = dst[4:] + // Padding: dst[:sizeof(int32)] ~= int32(0) + dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) { + m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()]) + src = src[m.msgHdr.SizeBytes():] + m.msgLen = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ int32 ~= src[:sizeof(int32)] + src = src[4:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (m *multipleMessageHeader64) Packed() bool { + return m.msgHdr.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) { + if m.msgHdr.Packed() { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(m), uintptr(m.SizeBytes())) + } else { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes. + m.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) { + if m.msgHdr.Packed() { + gohacks.Memmove(unsafe.Pointer(m), unsafe.Pointer(&src[0]), uintptr(m.SizeBytes())) + } else { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. + m.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. + m.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m))) + hdr.Len = m.SizeBytes() + hdr.Cap = m.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that m + // must live until the use above. + runtime.KeepAlive(m) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. 
+//go:nosplit +func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return m.CopyOutN(cc, addr, m.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + m.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m))) + hdr.Len = m.SizeBytes() + hdr.Cap = m.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that m + // must live until the use above. + runtime.KeepAlive(m) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, m.SizeBytes()) + m.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m))) + hdr.Len = m.SizeBytes() + hdr.Cap = m.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that m + // must live until the use above. + runtime.KeepAlive(m) // escapes: replaced by intrinsic. + return int64(length), err +} + diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_abi_autogen_unsafe.go new file mode 100644 index 000000000..f07118503 --- /dev/null +++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_abi_autogen_unsafe.go @@ -0,0 +1,15 @@ +// Automatically generated marshal implementation. See tools/go_marshal. + +// If there are issues with build tag aggregation, see +// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here +// come from the input set of files used to generate this file. This input set +// is filtered based on pre-defined file suffixes related to build tags, see +// tools/defs.bzl:calculate_sets(). + +// +build amd64 + +package vfs2 + +import ( +) + diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_state_autogen.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_state_autogen.go new file mode 100644 index 000000000..7f5556f9c --- /dev/null +++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_amd64_state_autogen.go @@ -0,0 +1,5 @@ +// automatically generated by stateify. 
+ +// +build amd64 + +package vfs2 diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_abi_autogen_unsafe.go new file mode 100644 index 000000000..ea1f77a0f --- /dev/null +++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_abi_autogen_unsafe.go @@ -0,0 +1,15 @@ +// Automatically generated marshal implementation. See tools/go_marshal. + +// If there are issues with build tag aggregation, see +// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here +// come from the input set of files used to generate this file. This input set +// is filtered based on pre-defined file suffixes related to build tags, see +// tools/defs.bzl:calculate_sets(). + +// +build arm64 + +package vfs2 + +import ( +) + diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_state_autogen.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_state_autogen.go new file mode 100644 index 000000000..3e3862045 --- /dev/null +++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_arm64_state_autogen.go @@ -0,0 +1,5 @@ +// automatically generated by stateify. + +// +build arm64 + +package vfs2 diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_state_autogen.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_state_autogen.go new file mode 100644 index 000000000..d02c8467f --- /dev/null +++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_state_autogen.go @@ -0,0 +1,42 @@ +// automatically generated by stateify. + +package vfs2 + +import ( + "gvisor.dev/gvisor/pkg/state" +) + +func (p *pollRestartBlock) StateTypeName() string { + return "pkg/sentry/syscalls/linux/vfs2.pollRestartBlock" +} + +func (p *pollRestartBlock) StateFields() []string { + return []string{ + "pfdAddr", + "nfds", + "timeout", + } +} + +func (p *pollRestartBlock) beforeSave() {} + +// +checklocksignore +func (p *pollRestartBlock) StateSave(stateSinkObject state.Sink) { + p.beforeSave() + stateSinkObject.Save(0, &p.pfdAddr) + stateSinkObject.Save(1, &p.nfds) + stateSinkObject.Save(2, &p.timeout) +} + +func (p *pollRestartBlock) afterLoad() {} + +// +checklocksignore +func (p *pollRestartBlock) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, &p.pfdAddr) + stateSourceObject.Load(1, &p.nfds) + stateSourceObject.Load(2, &p.timeout) +} + +func init() { + state.Register((*pollRestartBlock)(nil)) +} |
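
The CopyIn/CopyOut methods generated above are what the syscall implementations in these packages call to move ABI structures across the user/kernel boundary. Below is a minimal usage sketch, assuming (as elsewhere in the sentry) that *kernel.Task satisfies marshal.CopyContext; the helper name copyInRlimit is hypothetical and not part of this diff.

package linux

import (
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/sentry/kernel"
)

// copyInRlimit is a hypothetical helper illustrating how syscall code uses
// the generated methods: the task acts as the marshal.CopyContext and addr
// is the untrusted user pointer.
func copyInRlimit(t *kernel.Task, addr hostarch.Addr) (rlimit64, error) {
	var rlim rlimit64
	// rlimit64 reports Packed() == true, so the generated CopyIn copies the
	// user bytes directly over the struct's memory rather than decoding
	// field by field.
	if _, err := rlim.CopyIn(t, addr); err != nil {
		return rlimit64{}, err
	}
	return rlim, nil
}

Non-packed types such as direntHdr take the fallback path visible above instead: CopyIn reads into a scratch buffer and then calls UnmarshalBytes.
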