Diffstat (limited to 'pkg/abi')
-rwxr-xr-x  pkg/abi/linux/file_amd64.go                        2
-rwxr-xr-x  pkg/abi/linux/file_arm64.go                        2
-rwxr-xr-x  pkg/abi/linux/linux_abi_autogen_unsafe.go        193
-rwxr-xr-x  pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go  182
-rwxr-xr-x  pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go  186
-rw-r--r--  pkg/abi/linux/time.go                              2
6 files changed, 567 insertions(+), 0 deletions(-)
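
Context for the diff below: the `+marshal` annotations added to Stat and Timespec direct tools/go_marshal to generate the marshal.Marshallable implementations that make up the bulk of this change. A hedged sketch of the API a caller then gets (the bumpTimespec helper is hypothetical and not part of this change; marshal.Task and usermem.Addr are the real types the generated methods take):

    package linux

    import (
        "gvisor.dev/gvisor/pkg/usermem"
        "gvisor.dev/gvisor/tools/go_marshal/marshal"
    )

    // bumpTimespec is a hypothetical helper: it reads a struct
    // timespec from user memory, bumps the seconds field, and
    // writes the result back.
    func bumpTimespec(task marshal.Task, addr usermem.Addr) error {
        var ts Timespec
        if _, err := ts.CopyIn(task, addr); err != nil {
            return err
        }
        ts.Sec++
        _, err := ts.CopyOut(task, addr)
        return err
    }
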
diff --git a/pkg/abi/linux/file_amd64.go b/pkg/abi/linux/file_amd64.go
index 8693d49c8..6b72364ea 100755
--- a/pkg/abi/linux/file_amd64.go
+++ b/pkg/abi/linux/file_amd64.go
@@ -25,6 +25,8 @@ const (
)
// Stat represents struct stat.
+//
+// +marshal
type Stat struct {
Dev uint64
Ino uint64
diff --git a/pkg/abi/linux/file_arm64.go b/pkg/abi/linux/file_arm64.go
index ea3adc5f5..6492c9038 100755
--- a/pkg/abi/linux/file_arm64.go
+++ b/pkg/abi/linux/file_arm64.go
@@ -25,6 +25,8 @@ const (
)
// Stat represents struct stat.
+//
+// +marshal
type Stat struct {
Dev uint64
Ino uint64
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
new file mode 100755
index 000000000..d8d4cb088
--- /dev/null
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -0,0 +1,193 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/safecopy"
+ "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/tools/go_marshal/marshal"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*RSeqCriticalSection)(nil)
+var _ marshal.Marshallable = (*Timespec)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (r *RSeqCriticalSection) SizeBytes() int {
+ return 32
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (r *RSeqCriticalSection) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(r.Version))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(r.Flags))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(r.Start))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(r.PostCommitOffset))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(r.Abort))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (r *RSeqCriticalSection) UnmarshalBytes(src []byte) {
+ r.Version = usermem.ByteOrder.Uint32(src[:4])
+ src = src[4:]
+ r.Flags = usermem.ByteOrder.Uint32(src[:4])
+ src = src[4:]
+ r.Start = usermem.ByteOrder.Uint64(src[:8])
+ src = src[8:]
+ r.PostCommitOffset = usermem.ByteOrder.Uint64(src[:8])
+ src = src[8:]
+ r.Abort = usermem.ByteOrder.Uint64(src[:8])
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+func (r *RSeqCriticalSection) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (r *RSeqCriticalSection) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(r))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (r *RSeqCriticalSection) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(r), src)
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (r *RSeqCriticalSection) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Bypass escape analysis on r. The no-op arithmetic operation on the
+ // pointer makes the compiler think val doesn't depend on r.
+ // See src/runtime/stubs.go:noescape() in the golang toolchain.
+ ptr := unsafe.Pointer(r)
+ val := uintptr(ptr)
+ val = val^0
+
+ // Construct a slice backed by r's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = val
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ len, err := task.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until after the CopyOutBytes.
+ runtime.KeepAlive(r)
+ return len, err
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+func (r *RSeqCriticalSection) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Bypass escape analysis on r. The no-op arithmetic operation on the
+ // pointer makes the compiler think val doesn't depend on r.
+ // See src/runtime/stubs.go:noescape() in the golang toolchain.
+ ptr := unsafe.Pointer(r)
+ val := uintptr(ptr)
+ val = val^0
+
+ // Construct a slice backed by r's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = val
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ len, err := task.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until after the CopyInBytes.
+ runtime.KeepAlive(r)
+ return len, err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (t *Timespec) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (t *Timespec) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.Sec))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.Nsec))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (t *Timespec) UnmarshalBytes(src []byte) {
+ t.Sec = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.Nsec = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+func (t *Timespec) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (t *Timespec) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(t))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (t *Timespec) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(t), src)
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (t *Timespec) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Bypass escape analysis on t. The no-op arithmetic operation on the
+ // pointer makes the compiler think val doesn't depend on t.
+ // See src/runtime/stubs.go:noescape() in the golang toolchain.
+ ptr := unsafe.Pointer(t)
+ val := uintptr(ptr)
+ val = val^0
+
+ // Construct a slice backed by t's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = val
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ len, err := task.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until after the CopyOutBytes.
+ runtime.KeepAlive(t)
+ return len, err
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+func (t *Timespec) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Bypass escape analysis on t. The no-op arithmetic operation on the
+ // pointer makes the compiler think val doesn't depend on t.
+ // See src/runtime/stubs.go:noescape() in the golang toolchain.
+ ptr := unsafe.Pointer(t)
+ val := uintptr(ptr)
+ val = val^0
+
+ // Construct a slice backed by t's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = val
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ len, err := task.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until after the CopyInBytes.
+ runtime.KeepAlive(t)
+ return len, err
+}
+
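
Every CopyOut/CopyIn body above leans on the same trick: launder the receiver pointer through a uintptr so escape analysis does not force the receiver to the heap, then alias its memory as a []byte via reflect.SliceHeader, with runtime.KeepAlive pinning the receiver until the copy finishes. A minimal standalone sketch of that pattern, assuming nothing beyond the standard library (the pair type is hypothetical):

    package main

    import (
        "fmt"
        "reflect"
        "runtime"
        "unsafe"
    )

    type pair struct {
        A, B uint64
    }

    func main() {
        p := &pair{A: 1, B: 2}

        // No-op arithmetic hides the dependency on p from escape
        // analysis, mirroring the generated val = val^0.
        val := uintptr(unsafe.Pointer(p)) ^ 0

        // Alias p's memory as a byte slice without copying.
        var buf []byte
        hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
        hdr.Data = val
        hdr.Len = int(unsafe.Sizeof(*p))
        hdr.Cap = int(unsafe.Sizeof(*p))

        fmt.Println(buf) // the 16 raw bytes backing p

        // p must stay live until the last use of buf.
        runtime.KeepAlive(p)
    }
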
diff --git a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
new file mode 100755
index 000000000..c8162472d
--- /dev/null
+++ b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
@@ -0,0 +1,182 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// +build amd64
+
+package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/safecopy"
+ "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/tools/go_marshal/marshal"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*Stat)(nil)
+var _ marshal.Marshallable = (*Timespec)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *Stat) SizeBytes() int {
+ return 96 +
+ s.ATime.SizeBytes() +
+ s.MTime.SizeBytes() +
+ s.CTime.SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *Stat) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Dev))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Nlink))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Mode))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(int32)] ~= int32(0)
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rdev))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Blksize))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
+ dst = dst[8:]
+ s.ATime.MarshalBytes(dst[:s.ATime.SizeBytes()])
+ dst = dst[s.ATime.SizeBytes():]
+ s.MTime.MarshalBytes(dst[:s.MTime.SizeBytes()])
+ dst = dst[s.MTime.SizeBytes():]
+ s.CTime.MarshalBytes(dst[:s.CTime.SizeBytes()])
+ dst = dst[s.CTime.SizeBytes():]
+ // Padding: dst[:sizeof(int64)*3] ~= [3]int64{0}
+ dst = dst[24:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *Stat) UnmarshalBytes(src []byte) {
+ s.Dev = usermem.ByteOrder.Uint64(src[:8])
+ src = src[8:]
+ s.Ino = usermem.ByteOrder.Uint64(src[:8])
+ src = src[8:]
+ s.Nlink = usermem.ByteOrder.Uint64(src[:8])
+ src = src[8:]
+ s.Mode = usermem.ByteOrder.Uint32(src[:4])
+ src = src[4:]
+ s.UID = usermem.ByteOrder.Uint32(src[:4])
+ src = src[4:]
+ s.GID = usermem.ByteOrder.Uint32(src[:4])
+ src = src[4:]
+ // Padding: var _ int32 ~= src[:sizeof(int32)]
+ src = src[4:]
+ s.Rdev = usermem.ByteOrder.Uint64(src[:8])
+ src = src[8:]
+ s.Size = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Blksize = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Blocks = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.ATime.UnmarshalBytes(src[:s.ATime.SizeBytes()])
+ src = src[s.ATime.SizeBytes():]
+ s.MTime.UnmarshalBytes(src[:s.MTime.SizeBytes()])
+ src = src[s.MTime.SizeBytes():]
+ s.CTime.UnmarshalBytes(src[:s.CTime.SizeBytes()])
+ src = src[s.CTime.SizeBytes():]
+ // Padding: ~ copy([3]int64(s._), src[:sizeof(int64)*3])
+ src = src[24:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+func (s *Stat) Packed() bool {
+ return s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *Stat) MarshalUnsafe(dst []byte) {
+ if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+ } else {
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *Stat) UnmarshalUnsafe(src []byte) {
+ if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+ } else {
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (s *Stat) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ if !s.ATime.Packed() || !s.MTime.Packed() || !s.CTime.Packed() {
+ // Type Stat doesn't have a packed layout in memory; fall back to MarshalBytes.
+ buf := task.CopyScratchBuffer(s.SizeBytes())
+ s.MarshalBytes(buf)
+ return task.CopyOutBytes(addr, buf)
+ }
+
+ // Bypass escape analysis on s. The no-op arithmetic operation on the
+ // pointer makes the compiler think val doesn't depend on s.
+ // See src/runtime/stubs.go:noescape() in the golang toolchain.
+ ptr := unsafe.Pointer(s)
+ val := uintptr(ptr)
+ val = val^0
+
+ // Construct a slice backed by s's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = val
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ len, err := task.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until after the CopyOutBytes.
+ runtime.KeepAlive(s)
+ return len, err
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ if !s.ATime.Packed() || !s.MTime.Packed() || !s.CTime.Packed() {
+ // Type Stat doesn't have a packed layout in memory; fall back to UnmarshalBytes.
+ buf := task.CopyScratchBuffer(s.SizeBytes())
+ n, err := task.CopyInBytes(addr, buf)
+ if err != nil {
+ return n, err
+ }
+ s.UnmarshalBytes(buf)
+ return n, nil
+ }
+
+ // Bypass escape analysis on s. The no-op arithmetic operation on the
+ // pointer makes the compiler think val doesn't depend on s.
+ // See src/runtime/stubs.go:noescape() in the golang toolchain.
+ ptr := unsafe.Pointer(s)
+ val := uintptr(ptr)
+ val = val^0
+
+ // Construct a slice backed by s's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = val
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ len, err := task.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until after the CopyInBytes.
+ runtime.KeepAlive(s)
+ return len, err
+}
+
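
On amd64 the fixed-size fields above account for 96 bytes and each Timespec for 16, so SizeBytes() returns 96 + 3*16 = 144, and that figure must equal unsafe.Sizeof(Stat{}) for the safecopy fast path in MarshalUnsafe to be sound. A hedged in-package test sketch of that invariant (the test itself is illustrative, not part of this change):

    package linux

    import (
        "testing"
        "unsafe"
    )

    // Sketch: MarshalUnsafe's bulk-copy path is only correct if the
    // generated wire size equals the in-memory size, i.e. there is
    // no padding the generator did not account for.
    func TestStatSizeBytes(t *testing.T) {
        var s Stat
        if got, want := s.SizeBytes(), int(unsafe.Sizeof(s)); got != want {
            t.Errorf("SizeBytes() = %d, want %d", got, want)
        }
    }
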
diff --git a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
new file mode 100755
index 000000000..118b19ac0
--- /dev/null
+++ b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
@@ -0,0 +1,186 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// +build arm64
+
+package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/safecopy"
+ "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/tools/go_marshal/marshal"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*Stat)(nil)
+var _ marshal.Marshallable = (*Timespec)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *Stat) SizeBytes() int {
+ return 80 +
+ s.ATime.SizeBytes() +
+ s.MTime.SizeBytes() +
+ s.CTime.SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *Stat) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Dev))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Mode))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Nlink))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rdev))
+ dst = dst[8:]
+ // Padding: dst[:sizeof(uint64)] ~= uint64(0)
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Blksize))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(int32)] ~= int32(0)
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
+ dst = dst[8:]
+ s.ATime.MarshalBytes(dst[:s.ATime.SizeBytes()])
+ dst = dst[s.ATime.SizeBytes():]
+ s.MTime.MarshalBytes(dst[:s.MTime.SizeBytes()])
+ dst = dst[s.MTime.SizeBytes():]
+ s.CTime.MarshalBytes(dst[:s.CTime.SizeBytes()])
+ dst = dst[s.CTime.SizeBytes():]
+ // Padding: dst[:sizeof(int32)*2] ~= [2]int32{0}
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *Stat) UnmarshalBytes(src []byte) {
+ s.Dev = usermem.ByteOrder.Uint64(src[:8])
+ src = src[8:]
+ s.Ino = usermem.ByteOrder.Uint64(src[:8])
+ src = src[8:]
+ s.Mode = usermem.ByteOrder.Uint32(src[:4])
+ src = src[4:]
+ s.Nlink = usermem.ByteOrder.Uint32(src[:4])
+ src = src[4:]
+ s.UID = usermem.ByteOrder.Uint32(src[:4])
+ src = src[4:]
+ s.GID = usermem.ByteOrder.Uint32(src[:4])
+ src = src[4:]
+ s.Rdev = usermem.ByteOrder.Uint64(src[:8])
+ src = src[8:]
+ // Padding: var _ uint64 ~= src[:sizeof(uint64)]
+ src = src[8:]
+ s.Size = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Blksize = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ int32 ~= src[:sizeof(int32)]
+ src = src[4:]
+ s.Blocks = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.ATime.UnmarshalBytes(src[:s.ATime.SizeBytes()])
+ src = src[s.ATime.SizeBytes():]
+ s.MTime.UnmarshalBytes(src[:s.MTime.SizeBytes()])
+ src = src[s.MTime.SizeBytes():]
+ s.CTime.UnmarshalBytes(src[:s.CTime.SizeBytes()])
+ src = src[s.CTime.SizeBytes():]
+ // Padding: ~ copy([2]int32(s._), src[:sizeof(int32)*2])
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+func (s *Stat) Packed() bool {
+ return s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *Stat) MarshalUnsafe(dst []byte) {
+ if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+ } else {
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *Stat) UnmarshalUnsafe(src []byte) {
+ if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+ } else {
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (s *Stat) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ if !s.ATime.Packed() || !s.MTime.Packed() || !s.CTime.Packed() {
+ // Type Stat doesn't have a packed layout in memory; fall back to MarshalBytes.
+ buf := task.CopyScratchBuffer(s.SizeBytes())
+ s.MarshalBytes(buf)
+ return task.CopyOutBytes(addr, buf)
+ }
+
+ // Bypass escape analysis on s. The no-op arithmetic operation on the
+ // pointer makes the compiler think val doesn't depend on s.
+ // See src/runtime/stubs.go:noescape() in the golang toolchain.
+ ptr := unsafe.Pointer(s)
+ val := uintptr(ptr)
+ val = val^0
+
+ // Construct a slice backed by s's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = val
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ len, err := task.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until after the CopyOutBytes.
+ runtime.KeepAlive(s)
+ return len, err
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ if !s.ATime.Packed() || !s.MTime.Packed() || !s.CTime.Packed() {
+ // Type Stat doesn't have a packed layout in memory; fall back to UnmarshalBytes.
+ buf := task.CopyScratchBuffer(s.SizeBytes())
+ n, err := task.CopyInBytes(addr, buf)
+ if err != nil {
+ return n, err
+ }
+ s.UnmarshalBytes(buf)
+ return n, nil
+ }
+
+ // Bypass escape analysis on s. The no-op arithmetic operation on the
+ // pointer makes the compiler think val doesn't depend on s.
+ // See src/runtime/stubs.go:noescape() in the golang toolchain.
+ ptr := unsafe.Pointer(s)
+ val := uintptr(ptr)
+ val = val^0
+
+ // Construct a slice backed by s's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = val
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ len, err := task.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until after the CopyInBytes.
+ runtime.KeepAlive(s)
+ return len, err
+}
+
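
The arm64 layout differs from amd64 (Nlink and Blksize narrow to 32 bits, and the padding holes move), which is why the generator emits a separate encoder per architecture. Both encoders share the same cursor pattern: write a field, then reslice past it, treating padding as a hole to skip. A freestanding sketch of that pattern (hypothetical hdr type; encoding/binary's LittleEndian stands in for usermem.ByteOrder):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    type hdr struct {
        Dev  uint64
        Mode uint32
        _    uint32 // explicit padding keeps Size 8-aligned
        Size int64
    }

    func (h *hdr) marshalBytes(dst []byte) {
        binary.LittleEndian.PutUint64(dst[:8], h.Dev)
        dst = dst[8:]
        binary.LittleEndian.PutUint32(dst[:4], h.Mode)
        dst = dst[4:]
        // Padding: leave dst[:4] zeroed, mirroring the struct hole.
        dst = dst[4:]
        binary.LittleEndian.PutUint64(dst[:8], uint64(h.Size))
    }

    func main() {
        buf := make([]byte, 24)
        (&hdr{Dev: 8, Mode: 0644, Size: 4096}).marshalBytes(buf)
        fmt.Println(buf)
    }
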
diff --git a/pkg/abi/linux/time.go b/pkg/abi/linux/time.go
index 5c5a58cd4..e562b46d9 100644
--- a/pkg/abi/linux/time.go
+++ b/pkg/abi/linux/time.go
@@ -101,6 +101,8 @@ func NsecToTimeT(nsec int64) TimeT {
}
// Timespec represents struct timespec in <time.h>.
+//
+// +marshal
type Timespec struct {
Sec int64
Nsec int64
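
With the annotation in place, Timespec picks up the byte-level round trip generated above. A hedged in-package test sketch checking that MarshalBytes and UnmarshalBytes invert each other (illustrative only, not part of this change):

    package linux

    import "testing"

    func TestTimespecRoundTrip(t *testing.T) {
        in := Timespec{Sec: 1, Nsec: 500}
        buf := make([]byte, in.SizeBytes())
        in.MarshalBytes(buf)

        var out Timespec
        out.UnmarshalBytes(buf)
        if in != out {
            t.Errorf("round trip: got %+v, want %+v", out, in)
        }
    }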