author    gVisor bot <gvisor-bot@google.com>  2020-04-01 06:00:41 +0000
committer gVisor bot <gvisor-bot@google.com>  2020-04-01 06:00:41 +0000
commit    098281cb49d3119ea7b1f221fb6c74694b2692e7 (patch)
tree      e46134f447bb0153b935e769e20898911c29db75
parent    bbba7a4527f117b6e4a3fe64e816b3e9ef4a6300 (diff)
parent    840980aeba0b5224b13bcaadf5785ac5305a5230 (diff)

Merge release-20200323.0-45-g840980a (automated)
-rwxr-xr-x  pkg/abi/linux/linux_abi_autogen_unsafe.go                   521
-rwxr-xr-x  pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go             143
-rwxr-xr-x  pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go             151
-rw-r--r--  pkg/sentry/kernel/rseq.go                                     2
-rw-r--r--  pkg/sentry/syscalls/linux/sys_stat.go                         6
-rwxr-xr-x  pkg/sentry/syscalls/linux/vfs2/epoll.go                       4
-rwxr-xr-x  pkg/sentry/syscalls/linux/vfs2/poll.go                       14
-rwxr-xr-x  pkg/sentry/syscalls/linux/vfs2/setstat.go                     2
-rwxr-xr-x  pkg/sentry/syscalls/linux/vfs2/stat.go                       23
-rwxr-xr-x  pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go    63
-rwxr-xr-x  tools/go_marshal/marshal/marshal.go                         103
11 files changed, 458 insertions, 574 deletions
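This merge pulls in regenerated go_marshal code: CopyIn and CopyOut now report the number of bytes copied, a new CopyOutN method supports limited copy-outs, and the hand-rolled escape-analysis workaround is replaced by gohacks.Noescape. A minimal sketch of the copy-method shape implied by the generated code follows; Task and Addr here are assumed stand-ins for marshal.Task and usermem.Addr, not the real gVisor definitions.

package sketch

// Addr stands in for usermem.Addr (assumption for illustration only).
type Addr uintptr

// Task models the subset of marshal.Task the generated code relies on
// (assumed, trimmed-down interface).
type Task interface {
	CopyScratchBuffer(size int) []byte
	CopyOutBytes(addr Addr, b []byte) (int, error)
	CopyInBytes(addr Addr, b []byte) (int, error)
}

// Marshallable shows the post-change copy methods: each returns the byte
// count alongside the error, and CopyOutN permits copying only the first
// `limit` bytes of the serialized form.
type Marshallable interface {
	SizeBytes() int
	MarshalBytes(dst []byte)
	UnmarshalBytes(src []byte)
	Packed() bool
	CopyOutN(task Task, addr Addr, limit int) (int, error)
	CopyOut(task Task, addr Addr) (int, error)
	CopyIn(task Task, addr Addr) (int, error)
}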
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 6813ee817..cd9c42319 100755
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -3,6 +3,7 @@
package linux
import (
+ "gvisor.dev/gvisor/pkg/gohacks"
"gvisor.dev/gvisor/pkg/safecopy"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/tools/go_marshal/marshal"
@@ -121,12 +122,12 @@ func (s *Statx) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
func (s *Statx) Packed() bool {
- return s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed()
+ return s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (s *Statx) MarshalUnsafe(dst []byte) {
- if s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() {
+ if s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(s))
} else {
s.MarshalBytes(dst)
@@ -135,107 +136,89 @@ func (s *Statx) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *Statx) UnmarshalUnsafe(src []byte) {
- if s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+ if s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() {
safecopy.CopyOut(unsafe.Pointer(s), src)
} else {
s.UnmarshalBytes(src)
}
}
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (s *Statx) CopyOut(task marshal.Task, addr usermem.Addr) error {
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (s *Statx) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes())
s.MarshalBytes(buf)
- _, err := task.CopyOutBytes(addr, buf)
- return err
+ return task.CopyOutBytes(addr, buf[:limit])
}
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyOutBytes(addr, buf)
+ length, err := task.CopyOutBytes(addr, buf[:limit])
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyOutBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (s *Statx) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(task, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (s *Statx) CopyIn(task marshal.Task, addr usermem.Addr) error {
+func (s *Statx) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes())
- _, err := task.CopyInBytes(addr, buf)
- if err != nil {
- return err
- }
+ length, err := task.CopyInBytes(addr, buf)
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
s.UnmarshalBytes(buf)
- return nil
+ return length, err
}
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyInBytes(addr, buf)
+ length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyInBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (s *Statx) WriteTo(w io.Writer) (int64, error) {
- if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+ if !s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, s.SizeBytes())
s.MarshalBytes(buf)
- n, err := w.Write(buf)
- return int64(n), err
+ length, err := w.Write(buf)
+ return int64(length), err
}
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- len, err := w.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the Write.
+ // must live until the use above.
runtime.KeepAlive(s)
- return int64(len), err
+ return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
@@ -322,73 +305,57 @@ func (s *Statfs) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(s), src)
}
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (s *Statfs) CopyOut(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (s *Statfs) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyOutBytes(addr, buf)
+ length, err := task.CopyOutBytes(addr, buf[:limit])
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyOutBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (s *Statfs) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(task, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (s *Statfs) CopyIn(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+func (s *Statfs) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyInBytes(addr, buf)
+ length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyInBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (s *Statfs) WriteTo(w io.Writer) (int64, error) {
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- len, err := w.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the Write.
+ // must live until the use above.
runtime.KeepAlive(s)
- return int64(len), err
+ return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
@@ -439,73 +406,57 @@ func (r *RSeqCriticalSection) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(r), src)
}
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (r *RSeqCriticalSection) CopyOut(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on r. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on r.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(r)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by r's underlying memory.
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (r *RSeqCriticalSection) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
hdr.Len = r.SizeBytes()
hdr.Cap = r.SizeBytes()
- _, err := task.CopyOutBytes(addr, buf)
+ length, err := task.CopyOutBytes(addr, buf[:limit])
// Since we bypassed the compiler's escape analysis, indicate that r
- // must live until after the CopyOutBytes.
+ // must live until the use above.
runtime.KeepAlive(r)
- return err
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (r *RSeqCriticalSection) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return r.CopyOutN(task, addr, r.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (r *RSeqCriticalSection) CopyIn(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on r. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on r.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(r)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by r's underlying memory.
+func (r *RSeqCriticalSection) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
hdr.Len = r.SizeBytes()
hdr.Cap = r.SizeBytes()
- _, err := task.CopyInBytes(addr, buf)
+ length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that r
- // must live until after the CopyInBytes.
+ // must live until the use above.
runtime.KeepAlive(r)
- return err
+ return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (r *RSeqCriticalSection) WriteTo(w io.Writer) (int64, error) {
- // Bypass escape analysis on r. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on r.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(r)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by r's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
hdr.Len = r.SizeBytes()
hdr.Cap = r.SizeBytes()
- len, err := w.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that r
- // must live until after the Write.
+ // must live until the use above.
runtime.KeepAlive(r)
- return int64(len), err
+ return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
@@ -539,73 +490,57 @@ func (s *SignalSet) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(s), src)
}
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (s *SignalSet) CopyOut(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (s *SignalSet) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyOutBytes(addr, buf)
+ length, err := task.CopyOutBytes(addr, buf[:limit])
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyOutBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (s *SignalSet) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(task, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (s *SignalSet) CopyIn(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+func (s *SignalSet) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyInBytes(addr, buf)
+ length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyInBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (s *SignalSet) WriteTo(w io.Writer) (int64, error) {
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- len, err := w.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the Write.
+ // must live until the use above.
runtime.KeepAlive(s)
- return int64(len), err
+ return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
@@ -644,73 +579,57 @@ func (t *Timespec) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(t), src)
}
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (t *Timespec) CopyOut(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on t. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on t.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(t)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by t's underlying memory.
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (t *Timespec) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
hdr.Len = t.SizeBytes()
hdr.Cap = t.SizeBytes()
- _, err := task.CopyOutBytes(addr, buf)
+ length, err := task.CopyOutBytes(addr, buf[:limit])
// Since we bypassed the compiler's escape analysis, indicate that t
- // must live until after the CopyOutBytes.
+ // must live until the use above.
runtime.KeepAlive(t)
- return err
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (t *Timespec) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return t.CopyOutN(task, addr, t.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (t *Timespec) CopyIn(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on t. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on t.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(t)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by t's underlying memory.
+func (t *Timespec) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
hdr.Len = t.SizeBytes()
hdr.Cap = t.SizeBytes()
- _, err := task.CopyInBytes(addr, buf)
+ length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that t
- // must live until after the CopyInBytes.
+ // must live until the use above.
runtime.KeepAlive(t)
- return err
+ return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (t *Timespec) WriteTo(w io.Writer) (int64, error) {
- // Bypass escape analysis on t. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on t.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(t)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by t's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
hdr.Len = t.SizeBytes()
hdr.Cap = t.SizeBytes()
- len, err := w.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that t
- // must live until after the Write.
+ // must live until the use above.
runtime.KeepAlive(t)
- return int64(len), err
+ return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
@@ -749,73 +668,57 @@ func (t *Timeval) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(t), src)
}
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (t *Timeval) CopyOut(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on t. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on t.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(t)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by t's underlying memory.
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (t *Timeval) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
hdr.Len = t.SizeBytes()
hdr.Cap = t.SizeBytes()
- _, err := task.CopyOutBytes(addr, buf)
+ length, err := task.CopyOutBytes(addr, buf[:limit])
// Since we bypassed the compiler's escape analysis, indicate that t
- // must live until after the CopyOutBytes.
+ // must live until the use above.
runtime.KeepAlive(t)
- return err
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (t *Timeval) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return t.CopyOutN(task, addr, t.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (t *Timeval) CopyIn(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on t. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on t.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(t)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by t's underlying memory.
+func (t *Timeval) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
hdr.Len = t.SizeBytes()
hdr.Cap = t.SizeBytes()
- _, err := task.CopyInBytes(addr, buf)
+ length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that t
- // must live until after the CopyInBytes.
+ // must live until the use above.
runtime.KeepAlive(t)
- return err
+ return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (t *Timeval) WriteTo(w io.Writer) (int64, error) {
- // Bypass escape analysis on t. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on t.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(t)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by t's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
hdr.Len = t.SizeBytes()
hdr.Cap = t.SizeBytes()
- len, err := w.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that t
- // must live until after the Write.
+ // must live until the use above.
runtime.KeepAlive(t)
- return int64(len), err
+ return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
@@ -858,73 +761,57 @@ func (s *StatxTimestamp) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(s), src)
}
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (s *StatxTimestamp) CopyOut(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (s *StatxTimestamp) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyOutBytes(addr, buf)
+ length, err := task.CopyOutBytes(addr, buf[:limit])
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyOutBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (s *StatxTimestamp) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(task, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (s *StatxTimestamp) CopyIn(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+func (s *StatxTimestamp) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyInBytes(addr, buf)
+ length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyInBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (s *StatxTimestamp) WriteTo(w io.Writer) (int64, error) {
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- len, err := w.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the Write.
+ // must live until the use above.
runtime.KeepAlive(s)
- return int64(len), err
+ return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
@@ -963,72 +850,56 @@ func (u *Utime) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(u), src)
}
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (u *Utime) CopyOut(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on u. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on u.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(u)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by u's underlying memory.
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (u *Utime) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
hdr.Len = u.SizeBytes()
hdr.Cap = u.SizeBytes()
- _, err := task.CopyOutBytes(addr, buf)
+ length, err := task.CopyOutBytes(addr, buf[:limit])
// Since we bypassed the compiler's escape analysis, indicate that u
- // must live until after the CopyOutBytes.
+ // must live until the use above.
runtime.KeepAlive(u)
- return err
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (u *Utime) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return u.CopyOutN(task, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (u *Utime) CopyIn(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on u. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on u.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(u)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by u's underlying memory.
+func (u *Utime) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
hdr.Len = u.SizeBytes()
hdr.Cap = u.SizeBytes()
- _, err := task.CopyInBytes(addr, buf)
+ length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that u
- // must live until after the CopyInBytes.
+ // must live until the use above.
runtime.KeepAlive(u)
- return err
+ return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (u *Utime) WriteTo(w io.Writer) (int64, error) {
- // Bypass escape analysis on u. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on u.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(u)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by u's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
hdr.Len = u.SizeBytes()
hdr.Cap = u.SizeBytes()
- len, err := w.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that u
- // must live until after the Write.
+ // must live until the use above.
runtime.KeepAlive(u)
- return int64(len), err
+ return int64(length), err
}
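The generated bodies above alias a struct's memory as a []byte via reflect.SliceHeader, hide the pointer from escape analysis, and pin the struct with runtime.KeepAlive until the copy completes. A self-contained sketch of that pattern follows; noescape mirrors what gohacks.Noescape is assumed to do (the xor-with-zero trick from src/runtime/stubs.go), and the Timeval type here is illustrative only.

package sketch

import (
	"io"
	"reflect"
	"runtime"
	"unsafe"
)

// Timeval is a stand-in for a packed, fixed-layout marshallable struct.
type Timeval struct {
	Sec  int64
	Usec int64
}

// SizeBytes reports the serialized size of Timeval.
func (t *Timeval) SizeBytes() int { return 16 }

// noescape hides ptr from the compiler's escape analysis so that taking its
// address below does not force t onto the heap (assumed equivalent of
// gohacks.Noescape).
func noescape(ptr unsafe.Pointer) unsafe.Pointer {
	x := uintptr(ptr)
	return unsafe.Pointer(x ^ 0)
}

// WriteTo writes t's raw bytes to w without an intermediate copy, following
// the same steps as the generated WriteTo above.
func (t *Timeval) WriteTo(w io.Writer) (int64, error) {
	// Construct a slice backed by t's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(noescape(unsafe.Pointer(t)))
	hdr.Len = t.SizeBytes()
	hdr.Cap = t.SizeBytes()
	length, err := w.Write(buf)
	// Since escape analysis was bypassed, keep t alive until after the Write.
	runtime.KeepAlive(t)
	return int64(length), err
}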
diff --git a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
index 9b9faaa36..43fd11c6a 100755
--- a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
@@ -5,6 +5,7 @@
package linux
import (
+ "gvisor.dev/gvisor/pkg/gohacks"
"gvisor.dev/gvisor/pkg/safecopy"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/tools/go_marshal/marshal"
@@ -59,73 +60,57 @@ func (e *EpollEvent) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(e), src)
}
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (e *EpollEvent) CopyOut(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on e. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on e.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(e)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by e's underlying memory.
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (e *EpollEvent) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
hdr.Len = e.SizeBytes()
hdr.Cap = e.SizeBytes()
- _, err := task.CopyOutBytes(addr, buf)
+ length, err := task.CopyOutBytes(addr, buf[:limit])
// Since we bypassed the compiler's escape analysis, indicate that e
- // must live until after the CopyOutBytes.
+ // must live until the use above.
runtime.KeepAlive(e)
- return err
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (e *EpollEvent) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return e.CopyOutN(task, addr, e.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (e *EpollEvent) CopyIn(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on e. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on e.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(e)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by e's underlying memory.
+func (e *EpollEvent) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
hdr.Len = e.SizeBytes()
hdr.Cap = e.SizeBytes()
- _, err := task.CopyInBytes(addr, buf)
+ length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that e
- // must live until after the CopyInBytes.
+ // must live until the use above.
runtime.KeepAlive(e)
- return err
+ return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (e *EpollEvent) WriteTo(w io.Writer) (int64, error) {
- // Bypass escape analysis on e. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on e.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(e)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by e's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
hdr.Len = e.SizeBytes()
hdr.Cap = e.SizeBytes()
- len, err := w.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that e
- // must live until after the Write.
+ // must live until the use above.
runtime.KeepAlive(e)
- return int64(len), err
+ return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
@@ -206,7 +191,7 @@ func (s *Stat) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
func (s *Stat) Packed() bool {
- return s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed()
+ return s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
@@ -227,69 +212,58 @@ func (s *Stat) UnmarshalUnsafe(src []byte) {
}
}
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (s *Stat) CopyOut(task marshal.Task, addr usermem.Addr) error {
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (s *Stat) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes())
s.MarshalBytes(buf)
- _, err := task.CopyOutBytes(addr, buf)
- return err
+ return task.CopyOutBytes(addr, buf[:limit])
}
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyOutBytes(addr, buf)
+ length, err := task.CopyOutBytes(addr, buf[:limit])
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyOutBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (s *Stat) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(task, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) error {
+func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes())
- _, err := task.CopyInBytes(addr, buf)
- if err != nil {
- return err
- }
+ length, err := task.CopyInBytes(addr, buf)
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
s.UnmarshalBytes(buf)
- return nil
+ return length, err
}
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyInBytes(addr, buf)
+ length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyInBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
@@ -298,28 +272,21 @@ func (s *Stat) WriteTo(w io.Writer) (int64, error) {
// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, s.SizeBytes())
s.MarshalBytes(buf)
- n, err := w.Write(buf)
- return int64(n), err
+ length, err := w.Write(buf)
+ return int64(length), err
}
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- len, err := w.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the Write.
+ // must live until the use above.
runtime.KeepAlive(s)
- return int64(len), err
+ return int64(length), err
}
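For types whose layout may not be packed (Stat embeds timestamp structs), the generated CopyIn falls back to a scratch buffer and now unmarshals unconditionally, so a short copy-in produces a partially populated struct along with the short length and error. A sketch of that fallback, reusing the assumed Task, Addr, and Marshallable stand-ins from the earlier sketch:

// copyInFallback mirrors the non-packed CopyIn path generated above
// (illustrative helper, not part of go_marshal's actual API).
func copyInFallback(task Task, addr Addr, m Marshallable) (int, error) {
	buf := task.CopyScratchBuffer(m.SizeBytes())
	length, err := task.CopyInBytes(addr, buf)
	// Unmarshal unconditionally. On a short copy-in this leaves m only
	// partially populated; the caller sees the short length and the error.
	m.UnmarshalBytes(buf)
	return length, err
}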
diff --git a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
index 6fc33457a..ce063c847 100755
--- a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
@@ -5,6 +5,7 @@
package linux
import (
+ "gvisor.dev/gvisor/pkg/gohacks"
"gvisor.dev/gvisor/pkg/safecopy"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/tools/go_marshal/marshal"
@@ -63,73 +64,57 @@ func (e *EpollEvent) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(e), src)
}
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (e *EpollEvent) CopyOut(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on e. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on e.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(e)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by e's underlying memory.
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (e *EpollEvent) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
hdr.Len = e.SizeBytes()
hdr.Cap = e.SizeBytes()
- _, err := task.CopyOutBytes(addr, buf)
+ length, err := task.CopyOutBytes(addr, buf[:limit])
// Since we bypassed the compiler's escape analysis, indicate that e
- // must live until after the CopyOutBytes.
+ // must live until the use above.
runtime.KeepAlive(e)
- return err
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (e *EpollEvent) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return e.CopyOutN(task, addr, e.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (e *EpollEvent) CopyIn(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on e. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on e.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(e)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by e's underlying memory.
+func (e *EpollEvent) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
hdr.Len = e.SizeBytes()
hdr.Cap = e.SizeBytes()
- _, err := task.CopyInBytes(addr, buf)
+ length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that e
- // must live until after the CopyInBytes.
+ // must live until the use above.
runtime.KeepAlive(e)
- return err
+ return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (e *EpollEvent) WriteTo(w io.Writer) (int64, error) {
- // Bypass escape analysis on e. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on e.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(e)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by e's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
hdr.Len = e.SizeBytes()
hdr.Cap = e.SizeBytes()
- len, err := w.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that e
- // must live until after the Write.
+ // must live until the use above.
runtime.KeepAlive(e)
- return int64(len), err
+ return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
@@ -214,12 +199,12 @@ func (s *Stat) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
func (s *Stat) Packed() bool {
- return s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed()
+ return s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (s *Stat) MarshalUnsafe(dst []byte) {
- if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ if s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(s))
} else {
s.MarshalBytes(dst)
@@ -228,76 +213,65 @@ func (s *Stat) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *Stat) UnmarshalUnsafe(src []byte) {
- if s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
+ if s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
safecopy.CopyOut(unsafe.Pointer(s), src)
} else {
s.UnmarshalBytes(src)
}
}
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (s *Stat) CopyOut(task marshal.Task, addr usermem.Addr) error {
- if !s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (s *Stat) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ if !s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes())
s.MarshalBytes(buf)
- _, err := task.CopyOutBytes(addr, buf)
- return err
+ return task.CopyOutBytes(addr, buf[:limit])
}
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyOutBytes(addr, buf)
+ length, err := task.CopyOutBytes(addr, buf[:limit])
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyOutBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (s *Stat) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(task, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) error {
- if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ if !s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes())
- _, err := task.CopyInBytes(addr, buf)
- if err != nil {
- return err
- }
+ length, err := task.CopyInBytes(addr, buf)
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
s.UnmarshalBytes(buf)
- return nil
+ return length, err
}
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyInBytes(addr, buf)
+ length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyInBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
@@ -306,28 +280,21 @@ func (s *Stat) WriteTo(w io.Writer) (int64, error) {
// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, s.SizeBytes())
s.MarshalBytes(buf)
- n, err := w.Write(buf)
- return int64(n), err
+ length, err := w.Write(buf)
+ return int64(length), err
}
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+ // Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- len, err := w.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the Write.
+ // must live until the use above.
runtime.KeepAlive(s)
- return int64(len), err
+ return int64(length), err
}
diff --git a/pkg/sentry/kernel/rseq.go b/pkg/sentry/kernel/rseq.go
index ded95f532..18416643b 100644
--- a/pkg/sentry/kernel/rseq.go
+++ b/pkg/sentry/kernel/rseq.go
@@ -304,7 +304,7 @@ func (t *Task) rseqAddrInterrupt() {
}
var cs linux.RSeqCriticalSection
- if err := cs.CopyIn(t, critAddr); err != nil {
+ if _, err := cs.CopyIn(t, critAddr); err != nil {
t.Debugf("Failed to copy critical section from %#x for rseq: %v", critAddr, err)
t.forceSignal(linux.SIGSEGV, false /* unconditional */)
t.SendSignal(SignalInfoPriv(linux.SIGSEGV))
diff --git a/pkg/sentry/syscalls/linux/sys_stat.go b/pkg/sentry/syscalls/linux/sys_stat.go
index a11a87cd1..46ebf27a2 100644
--- a/pkg/sentry/syscalls/linux/sys_stat.go
+++ b/pkg/sentry/syscalls/linux/sys_stat.go
@@ -115,7 +115,8 @@ func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr usermem.Addr) err
return err
}
s := statFromAttrs(t, d.Inode.StableAttr, uattr)
- return s.CopyOut(t, statAddr)
+ _, err = s.CopyOut(t, statAddr)
+ return err
}
// fstat implements fstat for the given *fs.File.
@@ -125,7 +126,8 @@ func fstat(t *kernel.Task, f *fs.File, statAddr usermem.Addr) error {
return err
}
s := statFromAttrs(t, f.Dirent.Inode.StableAttr, uattr)
- return s.CopyOut(t, statAddr)
+ _, err = s.CopyOut(t, statAddr)
+ return err
}
// Statx implements linux syscall statx(2).
diff --git a/pkg/sentry/syscalls/linux/vfs2/epoll.go b/pkg/sentry/syscalls/linux/vfs2/epoll.go
index d6cb0e79a..5a938cee2 100755
--- a/pkg/sentry/syscalls/linux/vfs2/epoll.go
+++ b/pkg/sentry/syscalls/linux/vfs2/epoll.go
@@ -101,14 +101,14 @@ func EpollCtl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
var event linux.EpollEvent
switch op {
case linux.EPOLL_CTL_ADD:
- if err := event.CopyIn(t, eventAddr); err != nil {
+ if _, err := event.CopyIn(t, eventAddr); err != nil {
return 0, nil, err
}
return 0, nil, ep.AddInterest(file, fd, event)
case linux.EPOLL_CTL_DEL:
return 0, nil, ep.DeleteInterest(file, fd)
case linux.EPOLL_CTL_MOD:
- if err := event.CopyIn(t, eventAddr); err != nil {
+ if _, err := event.CopyIn(t, eventAddr); err != nil {
return 0, nil, err
}
return 0, nil, ep.ModifyInterest(file, fd, event)
diff --git a/pkg/sentry/syscalls/linux/vfs2/poll.go b/pkg/sentry/syscalls/linux/vfs2/poll.go
index dbf4882da..ff1b25d7b 100755
--- a/pkg/sentry/syscalls/linux/vfs2/poll.go
+++ b/pkg/sentry/syscalls/linux/vfs2/poll.go
@@ -374,7 +374,8 @@ func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.D
}
remaining := timeoutRemaining(t, startNs, timeout)
tsRemaining := linux.NsecToTimespec(remaining.Nanoseconds())
- return tsRemaining.CopyOut(t, timespecAddr)
+ _, err := tsRemaining.CopyOut(t, timespecAddr)
+ return err
}
// copyOutTimevalRemaining copies the time remaining in timeout to timevalAddr.
@@ -386,7 +387,8 @@ func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Du
}
remaining := timeoutRemaining(t, startNs, timeout)
tvRemaining := linux.NsecToTimeval(remaining.Nanoseconds())
- return tvRemaining.CopyOut(t, timevalAddr)
+ _, err := tvRemaining.CopyOut(t, timevalAddr)
+ return err
}
// pollRestartBlock encapsulates the state required to restart poll(2) via
@@ -477,7 +479,7 @@ func Select(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
timeout := time.Duration(-1)
if timevalAddr != 0 {
var timeval linux.Timeval
- if err := timeval.CopyIn(t, timevalAddr); err != nil {
+ if _, err := timeval.CopyIn(t, timevalAddr); err != nil {
return 0, nil, err
}
if timeval.Sec < 0 || timeval.Usec < 0 {
@@ -519,7 +521,7 @@ func Pselect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
panic(fmt.Sprintf("unsupported sizeof(void*): %d", t.Arch().Width()))
}
var maskStruct sigSetWithSize
- if err := maskStruct.CopyIn(t, maskWithSizeAddr); err != nil {
+ if _, err := maskStruct.CopyIn(t, maskWithSizeAddr); err != nil {
return 0, nil, err
}
if err := setTempSignalSet(t, usermem.Addr(maskStruct.sigsetAddr), uint(maskStruct.sizeofSigset)); err != nil {
@@ -554,7 +556,7 @@ func copyTimespecInToDuration(t *kernel.Task, timespecAddr usermem.Addr) (time.D
timeout := time.Duration(-1)
if timespecAddr != 0 {
var timespec linux.Timespec
- if err := timespec.CopyIn(t, timespecAddr); err != nil {
+ if _, err := timespec.CopyIn(t, timespecAddr); err != nil {
return 0, err
}
if !timespec.Valid() {
@@ -573,7 +575,7 @@ func setTempSignalSet(t *kernel.Task, maskAddr usermem.Addr, maskSize uint) erro
return syserror.EINVAL
}
var mask linux.SignalSet
- if err := mask.CopyIn(t, maskAddr); err != nil {
+ if _, err := mask.CopyIn(t, maskAddr); err != nil {
return err
}
mask &^= kernel.UnblockableSignals
diff --git a/pkg/sentry/syscalls/linux/vfs2/setstat.go b/pkg/sentry/syscalls/linux/vfs2/setstat.go
index 136453ccc..4e61f1452 100755
--- a/pkg/sentry/syscalls/linux/vfs2/setstat.go
+++ b/pkg/sentry/syscalls/linux/vfs2/setstat.go
@@ -226,7 +226,7 @@ func Utime(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
opts.Stat.Mtime.Nsec = linux.UTIME_NOW
} else {
var times linux.Utime
- if err := times.CopyIn(t, timesAddr); err != nil {
+ if _, err := times.CopyIn(t, timesAddr); err != nil {
return 0, nil, err
}
opts.Stat.Atime.Sec = times.Actime
diff --git a/pkg/sentry/syscalls/linux/vfs2/stat.go b/pkg/sentry/syscalls/linux/vfs2/stat.go
index fdfe49243..bb1d5cac4 100755
--- a/pkg/sentry/syscalls/linux/vfs2/stat.go
+++ b/pkg/sentry/syscalls/linux/vfs2/stat.go
@@ -91,7 +91,8 @@ func fstatat(t *kernel.Task, dirfd int32, pathAddr, statAddr usermem.Addr, flags
}
var stat linux.Stat
convertStatxToUserStat(t, &statx, &stat)
- return stat.CopyOut(t, statAddr)
+ _, err = stat.CopyOut(t, statAddr)
+ return err
}
start = dirfile.VirtualDentry()
start.IncRef()
@@ -111,7 +112,8 @@ func fstatat(t *kernel.Task, dirfd int32, pathAddr, statAddr usermem.Addr, flags
}
var stat linux.Stat
convertStatxToUserStat(t, &statx, &stat)
- return stat.CopyOut(t, statAddr)
+ _, err = stat.CopyOut(t, statAddr)
+ return err
}
func timespecFromStatxTimestamp(sxts linux.StatxTimestamp) linux.Timespec {
@@ -140,7 +142,8 @@ func Fstat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
}
var stat linux.Stat
convertStatxToUserStat(t, &statx, &stat)
- return 0, nil, stat.CopyOut(t, statAddr)
+ _, err = stat.CopyOut(t, statAddr)
+ return 0, nil, err
}
// Statx implements Linux syscall statx(2).
@@ -199,7 +202,8 @@ func Statx(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, err
}
userifyStatx(t, &statx)
- return 0, nil, statx.CopyOut(t, statxAddr)
+ _, err = statx.CopyOut(t, statxAddr)
+ return 0, nil, err
}
start = dirfile.VirtualDentry()
start.IncRef()
@@ -218,7 +222,8 @@ func Statx(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, err
}
userifyStatx(t, &statx)
- return 0, nil, statx.CopyOut(t, statxAddr)
+ _, err = statx.CopyOut(t, statxAddr)
+ return 0, nil, err
}
func userifyStatx(t *kernel.Task, statx *linux.Statx) {
@@ -359,8 +364,8 @@ func Statfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
if err != nil {
return 0, nil, err
}
-
- return 0, nil, statfs.CopyOut(t, bufAddr)
+ _, err = statfs.CopyOut(t, bufAddr)
+ return 0, nil, err
}
// Fstatfs implements Linux syscall fstatfs(2).
@@ -378,6 +383,6 @@ func Fstatfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
if err != nil {
return 0, nil, err
}
-
- return 0, nil, statfs.CopyOut(t, bufAddr)
+ _, err = statfs.CopyOut(t, bufAddr)
+ return 0, nil, err
}
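
As with CopyIn, CopyOut now reports how many bytes were written. The handlers above no longer return its result directly; they capture the count, drop it, and forward only the error. A minimal sketch of that tail pattern, assuming a hypothetical helper (writeStatOut is not part of this change):

package example

import (
	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/sentry/kernel"
	"gvisor.dev/gvisor/pkg/usermem"
)

// writeStatOut serializes a linux.Stat to task memory. CopyOut now returns
// (int, error); the count is dropped because the syscall handlers above
// only propagate the error.
func writeStatOut(t *kernel.Task, statAddr usermem.Addr, stat *linux.Stat) error {
	_, err := stat.CopyOut(t, statAddr)
	return err
}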
diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
index fb2182415..c538be89d 100755
--- a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
+++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
@@ -3,6 +3,7 @@
package vfs2
import (
+ "gvisor.dev/gvisor/pkg/gohacks"
"gvisor.dev/gvisor/pkg/safecopy"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/tools/go_marshal/marshal"
@@ -51,72 +52,56 @@ func (s *sigSetWithSize) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(s), src)
}
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (s *sigSetWithSize) CopyOut(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (s *sigSetWithSize) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by s's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyOutBytes(addr, buf)
+ length, err := task.CopyOutBytes(addr, buf[:limit])
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyOutBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (s *sigSetWithSize) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(task, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
-func (s *sigSetWithSize) CopyIn(task marshal.Task, addr usermem.Addr) error {
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+func (s *sigSetWithSize) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by s's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- _, err := task.CopyInBytes(addr, buf)
+ length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the CopyInBytes.
+ // must live until the use above.
runtime.KeepAlive(s)
- return err
+ return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (s *sigSetWithSize) WriteTo(w io.Writer) (int64, error) {
- // Bypass escape analysis on s. The no-op arithmetic operation on the
- // pointer makes the compiler think val doesn't depend on s.
- // See src/runtime/stubs.go:noescape() in the golang toolchain.
- ptr := unsafe.Pointer(s)
- val := uintptr(ptr)
- val = val^0
-
- // Construct a slice backed by s's underlying memory.
+ // Construct a slice backed by s's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = val
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- len, err := w.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
- // must live until after the Write.
+ // must live until the use above.
runtime.KeepAlive(s)
- return int64(len), err
+ return int64(length), err
}
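
The regenerated methods replace the hand-rolled val = val^0 escape-analysis trick with gohacks.Noescape, and CopyOutN lets callers pass an explicit byte limit. Below is a minimal standalone sketch of the slice-header pattern, using a hypothetical header type (not part of this change); the generated code applies the same pattern to every +marshal type:

package example

import (
	"io"
	"reflect"
	"runtime"
	"unsafe"

	"gvisor.dev/gvisor/pkg/gohacks"
)

// header is a hypothetical fixed-layout struct standing in for a +marshal type.
type header struct {
	A uint32
	B uint32
}

// writeHeader writes h's raw in-memory representation to w, mirroring the
// generated WriteTo above: a byte slice is made to alias h's memory,
// gohacks.Noescape hides the pointer from escape analysis, and
// runtime.KeepAlive pins h until the write has completed.
func writeHeader(w io.Writer, h *header) (int64, error) {
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(h)))
	hdr.Len = int(unsafe.Sizeof(*h))
	hdr.Cap = int(unsafe.Sizeof(*h))
	length, err := w.Write(buf)
	// Escape analysis was bypassed above, so keep h alive until after the Write.
	runtime.KeepAlive(h)
	return int64(length), err
}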
diff --git a/tools/go_marshal/marshal/marshal.go b/tools/go_marshal/marshal/marshal.go
index f129788e0..cb2166252 100755
--- a/tools/go_marshal/marshal/marshal.go
+++ b/tools/go_marshal/marshal/marshal.go
@@ -42,7 +42,11 @@ type Task interface {
CopyInBytes(addr usermem.Addr, b []byte) (int, error)
}
-// Marshallable represents a type that can be marshalled to and from memory.
+// Marshallable represents operations on a type that can be marshalled to and
+// from memory.
+//
+// go-marshal automatically generates implementations for this interface for
+// types marked as '+marshal'.
type Marshallable interface {
io.WriterTo
@@ -54,12 +58,18 @@ type Marshallable interface {
// likely make use of the type of these fields).
SizeBytes() int
- // MarshalBytes serializes a copy of a type to dst. dst must be at least
- // SizeBytes() long.
+ // MarshalBytes serializes a copy of a type to dst. dst may be smaller than
+ // SizeBytes(), which results in a part of the struct being marshalled. Note
+ // that this may have unexpected results for non-packed types, as implicit
+ // padding needs to be taken into account when reasoning about how much of
+ // the type is serialized.
MarshalBytes(dst []byte)
- // UnmarshalBytes deserializes a type from src. src must be at least
- // SizeBytes() long.
+ // UnmarshalBytes deserializes a type from src. src may be smaller than
+ // SizeBytes(), which results in a partially deserialized struct. Note that
+ // this may have unexpected results for non-packed types, as implicit
+ // padding needs to be taken into account when reasoning about how much of
+ // the type is deserialized.
UnmarshalBytes(src []byte)
// Packed returns true if the marshalled size of the type is the same as the
@@ -67,13 +77,20 @@ type Marshallable interface {
// starting at unaligned addresses (should always be true by default for ABI
// structs, verified by automatically generated tests when using
// go_marshal), and has no fields marked `marshal:"unaligned"`.
+ //
+ // Packed must return the same result for all possible values of the type
+ // implementing it. Violating this constraint implies the type doesn't have
+ // a static memory layout, and will lead to memory corruption.
+ // Go-marshal-generated code reuses the result of Packed for multiple values
+ // of the same type.
Packed() bool
// MarshalUnsafe serializes a type by bulk copying its in-memory
// representation to the dst buffer. This is only safe to do when the type
// has no implicit padding, see Marshallable.Packed. When Packed would
// return false, MarshalUnsafe should fall back to the safer but slower
- // MarshalBytes.
+ // MarshalBytes. dst may be smaller than SizeBytes(), see comment for
+ // MarshalBytes for implications.
MarshalUnsafe(dst []byte)
// UnmarshalUnsafe deserializes a type by directly copying to the underlying
@@ -82,7 +99,8 @@ type Marshallable interface {
// This allows much faster unmarshalling of types which have no implicit
// padding, see Marshallable.Packed. When Packed would return false,
// UnmarshalUnsafe should fall back to the safer but slower unmarshal
- // mechanism implemented in UnmarshalBytes.
+ // mechanism implemented in UnmarshalBytes. src may be smaller than
+ // SizeBytes(), see comment for UnmarshalBytes for implications.
UnmarshalUnsafe(src []byte)
// CopyIn deserializes a Marshallable type from a task's memory. This may
@@ -91,12 +109,79 @@ type Marshallable interface {
// marshalled does not escape. The implementation should avoid creating
// extra copies in memory by directly deserializing to the object's
// underlying memory.
- CopyIn(task Task, addr usermem.Addr) error
+ //
+ // If the copy-in from the task memory is only partially successful, CopyIn
+ // should still attempt to deserialize as much data as possible. See comment
+ // for UnmarshalBytes.
+ CopyIn(task Task, addr usermem.Addr) (int, error)
// CopyOut serializes a Marshallable type to a task's memory. This may only
// be called from a task goroutine. This is more efficient than calling
// MarshalUnsafe on Marshallable.Packed types, as the type being serialized
// does not escape. The implementation should avoid creating extra copies in
// memory by directly serializing from the object's underlying memory.
- CopyOut(task Task, addr usermem.Addr) error
+ //
+ // The copy-out to the task memory may be partially successful, in which
+ // case CopyOut returns how much data was serialized. See comment for
+ // MarshalBytes for implications.
+ CopyOut(task Task, addr usermem.Addr) (int, error)
+
+ // CopyOutN is like CopyOut, but explicitly requests a partial
+ // copy-out. Note that this may yield unexpected results for non-packed
+ // types and the caller may only want to allow this for packed types. See
+ // comment on MarshalBytes.
+ //
+ // The limit must be less than or equal to SizeBytes().
+ CopyOutN(task Task, addr usermem.Addr, limit int) (int, error)
}
+
+// go-marshal generates additional functions for a type based on additional
+// clauses to the +marshal directive. They are documented below.
+//
+// Slice API
+// =========
+//
+// Adding a "slice" clause to the +marshal directive for structs or newtypes on
+// primitives like this:
+//
+// // +marshal slice:FooSlice
+// type Foo struct { ... }
+//
+// Generates four additional functions for marshalling slices of Foos like this:
+//
+// // MarshalUnsafeFooSlice is like Foo.MarshalUnsafe, but for a []Foo. It's
+// // more efficient than repeatedly calling Foo.MarshalUnsafe over a
+// // []Foo in a loop.
+// func MarshalUnsafeFooSlice(src []Foo, dst []byte) (int, error) { ... }
+//
+// // UnmarshalUnsafeFooSlice is like Foo.UnmarshalUnsafe, but for a []Foo.
+// // It's more efficient than repeatedly calling Foo.UnmarshalUnsafe over a
+// // []Foo in a loop.
+// func UnmarshalUnsafeFooSlice(dst []Foo, src []byte) (int, error) { ... }
+//
+// // CopyFooSliceIn copies in a slice of Foo objects from the task's memory.
+// func CopyFooSliceIn(task marshal.Task, addr usermem.Addr, dst []Foo) (int, error) { ... }
+//
+// // CopyFooSliceOut copies out a slice of Foo objects to the task's memory.
+// func CopyFooSliceOut(task marshal.Task, addr usermem.Addr, src []Foo) (int, error) { ... }
+//
+// The names of the functions are of the format "Copy%sIn" and "Copy%sOut", where
+// %s is the first argument to the slice clause. This directive is not supported
+// for newtypes on arrays.
+//
+// The slice clause also takes an optional second argument, which must be the
+// value "inner":
+//
+// // +marshal slice:Int32Slice:inner
+// type Int32 int32
+//
+// This is only valid on newtypes on primitives, and causes the generated
+// functions to accept slices of the inner type instead:
+//
+// func CopyInt32SliceIn(task marshal.Task, addr usermem.Addr, dst []int32) (int, error) { ... }
+//
+// Without "inner", they would instead be:
+//
+// func CopyInt32SliceIn(task marshal.Task, addr usermem.Addr, dst []Int32) (int, error) { ... }
+//
+// This may help avoid a cast depending on how the generated functions are used.
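
For illustration, here is a hypothetical use of the slice clause documented above (Foo, FooSlice, and readFoos are invented for this sketch; CopyFooSliceIn exists only after go_marshal generates it):

package example

import (
	"gvisor.dev/gvisor/pkg/sentry/kernel"
	"gvisor.dev/gvisor/pkg/usermem"
)

// Foo is a hypothetical ABI struct; the slice clause asks go_marshal to also
// emit MarshalUnsafeFooSlice, UnmarshalUnsafeFooSlice, CopyFooSliceIn and
// CopyFooSliceOut.
//
// +marshal slice:FooSlice
type Foo struct {
	X uint64
	Y uint64
}

// readFoos copies count Foo objects in from task memory in one call, which
// is cheaper than invoking Foo.CopyIn in a loop.
func readFoos(t *kernel.Task, addr usermem.Addr, count int) ([]Foo, error) {
	foos := make([]Foo, count)
	if _, err := CopyFooSliceIn(t, addr, foos); err != nil {
		return nil, err
	}
	return foos, nil
}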