author     gVisor bot <gvisor-bot@google.com>  2020-08-26 21:45:46 +0000
committer  gVisor bot <gvisor-bot@google.com>  2020-08-26 21:45:46 +0000
commit     7670835ddf34078801e6e9cba1dc02b3d76d1318 (patch)
tree       9e069fb5db1735ef313c3c9faf977b9a35b295ce
parent     821e23d65cde88a75af72b889bf7dfa61a40b3a3 (diff)
parent     983a55aa0649e48467b2e41f9550759535634854 (diff)
Merge release-20200818.0-61-g983a55aa0 (automated)
-rw-r--r--  pkg/abi/linux/linux_abi_autogen_unsafe.go  | 258
-rw-r--r--  pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go  | 36
-rw-r--r--  pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go  | 26
-rw-r--r--  pkg/sentry/arch/arch_abi_autogen_unsafe.go  | 18
-rw-r--r--  pkg/sentry/fsimpl/devpts/root_inode_refs.go  | 5
-rw-r--r--  pkg/sentry/fsimpl/fuse/inode_refs.go  | 5
-rw-r--r--  pkg/sentry/fsimpl/host/connected_endpoint_refs.go  | 5
-rw-r--r--  pkg/sentry/fsimpl/host/inode_refs.go  | 5
-rw-r--r--  pkg/sentry/fsimpl/kernfs/dentry_refs.go  | 5
-rw-r--r--  pkg/sentry/fsimpl/kernfs/static_directory_refs.go  | 5
-rw-r--r--  pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go  | 5
-rw-r--r--  pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go  | 5
-rw-r--r--  pkg/sentry/fsimpl/proc/subtasks_inode_refs.go  | 5
-rw-r--r--  pkg/sentry/fsimpl/proc/task_inode_refs.go  | 5
-rw-r--r--  pkg/sentry/fsimpl/proc/tasks_inode_refs.go  | 5
-rw-r--r--  pkg/sentry/fsimpl/sys/dir_refs.go  | 5
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/inode_refs.go  | 5
-rw-r--r--  pkg/sentry/kernel/fd_table_refs.go  | 5
-rw-r--r--  pkg/sentry/kernel/fs_context_refs.go  | 5
-rw-r--r--  pkg/sentry/kernel/process_group_refs.go  | 5
-rw-r--r--  pkg/sentry/kernel/session_refs.go  | 5
-rw-r--r--  pkg/sentry/kernel/shm/shm_refs.go  | 5
-rw-r--r--  pkg/sentry/mm/aio_mappable_refs.go  | 5
-rw-r--r--  pkg/sentry/mm/special_mappable_refs.go  | 5
-rw-r--r--  pkg/sentry/platform/kvm/bluepill_fault.go  | 4
-rw-r--r--  pkg/sentry/platform/kvm/kvm_const.go  | 2
-rw-r--r--  pkg/sentry/platform/kvm/machine.go  | 40
-rw-r--r--  pkg/sentry/platform/ring0/defs_impl_arm64.go  | 4
-rw-r--r--  pkg/sentry/socket/unix/socket_refs.go  | 5
-rw-r--r--  pkg/sentry/socket/unix/transport/queue_refs.go  | 5
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go  | 6
-rw-r--r--  pkg/sentry/vfs/file_description_refs.go  | 5
-rw-r--r--  pkg/sentry/vfs/filesystem_refs.go  | 5
-rw-r--r--  pkg/sentry/vfs/mount_namespace_refs.go  | 5
-rw-r--r--  pkg/tcpip/link/tun/tun_endpoint_refs.go  | 5
-rw-r--r--  tools/go_marshal/primitive/primitive_abi_autogen_unsafe.go  | 112
36 files changed, 321 insertions, 315 deletions
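
Most of the churn in the diff below is mechanical: the go_marshal generator emits the conjuncts of its Packed() checks in a nondeterministic order between runs, and this regeneration also annotates every runtime.KeepAlive call with "// escapes: replaced by intrinsic.". As a minimal standalone sketch of why those KeepAlive calls exist at all, assuming a simplified stat struct and a copyOutBytes stand-in for task.CopyOutBytes (both hypothetical, not gVisor's API):

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

type stat struct{ Dev, Ino uint64 }

// copyOutBytes stands in for task.CopyOutBytes: it only ever sees a plain
// []byte, so nothing visibly ties the buffer back to the *stat it aliases.
func copyOutBytes(dst, src []byte) int { return copy(dst, src) }

func copyOut(s *stat, dst []byte) int {
	// Build a []byte view of s behind the compiler's back. Because the
	// pointer travels through a uintptr, escape analysis no longer ties
	// buf to s.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(s))
	hdr.Len = int(unsafe.Sizeof(*s))
	hdr.Cap = hdr.Len

	n := copyOutBytes(dst, buf)
	// s must stay live until the copy above finishes; since escape analysis
	// was bypassed, make that explicit. The new "// escapes: replaced by
	// intrinsic." comments in this diff note that KeepAlive is lowered to a
	// compiler intrinsic rather than a real call.
	runtime.KeepAlive(s)
	return n
}

func main() {
	s := &stat{Dev: 1, Ino: 42}
	dst := make([]byte, unsafe.Sizeof(*s))
	fmt.Println(copyOut(s, dst), dst)
}

Without the KeepAlive, the collector would be free to reclaim s once its last visible pointer use is past, even though the copy still reads its memory.
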
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 1150f33d6..6b03a0f0b 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -154,12 +154,12 @@ func (s *Statx) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (s *Statx) Packed() bool {
- return s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed()
+ return s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (s *Statx) MarshalUnsafe(dst []byte) {
- if s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+ if s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(s))
} else {
// Type Statx doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -169,7 +169,7 @@ func (s *Statx) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *Statx) UnmarshalUnsafe(src []byte) {
- if s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() {
+ if s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
safecopy.CopyOut(unsafe.Pointer(s), src)
} else {
// Type Statx doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -197,7 +197,7 @@ func (s *Statx) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -210,7 +210,7 @@ func (s *Statx) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (s *Statx) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
- if !s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() {
+ if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
@@ -230,7 +230,7 @@ func (s *Statx) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -254,7 +254,7 @@ func (s *Statx) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -358,7 +358,7 @@ func (s *Statfs) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -381,7 +381,7 @@ func (s *Statfs) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -397,7 +397,7 @@ func (s *Statfs) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -447,7 +447,7 @@ func (f *FUSEOpcode) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -470,7 +470,7 @@ func (f *FUSEOpcode) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -486,7 +486,7 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -536,7 +536,7 @@ func (f *FUSEOpID) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (in
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -559,7 +559,7 @@ func (f *FUSEOpID) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -575,7 +575,7 @@ func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -672,7 +672,7 @@ func (f *FUSEHeaderIn) CopyOutN(task marshal.Task, addr usermem.Addr, limit int)
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -705,13 +705,13 @@ func (f *FUSEHeaderIn) CopyIn(task marshal.Task, addr usermem.Addr) (int, error)
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
+ if !f.Unique.Packed() && f.Opcode.Packed() {
// Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, f.SizeBytes())
f.MarshalBytes(buf)
@@ -729,7 +729,7 @@ func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -805,7 +805,7 @@ func (f *FUSEHeaderOut) CopyOutN(task marshal.Task, addr usermem.Addr, limit int
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -838,7 +838,7 @@ func (f *FUSEHeaderOut) CopyIn(task marshal.Task, addr usermem.Addr) (int, error
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -862,7 +862,7 @@ func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -936,7 +936,7 @@ func (f *FUSEWriteIn) CopyOutN(task marshal.Task, addr usermem.Addr, limit int)
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -959,7 +959,7 @@ func (f *FUSEWriteIn) CopyIn(task marshal.Task, addr usermem.Addr) (int, error)
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -975,7 +975,7 @@ func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -1037,7 +1037,7 @@ func (f *FUSEInitIn) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -1060,7 +1060,7 @@ func (f *FUSEInitIn) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -1076,7 +1076,7 @@ func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -1167,7 +1167,7 @@ func (f *FUSEInitOut) CopyOutN(task marshal.Task, addr usermem.Addr, limit int)
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -1190,7 +1190,7 @@ func (f *FUSEInitOut) CopyIn(task marshal.Task, addr usermem.Addr) (int, error)
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -1206,7 +1206,7 @@ func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -1264,7 +1264,7 @@ func (f *FUSEGetAttrIn) CopyOutN(task marshal.Task, addr usermem.Addr, limit int
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -1287,7 +1287,7 @@ func (f *FUSEGetAttrIn) CopyIn(task marshal.Task, addr usermem.Addr) (int, error
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -1303,7 +1303,7 @@ func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -1413,7 +1413,7 @@ func (f *FUSEAttr) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (in
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -1436,7 +1436,7 @@ func (f *FUSEAttr) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -1452,7 +1452,7 @@ func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -1532,7 +1532,7 @@ func (f *FUSEGetAttrOut) CopyOutN(task marshal.Task, addr usermem.Addr, limit in
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -1565,7 +1565,7 @@ func (f *FUSEGetAttrOut) CopyIn(task marshal.Task, addr usermem.Addr) (int, erro
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return length, err
}
@@ -1589,7 +1589,7 @@ func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
- runtime.KeepAlive(f)
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -1647,7 +1647,7 @@ func (r *RobustListHead) CopyOutN(task marshal.Task, addr usermem.Addr, limit in
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that r
// must live until the use above.
- runtime.KeepAlive(r)
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
return length, err
}
@@ -1670,7 +1670,7 @@ func (r *RobustListHead) CopyIn(task marshal.Task, addr usermem.Addr) (int, erro
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that r
// must live until the use above.
- runtime.KeepAlive(r)
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
return length, err
}
@@ -1686,7 +1686,7 @@ func (r *RobustListHead) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that r
// must live until the use above.
- runtime.KeepAlive(r)
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -1736,7 +1736,7 @@ func (n *NumaPolicy) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that n
// must live until the use above.
- runtime.KeepAlive(n)
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
return length, err
}
@@ -1759,7 +1759,7 @@ func (n *NumaPolicy) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that n
// must live until the use above.
- runtime.KeepAlive(n)
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
return length, err
}
@@ -1775,7 +1775,7 @@ func (n *NumaPolicy) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that n
// must live until the use above.
- runtime.KeepAlive(n)
+ runtime.KeepAlive(n) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -1839,7 +1839,7 @@ func (i *IFReq) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -1862,7 +1862,7 @@ func (i *IFReq) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -1878,7 +1878,7 @@ func (i *IFReq) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -1937,7 +1937,7 @@ func (i *IFConf) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -1960,7 +1960,7 @@ func (i *IFConf) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -1976,7 +1976,7 @@ func (i *IFConf) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -2065,7 +2065,7 @@ func (i *IPTEntry) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (in
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -2098,7 +2098,7 @@ func (i *IPTEntry) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -2122,7 +2122,7 @@ func (i *IPTEntry) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -2225,7 +2225,7 @@ func (i *IPTIP) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (i *IPTIP) UnmarshalUnsafe(src []byte) {
- if i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
+ if i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
safecopy.CopyOut(unsafe.Pointer(i), src)
} else {
// Type IPTIP doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -2236,7 +2236,7 @@ func (i *IPTIP) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (i *IPTIP) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
- if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if !i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
// Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
i.MarshalBytes(buf) // escapes: fallback.
@@ -2253,7 +2253,7 @@ func (i *IPTIP) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -2266,7 +2266,7 @@ func (i *IPTIP) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (i *IPTIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
- if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if !i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() {
// Type IPTIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
@@ -2286,13 +2286,13 @@ func (i *IPTIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (i *IPTIP) WriteTo(writer io.Writer) (int64, error) {
- if !i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
+ if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
// Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, i.SizeBytes())
i.MarshalBytes(buf)
@@ -2310,7 +2310,7 @@ func (i *IPTIP) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -2364,7 +2364,7 @@ func (x *XTCounters) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that x
// must live until the use above.
- runtime.KeepAlive(x)
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
return length, err
}
@@ -2387,7 +2387,7 @@ func (x *XTCounters) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that x
// must live until the use above.
- runtime.KeepAlive(x)
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
return length, err
}
@@ -2403,7 +2403,7 @@ func (x *XTCounters) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that x
// must live until the use above.
- runtime.KeepAlive(x)
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -2501,7 +2501,7 @@ func (i *IPTGetinfo) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -2534,7 +2534,7 @@ func (i *IPTGetinfo) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -2558,7 +2558,7 @@ func (i *IPTGetinfo) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -2635,7 +2635,7 @@ func (i *IPTGetEntries) CopyOutN(task marshal.Task, addr usermem.Addr, limit int
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -2668,7 +2668,7 @@ func (i *IPTGetEntries) CopyIn(task marshal.Task, addr usermem.Addr) (int, error
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -2692,7 +2692,7 @@ func (i *IPTGetEntries) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -2748,7 +2748,7 @@ func (t *TableName) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (i
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return length, err
}
@@ -2771,7 +2771,7 @@ func (t *TableName) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return length, err
}
@@ -2787,7 +2787,7 @@ func (t *TableName) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -2893,7 +2893,7 @@ func (i *IP6TReplace) CopyOutN(task marshal.Task, addr usermem.Addr, limit int)
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -2926,7 +2926,7 @@ func (i *IP6TReplace) CopyIn(task marshal.Task, addr usermem.Addr) (int, error)
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -2950,7 +2950,7 @@ func (i *IP6TReplace) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -3044,7 +3044,7 @@ func (i *IP6TEntry) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (i
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -3077,7 +3077,7 @@ func (i *IP6TEntry) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -3101,7 +3101,7 @@ func (i *IP6TEntry) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -3198,7 +3198,7 @@ func (i *IP6TIP) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (i *IP6TIP) Packed() bool {
- return i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed()
+ return i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
@@ -3224,7 +3224,7 @@ func (i *IP6TIP) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (i *IP6TIP) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
- if !i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() {
+ if !i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
// Type IP6TIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
i.MarshalBytes(buf) // escapes: fallback.
@@ -3241,7 +3241,7 @@ func (i *IP6TIP) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -3254,7 +3254,7 @@ func (i *IP6TIP) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (i *IP6TIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
- if !i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() {
+ if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
// Type IP6TIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
@@ -3274,7 +3274,7 @@ func (i *IP6TIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -3298,7 +3298,7 @@ func (i *IP6TIP) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -3364,7 +3364,7 @@ func (r *RSeqCriticalSection) CopyOutN(task marshal.Task, addr usermem.Addr, lim
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that r
// must live until the use above.
- runtime.KeepAlive(r)
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
return length, err
}
@@ -3387,7 +3387,7 @@ func (r *RSeqCriticalSection) CopyIn(task marshal.Task, addr usermem.Addr) (int,
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that r
// must live until the use above.
- runtime.KeepAlive(r)
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
return length, err
}
@@ -3403,7 +3403,7 @@ func (r *RSeqCriticalSection) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that r
// must live until the use above.
- runtime.KeepAlive(r)
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -3453,7 +3453,7 @@ func (s *SignalSet) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (i
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -3476,7 +3476,7 @@ func (s *SignalSet) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -3492,7 +3492,7 @@ func (s *SignalSet) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -3548,7 +3548,7 @@ func (i *InetAddr) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (in
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -3571,7 +3571,7 @@ func (i *InetAddr) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -3587,7 +3587,7 @@ func (i *InetAddr) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -3668,7 +3668,7 @@ func (s *SockAddrInet) CopyOutN(task marshal.Task, addr usermem.Addr, limit int)
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -3701,7 +3701,7 @@ func (s *SockAddrInet) CopyIn(task marshal.Task, addr usermem.Addr) (int, error)
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -3725,7 +3725,7 @@ func (s *SockAddrInet) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -3781,7 +3781,7 @@ func (i *Inet6Addr) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (i
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -3804,7 +3804,7 @@ func (i *Inet6Addr) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -3820,7 +3820,7 @@ func (i *Inet6Addr) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -3874,7 +3874,7 @@ func (l *Linger) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that l
// must live until the use above.
- runtime.KeepAlive(l)
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
return length, err
}
@@ -3897,7 +3897,7 @@ func (l *Linger) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that l
// must live until the use above.
- runtime.KeepAlive(l)
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
return length, err
}
@@ -3913,7 +3913,7 @@ func (l *Linger) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that l
// must live until the use above.
- runtime.KeepAlive(l)
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -4143,7 +4143,7 @@ func (t *TCPInfo) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return length, err
}
@@ -4166,7 +4166,7 @@ func (t *TCPInfo) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return length, err
}
@@ -4182,7 +4182,7 @@ func (t *TCPInfo) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -4240,7 +4240,7 @@ func (c *ControlMessageCredentials) CopyOutN(task marshal.Task, addr usermem.Add
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that c
// must live until the use above.
- runtime.KeepAlive(c)
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
return length, err
}
@@ -4263,7 +4263,7 @@ func (c *ControlMessageCredentials) CopyIn(task marshal.Task, addr usermem.Addr)
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that c
// must live until the use above.
- runtime.KeepAlive(c)
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
return length, err
}
@@ -4279,7 +4279,7 @@ func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that c
// must live until the use above.
- runtime.KeepAlive(c)
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -4333,7 +4333,7 @@ func (t *Timespec) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (in
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return length, err
}
@@ -4356,7 +4356,7 @@ func (t *Timespec) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return length, err
}
@@ -4372,7 +4372,7 @@ func (t *Timespec) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -4426,7 +4426,7 @@ func (t *Timeval) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return length, err
}
@@ -4449,7 +4449,7 @@ func (t *Timeval) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return length, err
}
@@ -4465,7 +4465,7 @@ func (t *Timeval) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -4523,7 +4523,7 @@ func (s *StatxTimestamp) CopyOutN(task marshal.Task, addr usermem.Addr, limit in
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -4546,7 +4546,7 @@ func (s *StatxTimestamp) CopyIn(task marshal.Task, addr usermem.Addr) (int, erro
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -4562,7 +4562,7 @@ func (s *StatxTimestamp) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -4616,7 +4616,7 @@ func (u *Utime) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
@@ -4639,7 +4639,7 @@ func (u *Utime) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
@@ -4655,7 +4655,7 @@ func (u *Utime) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -4717,7 +4717,7 @@ func (w *Winsize) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that w
// must live until the use above.
- runtime.KeepAlive(w)
+ runtime.KeepAlive(w) // escapes: replaced by intrinsic.
return length, err
}
@@ -4740,7 +4740,7 @@ func (w *Winsize) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that w
// must live until the use above.
- runtime.KeepAlive(w)
+ runtime.KeepAlive(w) // escapes: replaced by intrinsic.
return length, err
}
@@ -4756,7 +4756,7 @@ func (w *Winsize) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that w
// must live until the use above.
- runtime.KeepAlive(w)
+ runtime.KeepAlive(w) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -4831,7 +4831,7 @@ func (t *Termios) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return length, err
}
@@ -4854,7 +4854,7 @@ func (t *Termios) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return length, err
}
@@ -4870,7 +4870,7 @@ func (t *Termios) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(t)
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return int64(length), err
}
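
The reshuffled Packed() chains above (Statx, IPTIP, IP6TIP and so on) all implement the same split: a single raw copy of the struct's bytes when every embedded field has a packed layout, and a field-by-field fallback otherwise. A rough sketch of that pattern, with illustrative statx and timestamp types and a plain copy standing in for safecopy.CopyIn (assumptions for illustration, not the generated code itself):

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

type timestamp struct {
	Sec  int64
	Nsec int64
}

// Packed reports whether the Go layout matches the ABI layout, i.e. there is
// no implicit padding that the marshalled form would not contain.
func (t *timestamp) Packed() bool { return true }

type statx struct {
	Atime, Btime, Ctime, Mtime timestamp
}

func (s *statx) Packed() bool {
	// The generator emits these conjuncts in nondeterministic order, which
	// is why this diff is largely reshuffled && chains with no semantic
	// change.
	return s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed()
}

// MarshalBytes is the slow, field-by-field fallback.
func (s *statx) MarshalBytes(dst []byte) {
	for i, ts := range []timestamp{s.Atime, s.Btime, s.Ctime, s.Mtime} {
		binary.LittleEndian.PutUint64(dst[i*16:], uint64(ts.Sec))
		binary.LittleEndian.PutUint64(dst[i*16+8:], uint64(ts.Nsec))
	}
}

// MarshalUnsafe copies the raw struct bytes in one shot when the layout is
// packed, and falls back to MarshalBytes otherwise.
func (s *statx) MarshalUnsafe(dst []byte) {
	if s.Packed() {
		copy(dst, unsafe.Slice((*byte)(unsafe.Pointer(s)), unsafe.Sizeof(*s)))
	} else {
		s.MarshalBytes(dst)
	}
}

func main() {
	s := &statx{Atime: timestamp{Sec: 1}}
	dst := make([]byte, unsafe.Sizeof(*s))
	s.MarshalUnsafe(dst)
	fmt.Println(dst[:8])
}
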
diff --git a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
index 5d0274386..b1e399b81 100644
--- a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
@@ -79,7 +79,7 @@ func (e *EpollEvent) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that e
// must live until the use above.
- runtime.KeepAlive(e)
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
return length, err
}
@@ -102,7 +102,7 @@ func (e *EpollEvent) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that e
// must live until the use above.
- runtime.KeepAlive(e)
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
return length, err
}
@@ -118,7 +118,7 @@ func (e *EpollEvent) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that e
// must live until the use above.
- runtime.KeepAlive(e)
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -143,7 +143,7 @@ func CopyEpollEventSliceIn(task marshal.Task, addr usermem.Addr, dst []EpollEven
length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -168,7 +168,7 @@ func CopyEpollEventSliceOut(task marshal.Task, addr usermem.Addr, src []EpollEve
length, err := task.CopyOutBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -186,7 +186,7 @@ func MarshalUnsafeEpollEventSlice(src []EpollEvent, dst []byte) (int, error) {
length, err := safecopy.CopyIn(dst[:(size*count)], val)
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -204,7 +204,7 @@ func UnmarshalUnsafeEpollEventSlice(dst []EpollEvent, src []byte) (int, error) {
length, err := safecopy.CopyOut(val, src[:(size*count)])
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -288,12 +288,12 @@ func (s *Stat) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (s *Stat) Packed() bool {
- return s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed()
+ return s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (s *Stat) MarshalUnsafe(dst []byte) {
- if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ if s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(s))
} else {
// Type Stat doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -314,7 +314,7 @@ func (s *Stat) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (s *Stat) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
- if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ if !s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
s.MarshalBytes(buf) // escapes: fallback.
@@ -331,7 +331,7 @@ func (s *Stat) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, e
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -344,7 +344,7 @@ func (s *Stat) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
- if !s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
+ if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
@@ -364,13 +364,13 @@ func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
func (s *Stat) WriteTo(writer io.Writer) (int64, error) {
- if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ if !s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, s.SizeBytes())
s.MarshalBytes(buf)
@@ -388,7 +388,7 @@ func (s *Stat) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -542,7 +542,7 @@ func (p *PtraceRegs) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that p
// must live until the use above.
- runtime.KeepAlive(p)
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
return length, err
}
@@ -565,7 +565,7 @@ func (p *PtraceRegs) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that p
// must live until the use above.
- runtime.KeepAlive(p)
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
return length, err
}
@@ -581,7 +581,7 @@ func (p *PtraceRegs) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that p
// must live until the use above.
- runtime.KeepAlive(p)
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
return int64(length), err
}
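
Editor's note: two kinds of change repeat through these generated marshal files: the commutative Packed() conjunctions are emitted in a different order, and every runtime.KeepAlive call gains the comment "// escapes: replaced by intrinsic.". The KeepAlive is needed because the fast path hands the struct's memory to the copy routine as an unsafe.Pointer, which the compiler's escape analysis cannot see through; KeepAlive pins the receiver until the copy returns, and modern Go lowers it to an intrinsic rather than a real call. A minimal, self-contained sketch of that pattern follows. The stat type and copyOut helper here are hypothetical stand-ins, not gVisor's marshal.Marshallable API.

package main

import (
    "fmt"
    "runtime"
    "unsafe"
)

// stat is a hypothetical packed struct standing in for linux.Stat.
type stat struct {
    Dev uint64
    Ino uint64
}

// copyOut copies s's memory into dst the way the generated CopyOutN fast
// path does: view the struct as raw bytes via unsafe.Pointer, copy, then
// keep s alive past the unsafe use.
func (s *stat) copyOut(dst []byte) int {
    src := (*[unsafe.Sizeof(stat{})]byte)(unsafe.Pointer(s))[:]
    n := copy(dst, src)
    // The unsafe.Pointer conversion above hides s from escape analysis,
    // so pin it until the copy has completed.
    runtime.KeepAlive(s)
    return n
}

func main() {
    s := &stat{Dev: 8, Ino: 42}
    buf := make([]byte, unsafe.Sizeof(*s))
    fmt.Println("copied", s.copyOut(buf), "bytes")
}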
diff --git a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
index 9a8e13baf..9282f81ac 100644
--- a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
@@ -82,7 +82,7 @@ func (e *EpollEvent) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that e
// must live until the use above.
- runtime.KeepAlive(e)
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
return length, err
}
@@ -105,7 +105,7 @@ func (e *EpollEvent) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that e
// must live until the use above.
- runtime.KeepAlive(e)
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
return length, err
}
@@ -121,7 +121,7 @@ func (e *EpollEvent) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that e
// must live until the use above.
- runtime.KeepAlive(e)
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -146,7 +146,7 @@ func CopyEpollEventSliceIn(task marshal.Task, addr usermem.Addr, dst []EpollEven
length, err := task.CopyInBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -171,7 +171,7 @@ func CopyEpollEventSliceOut(task marshal.Task, addr usermem.Addr, src []EpollEve
length, err := task.CopyOutBytes(addr, buf)
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -189,7 +189,7 @@ func MarshalUnsafeEpollEventSlice(src []EpollEvent, dst []byte) (int, error) {
length, err := safecopy.CopyIn(dst[:(size*count)], val)
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -207,7 +207,7 @@ func UnmarshalUnsafeEpollEventSlice(dst []EpollEvent, src []byte) (int, error) {
length, err := safecopy.CopyOut(val, src[:(size*count)])
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -338,7 +338,7 @@ func (s *Stat) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, e
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -371,7 +371,7 @@ func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -395,7 +395,7 @@ func (s *Stat) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -462,7 +462,7 @@ func (p *PtraceRegs) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that p
// must live until the use above.
- runtime.KeepAlive(p)
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
return length, err
}
@@ -485,7 +485,7 @@ func (p *PtraceRegs) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that p
// must live until the use above.
- runtime.KeepAlive(p)
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
return length, err
}
@@ -501,7 +501,7 @@ func (p *PtraceRegs) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that p
// must live until the use above.
- runtime.KeepAlive(p)
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
return int64(length), err
}
diff --git a/pkg/sentry/arch/arch_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
index 5d77cd2d1..9df95c6f9 100644
--- a/pkg/sentry/arch/arch_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
@@ -98,7 +98,7 @@ func (s *SignalAct) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (i
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -131,7 +131,7 @@ func (s *SignalAct) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -155,7 +155,7 @@ func (s *SignalAct) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -217,7 +217,7 @@ func (s *SignalStack) CopyOutN(task marshal.Task, addr usermem.Addr, limit int)
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -240,7 +240,7 @@ func (s *SignalStack) CopyIn(task marshal.Task, addr usermem.Addr) (int, error)
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -256,7 +256,7 @@ func (s *SignalStack) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -327,7 +327,7 @@ func (s *SignalInfo) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -350,7 +350,7 @@ func (s *SignalInfo) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -366,7 +366,7 @@ func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
diff --git a/pkg/sentry/fsimpl/devpts/root_inode_refs.go b/pkg/sentry/fsimpl/devpts/root_inode_refs.go
index 051801202..4abb66431 100644
--- a/pkg/sentry/fsimpl/devpts/root_inode_refs.go
+++ b/pkg/sentry/fsimpl/devpts/root_inode_refs.go
@@ -2,11 +2,10 @@ package devpts
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/fuse/inode_refs.go b/pkg/sentry/fsimpl/fuse/inode_refs.go
index 6b9456e1d..4fb4d4da7 100644
--- a/pkg/sentry/fsimpl/fuse/inode_refs.go
+++ b/pkg/sentry/fsimpl/fuse/inode_refs.go
@@ -2,11 +2,10 @@ package fuse
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
index babb3f664..225f59782 100644
--- a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
+++ b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
@@ -2,11 +2,10 @@ package host
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/host/inode_refs.go b/pkg/sentry/fsimpl/host/inode_refs.go
index 17f90ce4a..4075eae17 100644
--- a/pkg/sentry/fsimpl/host/inode_refs.go
+++ b/pkg/sentry/fsimpl/host/inode_refs.go
@@ -2,11 +2,10 @@ package host
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/kernfs/dentry_refs.go b/pkg/sentry/fsimpl/kernfs/dentry_refs.go
index 79863b3bc..f99d4941a 100644
--- a/pkg/sentry/fsimpl/kernfs/dentry_refs.go
+++ b/pkg/sentry/fsimpl/kernfs/dentry_refs.go
@@ -2,11 +2,10 @@ package kernfs
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/kernfs/static_directory_refs.go b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
index 478b04bdd..2b258010e 100644
--- a/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
+++ b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
@@ -2,11 +2,10 @@ package kernfs
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
index 9431c1506..467c32752 100644
--- a/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
@@ -2,11 +2,10 @@ package proc
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
index 872b20eb0..3fcda0948 100644
--- a/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
@@ -2,11 +2,10 @@ package proc
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
index c6d9b3522..2da6801c2 100644
--- a/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
@@ -2,11 +2,10 @@ package proc
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/proc/task_inode_refs.go b/pkg/sentry/fsimpl/proc/task_inode_refs.go
index 714488450..b6e19844c 100644
--- a/pkg/sentry/fsimpl/proc/task_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/task_inode_refs.go
@@ -2,11 +2,10 @@ package proc
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/proc/tasks_inode_refs.go b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
index 22d9cc488..6207364e4 100644
--- a/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
@@ -2,11 +2,10 @@ package proc
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/sys/dir_refs.go b/pkg/sentry/fsimpl/sys/dir_refs.go
index 89609b198..9d15d4c80 100644
--- a/pkg/sentry/fsimpl/sys/dir_refs.go
+++ b/pkg/sentry/fsimpl/sys/dir_refs.go
@@ -2,11 +2,10 @@ package sys
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/fsimpl/tmpfs/inode_refs.go b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
index dbf0b2766..ff5e99c52 100644
--- a/pkg/sentry/fsimpl/tmpfs/inode_refs.go
+++ b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
@@ -2,11 +2,10 @@ package tmpfs
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/kernel/fd_table_refs.go b/pkg/sentry/kernel/fd_table_refs.go
index ecba138ac..a630289c9 100644
--- a/pkg/sentry/kernel/fd_table_refs.go
+++ b/pkg/sentry/kernel/fd_table_refs.go
@@ -2,11 +2,10 @@ package kernel
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/kernel/fs_context_refs.go b/pkg/sentry/kernel/fs_context_refs.go
index fb2fde971..e8bb1e6ee 100644
--- a/pkg/sentry/kernel/fs_context_refs.go
+++ b/pkg/sentry/kernel/fs_context_refs.go
@@ -2,11 +2,10 @@ package kernel
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/kernel/process_group_refs.go b/pkg/sentry/kernel/process_group_refs.go
index 4ed6e6458..4b257d548 100644
--- a/pkg/sentry/kernel/process_group_refs.go
+++ b/pkg/sentry/kernel/process_group_refs.go
@@ -2,11 +2,10 @@ package kernel
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/kernel/session_refs.go b/pkg/sentry/kernel/session_refs.go
index f2e1bb797..204fdd060 100644
--- a/pkg/sentry/kernel/session_refs.go
+++ b/pkg/sentry/kernel/session_refs.go
@@ -2,11 +2,10 @@ package kernel
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/kernel/shm/shm_refs.go b/pkg/sentry/kernel/shm/shm_refs.go
index 51e07d0b3..4bffdd0b3 100644
--- a/pkg/sentry/kernel/shm/shm_refs.go
+++ b/pkg/sentry/kernel/shm/shm_refs.go
@@ -2,11 +2,10 @@ package shm
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/mm/aio_mappable_refs.go b/pkg/sentry/mm/aio_mappable_refs.go
index b99909f07..141747137 100644
--- a/pkg/sentry/mm/aio_mappable_refs.go
+++ b/pkg/sentry/mm/aio_mappable_refs.go
@@ -2,11 +2,10 @@ package mm
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/mm/special_mappable_refs.go b/pkg/sentry/mm/special_mappable_refs.go
index 035bbe690..0921a5d18 100644
--- a/pkg/sentry/mm/special_mappable_refs.go
+++ b/pkg/sentry/mm/special_mappable_refs.go
@@ -2,11 +2,10 @@ package mm
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/platform/kvm/bluepill_fault.go b/pkg/sentry/platform/kvm/bluepill_fault.go
index e34f46aeb..a182e4f22 100644
--- a/pkg/sentry/platform/kvm/bluepill_fault.go
+++ b/pkg/sentry/platform/kvm/bluepill_fault.go
@@ -98,6 +98,10 @@ func handleBluepillFault(m *machine, physical uintptr, phyRegions []physicalRegi
}
errno := m.setMemoryRegion(int(slot), physicalStart, length, virtualStart, flags)
if errno == 0 {
+ // Store the physical address in the slot. This is used to
+ // avoid calls to handleBluepillFault in the future (see
+ // machine.mapPhysical).
+ atomic.StoreUintptr(&m.usedSlots[slot], physical)
// Successfully added region; we can increment nextSlot and
// allow another set to proceed here.
atomic.StoreUint32(&m.nextSlot, slot+1)
diff --git a/pkg/sentry/platform/kvm/kvm_const.go b/pkg/sentry/platform/kvm/kvm_const.go
index 3bf918446..5c4b18899 100644
--- a/pkg/sentry/platform/kvm/kvm_const.go
+++ b/pkg/sentry/platform/kvm/kvm_const.go
@@ -56,6 +56,7 @@ const (
// KVM capability options.
const (
+ _KVM_CAP_MAX_MEMSLOTS = 0x0a
_KVM_CAP_MAX_VCPUS = 0x42
_KVM_CAP_ARM_VM_IPA_SIZE = 0xa5
_KVM_CAP_VCPU_EVENTS = 0x29
@@ -64,6 +65,7 @@ const (
// KVM limits.
const (
+ _KVM_NR_MEMSLOTS = 0x100
_KVM_NR_VCPUS = 0xff
_KVM_NR_INTERRUPTS = 0x100
_KVM_NR_CPUID_ENTRIES = 0x100
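
Editor's note: the two new constants pair up. _KVM_CAP_MAX_MEMSLOTS is queried with KVM_CHECK_EXTENSION in newMachine below, and _KVM_NR_MEMSLOTS is the static fallback when the query fails. A small sketch of that query-with-fallback shape follows; the maxMemSlots wrapper is hypothetical, while the capability and limit values are the ones added in this diff and the ioctl request number is the standard KVM_CHECK_EXTENSION encoding.

package main

import (
    "fmt"
    "syscall"
)

const (
    _KVM_CHECK_EXTENSION  = 0xae03 // _IO(KVMIO, 0x03)
    _KVM_CAP_MAX_MEMSLOTS = 0x0a
    _KVM_NR_MEMSLOTS      = 0x100
)

// maxMemSlots asks the VM how many memory slots it supports and falls back
// to the static _KVM_NR_MEMSLOTS limit if the extension check fails or
// reports zero.
func maxMemSlots(vmFD uintptr) int {
    n, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, vmFD, _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_MEMSLOTS)
    if errno != 0 || n == 0 {
        return _KVM_NR_MEMSLOTS
    }
    return int(n)
}

func main() {
    // Without a real /dev/kvm VM fd the ioctl fails and the fallback is used.
    fmt.Println(maxMemSlots(^uintptr(0)))
}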
diff --git a/pkg/sentry/platform/kvm/machine.go b/pkg/sentry/platform/kvm/machine.go
index 6c54712d1..372a4cbd7 100644
--- a/pkg/sentry/platform/kvm/machine.go
+++ b/pkg/sentry/platform/kvm/machine.go
@@ -43,9 +43,6 @@ type machine struct {
// kernel is the set of global structures.
kernel ring0.Kernel
- // mappingCache is used for mapPhysical.
- mappingCache sync.Map
-
// mu protects vCPUs.
mu sync.RWMutex
@@ -63,6 +60,12 @@ type machine struct {
// maxVCPUs is the maximum number of vCPUs supported by the machine.
maxVCPUs int
+ // maxSlots is the maximum number of memory slots supported by the machine.
+ maxSlots int
+
+ // usedSlots is the set of used physical addresses (sorted).
+ usedSlots []uintptr
+
// nextID is the next vCPU ID.
nextID uint32
}
@@ -184,6 +187,7 @@ func newMachine(vm int) (*machine, error) {
PageTables: pagetables.New(newAllocator()),
})
+ // Pull the maximum vCPUs.
maxVCPUs, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(m.fd), _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_VCPUS)
if errno != 0 {
m.maxVCPUs = _KVM_NR_VCPUS
@@ -191,11 +195,19 @@ func newMachine(vm int) (*machine, error) {
m.maxVCPUs = int(maxVCPUs)
}
log.Debugf("The maximum number of vCPUs is %d.", m.maxVCPUs)
-
- // Create the vCPUs map/slices.
m.vCPUsByTID = make(map[uint64]*vCPU)
m.vCPUsByID = make([]*vCPU, m.maxVCPUs)
+ // Pull the maximum slots.
+ maxSlots, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(m.fd), _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_MEMSLOTS)
+ if errno != 0 {
+ m.maxSlots = _KVM_NR_MEMSLOTS
+ } else {
+ m.maxSlots = int(maxSlots)
+ }
+ log.Debugf("The maximum number of slots is %d.", m.maxSlots)
+ m.usedSlots = make([]uintptr, m.maxSlots)
+
// Apply the physical mappings. Note that these mappings may point to
// guest physical addresses that are not actually available. These
// physical pages are mapped on demand, see kernel_unsafe.go.
@@ -272,6 +284,20 @@ func newMachine(vm int) (*machine, error) {
return m, nil
}
+// hasSlot returns true iff the given address is mapped.
+//
+// This must be done via a linear scan.
+//
+//go:nosplit
+func (m *machine) hasSlot(physical uintptr) bool {
+ for i := 0; i < len(m.usedSlots); i++ {
+ if p := atomic.LoadUintptr(&m.usedSlots[i]); p == physical {
+ return true
+ }
+ }
+ return false
+}
+
// mapPhysical checks for the mapping of a physical range, and installs one if
// not available. This attempts to be efficient for calls in the hot path.
//
@@ -286,8 +312,8 @@ func (m *machine) mapPhysical(physical, length uintptr, phyRegions []physicalReg
panic("mapPhysical on unknown physical address")
}
- if _, ok := m.mappingCache.LoadOrStore(physicalStart, true); !ok {
- // Not present in the cache; requires setting the slot.
+ // Is this already mapped? Check the usedSlots.
+ if !m.hasSlot(physicalStart) {
if _, ok := handleBluepillFault(m, physical, phyRegions, flags); !ok {
panic("handleBluepillFault failed")
}
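
Editor's note: taken together with the atomic.StoreUintptr added in bluepill_fault.go above, this change replaces the sync.Map mapping cache with a fixed slice of used physical addresses. Faults publish a newly mapped address with an atomic store, and the hasSlot helper, marked //go:nosplit, checks for it with an allocation-free atomic linear scan on the hot path. A reduced sketch of that publish/scan pairing follows; the machine fields are hypothetical and slot allocation is simplified relative to handleBluepillFault.

package main

import (
    "fmt"
    "sync/atomic"
)

type machine struct {
    nextSlot  uint32
    usedSlots []uintptr // one entry per KVM memory slot; 0 means unused
}

// hasSlot reports whether physical has already been mapped into a slot.
// A plain loop over atomic loads keeps this allocation-free and safe to
// call from paths that cannot grow the stack.
func (m *machine) hasSlot(physical uintptr) bool {
    for i := range m.usedSlots {
        if atomic.LoadUintptr(&m.usedSlots[i]) == physical {
            return true
        }
    }
    return false
}

// recordSlot publishes a newly mapped physical address, mirroring the
// atomic.StoreUintptr added to handleBluepillFault (slot handout here is
// a simplified counter).
func (m *machine) recordSlot(physical uintptr) {
    slot := atomic.AddUint32(&m.nextSlot, 1) - 1
    atomic.StoreUintptr(&m.usedSlots[slot], physical)
}

func main() {
    m := &machine{usedSlots: make([]uintptr, 8)}
    m.recordSlot(0x1000)
    fmt.Println(m.hasSlot(0x1000), m.hasSlot(0x2000)) // true false
}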
diff --git a/pkg/sentry/platform/ring0/defs_impl_arm64.go b/pkg/sentry/platform/ring0/defs_impl_arm64.go
index eba2eac30..424b66f76 100644
--- a/pkg/sentry/platform/ring0/defs_impl_arm64.go
+++ b/pkg/sentry/platform/ring0/defs_impl_arm64.go
@@ -3,11 +3,11 @@ package ring0
import (
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables"
- "io"
- "reflect"
"fmt"
"gvisor.dev/gvisor/pkg/usermem"
+ "io"
+ "reflect"
)
// Useful bits.
diff --git a/pkg/sentry/socket/unix/socket_refs.go b/pkg/sentry/socket/unix/socket_refs.go
index dababb85f..39aaedc7f 100644
--- a/pkg/sentry/socket/unix/socket_refs.go
+++ b/pkg/sentry/socket/unix/socket_refs.go
@@ -2,11 +2,10 @@ package unix
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/socket/unix/transport/queue_refs.go b/pkg/sentry/socket/unix/transport/queue_refs.go
index 0d4e34988..4c3dcd13f 100644
--- a/pkg/sentry/socket/unix/transport/queue_refs.go
+++ b/pkg/sentry/socket/unix/transport/queue_refs.go
@@ -2,11 +2,10 @@ package transport
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
index ed5e48eaa..da2b37bba 100644
--- a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
+++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
@@ -66,7 +66,7 @@ func (s *sigSetWithSize) CopyOutN(task marshal.Task, addr usermem.Addr, limit in
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -89,7 +89,7 @@ func (s *sigSetWithSize) CopyIn(task marshal.Task, addr usermem.Addr) (int, erro
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
@@ -105,7 +105,7 @@ func (s *sigSetWithSize) WriteTo(writer io.Writer) (int64, error) {
length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(s)
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
diff --git a/pkg/sentry/vfs/file_description_refs.go b/pkg/sentry/vfs/file_description_refs.go
index bdd7e6554..6c7747259 100644
--- a/pkg/sentry/vfs/file_description_refs.go
+++ b/pkg/sentry/vfs/file_description_refs.go
@@ -2,11 +2,10 @@ package vfs
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/vfs/filesystem_refs.go b/pkg/sentry/vfs/filesystem_refs.go
index 38a9a986f..96f681831 100644
--- a/pkg/sentry/vfs/filesystem_refs.go
+++ b/pkg/sentry/vfs/filesystem_refs.go
@@ -2,11 +2,10 @@ package vfs
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/sentry/vfs/mount_namespace_refs.go b/pkg/sentry/vfs/mount_namespace_refs.go
index 63285fb8e..4c422c81f 100644
--- a/pkg/sentry/vfs/mount_namespace_refs.go
+++ b/pkg/sentry/vfs/mount_namespace_refs.go
@@ -2,11 +2,10 @@ package vfs
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/pkg/tcpip/link/tun/tun_endpoint_refs.go b/pkg/tcpip/link/tun/tun_endpoint_refs.go
index e0595429c..9a38142f5 100644
--- a/pkg/tcpip/link/tun/tun_endpoint_refs.go
+++ b/pkg/tcpip/link/tun/tun_endpoint_refs.go
@@ -2,11 +2,10 @@ package tun
import (
"fmt"
- "runtime"
- "sync/atomic"
-
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "runtime"
+ "sync/atomic"
)
// ownerType is used to customize logging. Note that we use a pointer to T so
diff --git a/tools/go_marshal/primitive/primitive_abi_autogen_unsafe.go b/tools/go_marshal/primitive/primitive_abi_autogen_unsafe.go
index a408e8194..3af348759 100644
--- a/tools/go_marshal/primitive/primitive_abi_autogen_unsafe.go
+++ b/tools/go_marshal/primitive/primitive_abi_autogen_unsafe.go
@@ -69,7 +69,7 @@ func (i *Int8) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, e
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -92,7 +92,7 @@ func (i *Int8) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -108,7 +108,7 @@ func (i *Int8) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -134,7 +134,7 @@ func CopyInt8SliceIn(task marshal.Task, addr usermem.Addr, dst []int8) (int, err
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -160,7 +160,7 @@ func CopyInt8SliceOut(task marshal.Task, addr usermem.Addr, src []int8) (int, er
length, err := task.CopyOutBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -178,7 +178,7 @@ func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) {
length, err := safecopy.CopyIn(dst[:(size*count)], val)
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -196,7 +196,7 @@ func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) {
length, err := safecopy.CopyOut(val, src[:(size*count)])
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -246,7 +246,7 @@ func (u *Uint8) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
@@ -269,7 +269,7 @@ func (u *Uint8) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
@@ -285,7 +285,7 @@ func (u *Uint8) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -311,7 +311,7 @@ func CopyUint8SliceIn(task marshal.Task, addr usermem.Addr, dst []uint8) (int, e
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -337,7 +337,7 @@ func CopyUint8SliceOut(task marshal.Task, addr usermem.Addr, src []uint8) (int,
length, err := task.CopyOutBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -355,7 +355,7 @@ func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) {
length, err := safecopy.CopyIn(dst[:(size*count)], val)
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -373,7 +373,7 @@ func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) {
length, err := safecopy.CopyOut(val, src[:(size*count)])
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -423,7 +423,7 @@ func (i *Int16) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -446,7 +446,7 @@ func (i *Int16) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -462,7 +462,7 @@ func (i *Int16) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -488,7 +488,7 @@ func CopyInt16SliceIn(task marshal.Task, addr usermem.Addr, dst []int16) (int, e
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -514,7 +514,7 @@ func CopyInt16SliceOut(task marshal.Task, addr usermem.Addr, src []int16) (int,
length, err := task.CopyOutBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -532,7 +532,7 @@ func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) {
length, err := safecopy.CopyIn(dst[:(size*count)], val)
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -550,7 +550,7 @@ func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) {
length, err := safecopy.CopyOut(val, src[:(size*count)])
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -600,7 +600,7 @@ func (u *Uint16) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
@@ -623,7 +623,7 @@ func (u *Uint16) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
@@ -639,7 +639,7 @@ func (u *Uint16) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -665,7 +665,7 @@ func CopyUint16SliceIn(task marshal.Task, addr usermem.Addr, dst []uint16) (int,
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -691,7 +691,7 @@ func CopyUint16SliceOut(task marshal.Task, addr usermem.Addr, src []uint16) (int
length, err := task.CopyOutBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -709,7 +709,7 @@ func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) {
length, err := safecopy.CopyIn(dst[:(size*count)], val)
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -727,7 +727,7 @@ func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) {
length, err := safecopy.CopyOut(val, src[:(size*count)])
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -777,7 +777,7 @@ func (i *Int32) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -800,7 +800,7 @@ func (i *Int32) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -816,7 +816,7 @@ func (i *Int32) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -842,7 +842,7 @@ func CopyInt32SliceIn(task marshal.Task, addr usermem.Addr, dst []int32) (int, e
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -868,7 +868,7 @@ func CopyInt32SliceOut(task marshal.Task, addr usermem.Addr, src []int32) (int,
length, err := task.CopyOutBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -886,7 +886,7 @@ func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) {
length, err := safecopy.CopyIn(dst[:(size*count)], val)
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -904,7 +904,7 @@ func UnmarshalUnsafeInt32Slice(dst []Int32, src []byte) (int, error) {
length, err := safecopy.CopyOut(val, src[:(size*count)])
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -954,7 +954,7 @@ func (u *Uint32) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
@@ -977,7 +977,7 @@ func (u *Uint32) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
@@ -993,7 +993,7 @@ func (u *Uint32) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -1019,7 +1019,7 @@ func CopyUint32SliceIn(task marshal.Task, addr usermem.Addr, dst []uint32) (int,
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -1045,7 +1045,7 @@ func CopyUint32SliceOut(task marshal.Task, addr usermem.Addr, src []uint32) (int
length, err := task.CopyOutBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -1063,7 +1063,7 @@ func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) {
length, err := safecopy.CopyIn(dst[:(size*count)], val)
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -1081,7 +1081,7 @@ func UnmarshalUnsafeUint32Slice(dst []Uint32, src []byte) (int, error) {
length, err := safecopy.CopyOut(val, src[:(size*count)])
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -1131,7 +1131,7 @@ func (i *Int64) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -1154,7 +1154,7 @@ func (i *Int64) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
@@ -1170,7 +1170,7 @@ func (i *Int64) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(i)
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -1196,7 +1196,7 @@ func CopyInt64SliceIn(task marshal.Task, addr usermem.Addr, dst []int64) (int, e
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -1222,7 +1222,7 @@ func CopyInt64SliceOut(task marshal.Task, addr usermem.Addr, src []int64) (int,
length, err := task.CopyOutBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -1240,7 +1240,7 @@ func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) {
length, err := safecopy.CopyIn(dst[:(size*count)], val)
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -1258,7 +1258,7 @@ func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) {
length, err := safecopy.CopyOut(val, src[:(size*count)])
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -1308,7 +1308,7 @@ func (u *Uint64) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int,
length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
@@ -1331,7 +1331,7 @@ func (u *Uint64) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
@@ -1347,7 +1347,7 @@ func (u *Uint64) WriteTo(w io.Writer) (int64, error) {
length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(u)
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -1373,7 +1373,7 @@ func CopyUint64SliceIn(task marshal.Task, addr usermem.Addr, dst []uint64) (int,
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
@@ -1399,7 +1399,7 @@ func CopyUint64SliceOut(task marshal.Task, addr usermem.Addr, src []uint64) (int
length, err := task.CopyOutBytes(addr, buf) // escapes: okay.
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -1417,7 +1417,7 @@ func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) {
length, err := safecopy.CopyIn(dst[:(size*count)], val)
// Since we bypassed the compiler's escape analysis, indicate that src
// must live until the use above.
- runtime.KeepAlive(src)
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
return length, err
}
@@ -1435,7 +1435,7 @@ func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) {
length, err := safecopy.CopyOut(val, src[:(size*count)])
// Since we bypassed the compiler's escape analysis, indicate that dst
// must live until the use above.
- runtime.KeepAlive(dst)
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
return length, err
}
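
Editor's note: the primitive slice helpers follow the same shape as the struct paths, applied to a slice's backing array: take the address of the first element, treat element-size times count bytes as the source, and keep the slice alive across the unsafe copy. A compact sketch follows, with a plain copy standing in for gVisor's safecopy.CopyIn; marshalUnsafeUint64Slice here is a hypothetical re-derivation, not the generated code.

package main

import (
    "fmt"
    "runtime"
    "unsafe"
)

// marshalUnsafeUint64Slice copies src's backing memory into dst and returns
// the number of bytes copied.
func marshalUnsafeUint64Slice(src []uint64, dst []byte) int {
    if len(src) == 0 {
        return 0
    }
    size := int(unsafe.Sizeof(uint64(0)))
    // Reinterpret the slice's backing array as raw bytes (requires Go 1.17+
    // for unsafe.Slice).
    raw := unsafe.Slice((*byte)(unsafe.Pointer(&src[0])), size*len(src))
    n := copy(dst, raw)
    // src is no longer visibly referenced after the unsafe.Pointer
    // conversion, so pin it until the copy is done.
    runtime.KeepAlive(src)
    return n
}

func main() {
    dst := make([]byte, 16)
    fmt.Println(marshalUnsafeUint64Slice([]uint64{1, 2}, dst), dst)
}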