-rw-r--r--  pkg/abi/linux/linux_abi_autogen_unsafe.go | 54
-rw-r--r--  pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go | 4
-rw-r--r--  pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go | 6
-rw-r--r--  pkg/sentry/fsimpl/devpts/root_inode_refs.go | 2
-rw-r--r--  pkg/sentry/fsimpl/fuse/inode_refs.go | 2
-rw-r--r--  pkg/sentry/fsimpl/host/connected_endpoint_refs.go | 2
-rw-r--r--  pkg/sentry/fsimpl/host/inode_refs.go | 2
-rw-r--r--  pkg/sentry/fsimpl/kernfs/dentry_refs.go | 2
-rw-r--r--  pkg/sentry/fsimpl/kernfs/static_directory_refs.go | 2
-rw-r--r--  pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go | 2
-rw-r--r--  pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go | 2
-rw-r--r--  pkg/sentry/fsimpl/proc/subtasks_inode_refs.go | 2
-rw-r--r--  pkg/sentry/fsimpl/proc/task_inode_refs.go | 2
-rw-r--r--  pkg/sentry/fsimpl/proc/tasks_inode_refs.go | 2
-rw-r--r--  pkg/sentry/fsimpl/sys/dir_refs.go | 2
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/inode_refs.go | 2
-rw-r--r--  pkg/sentry/kernel/fd_table_refs.go | 2
-rw-r--r--  pkg/sentry/kernel/fs_context_refs.go | 2
-rw-r--r--  pkg/sentry/kernel/process_group_refs.go | 2
-rw-r--r--  pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go | 3
-rw-r--r--  pkg/sentry/kernel/session_refs.go | 2
-rw-r--r--  pkg/sentry/kernel/shm/shm_refs.go | 2
-rw-r--r--  pkg/sentry/mm/aio_mappable_refs.go | 2
-rw-r--r--  pkg/sentry/mm/special_mappable_refs.go | 2
-rw-r--r--  pkg/sentry/socket/unix/socket_refs.go | 2
-rw-r--r--  pkg/sentry/socket/unix/transport/queue_refs.go | 2
-rw-r--r--  pkg/sentry/time/seqatomic_parameters_unsafe.go | 3
-rw-r--r--  pkg/sentry/vfs/file_description_refs.go | 2
-rw-r--r--  pkg/sentry/vfs/filesystem_refs.go | 2
-rw-r--r--  pkg/sentry/vfs/mount_namespace_refs.go | 2
-rw-r--r--  pkg/tcpip/link/tun/tun_endpoint_refs.go | 2
31 files changed, 62 insertions(+), 60 deletions(-)
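
Note on the hunks below: the pkg/abi/linux changes appear to be regeneration churn in the autogenerated marshalling code (gvisor's go_marshal output) — the order in which embedded fields' Packed() methods are consulted is permuted. The remaining files only regroup imports in other generated code. As orientation, the following standalone sketch shows the Packed()/MarshalUnsafe fast-path-with-fallback pattern that these hunks permute; the types and layout here are hypothetical, not gvisor's marshal package.

// Standalone sketch (hypothetical types, not gvisor code): generated
// Marshallable code keeps a cheap Packed() predicate per type and uses it to
// choose between a raw memory copy and a field-by-field encoder.
package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// Timespec stands in for an embedded field type such as a timestamp struct.
type Timespec struct {
	Sec  int64
	Nsec int64
}

// Packed reports whether the in-memory layout has no implicit padding.
func (t *Timespec) Packed() bool { return true }

// Stat stands in for a struct whose Packed() is the AND of its fields'.
type Stat struct {
	ATime Timespec
	MTime Timespec
}

// Packed is a conjunction of the embedded fields' Packed() results; the order
// of the conjuncts does not affect the result, which is why regeneration can
// permute them freely.
func (s *Stat) Packed() bool {
	return s.ATime.Packed() && s.MTime.Packed()
}

// SizeBytes returns the serialized size (four int64 fields, no padding).
func (s *Stat) SizeBytes() int { return 32 }

// MarshalBytes is the safe, field-by-field fallback encoder.
func (s *Stat) MarshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[0:8], uint64(s.ATime.Sec))
	binary.LittleEndian.PutUint64(dst[8:16], uint64(s.ATime.Nsec))
	binary.LittleEndian.PutUint64(dst[16:24], uint64(s.MTime.Sec))
	binary.LittleEndian.PutUint64(dst[24:32], uint64(s.MTime.Nsec))
}

// MarshalUnsafe mirrors the generated shape: copy the struct's bytes directly
// when the layout is packed (host byte order, as for real ABI structs),
// otherwise fall back to MarshalBytes.
func (s *Stat) MarshalUnsafe(dst []byte) {
	if s.Packed() {
		copy(dst, unsafe.Slice((*byte)(unsafe.Pointer(s)), s.SizeBytes()))
	} else {
		s.MarshalBytes(dst)
	}
}

func main() {
	s := Stat{ATime: Timespec{Sec: 1, Nsec: 2}, MTime: Timespec{Sec: 3, Nsec: 4}}
	buf := make([]byte, s.SizeBytes())
	s.MarshalUnsafe(buf)
	fmt.Printf("% x\n", buf)
}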
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 102820ff5..04476f011 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -1099,7 +1099,7 @@ func (s *Statx) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (s *Statx) Packed() bool {
- return s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed()
+ return s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
@@ -1125,7 +1125,7 @@ func (s *Statx) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (s *Statx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() {
+ if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
s.MarshalBytes(buf) // escapes: fallback.
@@ -1155,7 +1155,7 @@ func (s *Statx) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error)
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (s *Statx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() {
+ if !s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -1181,7 +1181,7 @@ func (s *Statx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// WriteTo implements io.WriterTo.WriteTo.
func (s *Statx) WriteTo(writer io.Writer) (int64, error) {
- if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+ if !s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, s.SizeBytes())
s.MarshalBytes(buf)
@@ -1600,7 +1600,7 @@ func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
+ if !f.Unique.Packed() && f.Opcode.Packed() {
// Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
f.MarshalBytes(buf) // escapes: fallback.
@@ -1656,7 +1656,7 @@ func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e
// WriteTo implements io.WriterTo.WriteTo.
func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
+ if !f.Unique.Packed() && f.Opcode.Packed() {
// Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, f.SizeBytes())
f.MarshalBytes(buf)
@@ -4416,7 +4416,7 @@ func (i *IPTEntry) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (i *IPTEntry) Packed() bool {
- return i.Counters.Packed() && i.IP.Packed()
+ return i.IP.Packed() && i.Counters.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
@@ -4472,7 +4472,7 @@ func (i *IPTEntry) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, erro
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (i *IPTEntry) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !i.IP.Packed() && i.Counters.Packed() {
+ if !i.Counters.Packed() && i.IP.Packed() {
// Type IPTEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -4604,7 +4604,7 @@ func (i *IPTIP) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (i *IPTIP) Packed() bool {
- return i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed()
+ return i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
@@ -4619,7 +4619,7 @@ func (i *IPTIP) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (i *IPTIP) UnmarshalUnsafe(src []byte) {
- if i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
safecopy.CopyOut(unsafe.Pointer(i), src)
} else {
// Type IPTIP doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -4660,7 +4660,7 @@ func (i *IPTIP) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error)
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (i *IPTIP) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if !i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() {
// Type IPTIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -5400,7 +5400,7 @@ func (i *IP6TEntry) Packed() bool {
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (i *IP6TEntry) MarshalUnsafe(dst []byte) {
- if i.IPv6.Packed() && i.Counters.Packed() {
+ if i.Counters.Packed() && i.IPv6.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(i))
} else {
// Type IP6TEntry doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -5592,12 +5592,12 @@ func (i *IP6TIP) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (i *IP6TIP) Packed() bool {
- return i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed()
+ return i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (i *IP6TIP) MarshalUnsafe(dst []byte) {
- if i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
+ if i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(i))
} else {
// Type IP6TIP doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -5607,7 +5607,7 @@ func (i *IP6TIP) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (i *IP6TIP) UnmarshalUnsafe(src []byte) {
- if i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() {
safecopy.CopyOut(unsafe.Pointer(i), src)
} else {
// Type IP6TIP doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -5674,7 +5674,7 @@ func (i *IP6TIP) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error)
// WriteTo implements io.WriterTo.WriteTo.
func (i *IP6TIP) WriteTo(writer io.Writer) (int64, error) {
- if !i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
+ if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
// Type IP6TIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, i.SizeBytes())
i.MarshalBytes(buf)
@@ -6167,7 +6167,7 @@ func (r *Rusage) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (r *Rusage) Packed() bool {
- return r.UTime.Packed() && r.STime.Packed()
+ return r.STime.Packed() && r.UTime.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
@@ -6223,7 +6223,7 @@ func (r *Rusage) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error)
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (r *Rusage) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !r.STime.Packed() && r.UTime.Packed() {
+ if !r.UTime.Packed() && r.STime.Packed() {
// Type Rusage doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(r.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -6396,7 +6396,7 @@ func (s *SemidDS) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error)
// WriteTo implements io.WriterTo.WriteTo.
func (s *SemidDS) WriteTo(writer io.Writer) (int64, error) {
- if !s.SemPerm.Packed() && s.SemOTime.Packed() && s.SemCTime.Packed() {
+ if !s.SemCTime.Packed() && s.SemPerm.Packed() && s.SemOTime.Packed() {
// Type SemidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, s.SizeBytes())
s.MarshalBytes(buf)
@@ -6666,7 +6666,7 @@ func (s *ShmidDS) Packed() bool {
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (s *ShmidDS) MarshalUnsafe(dst []byte) {
- if s.ShmPerm.Packed() && s.ShmAtime.Packed() && s.ShmDtime.Packed() && s.ShmCtime.Packed() {
+ if s.ShmCtime.Packed() && s.ShmPerm.Packed() && s.ShmAtime.Packed() && s.ShmDtime.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(s))
} else {
// Type ShmidDS doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -6687,7 +6687,7 @@ func (s *ShmidDS) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (s *ShmidDS) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !s.ShmPerm.Packed() && s.ShmAtime.Packed() && s.ShmDtime.Packed() && s.ShmCtime.Packed() {
+ if !s.ShmCtime.Packed() && s.ShmPerm.Packed() && s.ShmAtime.Packed() && s.ShmDtime.Packed() {
// Type ShmidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
s.MarshalBytes(buf) // escapes: fallback.
@@ -6717,7 +6717,7 @@ func (s *ShmidDS) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (s *ShmidDS) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !s.ShmDtime.Packed() && s.ShmCtime.Packed() && s.ShmPerm.Packed() && s.ShmAtime.Packed() {
+ if !s.ShmAtime.Packed() && s.ShmDtime.Packed() && s.ShmCtime.Packed() && s.ShmPerm.Packed() {
// Type ShmidDS doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
@@ -8769,7 +8769,7 @@ func (i *Itimerspec) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (i *Itimerspec) Packed() bool {
- return i.Value.Packed() && i.Interval.Packed()
+ return i.Interval.Packed() && i.Value.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
@@ -9128,12 +9128,12 @@ func (t *Tms) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (t *Tms) Packed() bool {
- return t.CUTime.Packed() && t.CSTime.Packed() && t.UTime.Packed() && t.STime.Packed()
+ return t.STime.Packed() && t.CUTime.Packed() && t.CSTime.Packed() && t.UTime.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (t *Tms) MarshalUnsafe(dst []byte) {
- if t.STime.Packed() && t.CUTime.Packed() && t.CSTime.Packed() && t.UTime.Packed() {
+ if t.CUTime.Packed() && t.CSTime.Packed() && t.UTime.Packed() && t.STime.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(t))
} else {
// Type Tms doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -9154,7 +9154,7 @@ func (t *Tms) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (t *Tms) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !t.CUTime.Packed() && t.CSTime.Packed() && t.UTime.Packed() && t.STime.Packed() {
+ if !t.UTime.Packed() && t.STime.Packed() && t.CUTime.Packed() && t.CSTime.Packed() {
// Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
t.MarshalBytes(buf) // escapes: fallback.
@@ -9184,7 +9184,7 @@ func (t *Tms) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (t *Tms) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !t.STime.Packed() && t.CUTime.Packed() && t.CSTime.Packed() && t.UTime.Packed() {
+ if !t.UTime.Packed() && t.STime.Packed() && t.CUTime.Packed() && t.CSTime.Packed() {
// Type Tms doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
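
The CopyOutN/CopyOut/CopyIn hunks above all share one shape: when the type is not packed, serialize through a scratch buffer borrowed from the copy context and move that buffer to or from the user address; when it is packed, the struct's memory is handed to the copier directly. A hypothetical, self-contained sketch of that fallback path follows; memUser is a stand-in for marshal.CopyContext, not gvisor's API.

// Hypothetical sketch of the scratch-buffer fallback used by the generated
// CopyOut-style methods. memUser simulates user memory with a byte slice.
package main

import (
	"encoding/binary"
	"fmt"
)

type memUser struct {
	scratch []byte
	mem     []byte
}

// CopyScratchBuffer lends a reusable buffer of at least size bytes.
func (m *memUser) CopyScratchBuffer(size int) []byte {
	if cap(m.scratch) < size {
		m.scratch = make([]byte, size)
	}
	return m.scratch[:size]
}

// CopyOutBytes writes b to the simulated user address addr.
func (m *memUser) CopyOutBytes(addr int, b []byte) (int, error) {
	return copy(m.mem[addr:], b), nil
}

// itimerspec is an illustrative four-field struct, 32 bytes when serialized.
type itimerspec struct {
	IntervalSec, IntervalNsec, ValueSec, ValueNsec int64
}

func (i *itimerspec) SizeBytes() int { return 32 }

// packed returns false here just to force the fallback branch the diff shows.
func (i *itimerspec) packed() bool { return false }

func (i *itimerspec) marshalBytes(dst []byte) {
	for n, v := range []int64{i.IntervalSec, i.IntervalNsec, i.ValueSec, i.ValueNsec} {
		binary.LittleEndian.PutUint64(dst[n*8:], uint64(v))
	}
}

// copyOutN mirrors the generated CopyOutN shape: not packed means marshal into
// a scratch buffer, then copy at most limit bytes out to the target address.
func (i *itimerspec) copyOutN(cc *memUser, addr, limit int) (int, error) {
	if !i.packed() {
		buf := cc.CopyScratchBuffer(i.SizeBytes())
		i.marshalBytes(buf)
		return cc.CopyOutBytes(addr, buf[:limit])
	}
	// Packed fast path elided; the generated code hands the struct's memory
	// to the copier directly instead of serializing first.
	return 0, nil
}

func main() {
	cc := &memUser{mem: make([]byte, 64)}
	it := &itimerspec{IntervalSec: 1, ValueSec: 2}
	n, err := it.copyOutN(cc, 16, it.SizeBytes())
	fmt.Println(n, err, cc.mem[16:24])
}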
diff --git a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
index 1fb5ef6c7..30719fe31 100644
--- a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
@@ -303,7 +303,7 @@ func (s *Stat) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *Stat) UnmarshalUnsafe(src []byte) {
- if s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
+ if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
safecopy.CopyOut(unsafe.Pointer(s), src)
} else {
// Type Stat doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -344,7 +344,7 @@ func (s *Stat) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (s *Stat) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ if !s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
diff --git a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
index bf57a2101..e7d7217d5 100644
--- a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
@@ -300,7 +300,7 @@ func (s *Stat) Packed() bool {
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (s *Stat) MarshalUnsafe(dst []byte) {
- if s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
+ if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(s))
} else {
// Type Stat doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -321,7 +321,7 @@ func (s *Stat) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (s *Stat) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ if !s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
s.MarshalBytes(buf) // escapes: fallback.
@@ -351,7 +351,7 @@ func (s *Stat) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (s *Stat) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ if !s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
diff --git a/pkg/sentry/fsimpl/devpts/root_inode_refs.go b/pkg/sentry/fsimpl/devpts/root_inode_refs.go
index 068ee2f20..051801202 100644
--- a/pkg/sentry/fsimpl/devpts/root_inode_refs.go
+++ b/pkg/sentry/fsimpl/devpts/root_inode_refs.go
@@ -1,10 +1,10 @@
package devpts
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
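
Every *_refs.go hunk from here on is the same two-line move: "fmt" shifts to the top of the standard-library group, leaving the generated import blocks in conventional goimports order (standard library sorted first, gvisor packages after). Reconstructed result for the file above, as an excerpt only; the blank separator between the two groups is implied by gofmt's grouping and is not visible in the stripped context lines:

package devpts

import (
	"fmt"
	"runtime"
	"sync/atomic"

	"gvisor.dev/gvisor/pkg/log"
	refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)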
diff --git a/pkg/sentry/fsimpl/fuse/inode_refs.go b/pkg/sentry/fsimpl/fuse/inode_refs.go
index 5d1de6067..6b9456e1d 100644
--- a/pkg/sentry/fsimpl/fuse/inode_refs.go
+++ b/pkg/sentry/fsimpl/fuse/inode_refs.go
@@ -1,10 +1,10 @@
package fuse
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
index abf4a9082..babb3f664 100644
--- a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
+++ b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
@@ -1,10 +1,10 @@
package host
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/fsimpl/host/inode_refs.go b/pkg/sentry/fsimpl/host/inode_refs.go
index 75b9f49e2..17f90ce4a 100644
--- a/pkg/sentry/fsimpl/host/inode_refs.go
+++ b/pkg/sentry/fsimpl/host/inode_refs.go
@@ -1,10 +1,10 @@
package host
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/fsimpl/kernfs/dentry_refs.go b/pkg/sentry/fsimpl/kernfs/dentry_refs.go
index b7125caee..79863b3bc 100644
--- a/pkg/sentry/fsimpl/kernfs/dentry_refs.go
+++ b/pkg/sentry/fsimpl/kernfs/dentry_refs.go
@@ -1,10 +1,10 @@
package kernfs
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/fsimpl/kernfs/static_directory_refs.go b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
index 0ff013c97..478b04bdd 100644
--- a/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
+++ b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
@@ -1,10 +1,10 @@
package kernfs
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
index 454862d98..9431c1506 100644
--- a/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
@@ -1,10 +1,10 @@
package proc
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
index d2169be5b..872b20eb0 100644
--- a/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
@@ -1,10 +1,10 @@
package proc
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
index 9b50f632c..c6d9b3522 100644
--- a/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
@@ -1,10 +1,10 @@
package proc
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/fsimpl/proc/task_inode_refs.go b/pkg/sentry/fsimpl/proc/task_inode_refs.go
index c29272f9b..714488450 100644
--- a/pkg/sentry/fsimpl/proc/task_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/task_inode_refs.go
@@ -1,10 +1,10 @@
package proc
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/fsimpl/proc/tasks_inode_refs.go b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
index 7e0b70f6c..22d9cc488 100644
--- a/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
@@ -1,10 +1,10 @@
package proc
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/fsimpl/sys/dir_refs.go b/pkg/sentry/fsimpl/sys/dir_refs.go
index d42edb20e..89609b198 100644
--- a/pkg/sentry/fsimpl/sys/dir_refs.go
+++ b/pkg/sentry/fsimpl/sys/dir_refs.go
@@ -1,10 +1,10 @@
package sys
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/fsimpl/tmpfs/inode_refs.go b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
index 4f4037adb..dbf0b2766 100644
--- a/pkg/sentry/fsimpl/tmpfs/inode_refs.go
+++ b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
@@ -1,10 +1,10 @@
package tmpfs
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/kernel/fd_table_refs.go b/pkg/sentry/kernel/fd_table_refs.go
index dc7f4e246..ecba138ac 100644
--- a/pkg/sentry/kernel/fd_table_refs.go
+++ b/pkg/sentry/kernel/fd_table_refs.go
@@ -1,10 +1,10 @@
package kernel
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/kernel/fs_context_refs.go b/pkg/sentry/kernel/fs_context_refs.go
index be045c862..fb2fde971 100644
--- a/pkg/sentry/kernel/fs_context_refs.go
+++ b/pkg/sentry/kernel/fs_context_refs.go
@@ -1,10 +1,10 @@
package kernel
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/kernel/process_group_refs.go b/pkg/sentry/kernel/process_group_refs.go
index 4622687b1..4ed6e6458 100644
--- a/pkg/sentry/kernel/process_group_refs.go
+++ b/pkg/sentry/kernel/process_group_refs.go
@@ -1,10 +1,10 @@
package kernel
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go b/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
index a37f74a10..90148bbb2 100644
--- a/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
+++ b/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
@@ -2,10 +2,11 @@ package kernel
import (
"fmt"
- "gvisor.dev/gvisor/pkg/sync"
"reflect"
"strings"
"unsafe"
+
+ "gvisor.dev/gvisor/pkg/sync"
)
// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
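
The two seqatomic_*_unsafe.go hunks are the same import regrouping, this time moving pkg/sync below the standard-library block. For context, SeqAtomicLoad-style helpers implement a sequence-counter (seqlock-style) read. A simplified sketch of that retry loop, using plain atomics rather than gvisor's pkg/sync.SeqCount, is shown below; the unsynchronized struct copy in it is presumably why the real generated helpers live in _unsafe.go files and perform the copy through unsafe pointers.

// Simplified sketch of a sequence-counter protected read (hypothetical types).
// Writers bump the counter to an odd value while mutating; readers copy the
// data and retry until they observe the same even counter before and after.
package main

import (
	"fmt"
	"sync/atomic"
)

type schedInfo struct {
	UserTicks, SysTicks uint64
}

type seqGuarded struct {
	seq  uint32
	info schedInfo
}

// write publishes a new value; the odd counter marks the critical section.
func (g *seqGuarded) write(v schedInfo) {
	atomic.AddUint32(&g.seq, 1) // now odd: write in progress
	g.info = v
	atomic.AddUint32(&g.seq, 1) // now even: write complete
}

// read retries until it gets a copy no writer overlapped. The plain struct
// copy here is technically a data race under the Go memory model; the retry
// protocol, not the race detector, is what makes the result consistent, which
// is the problem the generated unsafe helpers handle explicitly.
func (g *seqGuarded) read() schedInfo {
	for {
		begin := atomic.LoadUint32(&g.seq)
		if begin%2 != 0 {
			continue // writer active; try again
		}
		v := g.info
		if atomic.LoadUint32(&g.seq) == begin {
			return v // no writer ran during the copy
		}
	}
}

func main() {
	g := &seqGuarded{}
	g.write(schedInfo{UserTicks: 10, SysTicks: 3})
	fmt.Printf("%+v\n", g.read())
}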
diff --git a/pkg/sentry/kernel/session_refs.go b/pkg/sentry/kernel/session_refs.go
index 89e43ae6b..f2e1bb797 100644
--- a/pkg/sentry/kernel/session_refs.go
+++ b/pkg/sentry/kernel/session_refs.go
@@ -1,10 +1,10 @@
package kernel
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/kernel/shm/shm_refs.go b/pkg/sentry/kernel/shm/shm_refs.go
index 2b4f608c7..51e07d0b3 100644
--- a/pkg/sentry/kernel/shm/shm_refs.go
+++ b/pkg/sentry/kernel/shm/shm_refs.go
@@ -1,10 +1,10 @@
package shm
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/mm/aio_mappable_refs.go b/pkg/sentry/mm/aio_mappable_refs.go
index ac7690d3f..b99909f07 100644
--- a/pkg/sentry/mm/aio_mappable_refs.go
+++ b/pkg/sentry/mm/aio_mappable_refs.go
@@ -1,10 +1,10 @@
package mm
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/mm/special_mappable_refs.go b/pkg/sentry/mm/special_mappable_refs.go
index b304fd2ef..035bbe690 100644
--- a/pkg/sentry/mm/special_mappable_refs.go
+++ b/pkg/sentry/mm/special_mappable_refs.go
@@ -1,10 +1,10 @@
package mm
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/socket/unix/socket_refs.go b/pkg/sentry/socket/unix/socket_refs.go
index 69fa54964..dababb85f 100644
--- a/pkg/sentry/socket/unix/socket_refs.go
+++ b/pkg/sentry/socket/unix/socket_refs.go
@@ -1,10 +1,10 @@
package unix
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/socket/unix/transport/queue_refs.go b/pkg/sentry/socket/unix/transport/queue_refs.go
index a154c8334..0d4e34988 100644
--- a/pkg/sentry/socket/unix/transport/queue_refs.go
+++ b/pkg/sentry/socket/unix/transport/queue_refs.go
@@ -1,10 +1,10 @@
package transport
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/time/seqatomic_parameters_unsafe.go b/pkg/sentry/time/seqatomic_parameters_unsafe.go
index 88d6b5569..2cb001080 100644
--- a/pkg/sentry/time/seqatomic_parameters_unsafe.go
+++ b/pkg/sentry/time/seqatomic_parameters_unsafe.go
@@ -2,10 +2,11 @@ package time
import (
"fmt"
- "gvisor.dev/gvisor/pkg/sync"
"reflect"
"strings"
"unsafe"
+
+ "gvisor.dev/gvisor/pkg/sync"
)
// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
diff --git a/pkg/sentry/vfs/file_description_refs.go b/pkg/sentry/vfs/file_description_refs.go
index 3953d2396..bdd7e6554 100644
--- a/pkg/sentry/vfs/file_description_refs.go
+++ b/pkg/sentry/vfs/file_description_refs.go
@@ -1,10 +1,10 @@
package vfs
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/vfs/filesystem_refs.go b/pkg/sentry/vfs/filesystem_refs.go
index c6a390430..38a9a986f 100644
--- a/pkg/sentry/vfs/filesystem_refs.go
+++ b/pkg/sentry/vfs/filesystem_refs.go
@@ -1,10 +1,10 @@
package vfs
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/sentry/vfs/mount_namespace_refs.go b/pkg/sentry/vfs/mount_namespace_refs.go
index ed126cc5e..63285fb8e 100644
--- a/pkg/sentry/vfs/mount_namespace_refs.go
+++ b/pkg/sentry/vfs/mount_namespace_refs.go
@@ -1,10 +1,10 @@
package vfs
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)
diff --git a/pkg/tcpip/link/tun/tun_endpoint_refs.go b/pkg/tcpip/link/tun/tun_endpoint_refs.go
index 895a577ce..e0595429c 100644
--- a/pkg/tcpip/link/tun/tun_endpoint_refs.go
+++ b/pkg/tcpip/link/tun/tun_endpoint_refs.go
@@ -1,10 +1,10 @@
package tun
import (
+ "fmt"
"runtime"
"sync/atomic"
- "fmt"
"gvisor.dev/gvisor/pkg/log"
refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
)