Diffstat (limited to 'pkg/sentry')
-rw-r--r-- | pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go | 336
-rw-r--r-- | pkg/sentry/arch/signal.go | 16
-rw-r--r-- | pkg/sentry/control/pprof.go | 2
-rw-r--r-- | pkg/sentry/fdimport/fdimport.go | 1
-rw-r--r-- | pkg/sentry/fs/copy_up.go | 13
-rw-r--r-- | pkg/sentry/fs/gofer/attr.go | 2
-rw-r--r-- | pkg/sentry/fsimpl/fuse/request_response.go | 1
-rw-r--r-- | pkg/sentry/fsimpl/signalfd/signalfd.go | 5
-rw-r--r-- | pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go | 120
-rw-r--r-- | pkg/sentry/kernel/kernel_abi_autogen_unsafe.go | 60
-rw-r--r-- | pkg/sentry/kernel/ptrace.go | 4
-rw-r--r-- | pkg/sentry/kernel/signal.go | 4
-rw-r--r-- | pkg/sentry/kernel/signalfd/signalfd.go | 4
-rw-r--r-- | pkg/sentry/kernel/task_exit.go | 8
-rw-r--r-- | pkg/sentry/kernel/task_signals.go | 16
-rw-r--r-- | pkg/sentry/memmap/memmap.go | 2
-rw-r--r-- | pkg/sentry/mm/aio_context_state.go | 4
-rw-r--r-- | pkg/sentry/mm/mm_state_autogen.go | 30
-rw-r--r-- | pkg/sentry/platform/ptrace/subprocess.go | 2
-rw-r--r-- | pkg/sentry/syscalls/linux/sys_signal.go | 16
-rw-r--r-- | pkg/sentry/syscalls/linux/sys_thread.go | 4
21 files changed, 328 insertions, 322 deletions
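
Two recurring patterns in this diff lend themselves to short illustrations. The first: fs/copy_up.go below switches its buffer pool from storing []byte values to storing *[]byte. Putting a slice value directly into an interface{} (as sync.Pool.Put does) boxes it on the heap, costing an allocation on every Put; storing a pointer to the slice avoids that (staticcheck flags the slice version as SA6002). The following is a minimal sketch of the pattern under that assumption, not gVisor's code; bufPool and bufSize are illustrative names.

// Sketch of the sync.Pool pattern adopted in pkg/sentry/fs/copy_up.go below.
// Storing *[]byte rather than []byte means Put/Get move only a pointer into
// the interface{}, so no per-call heap allocation occurs (staticcheck SA6002).
package main

import (
	"fmt"
	"sync"
)

const bufSize = 8 * 4096 // stand-in for 8*usermem.PageSize

var bufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, bufSize)
		return &b // return a pointer so the interface conversion is allocation-free
	},
}

func main() {
	buf := bufPool.Get().(*[]byte) // type-assert to the pointer type
	defer bufPool.Put(buf)         // return the same pointer to the pool
	copy(*buf, "hello")            // dereference to reach the underlying slice
	fmt.Println(string((*buf)[:5]))
}

The call sites then dereference the pointer, which is exactly what the copyContentsLocked hunks below do with *buf and (*buf)[:nr]. The second pattern, the zero-copy marshalling used throughout the regenerated files, is sketched at the end of this diff.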
diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
index aac25375e..b8667cdb9 100644
--- a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
@@ -27,174 +27,6 @@ var _ marshal.Marshallable = (*aarch64Ctx)(nil)
 var _ marshal.Marshallable = (*linux.SignalSet)(nil)
 
 // SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SignalContext64) SizeBytes() int {
-	return 32 +
-		8*31 +
-		1*8 +
-		(*FpsimdContext)(nil).SizeBytes() +
-		1*3568
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SignalContext64) MarshalBytes(dst []byte) {
-	usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
-	dst = dst[8:]
-	for idx := 0; idx < 31; idx++ {
-		usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
-		dst = dst[8:]
-	}
-	usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
-	dst = dst[8:]
-	usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
-	dst = dst[8:]
-	usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
-	dst = dst[8:]
-	for idx := 0; idx < 8; idx++ {
-		dst[0] = byte(s._pad[idx])
-		dst = dst[1:]
-	}
-	s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()])
-	dst = dst[s.Fpsimd64.SizeBytes():]
-	for idx := 0; idx < 3568; idx++ {
-		dst[0] = byte(s.Reserved[idx])
-		dst = dst[1:]
-	}
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SignalContext64) UnmarshalBytes(src []byte) {
-	s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
-	src = src[8:]
-	for idx := 0; idx < 31; idx++ {
-		s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
-		src = src[8:]
-	}
-	s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
-	src = src[8:]
-	s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
-	src = src[8:]
-	s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
-	src = src[8:]
-	for idx := 0; idx < 8; idx++ {
-		s._pad[idx] = src[0]
-		src = src[1:]
-	}
-	s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()])
-	src = src[s.Fpsimd64.SizeBytes():]
-	for idx := 0; idx < 3568; idx++ {
-		s.Reserved[idx] = uint8(src[0])
-		src = src[1:]
-	}
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (s *SignalContext64) Packed() bool {
-	return s.Fpsimd64.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SignalContext64) MarshalUnsafe(dst []byte) {
-	if s.Fpsimd64.Packed() {
-		safecopy.CopyIn(dst, unsafe.Pointer(s))
-	} else {
-		// Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
-		s.MarshalBytes(dst)
-	}
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
-	if s.Fpsimd64.Packed() {
-		safecopy.CopyOut(unsafe.Pointer(s), src)
-	} else {
-		// Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
-		s.UnmarshalBytes(src)
-	}
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
-	if !s.Fpsimd64.Packed() {
-		// Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
-		buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
-		s.MarshalBytes(buf) // escapes: fallback.
-		return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
-	}
-
-	// Construct a slice backed by dst's underlying memory.
-	var buf []byte
-	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
-	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
-	hdr.Len = s.SizeBytes()
-	hdr.Cap = s.SizeBytes()
-
-	length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
-	// Since we bypassed the compiler's escape analysis, indicate that s
-	// must live until the use above.
-	runtime.KeepAlive(s) // escapes: replaced by intrinsic.
-	return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
-	return s.CopyOutN(cc, addr, s.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
-	if !s.Fpsimd64.Packed() {
-		// Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
-		buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
-		length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
-		// Unmarshal unconditionally. If we had a short copy-in, this results in a
-		// partially unmarshalled struct.
-		s.UnmarshalBytes(buf) // escapes: fallback.
-		return length, err
-	}
-
-	// Construct a slice backed by dst's underlying memory.
-	var buf []byte
-	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
-	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
-	hdr.Len = s.SizeBytes()
-	hdr.Cap = s.SizeBytes()
-
-	length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
-	// Since we bypassed the compiler's escape analysis, indicate that s
-	// must live until the use above.
-	runtime.KeepAlive(s) // escapes: replaced by intrinsic.
-	return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
-	if !s.Fpsimd64.Packed() {
-		// Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
-		buf := make([]byte, s.SizeBytes())
-		s.MarshalBytes(buf)
-		length, err := writer.Write(buf)
-		return int64(length), err
-	}
-
-	// Construct a slice backed by dst's underlying memory.
-	var buf []byte
-	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
-	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
-	hdr.Len = s.SizeBytes()
-	hdr.Cap = s.SizeBytes()
-
-	length, err := writer.Write(buf)
-	// Since we bypassed the compiler's escape analysis, indicate that s
-	// must live until the use above.
-	runtime.KeepAlive(s) // escapes: replaced by intrinsic.
-	return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
 func (a *aarch64Ctx) SizeBytes() int {
 	return 8
 }
@@ -590,3 +422,171 @@ func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
 	return int64(length), err
 }
 
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalContext64) SizeBytes() int {
+	return 32 +
+		8*31 +
+		1*8 +
+		(*FpsimdContext)(nil).SizeBytes() +
+		1*3568
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalContext64) MarshalBytes(dst []byte) {
+	usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
+	dst = dst[8:]
+	for idx := 0; idx < 31; idx++ {
+		usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
+		dst = dst[8:]
+	}
+	usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
+	dst = dst[8:]
+	usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
+	dst = dst[8:]
+	usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
+	dst = dst[8:]
+	for idx := 0; idx < 8; idx++ {
+		dst[0] = byte(s._pad[idx])
+		dst = dst[1:]
+	}
+	s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()])
+	dst = dst[s.Fpsimd64.SizeBytes():]
+	for idx := 0; idx < 3568; idx++ {
+		dst[0] = byte(s.Reserved[idx])
+		dst = dst[1:]
+	}
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalContext64) UnmarshalBytes(src []byte) {
+	s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+	src = src[8:]
+	for idx := 0; idx < 31; idx++ {
+		s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+		src = src[8:]
+	}
+	s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+	src = src[8:]
+	s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
+	src = src[8:]
+	s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+	src = src[8:]
+	for idx := 0; idx < 8; idx++ {
+		s._pad[idx] = src[0]
+		src = src[1:]
+	}
+	s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()])
+	src = src[s.Fpsimd64.SizeBytes():]
+	for idx := 0; idx < 3568; idx++ {
+		s.Reserved[idx] = uint8(src[0])
+		src = src[1:]
+	}
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalContext64) Packed() bool {
+	return s.Fpsimd64.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalContext64) MarshalUnsafe(dst []byte) {
+	if s.Fpsimd64.Packed() {
+		safecopy.CopyIn(dst, unsafe.Pointer(s))
+	} else {
+		// Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+		s.MarshalBytes(dst)
+	}
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
+	if s.Fpsimd64.Packed() {
+		safecopy.CopyOut(unsafe.Pointer(s), src)
+	} else {
+		// Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+		s.UnmarshalBytes(src)
+	}
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+	if !s.Fpsimd64.Packed() {
+		// Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+		buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+		s.MarshalBytes(buf) // escapes: fallback.
+		return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+	}
+
+	// Construct a slice backed by dst's underlying memory.
+	var buf []byte
+	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+	hdr.Len = s.SizeBytes()
+	hdr.Cap = s.SizeBytes()
+
+	length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+	// Since we bypassed the compiler's escape analysis, indicate that s
+	// must live until the use above.
+	runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+	return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+	return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+	if !s.Fpsimd64.Packed() {
+		// Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+		buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+		length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+		// Unmarshal unconditionally. If we had a short copy-in, this results in a
+		// partially unmarshalled struct.
+		s.UnmarshalBytes(buf) // escapes: fallback.
+		return length, err
+	}
+
+	// Construct a slice backed by dst's underlying memory.
+	var buf []byte
+	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+	hdr.Len = s.SizeBytes()
+	hdr.Cap = s.SizeBytes()
+
+	length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+	// Since we bypassed the compiler's escape analysis, indicate that s
+	// must live until the use above.
+	runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+	return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
+	if !s.Fpsimd64.Packed() {
+		// Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+		buf := make([]byte, s.SizeBytes())
+		s.MarshalBytes(buf)
+		length, err := writer.Write(buf)
+		return int64(length), err
+	}
+
+	// Construct a slice backed by dst's underlying memory.
+	var buf []byte
+	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+	hdr.Len = s.SizeBytes()
+	hdr.Cap = s.SizeBytes()
+
+	length, err := writer.Write(buf)
+	// Since we bypassed the compiler's escape analysis, indicate that s
+	// must live until the use above.
+	runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+	return int64(length), err
+}
+
diff --git a/pkg/sentry/arch/signal.go b/pkg/sentry/arch/signal.go
index 5138f3bf5..35d2e07c3 100644
--- a/pkg/sentry/arch/signal.go
+++ b/pkg/sentry/arch/signal.go
@@ -152,23 +152,23 @@ func (s *SignalInfo) FixSignalCodeForUser() {
 	}
 }
 
-// Pid returns the si_pid field.
-func (s *SignalInfo) Pid() int32 {
+// PID returns the si_pid field.
+func (s *SignalInfo) PID() int32 {
 	return int32(usermem.ByteOrder.Uint32(s.Fields[0:4]))
 }
 
-// SetPid mutates the si_pid field.
-func (s *SignalInfo) SetPid(val int32) {
+// SetPID mutates the si_pid field.
+func (s *SignalInfo) SetPID(val int32) {
 	usermem.ByteOrder.PutUint32(s.Fields[0:4], uint32(val))
 }
 
-// Uid returns the si_uid field.
-func (s *SignalInfo) Uid() int32 {
+// UID returns the si_uid field.
+func (s *SignalInfo) UID() int32 {
 	return int32(usermem.ByteOrder.Uint32(s.Fields[4:8]))
 }
 
-// SetUid mutates the si_uid field.
-func (s *SignalInfo) SetUid(val int32) {
+// SetUID mutates the si_uid field.
+func (s *SignalInfo) SetUID(val int32) {
 	usermem.ByteOrder.PutUint32(s.Fields[4:8], uint32(val))
 }
 
diff --git a/pkg/sentry/control/pprof.go b/pkg/sentry/control/pprof.go
index 2bf3c45e1..91b8fb44f 100644
--- a/pkg/sentry/control/pprof.go
+++ b/pkg/sentry/control/pprof.go
@@ -193,7 +193,7 @@ func (p *Profile) StopTrace(_, _ *struct{}) error {
 	defer p.mu.Unlock()
 
 	if p.traceFile == nil {
-		return errors.New("Execution tracing not started")
+		return errors.New("execution tracing not started")
 	}
 
 	// Similarly to the case above, if tasks have not ended traces, we will
diff --git a/pkg/sentry/fdimport/fdimport.go b/pkg/sentry/fdimport/fdimport.go
index 314661475..badd5b073 100644
--- a/pkg/sentry/fdimport/fdimport.go
+++ b/pkg/sentry/fdimport/fdimport.go
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// Package fdimport provides the Import function.
 package fdimport
 
 import (
diff --git a/pkg/sentry/fs/copy_up.go b/pkg/sentry/fs/copy_up.go
index ff2fe6712..8e0aa9019 100644
--- a/pkg/sentry/fs/copy_up.go
+++ b/pkg/sentry/fs/copy_up.go
@@ -336,7 +336,12 @@ func cleanupUpper(ctx context.Context, parent *Inode, name string, copyUpErr err
 
 // copyUpBuffers is a buffer pool for copying file content. The buffer
 // size is the same used by io.Copy.
-var copyUpBuffers = sync.Pool{New: func() interface{} { return make([]byte, 8*usermem.PageSize) }}
+var copyUpBuffers = sync.Pool{
+	New: func() interface{} {
+		b := make([]byte, 8*usermem.PageSize)
+		return &b
+	},
+}
 
 // copyContentsLocked copies the contents of lower to upper. It panics if
 // less than size bytes can be copied.
@@ -361,7 +366,7 @@ func copyContentsLocked(ctx context.Context, upper *Inode, lower *Inode, size in
 	defer lowerFile.DecRef(ctx)
 
 	// Use a buffer pool to minimize allocations.
-	buf := copyUpBuffers.Get().([]byte)
+	buf := copyUpBuffers.Get().(*[]byte)
 	defer copyUpBuffers.Put(buf)
 
 	// Transfer the contents.
@@ -371,7 +376,7 @@ func copyContentsLocked(ctx context.Context, upper *Inode, lower *Inode, size in
 	// optimizations could be self-defeating. So we leave this as simple as possible.
 	var offset int64
 	for {
-		nr, err := lowerFile.FileOperations.Read(ctx, lowerFile, usermem.BytesIOSequence(buf), offset)
+		nr, err := lowerFile.FileOperations.Read(ctx, lowerFile, usermem.BytesIOSequence(*buf), offset)
 		if err != nil && err != io.EOF {
 			return err
 		}
@@ -383,7 +388,7 @@ func copyContentsLocked(ctx context.Context, upper *Inode, lower *Inode, size in
 		}
 		return nil
 	}
-	nw, err := upperFile.FileOperations.Write(ctx, upperFile, usermem.BytesIOSequence(buf[:nr]), offset)
+	nw, err := upperFile.FileOperations.Write(ctx, upperFile, usermem.BytesIOSequence((*buf)[:nr]), offset)
 	if err != nil {
 		return err
 	}
diff --git a/pkg/sentry/fs/gofer/attr.go b/pkg/sentry/fs/gofer/attr.go
index d481baf77..e5579095b 100644
--- a/pkg/sentry/fs/gofer/attr.go
+++ b/pkg/sentry/fs/gofer/attr.go
@@ -117,8 +117,6 @@ func ntype(pattr p9.Attr) fs.InodeType {
 		return fs.BlockDevice
 	case pattr.Mode.IsSocket():
 		return fs.Socket
-	case pattr.Mode.IsRegular():
-		fallthrough
 	default:
 		return fs.RegularFile
 	}
diff --git a/pkg/sentry/fsimpl/fuse/request_response.go b/pkg/sentry/fsimpl/fuse/request_response.go
index dc0180812..41d679358 100644
--- a/pkg/sentry/fsimpl/fuse/request_response.go
+++ b/pkg/sentry/fsimpl/fuse/request_response.go
@@ -70,6 +70,7 @@ func (r *fuseInitRes) UnmarshalBytes(src []byte) {
 		out.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2]))
 		src = src[2:]
 	}
+	_ = src // Remove unused warning.
 }
 
 // SizeBytes is the size of the payload of the FUSE_INIT response.
diff --git a/pkg/sentry/fsimpl/signalfd/signalfd.go b/pkg/sentry/fsimpl/signalfd/signalfd.go
index 10f1452ef..246bd87bc 100644
--- a/pkg/sentry/fsimpl/signalfd/signalfd.go
+++ b/pkg/sentry/fsimpl/signalfd/signalfd.go
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// Package signalfd provides basic signalfd file implementations.
 package signalfd
 
 import (
@@ -98,8 +99,8 @@ func (sfd *SignalFileDescription) Read(ctx context.Context, dst usermem.IOSeque
 		Signo: uint32(info.Signo),
 		Errno: info.Errno,
 		Code: info.Code,
-		PID: uint32(info.Pid()),
-		UID: uint32(info.Uid()),
+		PID: uint32(info.PID()),
+		UID: uint32(info.UID()),
 		Status: info.Status(),
 		Overrun: uint32(info.Overrun()),
 		Addr: info.Addr(),
diff --git a/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go b/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go
index e7892288b..da6aad4a5 100644
--- a/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go
+++ b/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go
@@ -19,179 +19,179 @@ var _ marshal.Marshallable = (*UID)(nil)
 
 // SizeBytes implements marshal.Marshallable.SizeBytes.
 //go:nosplit
-func (u *UID) SizeBytes() int {
+func (uid *UID) SizeBytes() int {
 	return 4
 }
 
 // MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *UID) MarshalBytes(dst []byte) {
-	usermem.ByteOrder.PutUint32(dst[:4], uint32(*u))
+func (uid *UID) MarshalBytes(dst []byte) {
+	usermem.ByteOrder.PutUint32(dst[:4], uint32(*uid))
 }
 
 // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *UID) UnmarshalBytes(src []byte) {
-	*u = UID(uint32(usermem.ByteOrder.Uint32(src[:4])))
+func (uid *UID) UnmarshalBytes(src []byte) {
+	*uid = UID(uint32(usermem.ByteOrder.Uint32(src[:4])))
 }
 
 // Packed implements marshal.Marshallable.Packed.
 //go:nosplit
-func (u *UID) Packed() bool {
+func (uid *UID) Packed() bool {
 	// Scalar newtypes are always packed.
 	return true
 }
 
 // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *UID) MarshalUnsafe(dst []byte) {
-	safecopy.CopyIn(dst, unsafe.Pointer(u))
+func (uid *UID) MarshalUnsafe(dst []byte) {
+	safecopy.CopyIn(dst, unsafe.Pointer(uid))
 }
 
 // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *UID) UnmarshalUnsafe(src []byte) {
-	safecopy.CopyOut(unsafe.Pointer(u), src)
+func (uid *UID) UnmarshalUnsafe(src []byte) {
+	safecopy.CopyOut(unsafe.Pointer(uid), src)
 }
 
 // CopyOutN implements marshal.Marshallable.CopyOutN.
 //go:nosplit
-func (u *UID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (uid *UID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
 	// Construct a slice backed by dst's underlying memory.
 	var buf []byte
 	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
-	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
-	hdr.Len = u.SizeBytes()
-	hdr.Cap = u.SizeBytes()
+	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
+	hdr.Len = uid.SizeBytes()
+	hdr.Cap = uid.SizeBytes()
 
 	length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
-	// Since we bypassed the compiler's escape analysis, indicate that u
+	// Since we bypassed the compiler's escape analysis, indicate that uid
 	// must live until the use above.
-	runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+	runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
 	return length, err
 }
 
 // CopyOut implements marshal.Marshallable.CopyOut.
 //go:nosplit
-func (u *UID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
-	return u.CopyOutN(cc, addr, u.SizeBytes())
+func (uid *UID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+	return uid.CopyOutN(cc, addr, uid.SizeBytes())
 }
 
 // CopyIn implements marshal.Marshallable.CopyIn.
 //go:nosplit
-func (u *UID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (uid *UID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
 	// Construct a slice backed by dst's underlying memory.
 	var buf []byte
 	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
-	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
-	hdr.Len = u.SizeBytes()
-	hdr.Cap = u.SizeBytes()
+	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
+	hdr.Len = uid.SizeBytes()
+	hdr.Cap = uid.SizeBytes()
 
 	length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
-	// Since we bypassed the compiler's escape analysis, indicate that u
+	// Since we bypassed the compiler's escape analysis, indicate that uid
 	// must live until the use above.
-	runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+	runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
 	return length, err
 }
 
 // WriteTo implements io.WriterTo.WriteTo.
-func (u *UID) WriteTo(w io.Writer) (int64, error) {
+func (uid *UID) WriteTo(w io.Writer) (int64, error) {
 	// Construct a slice backed by dst's underlying memory.
 	var buf []byte
 	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
-	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
-	hdr.Len = u.SizeBytes()
-	hdr.Cap = u.SizeBytes()
+	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
+	hdr.Len = uid.SizeBytes()
+	hdr.Cap = uid.SizeBytes()
 
 	length, err := w.Write(buf)
-	// Since we bypassed the compiler's escape analysis, indicate that u
+	// Since we bypassed the compiler's escape analysis, indicate that uid
 	// must live until the use above.
-	runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+	runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
 	return int64(length), err
 }
 
 // SizeBytes implements marshal.Marshallable.SizeBytes.
 //go:nosplit
-func (g *GID) SizeBytes() int {
+func (gid *GID) SizeBytes() int {
 	return 4
 }
 
 // MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (g *GID) MarshalBytes(dst []byte) {
-	usermem.ByteOrder.PutUint32(dst[:4], uint32(*g))
+func (gid *GID) MarshalBytes(dst []byte) {
+	usermem.ByteOrder.PutUint32(dst[:4], uint32(*gid))
 }
 
 // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (g *GID) UnmarshalBytes(src []byte) {
-	*g = GID(uint32(usermem.ByteOrder.Uint32(src[:4])))
+func (gid *GID) UnmarshalBytes(src []byte) {
+	*gid = GID(uint32(usermem.ByteOrder.Uint32(src[:4])))
 }
 
 // Packed implements marshal.Marshallable.Packed.
 //go:nosplit
-func (g *GID) Packed() bool {
+func (gid *GID) Packed() bool {
 	// Scalar newtypes are always packed.
 	return true
 }
 
 // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (g *GID) MarshalUnsafe(dst []byte) {
-	safecopy.CopyIn(dst, unsafe.Pointer(g))
+func (gid *GID) MarshalUnsafe(dst []byte) {
+	safecopy.CopyIn(dst, unsafe.Pointer(gid))
 }
 
 // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (g *GID) UnmarshalUnsafe(src []byte) {
-	safecopy.CopyOut(unsafe.Pointer(g), src)
+func (gid *GID) UnmarshalUnsafe(src []byte) {
+	safecopy.CopyOut(unsafe.Pointer(gid), src)
 }
 
 // CopyOutN implements marshal.Marshallable.CopyOutN.
 //go:nosplit
-func (g *GID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (gid *GID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
 	// Construct a slice backed by dst's underlying memory.
 	var buf []byte
 	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
-	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(g)))
-	hdr.Len = g.SizeBytes()
-	hdr.Cap = g.SizeBytes()
+	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(gid)))
+	hdr.Len = gid.SizeBytes()
+	hdr.Cap = gid.SizeBytes()
 
 	length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
-	// Since we bypassed the compiler's escape analysis, indicate that g
+	// Since we bypassed the compiler's escape analysis, indicate that gid
 	// must live until the use above.
-	runtime.KeepAlive(g) // escapes: replaced by intrinsic.
+	runtime.KeepAlive(gid) // escapes: replaced by intrinsic.
 	return length, err
 }
 
 // CopyOut implements marshal.Marshallable.CopyOut.
 //go:nosplit
-func (g *GID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
-	return g.CopyOutN(cc, addr, g.SizeBytes())
+func (gid *GID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+	return gid.CopyOutN(cc, addr, gid.SizeBytes())
 }
 
 // CopyIn implements marshal.Marshallable.CopyIn.
 //go:nosplit
-func (g *GID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (gid *GID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
 	// Construct a slice backed by dst's underlying memory.
 	var buf []byte
 	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
-	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(g)))
-	hdr.Len = g.SizeBytes()
-	hdr.Cap = g.SizeBytes()
+	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(gid)))
+	hdr.Len = gid.SizeBytes()
+	hdr.Cap = gid.SizeBytes()
 
 	length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
-	// Since we bypassed the compiler's escape analysis, indicate that g
+	// Since we bypassed the compiler's escape analysis, indicate that gid
 	// must live until the use above.
-	runtime.KeepAlive(g) // escapes: replaced by intrinsic.
+	runtime.KeepAlive(gid) // escapes: replaced by intrinsic.
 	return length, err
 }
 
 // WriteTo implements io.WriterTo.WriteTo.
-func (g *GID) WriteTo(w io.Writer) (int64, error) {
+func (gid *GID) WriteTo(w io.Writer) (int64, error) {
 	// Construct a slice backed by dst's underlying memory.
 	var buf []byte
 	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
-	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(g)))
-	hdr.Len = g.SizeBytes()
-	hdr.Cap = g.SizeBytes()
+	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(gid)))
+	hdr.Len = gid.SizeBytes()
+	hdr.Cap = gid.SizeBytes()
 
 	length, err := w.Write(buf)
-	// Since we bypassed the compiler's escape analysis, indicate that g
+	// Since we bypassed the compiler's escape analysis, indicate that gid
 	// must live until the use above.
-	runtime.KeepAlive(g) // escapes: replaced by intrinsic.
+	runtime.KeepAlive(gid) // escapes: replaced by intrinsic.
 	return int64(length), err
 }
 
diff --git a/pkg/sentry/kernel/kernel_abi_autogen_unsafe.go b/pkg/sentry/kernel/kernel_abi_autogen_unsafe.go
index 9ac2a552a..af906909e 100644
--- a/pkg/sentry/kernel/kernel_abi_autogen_unsafe.go
+++ b/pkg/sentry/kernel/kernel_abi_autogen_unsafe.go
@@ -19,90 +19,90 @@ var _ marshal.Marshallable = (*vdsoParams)(nil)
 
 // SizeBytes implements marshal.Marshallable.SizeBytes.
 //go:nosplit
-func (t *ThreadID) SizeBytes() int {
+func (tid *ThreadID) SizeBytes() int {
 	return 4
 }
 
 // MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *ThreadID) MarshalBytes(dst []byte) {
-	usermem.ByteOrder.PutUint32(dst[:4], uint32(*t))
+func (tid *ThreadID) MarshalBytes(dst []byte) {
+	usermem.ByteOrder.PutUint32(dst[:4], uint32(*tid))
 }
 
 // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *ThreadID) UnmarshalBytes(src []byte) {
-	*t = ThreadID(int32(usermem.ByteOrder.Uint32(src[:4])))
+func (tid *ThreadID) UnmarshalBytes(src []byte) {
+	*tid = ThreadID(int32(usermem.ByteOrder.Uint32(src[:4])))
 }
 
 // Packed implements marshal.Marshallable.Packed.
 //go:nosplit
-func (t *ThreadID) Packed() bool {
+func (tid *ThreadID) Packed() bool {
 	// Scalar newtypes are always packed.
 	return true
 }
 
 // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *ThreadID) MarshalUnsafe(dst []byte) {
-	safecopy.CopyIn(dst, unsafe.Pointer(t))
+func (tid *ThreadID) MarshalUnsafe(dst []byte) {
+	safecopy.CopyIn(dst, unsafe.Pointer(tid))
 }
 
 // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *ThreadID) UnmarshalUnsafe(src []byte) {
-	safecopy.CopyOut(unsafe.Pointer(t), src)
+func (tid *ThreadID) UnmarshalUnsafe(src []byte) {
+	safecopy.CopyOut(unsafe.Pointer(tid), src)
 }
 
 // CopyOutN implements marshal.Marshallable.CopyOutN.
 //go:nosplit
-func (t *ThreadID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (tid *ThreadID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
 	// Construct a slice backed by dst's underlying memory.
 	var buf []byte
 	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
-	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
-	hdr.Len = t.SizeBytes()
-	hdr.Cap = t.SizeBytes()
+	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tid)))
+	hdr.Len = tid.SizeBytes()
+	hdr.Cap = tid.SizeBytes()
 
 	length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
-	// Since we bypassed the compiler's escape analysis, indicate that t
+	// Since we bypassed the compiler's escape analysis, indicate that tid
 	// must live until the use above.
-	runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+	runtime.KeepAlive(tid) // escapes: replaced by intrinsic.
 	return length, err
 }
 
 // CopyOut implements marshal.Marshallable.CopyOut.
 //go:nosplit
-func (t *ThreadID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
-	return t.CopyOutN(cc, addr, t.SizeBytes())
+func (tid *ThreadID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+	return tid.CopyOutN(cc, addr, tid.SizeBytes())
 }
 
 // CopyIn implements marshal.Marshallable.CopyIn.
 //go:nosplit
-func (t *ThreadID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (tid *ThreadID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
 	// Construct a slice backed by dst's underlying memory.
 	var buf []byte
 	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
-	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
-	hdr.Len = t.SizeBytes()
-	hdr.Cap = t.SizeBytes()
+	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tid)))
+	hdr.Len = tid.SizeBytes()
+	hdr.Cap = tid.SizeBytes()
 
 	length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
-	// Since we bypassed the compiler's escape analysis, indicate that t
+	// Since we bypassed the compiler's escape analysis, indicate that tid
 	// must live until the use above.
-	runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+	runtime.KeepAlive(tid) // escapes: replaced by intrinsic.
 	return length, err
 }
 
 // WriteTo implements io.WriterTo.WriteTo.
-func (t *ThreadID) WriteTo(w io.Writer) (int64, error) {
+func (tid *ThreadID) WriteTo(w io.Writer) (int64, error) {
 	// Construct a slice backed by dst's underlying memory.
 	var buf []byte
 	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
-	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
-	hdr.Len = t.SizeBytes()
-	hdr.Cap = t.SizeBytes()
+	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tid)))
+	hdr.Len = tid.SizeBytes()
+	hdr.Cap = tid.SizeBytes()
 
 	length, err := w.Write(buf)
-	// Since we bypassed the compiler's escape analysis, indicate that t
+	// Since we bypassed the compiler's escape analysis, indicate that tid
 	// must live until the use above.
-	runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+	runtime.KeepAlive(tid) // escapes: replaced by intrinsic.
 	return int64(length), err
 }
 
diff --git a/pkg/sentry/kernel/ptrace.go b/pkg/sentry/kernel/ptrace.go
index 1abfe2201..cef58a590 100644
--- a/pkg/sentry/kernel/ptrace.go
+++ b/pkg/sentry/kernel/ptrace.go
@@ -259,8 +259,8 @@ func (t *Task) ptraceTrapLocked(code int32) {
 		Signo: int32(linux.SIGTRAP),
 		Code: code,
 	}
-	t.ptraceSiginfo.SetPid(int32(t.tg.pidns.tids[t]))
-	t.ptraceSiginfo.SetUid(int32(t.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()))
+	t.ptraceSiginfo.SetPID(int32(t.tg.pidns.tids[t]))
+	t.ptraceSiginfo.SetUID(int32(t.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()))
 	if t.beginPtraceStopLocked() {
 		tracer := t.Tracer()
 		tracer.signalStop(t, arch.CLD_TRAPPED, int32(linux.SIGTRAP))
diff --git a/pkg/sentry/kernel/signal.go b/pkg/sentry/kernel/signal.go
index e8cce37d0..2488ae7d5 100644
--- a/pkg/sentry/kernel/signal.go
+++ b/pkg/sentry/kernel/signal.go
@@ -73,7 +73,7 @@ func SignalInfoNoInfo(sig linux.Signal, sender, receiver *Task) *arch.SignalInf
 		Signo: int32(sig),
 		Code: arch.SignalInfoUser,
 	}
-	info.SetPid(int32(receiver.tg.pidns.IDOfThreadGroup(sender.tg)))
-	info.SetUid(int32(sender.Credentials().RealKUID.In(receiver.UserNamespace()).OrOverflow()))
+	info.SetPID(int32(receiver.tg.pidns.IDOfThreadGroup(sender.tg)))
+	info.SetUID(int32(sender.Credentials().RealKUID.In(receiver.UserNamespace()).OrOverflow()))
 	return info
 }
diff --git a/pkg/sentry/kernel/signalfd/signalfd.go b/pkg/sentry/kernel/signalfd/signalfd.go
index 78f718cfe..884966120 100644
--- a/pkg/sentry/kernel/signalfd/signalfd.go
+++ b/pkg/sentry/kernel/signalfd/signalfd.go
@@ -106,8 +106,8 @@ func (s *SignalOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOS
 		Signo: uint32(info.Signo),
 		Errno: info.Errno,
 		Code: info.Code,
-		PID: uint32(info.Pid()),
-		UID: uint32(info.Uid()),
+		PID: uint32(info.PID()),
+		UID: uint32(info.UID()),
 		Status: info.Status(),
 		Overrun: uint32(info.Overrun()),
 		Addr: info.Addr(),
diff --git a/pkg/sentry/kernel/task_exit.go b/pkg/sentry/kernel/task_exit.go
index c5137c282..16986244c 100644
--- a/pkg/sentry/kernel/task_exit.go
+++ b/pkg/sentry/kernel/task_exit.go
@@ -368,8 +368,8 @@ func (t *Task) exitChildren() {
 				Signo: int32(sig),
 				Code: arch.SignalInfoUser,
 			}
-			siginfo.SetPid(int32(c.tg.pidns.tids[t]))
-			siginfo.SetUid(int32(t.Credentials().RealKUID.In(c.UserNamespace()).OrOverflow()))
+			siginfo.SetPID(int32(c.tg.pidns.tids[t]))
+			siginfo.SetUID(int32(t.Credentials().RealKUID.In(c.UserNamespace()).OrOverflow()))
 			c.tg.signalHandlers.mu.Lock()
 			c.sendSignalLocked(siginfo, true /* group */)
 			c.tg.signalHandlers.mu.Unlock()
@@ -698,8 +698,8 @@ func (t *Task) exitNotificationSignal(sig linux.Signal, receiver *Task) *arch.Si
 	info := &arch.SignalInfo{
 		Signo: int32(sig),
 	}
-	info.SetPid(int32(receiver.tg.pidns.tids[t]))
-	info.SetUid(int32(t.Credentials().RealKUID.In(receiver.UserNamespace()).OrOverflow()))
+	info.SetPID(int32(receiver.tg.pidns.tids[t]))
+	info.SetUID(int32(t.Credentials().RealKUID.In(receiver.UserNamespace()).OrOverflow()))
 	if t.exitStatus.Signaled() {
 		info.Code = arch.CLD_KILLED
 		info.SetStatus(int32(t.exitStatus.Signo))
diff --git a/pkg/sentry/kernel/task_signals.go b/pkg/sentry/kernel/task_signals.go
index 42dd3e278..75af3af79 100644
--- a/pkg/sentry/kernel/task_signals.go
+++ b/pkg/sentry/kernel/task_signals.go
@@ -914,8 +914,8 @@ func (t *Task) signalStop(target *Task, code int32, status int32) {
 		Signo: int32(linux.SIGCHLD),
 		Code: code,
 	}
-	sigchld.SetPid(int32(t.tg.pidns.tids[target]))
-	sigchld.SetUid(int32(target.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()))
+	sigchld.SetPID(int32(t.tg.pidns.tids[target]))
+	sigchld.SetUID(int32(target.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()))
 	sigchld.SetStatus(status)
 	// TODO(b/72102453): Set utime, stime.
 	t.sendSignalLocked(sigchld, true /* group */)
@@ -1022,8 +1022,8 @@ func (*runInterrupt) execute(t *Task) taskRunState {
 			Signo: int32(sig),
 			Code: t.ptraceCode,
 		}
-		t.ptraceSiginfo.SetPid(int32(t.tg.pidns.tids[t]))
-		t.ptraceSiginfo.SetUid(int32(t.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()))
+		t.ptraceSiginfo.SetPID(int32(t.tg.pidns.tids[t]))
+		t.ptraceSiginfo.SetUID(int32(t.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()))
 	} else {
 		t.ptraceCode = int32(sig)
 		t.ptraceSiginfo = nil
@@ -1114,11 +1114,11 @@ func (*runInterruptAfterSignalDeliveryStop) execute(t *Task) taskRunState {
 		if parent == nil {
 			// Tracer has detached and t was created by Kernel.CreateProcess().
 			// Pretend the parent is in an ancestor PID + user namespace.
-			info.SetPid(0)
-			info.SetUid(int32(auth.OverflowUID))
+			info.SetPID(0)
+			info.SetUID(int32(auth.OverflowUID))
 		} else {
-			info.SetPid(int32(t.tg.pidns.tids[parent]))
-			info.SetUid(int32(parent.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()))
+			info.SetPID(int32(t.tg.pidns.tids[parent]))
+			info.SetUID(int32(parent.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()))
 		}
 	}
 	t.tg.signalHandlers.mu.Lock()
diff --git a/pkg/sentry/memmap/memmap.go b/pkg/sentry/memmap/memmap.go
index 7fd77925f..49e21026e 100644
--- a/pkg/sentry/memmap/memmap.go
+++ b/pkg/sentry/memmap/memmap.go
@@ -160,7 +160,7 @@ func CheckTranslateResult(required, optional MappableRange, at usermem.AccessTyp
 		// Translations must be contiguous and in increasing order of
 		// Translation.Source.
 		if i > 0 && ts[i-1].Source.End != t.Source.Start {
-			return fmt.Errorf("Translations %+v and %+v are not contiguous", ts[i-1], t)
+			return fmt.Errorf("Translation %+v and Translation %+v are not contiguous", ts[i-1], t)
 		}
 		// At least part of each Translation must be required.
 		if t.Source.Intersect(required).Length() == 0 {
diff --git a/pkg/sentry/mm/aio_context_state.go b/pkg/sentry/mm/aio_context_state.go
index 3dabac1af..e8931922f 100644
--- a/pkg/sentry/mm/aio_context_state.go
+++ b/pkg/sentry/mm/aio_context_state.go
@@ -15,6 +15,6 @@
 package mm
 
 // afterLoad is invoked by stateify.
-func (a *AIOContext) afterLoad() {
-	a.requestReady = make(chan struct{}, 1)
+func (ctx *AIOContext) afterLoad() {
+	ctx.requestReady = make(chan struct{}, 1)
 }
diff --git a/pkg/sentry/mm/mm_state_autogen.go b/pkg/sentry/mm/mm_state_autogen.go
index a2d098d30..60aee9d2e 100644
--- a/pkg/sentry/mm/mm_state_autogen.go
+++ b/pkg/sentry/mm/mm_state_autogen.go
@@ -55,11 +55,11 @@ func (i *ioResult) StateLoad(stateSourceObject state.Source) {
 	stateSourceObject.Load(1, &i.ioEntry)
 }
 
-func (a *AIOContext) StateTypeName() string {
+func (ctx *AIOContext) StateTypeName() string {
 	return "pkg/sentry/mm.AIOContext"
 }
 
-func (a *AIOContext) StateFields() []string {
+func (ctx *AIOContext) StateFields() []string {
 	return []string{
 		"results",
 		"maxOutstanding",
@@ -67,23 +67,23 @@ func (a *AIOContext) StateFields() []string {
 	}
 }
 
-func (a *AIOContext) beforeSave() {}
+func (ctx *AIOContext) beforeSave() {}
 
-func (a *AIOContext) StateSave(stateSinkObject state.Sink) {
-	a.beforeSave()
-	if !state.IsZeroValue(&a.dead) {
-		state.Failf("dead is %#v, expected zero", &a.dead)
+func (ctx *AIOContext) StateSave(stateSinkObject state.Sink) {
+	ctx.beforeSave()
+	if !state.IsZeroValue(&ctx.dead) {
+		state.Failf("dead is %#v, expected zero", &ctx.dead)
 	}
-	stateSinkObject.Save(0, &a.results)
-	stateSinkObject.Save(1, &a.maxOutstanding)
-	stateSinkObject.Save(2, &a.outstanding)
+	stateSinkObject.Save(0, &ctx.results)
+	stateSinkObject.Save(1, &ctx.maxOutstanding)
+	stateSinkObject.Save(2, &ctx.outstanding)
 }
 
-func (a *AIOContext) StateLoad(stateSourceObject state.Source) {
-	stateSourceObject.Load(0, &a.results)
-	stateSourceObject.Load(1, &a.maxOutstanding)
-	stateSourceObject.Load(2, &a.outstanding)
-	stateSourceObject.AfterLoad(a.afterLoad)
+func (ctx *AIOContext) StateLoad(stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &ctx.results)
+	stateSourceObject.Load(1, &ctx.maxOutstanding)
+	stateSourceObject.Load(2, &ctx.outstanding)
+	stateSourceObject.AfterLoad(ctx.afterLoad)
 }
 
 func (m *aioMappable) StateTypeName() string {
diff --git a/pkg/sentry/platform/ptrace/subprocess.go b/pkg/sentry/platform/ptrace/subprocess.go
index 812ab80ef..aacd7ce70 100644
--- a/pkg/sentry/platform/ptrace/subprocess.go
+++ b/pkg/sentry/platform/ptrace/subprocess.go
@@ -590,7 +590,7 @@ func (s *subprocess) switchToApp(c *context, ac arch.Context) bool {
 			// facilitate vsyscall emulation. See patchSignalInfo.
 			patchSignalInfo(regs, &c.signalInfo)
 			return false
-		} else if c.signalInfo.Code <= 0 && c.signalInfo.Pid() == int32(os.Getpid()) {
+		} else if c.signalInfo.Code <= 0 && c.signalInfo.PID() == int32(os.Getpid()) {
 			// The signal was generated by this process. That means
 			// that it was an interrupt or something else that we
 			// should bail for. Note that we ignore signals
diff --git a/pkg/sentry/syscalls/linux/sys_signal.go b/pkg/sentry/syscalls/linux/sys_signal.go
index e748d33d8..d639c9bf7 100644
--- a/pkg/sentry/syscalls/linux/sys_signal.go
+++ b/pkg/sentry/syscalls/linux/sys_signal.go
@@ -88,8 +88,8 @@ func Kill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
 				Signo: int32(sig),
 				Code: arch.SignalInfoUser,
 			}
-			info.SetPid(int32(target.PIDNamespace().IDOfTask(t)))
-			info.SetUid(int32(t.Credentials().RealKUID.In(target.UserNamespace()).OrOverflow()))
+			info.SetPID(int32(target.PIDNamespace().IDOfTask(t)))
+			info.SetUID(int32(t.Credentials().RealKUID.In(target.UserNamespace()).OrOverflow()))
 			if err := target.SendGroupSignal(info); err != syserror.ESRCH {
 				return 0, nil, err
 			}
@@ -127,8 +127,8 @@ func Kill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
 				Signo: int32(sig),
 				Code: arch.SignalInfoUser,
 			}
-			info.SetPid(int32(tg.PIDNamespace().IDOfTask(t)))
-			info.SetUid(int32(t.Credentials().RealKUID.In(tg.Leader().UserNamespace()).OrOverflow()))
+			info.SetPID(int32(tg.PIDNamespace().IDOfTask(t)))
+			info.SetUID(int32(t.Credentials().RealKUID.In(tg.Leader().UserNamespace()).OrOverflow()))
 			err := tg.SendSignal(info)
 			if err == syserror.ESRCH {
 				// ESRCH is ignored because it means the task
@@ -171,8 +171,8 @@ func Kill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
 				Signo: int32(sig),
 				Code: arch.SignalInfoUser,
 			}
-			info.SetPid(int32(tg.PIDNamespace().IDOfTask(t)))
-			info.SetUid(int32(t.Credentials().RealKUID.In(tg.Leader().UserNamespace()).OrOverflow()))
+			info.SetPID(int32(tg.PIDNamespace().IDOfTask(t)))
+			info.SetUID(int32(t.Credentials().RealKUID.In(tg.Leader().UserNamespace()).OrOverflow()))
 			// See note above regarding ESRCH race above.
 			if err := tg.SendSignal(info); err != syserror.ESRCH {
 				lastErr = err
@@ -189,8 +189,8 @@ func tkillSigInfo(sender, receiver *kernel.Task, sig linux.Signal) *arch.SignalI
 		Signo: int32(sig),
 		Code: arch.SignalInfoTkill,
 	}
-	info.SetPid(int32(receiver.PIDNamespace().IDOfThreadGroup(sender.ThreadGroup())))
-	info.SetUid(int32(sender.Credentials().RealKUID.In(receiver.UserNamespace()).OrOverflow()))
+	info.SetPID(int32(receiver.PIDNamespace().IDOfThreadGroup(sender.ThreadGroup())))
+	info.SetUID(int32(sender.Credentials().RealKUID.In(receiver.UserNamespace()).OrOverflow()))
 	return info
 }
diff --git a/pkg/sentry/syscalls/linux/sys_thread.go b/pkg/sentry/syscalls/linux/sys_thread.go
index 983f8d396..8e7ac0ffe 100644
--- a/pkg/sentry/syscalls/linux/sys_thread.go
+++ b/pkg/sentry/syscalls/linux/sys_thread.go
@@ -413,8 +413,8 @@ func Waitid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
 	si := arch.SignalInfo{
 		Signo: int32(linux.SIGCHLD),
 	}
-	si.SetPid(int32(wr.TID))
-	si.SetUid(int32(wr.UID))
+	si.SetPID(int32(wr.TID))
+	si.SetUID(int32(wr.UID))
 	// TODO(b/73541790): convert kernel.ExitStatus to functions and make
 	// WaitResult.Status a linux.WaitStatus.
 	s := syscall.WaitStatus(wr.Status)
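
A pattern worth noting, since it recurs throughout the regenerated marshalling code above (SignalContext64, UID, GID, ThreadID): the object's memory is aliased as a []byte through a reflect.SliceHeader so it can be copied or written without an intermediate buffer, and runtime.KeepAlive then pins the object because the compiler's escape analysis was sidestepped. Below is a minimal self-contained sketch of that technique under simplifying assumptions: it drops gVisor's gohacks.Noescape and //go:nosplit details, and payload and writeTo are illustrative names, not gVisor APIs.

// Sketch of the zero-copy write pattern used by the autogenerated code above.
package main

import (
	"fmt"
	"os"
	"reflect"
	"runtime"
	"unsafe"
)

type payload struct {
	A uint64
	B uint64
}

// writeTo writes p's in-memory representation directly, with no scratch buffer.
func writeTo(p *payload, f *os.File) (int, error) {
	// Construct a slice backed by p's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(p))
	hdr.Len = int(unsafe.Sizeof(*p))
	hdr.Cap = int(unsafe.Sizeof(*p))

	n, err := f.Write(buf)
	// buf aliases p behind the compiler's back, so explicitly keep p alive
	// until after the write completes.
	runtime.KeepAlive(p)
	return n, err
}

func main() {
	f, err := os.CreateTemp("", "payload")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	n, err := writeTo(&payload{A: 1, B: 2}, f)
	fmt.Println(n, err) // 16 <nil> on 64-bit platforms
}

The KeepAlive call is the load-bearing part: without it, the garbage collector is free to reclaim the object as soon as the last pointer-typed use ends, even though the raw uintptr in the slice header still refers to its memory.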