author     Ayush Ranjan <ayushranjan@google.com>  2021-03-03 10:23:55 -0800
committer  gVisor bot <gvisor-bot@google.com>     2021-03-03 10:25:58 -0800
commit     a9441aea2780da8c93da1c73da860219f98438de (patch)
tree       8b12915756f5bfb926218214cd7bc0b3281605fd /pkg/sentry/platform
parent     b8a5420f49a2afd622ec08b5019e1bf537f7da82 (diff)
[op] Replace syscall package usage with golang.org/x/sys/unix in pkg/.
The syscall package has been deprecated in favor of golang.org/x/sys. Note that syscall is still used in the following places:

- pkg/sentry/socket/hostinet/stack.go: some netlink related functionalities are not yet available in golang.org/x/sys.
- syscall.Stat_t is still used in some places because os.FileInfo.Sys() still returns it and not unix.Stat_t.

Updates #214

PiperOrigin-RevId: 360701387
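The mechanical shape of the change is the same throughout the diff below: drop the "syscall" import, import "golang.org/x/sys/unix", and swap the package qualifier on constants, types, and the Syscall/RawSyscall helpers, whose signatures are identical. A minimal standalone sketch of the pattern (sched_yield is used only as an arbitrary example and is not tied to any particular file in this change):

package main

import "golang.org/x/sys/unix"

// yield used to be written as:
//
//	syscall.RawSyscall(syscall.SYS_SCHED_YIELD, 0, 0, 0)
//
// After the migration only the package qualifier changes; unix.RawSyscall
// has the same signature and the SYS_* constants have the same values.
func yield() {
	unix.RawSyscall(unix.SYS_SCHED_YIELD, 0, 0, 0)
}

func main() {
	yield()
}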
Diffstat (limited to 'pkg/sentry/platform')
-rw-r--r--  pkg/sentry/platform/kvm/BUILD  1
-rw-r--r--  pkg/sentry/platform/kvm/bluepill.go  6
-rw-r--r--  pkg/sentry/platform/kvm/bluepill_amd64.go  5
-rw-r--r--  pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go  10
-rw-r--r--  pkg/sentry/platform/kvm/bluepill_arm64.go  5
-rw-r--r--  pkg/sentry/platform/kvm/bluepill_arm64_unsafe.go  16
-rw-r--r--  pkg/sentry/platform/kvm/bluepill_fault.go  12
-rw-r--r--  pkg/sentry/platform/kvm/bluepill_unsafe.go  18
-rw-r--r--  pkg/sentry/platform/kvm/filters_amd64.go  14
-rw-r--r--  pkg/sentry/platform/kvm/filters_arm64.go  12
-rw-r--r--  pkg/sentry/platform/kvm/kvm.go  10
-rw-r--r--  pkg/sentry/platform/kvm/kvm_amd64_unsafe.go  15
-rw-r--r--  pkg/sentry/platform/kvm/kvm_arm64_unsafe.go  5
-rw-r--r--  pkg/sentry/platform/kvm/kvm_test.go  10
-rw-r--r--  pkg/sentry/platform/kvm/machine.go  18
-rw-r--r--  pkg/sentry/platform/kvm/machine_amd64.go  32
-rw-r--r--  pkg/sentry/platform/kvm/machine_amd64_unsafe.go  48
-rw-r--r--  pkg/sentry/platform/kvm/machine_arm64_unsafe.go  32
-rw-r--r--  pkg/sentry/platform/kvm/machine_unsafe.go  34
-rw-r--r--  pkg/sentry/platform/kvm/physical_map.go  10
-rw-r--r--  pkg/sentry/platform/kvm/virtual_map_test.go  24
-rw-r--r--  pkg/sentry/platform/ptrace/filters.go  9
-rw-r--r--  pkg/sentry/platform/ptrace/ptrace_arm64_unsafe.go  18
-rw-r--r--  pkg/sentry/platform/ptrace/ptrace_unsafe.go  66
-rw-r--r--  pkg/sentry/platform/ptrace/stub_unsafe.go  18
-rw-r--r--  pkg/sentry/platform/ptrace/subprocess.go  77
-rw-r--r--  pkg/sentry/platform/ptrace/subprocess_amd64.go  19
-rw-r--r--  pkg/sentry/platform/ptrace/subprocess_arm64.go  11
-rw-r--r--  pkg/sentry/platform/ptrace/subprocess_linux.go  66
-rw-r--r--  pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go  6
30 files changed, 310 insertions, 317 deletions
diff --git a/pkg/sentry/platform/kvm/BUILD b/pkg/sentry/platform/kvm/BUILD
index b3290917e..4f9e781af 100644
--- a/pkg/sentry/platform/kvm/BUILD
+++ b/pkg/sentry/platform/kvm/BUILD
@@ -82,6 +82,7 @@ go_test(
"//pkg/sentry/platform/kvm/testutil",
"//pkg/sentry/time",
"//pkg/usermem",
+ "@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/pkg/sentry/platform/kvm/bluepill.go b/pkg/sentry/platform/kvm/bluepill.go
index 2c970162e..fd1131638 100644
--- a/pkg/sentry/platform/kvm/bluepill.go
+++ b/pkg/sentry/platform/kvm/bluepill.go
@@ -17,8 +17,8 @@ package kvm
import (
"fmt"
"reflect"
- "syscall"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/safecopy"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -41,7 +41,7 @@ var (
//
// We use SIGCHLD because it is not masked by the runtime, and
// it will be ignored properly by other parts of the kernel.
- bounceSignal = syscall.SIGCHLD
+ bounceSignal = unix.SIGCHLD
// bounceSignalMask has only bounceSignal set.
bounceSignalMask = uint64(1 << (uint64(bounceSignal) - 1))
@@ -62,7 +62,7 @@ var (
//
//go:nosplit
func redpill() {
- syscall.RawSyscall(^uintptr(0), 0, 0, 0)
+ unix.RawSyscall(^uintptr(0), 0, 0, 0)
}
// dieHandler is called by dieTrampoline.
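The bounceSignalMask expression above is ordinary integer arithmetic and is untouched by the migration; only the SIGCHLD constant now comes from unix. A small standalone sketch of the same bit computation (Linux signal sets are 1-based, so signal N occupies bit N-1):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// unix.SIGCHLD has the same numeric value as syscall.SIGCHLD (17 on
	// Linux), so the resulting mask sets bit 16.
	bounceSignal := unix.SIGCHLD
	bounceSignalMask := uint64(1 << (uint64(bounceSignal) - 1))
	fmt.Printf("signal %d -> mask %#x\n", bounceSignal, bounceSignalMask)
}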
diff --git a/pkg/sentry/platform/kvm/bluepill_amd64.go b/pkg/sentry/platform/kvm/bluepill_amd64.go
index 83a4766fb..f4b9a5321 100644
--- a/pkg/sentry/platform/kvm/bluepill_amd64.go
+++ b/pkg/sentry/platform/kvm/bluepill_amd64.go
@@ -17,15 +17,14 @@
package kvm
import (
- "syscall"
-
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/sentry/arch"
)
var (
// The action for bluepillSignal is changed by sigaction().
- bluepillSignal = syscall.SIGSEGV
+ bluepillSignal = unix.SIGSEGV
)
// bluepillArchEnter is called during bluepillEnter.
diff --git a/pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go b/pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go
index 0063e947b..198bafdea 100644
--- a/pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go
+++ b/pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go
@@ -17,9 +17,9 @@
package kvm
import (
- "syscall"
"unsafe"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/sentry/arch"
)
@@ -68,8 +68,8 @@ func getHypercallID(addr uintptr) int {
func bluepillStopGuest(c *vCPU) {
// Interrupt: we must have requested an interrupt
// window; set the interrupt line.
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_INTERRUPT,
uintptr(unsafe.Pointer(&bounce))); errno != 0 {
@@ -83,8 +83,8 @@ func bluepillStopGuest(c *vCPU) {
//
//go:nosplit
func bluepillSigBus(c *vCPU) {
- if _, _, errno := syscall.RawSyscall( // escapes: no.
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall( // escapes: no.
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_NMI, 0); errno != 0 {
throw("NMI injection failed")
diff --git a/pkg/sentry/platform/kvm/bluepill_arm64.go b/pkg/sentry/platform/kvm/bluepill_arm64.go
index 6846abee9..e26b7da8d 100644
--- a/pkg/sentry/platform/kvm/bluepill_arm64.go
+++ b/pkg/sentry/platform/kvm/bluepill_arm64.go
@@ -17,15 +17,14 @@
package kvm
import (
- "syscall"
-
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/sentry/arch"
)
var (
// The action for bluepillSignal is changed by sigaction().
- bluepillSignal = syscall.SIGILL
+ bluepillSignal = unix.SIGILL
// vcpuSErrBounce is the event of system error for bouncing KVM.
vcpuSErrBounce = kvmVcpuEvents{
diff --git a/pkg/sentry/platform/kvm/bluepill_arm64_unsafe.go b/pkg/sentry/platform/kvm/bluepill_arm64_unsafe.go
index dbbf2a897..07fc4f216 100644
--- a/pkg/sentry/platform/kvm/bluepill_arm64_unsafe.go
+++ b/pkg/sentry/platform/kvm/bluepill_arm64_unsafe.go
@@ -17,9 +17,9 @@
package kvm
import (
- "syscall"
"unsafe"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/sentry/arch"
)
@@ -80,8 +80,8 @@ func getHypercallID(addr uintptr) int {
//
//go:nosplit
func bluepillStopGuest(c *vCPU) {
- if _, _, errno := syscall.RawSyscall( // escapes: no.
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall( // escapes: no.
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_SET_VCPU_EVENTS,
uintptr(unsafe.Pointer(&vcpuSErrBounce))); errno != 0 {
@@ -94,12 +94,12 @@ func bluepillStopGuest(c *vCPU) {
//go:nosplit
func bluepillSigBus(c *vCPU) {
// Host must support ARM64_HAS_RAS_EXTN.
- if _, _, errno := syscall.RawSyscall( // escapes: no.
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall( // escapes: no.
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_SET_VCPU_EVENTS,
uintptr(unsafe.Pointer(&vcpuSErrNMI))); errno != 0 {
- if errno == syscall.EINVAL {
+ if errno == unix.EINVAL {
throw("No ARM64_HAS_RAS_EXTN feature in host.")
}
throw("nmi sErr injection failed")
@@ -110,8 +110,8 @@ func bluepillSigBus(c *vCPU) {
//
//go:nosplit
func bluepillExtDabt(c *vCPU) {
- if _, _, errno := syscall.RawSyscall( // escapes: no.
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall( // escapes: no.
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_SET_VCPU_EVENTS,
uintptr(unsafe.Pointer(&vcpuExtDabt))); errno != 0 {
diff --git a/pkg/sentry/platform/kvm/bluepill_fault.go b/pkg/sentry/platform/kvm/bluepill_fault.go
index a182e4f22..37c53fa02 100644
--- a/pkg/sentry/platform/kvm/bluepill_fault.go
+++ b/pkg/sentry/platform/kvm/bluepill_fault.go
@@ -16,8 +16,8 @@ package kvm
import (
"sync/atomic"
- "syscall"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -40,7 +40,7 @@ const (
//
//go:nosplit
func yield() {
- syscall.RawSyscall(syscall.SYS_SCHED_YIELD, 0, 0, 0)
+ unix.RawSyscall(unix.SYS_SCHED_YIELD, 0, 0, 0)
}
// calculateBluepillFault calculates the fault address range.
@@ -112,16 +112,16 @@ func handleBluepillFault(m *machine, physical uintptr, phyRegions []physicalRegi
atomic.StoreUint32(&m.nextSlot, slot)
switch errno {
- case syscall.EEXIST:
+ case unix.EEXIST:
// The region already exists. It's possible that we raced with
// another vCPU here. We just revert nextSlot and return true,
// because this must have been satisfied by some other vCPU.
return virtualStart + (physical - physicalStart), true
- case syscall.EINVAL:
+ case unix.EINVAL:
throw("set memory region failed; out of slots")
- case syscall.ENOMEM:
+ case unix.ENOMEM:
throw("set memory region failed: out of memory")
- case syscall.EFAULT:
+ case unix.EFAULT:
throw("set memory region failed: invalid physical range")
default:
throw("set memory region failed: unknown reason")
diff --git a/pkg/sentry/platform/kvm/bluepill_unsafe.go b/pkg/sentry/platform/kvm/bluepill_unsafe.go
index 55da6dd95..6f87236ad 100644
--- a/pkg/sentry/platform/kvm/bluepill_unsafe.go
+++ b/pkg/sentry/platform/kvm/bluepill_unsafe.go
@@ -21,9 +21,9 @@ package kvm
import (
"sync/atomic"
- "syscall"
"unsafe"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/sentry/arch"
)
@@ -102,23 +102,23 @@ func bluepillHandler(context unsafe.Pointer) {
}
for {
- _, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(c.fd), _KVM_RUN, 0) // escapes: no.
+ _, _, errno := unix.RawSyscall(unix.SYS_IOCTL, uintptr(c.fd), _KVM_RUN, 0) // escapes: no.
switch errno {
case 0: // Expected case.
- case syscall.EINTR:
+ case unix.EINTR:
// First, we process whatever pending signal
// interrupted KVM. Since we're in a signal handler
// currently, all signals are masked and the signal
// must have been delivered directly to this thread.
- timeout := syscall.Timespec{}
- sig, _, errno := syscall.RawSyscall6( // escapes: no.
- syscall.SYS_RT_SIGTIMEDWAIT,
+ timeout := unix.Timespec{}
+ sig, _, errno := unix.RawSyscall6( // escapes: no.
+ unix.SYS_RT_SIGTIMEDWAIT,
uintptr(unsafe.Pointer(&bounceSignalMask)),
0, // siginfo.
uintptr(unsafe.Pointer(&timeout)), // timeout.
8, // sigset size.
0, 0)
- if errno == syscall.EAGAIN {
+ if errno == unix.EAGAIN {
continue
}
if errno != 0 {
@@ -140,7 +140,7 @@ func bluepillHandler(context unsafe.Pointer) {
c.runData.requestInterruptWindow = 1
continue // Rerun vCPU.
}
- case syscall.EFAULT:
+ case unix.EFAULT:
// If a fault is not serviceable due to the host
// backing pages having page permissions, instead of an
// MMIO exit we receive EFAULT from the run ioctl. We
@@ -148,7 +148,7 @@ func bluepillHandler(context unsafe.Pointer) {
// mode and have interrupts disabled.
bluepillSigBus(c)
continue // Rerun vCPU.
- case syscall.ENOSYS:
+ case unix.ENOSYS:
bluepillHandleEnosys(c)
continue
default:
diff --git a/pkg/sentry/platform/kvm/filters_amd64.go b/pkg/sentry/platform/kvm/filters_amd64.go
index d3d216aa5..a78be3403 100644
--- a/pkg/sentry/platform/kvm/filters_amd64.go
+++ b/pkg/sentry/platform/kvm/filters_amd64.go
@@ -15,8 +15,6 @@
package kvm
import (
- "syscall"
-
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
@@ -26,17 +24,17 @@ import (
// SyscallFilters returns syscalls made exclusively by the KVM platform.
func (*KVM) SyscallFilters() seccomp.SyscallRules {
return seccomp.SyscallRules{
- syscall.SYS_ARCH_PRCTL: {},
- syscall.SYS_IOCTL: {},
+ unix.SYS_ARCH_PRCTL: {},
+ unix.SYS_IOCTL: {},
unix.SYS_MEMBARRIER: []seccomp.Rule{
{
seccomp.EqualTo(linux.MEMBARRIER_CMD_PRIVATE_EXPEDITED),
seccomp.EqualTo(0),
},
},
- syscall.SYS_MMAP: {},
- syscall.SYS_RT_SIGSUSPEND: {},
- syscall.SYS_RT_SIGTIMEDWAIT: {},
- 0xffffffffffffffff: {}, // KVM uses syscall -1 to transition to host.
+ unix.SYS_MMAP: {},
+ unix.SYS_RT_SIGSUSPEND: {},
+ unix.SYS_RT_SIGTIMEDWAIT: {},
+ 0xffffffffffffffff: {}, // KVM uses syscall -1 to transition to host.
}
}
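The keys in the rules map above are plain syscall numbers; unix.SYS_IOCTL, unix.SYS_MMAP, and the rest are untyped integer constants with the same values as their syscall-package counterparts, which is why they can sit next to the literal 0xffffffffffffffff sentinel. A standalone sketch using an ordinary map rather than gVisor's seccomp types:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical allowlist keyed by syscall number, mirroring the shape of
	// the KVM filter table; this is not the gVisor seccomp.SyscallRules type.
	allowed := map[uintptr]bool{
		unix.SYS_IOCTL:           true,
		unix.SYS_MMAP:            true,
		unix.SYS_RT_SIGSUSPEND:   true,
		unix.SYS_RT_SIGTIMEDWAIT: true,
		0xffffffffffffffff:       true, // KVM uses syscall -1 to transition to host.
	}
	fmt.Println("ioctl allowed:", allowed[unix.SYS_IOCTL])
}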
diff --git a/pkg/sentry/platform/kvm/filters_arm64.go b/pkg/sentry/platform/kvm/filters_arm64.go
index 21abc2a3d..4e5b91048 100644
--- a/pkg/sentry/platform/kvm/filters_arm64.go
+++ b/pkg/sentry/platform/kvm/filters_arm64.go
@@ -15,8 +15,6 @@
package kvm
import (
- "syscall"
-
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
@@ -26,16 +24,16 @@ import (
// SyscallFilters returns syscalls made exclusively by the KVM platform.
func (*KVM) SyscallFilters() seccomp.SyscallRules {
return seccomp.SyscallRules{
- syscall.SYS_IOCTL: {},
+ unix.SYS_IOCTL: {},
unix.SYS_MEMBARRIER: []seccomp.Rule{
{
seccomp.EqualTo(linux.MEMBARRIER_CMD_PRIVATE_EXPEDITED),
seccomp.EqualTo(0),
},
},
- syscall.SYS_MMAP: {},
- syscall.SYS_RT_SIGSUSPEND: {},
- syscall.SYS_RT_SIGTIMEDWAIT: {},
- 0xffffffffffffffff: {}, // KVM uses syscall -1 to transition to host.
+ unix.SYS_MMAP: {},
+ unix.SYS_RT_SIGSUSPEND: {},
+ unix.SYS_RT_SIGTIMEDWAIT: {},
+ 0xffffffffffffffff: {}, // KVM uses syscall -1 to transition to host.
}
}
diff --git a/pkg/sentry/platform/kvm/kvm.go b/pkg/sentry/platform/kvm/kvm.go
index 7bdf57436..92c05a9ad 100644
--- a/pkg/sentry/platform/kvm/kvm.go
+++ b/pkg/sentry/platform/kvm/kvm.go
@@ -18,8 +18,8 @@ package kvm
import (
"fmt"
"os"
- "syscall"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/ring0/pagetables"
"gvisor.dev/gvisor/pkg/sentry/platform"
@@ -77,7 +77,7 @@ var (
// OpenDevice opens the KVM device at /dev/kvm and returns the File.
func OpenDevice() (*os.File, error) {
- f, err := os.OpenFile("/dev/kvm", syscall.O_RDWR, 0)
+ f, err := os.OpenFile("/dev/kvm", unix.O_RDWR, 0)
if err != nil {
return nil, fmt.Errorf("error opening /dev/kvm: %v", err)
}
@@ -99,11 +99,11 @@ func New(deviceFile *os.File) (*KVM, error) {
// Create a new VM fd.
var (
vm uintptr
- errno syscall.Errno
+ errno unix.Errno
)
for {
- vm, _, errno = syscall.Syscall(syscall.SYS_IOCTL, fd, _KVM_CREATE_VM, 0)
- if errno == syscall.EINTR {
+ vm, _, errno = unix.Syscall(unix.SYS_IOCTL, fd, _KVM_CREATE_VM, 0)
+ if errno == unix.EINTR {
continue
}
if errno != 0 {
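The create-VM loop above is the standard retry-on-EINTR idiom; unix.Syscall (rather than RawSyscall) is used on this path because it may block and should cooperate with the Go scheduler. A hedged standalone sketch of the same loop — the _KVM_CREATE_VM value below is assumed to be the usual _IO(0xAE, 0x01) encoding and is defined locally rather than imported from anywhere:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// Assumed ioctl number for KVM_CREATE_VM (_IO(0xAE, 0x01)).
const _KVM_CREATE_VM = 0xAE01

func main() {
	fd, err := unix.Open("/dev/kvm", unix.O_RDWR, 0)
	if err != nil {
		fmt.Println("opening /dev/kvm:", err) // e.g. ENOENT or EPERM without KVM.
		return
	}
	defer unix.Close(fd)

	var (
		vm    uintptr
		errno unix.Errno
	)
	for {
		vm, _, errno = unix.Syscall(unix.SYS_IOCTL, uintptr(fd), _KVM_CREATE_VM, 0)
		if errno == unix.EINTR {
			continue // Interrupted; retry, as in New() above.
		}
		break
	}
	if errno != 0 {
		fmt.Println("KVM_CREATE_VM failed:", errno)
		return
	}
	defer unix.Close(int(vm))
	fmt.Println("created VM fd:", vm)
}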
diff --git a/pkg/sentry/platform/kvm/kvm_amd64_unsafe.go b/pkg/sentry/platform/kvm/kvm_amd64_unsafe.go
index 46c4b9113..0c43d72f4 100644
--- a/pkg/sentry/platform/kvm/kvm_amd64_unsafe.go
+++ b/pkg/sentry/platform/kvm/kvm_amd64_unsafe.go
@@ -18,8 +18,9 @@ package kvm
import (
"fmt"
- "syscall"
"unsafe"
+
+ "golang.org/x/sys/unix"
)
var (
@@ -30,7 +31,7 @@ var (
func updateSystemValues(fd int) error {
// Extract the mmap size.
- sz, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(fd), _KVM_GET_VCPU_MMAP_SIZE, 0)
+ sz, _, errno := unix.RawSyscall(unix.SYS_IOCTL, uintptr(fd), _KVM_GET_VCPU_MMAP_SIZE, 0)
if errno != 0 {
return fmt.Errorf("getting VCPU mmap size: %v", errno)
}
@@ -39,19 +40,19 @@ func updateSystemValues(fd int) error {
runDataSize = int(sz)
// Must do the dance to figure out the number of entries.
- _, _, errno = syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ _, _, errno = unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(fd),
_KVM_GET_SUPPORTED_CPUID,
uintptr(unsafe.Pointer(&cpuidSupported)))
- if errno != 0 && errno != syscall.ENOMEM {
+ if errno != 0 && errno != unix.ENOMEM {
// Some other error occurred.
return fmt.Errorf("getting supported CPUID: %v", errno)
}
// The number should now be correct.
- _, _, errno = syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ _, _, errno = unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(fd),
_KVM_GET_SUPPORTED_CPUID,
uintptr(unsafe.Pointer(&cpuidSupported)))
diff --git a/pkg/sentry/platform/kvm/kvm_arm64_unsafe.go b/pkg/sentry/platform/kvm/kvm_arm64_unsafe.go
index 48ccf8474..f07a9f34d 100644
--- a/pkg/sentry/platform/kvm/kvm_arm64_unsafe.go
+++ b/pkg/sentry/platform/kvm/kvm_arm64_unsafe.go
@@ -18,7 +18,8 @@ package kvm
import (
"fmt"
- "syscall"
+
+ "golang.org/x/sys/unix"
)
var (
@@ -28,7 +29,7 @@ var (
func updateSystemValues(fd int) error {
// Extract the mmap size.
- sz, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(fd), _KVM_GET_VCPU_MMAP_SIZE, 0)
+ sz, _, errno := unix.RawSyscall(unix.SYS_IOCTL, uintptr(fd), _KVM_GET_VCPU_MMAP_SIZE, 0)
if errno != 0 {
return fmt.Errorf("getting VCPU mmap size: %v", errno)
}
diff --git a/pkg/sentry/platform/kvm/kvm_test.go b/pkg/sentry/platform/kvm/kvm_test.go
index 11ca1f0ea..6243b9a04 100644
--- a/pkg/sentry/platform/kvm/kvm_test.go
+++ b/pkg/sentry/platform/kvm/kvm_test.go
@@ -18,10 +18,10 @@ import (
"math/rand"
"reflect"
"sync/atomic"
- "syscall"
"testing"
"time"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/ring0/pagetables"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -195,7 +195,7 @@ func TestApplicationFault(t *testing.T) {
FullRestore: true,
}, &si); err == platform.ErrContextInterrupt {
return true // Retry.
- } else if err != platform.ErrContextSignal || si.Signo != int32(syscall.SIGSEGV) {
+ } else if err != platform.ErrContextSignal || si.Signo != int32(unix.SIGSEGV) {
t.Errorf("application fault with full restore got (%v, %v), expected (%v, SIGSEGV)", err, si, platform.ErrContextSignal)
}
return false
@@ -209,7 +209,7 @@ func TestApplicationFault(t *testing.T) {
PageTables: pt,
}, &si); err == platform.ErrContextInterrupt {
return true // Retry.
- } else if err != platform.ErrContextSignal || si.Signo != int32(syscall.SIGSEGV) {
+ } else if err != platform.ErrContextSignal || si.Signo != int32(unix.SIGSEGV) {
t.Errorf("application fault with partial restore got (%v, %v), expected (%v, SIGSEGV)", err, si, platform.ErrContextSignal)
}
return false
@@ -251,7 +251,7 @@ func TestRegistersFault(t *testing.T) {
FullRestore: true,
}, &si); err == platform.ErrContextInterrupt {
continue // Retry.
- } else if err != platform.ErrContextSignal || si.Signo != int32(syscall.SIGSEGV) {
+ } else if err != platform.ErrContextSignal || si.Signo != int32(unix.SIGSEGV) {
t.Errorf("application register check with full restore got unexpected error: %v", err)
}
if err := testutil.CheckTestRegs(regs, true); err != nil {
@@ -371,7 +371,7 @@ func TestInvalidate(t *testing.T) {
// IsFault returns true iff the given signal represents a fault.
func IsFault(err error, si *arch.SignalInfo) bool {
- return err == platform.ErrContextSignal && si.Signo == int32(syscall.SIGSEGV)
+ return err == platform.ErrContextSignal && si.Signo == int32(unix.SIGSEGV)
}
func TestEmptyAddressSpace(t *testing.T) {
diff --git a/pkg/sentry/platform/kvm/machine.go b/pkg/sentry/platform/kvm/machine.go
index 1ece1b8d8..0e4cf01e1 100644
--- a/pkg/sentry/platform/kvm/machine.go
+++ b/pkg/sentry/platform/kvm/machine.go
@@ -18,8 +18,8 @@ import (
"fmt"
"runtime"
"sync/atomic"
- "syscall"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/procid"
@@ -153,7 +153,7 @@ type dieState struct {
func (m *machine) newVCPU() *vCPU {
// Create the vCPU.
id := int(atomic.AddUint32(&m.nextID, 1) - 1)
- fd, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(m.fd), _KVM_CREATE_VCPU, uintptr(id))
+ fd, _, errno := unix.RawSyscall(unix.SYS_IOCTL, uintptr(m.fd), _KVM_CREATE_VCPU, uintptr(id))
if errno != 0 {
panic(fmt.Sprintf("error creating new vCPU: %v", errno))
}
@@ -193,7 +193,7 @@ func newMachine(vm int) (*machine, error) {
m.available.L = &m.mu
// Pull the maximum vCPUs.
- maxVCPUs, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(m.fd), _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_VCPUS)
+ maxVCPUs, _, errno := unix.RawSyscall(unix.SYS_IOCTL, uintptr(m.fd), _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_VCPUS)
if errno != 0 {
m.maxVCPUs = _KVM_NR_VCPUS
} else {
@@ -205,7 +205,7 @@ func newMachine(vm int) (*machine, error) {
m.kernel.Init(m.maxVCPUs)
// Pull the maximum slots.
- maxSlots, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(m.fd), _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_MEMSLOTS)
+ maxSlots, _, errno := unix.RawSyscall(unix.SYS_IOCTL, uintptr(m.fd), _KVM_CHECK_EXTENSION, _KVM_CAP_MAX_MEMSLOTS)
if errno != 0 {
m.maxSlots = _KVM_NR_MEMSLOTS
} else {
@@ -357,13 +357,13 @@ func (m *machine) Destroy() {
panic(fmt.Sprintf("error unmapping rundata: %v", err))
}
}
- if err := syscall.Close(int(c.fd)); err != nil {
+ if err := unix.Close(int(c.fd)); err != nil {
panic(fmt.Sprintf("error closing vCPU fd: %v", err))
}
}
// vCPUs are gone: teardown machine state.
- if err := syscall.Close(m.fd); err != nil {
+ if err := unix.Close(m.fd); err != nil {
panic(fmt.Sprintf("error closing VM fd: %v", err))
}
}
@@ -546,7 +546,7 @@ func (c *vCPU) NotifyInterrupt() {
}
// pid is used below in bounce.
-var pid = syscall.Getpid()
+var pid = unix.Getpid()
// bounce forces a return to the kernel or to host mode.
//
@@ -588,9 +588,9 @@ func (c *vCPU) bounce(forceGuestExit bool) {
// under memory pressure. Since we already
// marked ourselves as a waiter, we need to
// ensure that a signal is actually delivered.
- if err := syscall.Tgkill(pid, int(atomic.LoadUint64(&c.tid)), bounceSignal); err == nil {
+ if err := unix.Tgkill(pid, int(atomic.LoadUint64(&c.tid)), bounceSignal); err == nil {
break
- } else if err.(syscall.Errno) == syscall.EAGAIN {
+ } else if err.(unix.Errno) == unix.EAGAIN {
continue
} else {
// Nothing else should be returned by tgkill.
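unix.Tgkill returns an ordinary error whose concrete type is unix.Errno, so the type assertion and EAGAIN comparison in bounce() carry over unchanged from the syscall version. A hedged standalone sketch of a tgkill call, using signal 0 as a pure existence probe (the same trick the ptrace thread pool uses later in this diff):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	pid := unix.Getpid()

	// Signal 0 performs no delivery; it only checks that the target thread
	// exists in the given thread group.
	if err := unix.Tgkill(pid, pid, 0); err != nil {
		if errno, ok := err.(unix.Errno); ok && errno == unix.EAGAIN {
			fmt.Println("kernel asked us to retry (EAGAIN)")
		} else {
			fmt.Println("tgkill failed:", err)
		}
		return
	}
	fmt.Println("thread", pid, "exists")
}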
diff --git a/pkg/sentry/platform/kvm/machine_amd64.go b/pkg/sentry/platform/kvm/machine_amd64.go
index 59c752d73..6e583baa3 100644
--- a/pkg/sentry/platform/kvm/machine_amd64.go
+++ b/pkg/sentry/platform/kvm/machine_amd64.go
@@ -21,8 +21,8 @@ import (
"math/big"
"reflect"
"runtime/debug"
- "syscall"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cpuid"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/ring0/pagetables"
@@ -36,8 +36,8 @@ import (
func (m *machine) initArchState() error {
// Set the legacy TSS address. This address is covered by the reserved
// range (up to 4GB). In fact, this is a main reason it exists.
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(m.fd),
_KVM_SET_TSS_ADDR,
uintptr(reservedMemory-(3*usermem.PageSize))); errno != 0 {
@@ -297,13 +297,13 @@ func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, e
func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (usermem.AccessType, error) {
// Check for canonical addresses.
if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Rip) {
- return nonCanonical(regs.Rip, int32(syscall.SIGSEGV), info)
+ return nonCanonical(regs.Rip, int32(unix.SIGSEGV), info)
} else if !ring0.IsCanonical(regs.Rsp) {
- return nonCanonical(regs.Rsp, int32(syscall.SIGBUS), info)
+ return nonCanonical(regs.Rsp, int32(unix.SIGBUS), info)
} else if !ring0.IsCanonical(regs.Fs_base) {
- return nonCanonical(regs.Fs_base, int32(syscall.SIGBUS), info)
+ return nonCanonical(regs.Fs_base, int32(unix.SIGBUS), info)
} else if !ring0.IsCanonical(regs.Gs_base) {
- return nonCanonical(regs.Gs_base, int32(syscall.SIGBUS), info)
+ return nonCanonical(regs.Gs_base, int32(unix.SIGBUS), info)
}
// Assign PCIDs.
@@ -332,11 +332,11 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
return usermem.NoAccess, nil
case ring0.PageFault:
- return c.fault(int32(syscall.SIGSEGV), info)
+ return c.fault(int32(unix.SIGSEGV), info)
case ring0.Debug, ring0.Breakpoint:
*info = arch.SignalInfo{
- Signo: int32(syscall.SIGTRAP),
+ Signo: int32(unix.SIGTRAP),
Code: 1, // TRAP_BRKPT (breakpoint).
}
info.SetAddr(switchOpts.Registers.Rip) // Include address.
@@ -348,7 +348,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
ring0.InvalidTSS,
ring0.StackSegmentFault:
*info = arch.SignalInfo{
- Signo: int32(syscall.SIGSEGV),
+ Signo: int32(unix.SIGSEGV),
Code: arch.SignalInfoKernel,
}
info.SetAddr(switchOpts.Registers.Rip) // Include address.
@@ -362,7 +362,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
case ring0.InvalidOpcode:
*info = arch.SignalInfo{
- Signo: int32(syscall.SIGILL),
+ Signo: int32(unix.SIGILL),
Code: 1, // ILL_ILLOPC (illegal opcode).
}
info.SetAddr(switchOpts.Registers.Rip) // Include address.
@@ -370,7 +370,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
case ring0.DivideByZero:
*info = arch.SignalInfo{
- Signo: int32(syscall.SIGFPE),
+ Signo: int32(unix.SIGFPE),
Code: 1, // FPE_INTDIV (divide by zero).
}
info.SetAddr(switchOpts.Registers.Rip) // Include address.
@@ -378,7 +378,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
case ring0.Overflow:
*info = arch.SignalInfo{
- Signo: int32(syscall.SIGFPE),
+ Signo: int32(unix.SIGFPE),
Code: 2, // FPE_INTOVF (integer overflow).
}
info.SetAddr(switchOpts.Registers.Rip) // Include address.
@@ -387,7 +387,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
case ring0.X87FloatingPointException,
ring0.SIMDFloatingPointException:
*info = arch.SignalInfo{
- Signo: int32(syscall.SIGFPE),
+ Signo: int32(unix.SIGFPE),
Code: 7, // FPE_FLTINV (invalid operation).
}
info.SetAddr(switchOpts.Registers.Rip) // Include address.
@@ -398,7 +398,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
case ring0.AlignmentCheck:
*info = arch.SignalInfo{
- Signo: int32(syscall.SIGBUS),
+ Signo: int32(unix.SIGBUS),
Code: 2, // BUS_ADRERR (physical address does not exist).
}
return usermem.NoAccess, platform.ErrContextSignal
@@ -409,7 +409,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
// really not. This could happen, e.g. if some file is
// truncated (and would generate a SIGBUS) and we map it
// directly into the instance.
- return c.fault(int32(syscall.SIGBUS), info)
+ return c.fault(int32(unix.SIGBUS), info)
case ring0.DeviceNotAvailable,
ring0.DoubleFault,
diff --git a/pkg/sentry/platform/kvm/machine_amd64_unsafe.go b/pkg/sentry/platform/kvm/machine_amd64_unsafe.go
index b430f92c6..83bcc7406 100644
--- a/pkg/sentry/platform/kvm/machine_amd64_unsafe.go
+++ b/pkg/sentry/platform/kvm/machine_amd64_unsafe.go
@@ -19,9 +19,9 @@ package kvm
import (
"fmt"
"sync/atomic"
- "syscall"
"unsafe"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
)
@@ -31,15 +31,15 @@ import (
//
//go:nosplit
func (c *vCPU) loadSegments(tid uint64) {
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_ARCH_PRCTL,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_ARCH_PRCTL,
linux.ARCH_GET_FS,
uintptr(unsafe.Pointer(&c.CPU.Registers().Fs_base)),
0); errno != 0 {
throw("getting FS segment")
}
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_ARCH_PRCTL,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_ARCH_PRCTL,
linux.ARCH_GET_GS,
uintptr(unsafe.Pointer(&c.CPU.Registers().Gs_base)),
0); errno != 0 {
@@ -50,8 +50,8 @@ func (c *vCPU) loadSegments(tid uint64) {
// setCPUID sets the CPUID to be used by the guest.
func (c *vCPU) setCPUID() error {
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_SET_CPUID2,
uintptr(unsafe.Pointer(&cpuidSupported))); errno != 0 {
@@ -64,8 +64,8 @@ func (c *vCPU) setCPUID() error {
//
// If mustSucceed is true, then this function panics on error.
func (c *vCPU) getTSCFreq() (uintptr, error) {
- rawFreq, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ rawFreq, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_GET_TSC_KHZ,
0 /* ignored */)
@@ -77,8 +77,8 @@ func (c *vCPU) getTSCFreq() (uintptr, error) {
// setTSCFreq sets the TSC frequency.
func (c *vCPU) setTSCFreq(freq uintptr) error {
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_SET_TSC_KHZ,
freq /* khz */); errno != 0 {
@@ -95,8 +95,8 @@ func (c *vCPU) setTSC(value uint64) error {
}
registers.entries[0].index = _MSR_IA32_TSC
registers.entries[0].data = value
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_SET_MSRS,
uintptr(unsafe.Pointer(&registers))); errno != 0 {
@@ -108,9 +108,9 @@ func (c *vCPU) setTSC(value uint64) error {
// setUserRegisters sets user registers in the vCPU.
//
//go:nosplit
-func (c *vCPU) setUserRegisters(uregs *userRegs) syscall.Errno {
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+func (c *vCPU) setUserRegisters(uregs *userRegs) unix.Errno {
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_SET_REGS,
uintptr(unsafe.Pointer(uregs))); errno != 0 {
@@ -124,9 +124,9 @@ func (c *vCPU) setUserRegisters(uregs *userRegs) syscall.Errno {
// This is safe to call from a nosplit context.
//
//go:nosplit
-func (c *vCPU) getUserRegisters(uregs *userRegs) syscall.Errno {
- if _, _, errno := syscall.RawSyscall( // escapes: no.
- syscall.SYS_IOCTL,
+func (c *vCPU) getUserRegisters(uregs *userRegs) unix.Errno {
+ if _, _, errno := unix.RawSyscall( // escapes: no.
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_GET_REGS,
uintptr(unsafe.Pointer(uregs))); errno != 0 {
@@ -137,8 +137,8 @@ func (c *vCPU) getUserRegisters(uregs *userRegs) syscall.Errno {
// setSystemRegisters sets system registers.
func (c *vCPU) setSystemRegisters(sregs *systemRegs) error {
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_SET_SREGS,
uintptr(unsafe.Pointer(sregs))); errno != 0 {
@@ -150,9 +150,9 @@ func (c *vCPU) setSystemRegisters(sregs *systemRegs) error {
// getSystemRegisters sets system registers.
//
//go:nosplit
-func (c *vCPU) getSystemRegisters(sregs *systemRegs) syscall.Errno {
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+func (c *vCPU) getSystemRegisters(sregs *systemRegs) unix.Errno {
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_GET_SREGS,
uintptr(unsafe.Pointer(sregs))); errno != 0 {
diff --git a/pkg/sentry/platform/kvm/machine_arm64_unsafe.go b/pkg/sentry/platform/kvm/machine_arm64_unsafe.go
index 78cbd9701..059aa43d0 100644
--- a/pkg/sentry/platform/kvm/machine_arm64_unsafe.go
+++ b/pkg/sentry/platform/kvm/machine_arm64_unsafe.go
@@ -20,9 +20,9 @@ import (
"fmt"
"reflect"
"sync/atomic"
- "syscall"
"unsafe"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/ring0/pagetables"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -39,8 +39,8 @@ var vcpuInit kvmVcpuInit
// initArchState initializes architecture-specific state.
func (m *machine) initArchState() error {
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(m.fd),
_KVM_ARM_PREFERRED_TARGET,
uintptr(unsafe.Pointer(&vcpuInit))); errno != 0 {
@@ -62,8 +62,8 @@ func (c *vCPU) initArchState() error {
regGet.addr = uint64(reflect.ValueOf(&dataGet).Pointer())
vcpuInit.features[0] |= (1 << _KVM_ARM_VCPU_PSCI_0_2)
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_ARM_VCPU_INIT,
uintptr(unsafe.Pointer(&vcpuInit))); errno != 0 {
@@ -186,8 +186,8 @@ func (c *vCPU) loadSegments(tid uint64) {
}
func (c *vCPU) setOneRegister(reg *kvmOneReg) error {
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_SET_ONE_REG,
uintptr(unsafe.Pointer(reg))); errno != 0 {
@@ -197,8 +197,8 @@ func (c *vCPU) setOneRegister(reg *kvmOneReg) error {
}
func (c *vCPU) getOneRegister(reg *kvmOneReg) error {
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_GET_ONE_REG,
uintptr(unsafe.Pointer(reg))); errno != 0 {
@@ -211,9 +211,9 @@ func (c *vCPU) getOneRegister(reg *kvmOneReg) error {
func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (usermem.AccessType, error) {
// Check for canonical addresses.
if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Pc) {
- return nonCanonical(regs.Pc, int32(syscall.SIGSEGV), info)
+ return nonCanonical(regs.Pc, int32(unix.SIGSEGV), info)
} else if !ring0.IsCanonical(regs.Sp) {
- return nonCanonical(regs.Sp, int32(syscall.SIGSEGV), info)
+ return nonCanonical(regs.Sp, int32(unix.SIGSEGV), info)
}
// Assign PCIDs.
@@ -247,23 +247,23 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
// Fast path: system call executed.
return usermem.NoAccess, nil
case ring0.PageFault:
- return c.fault(int32(syscall.SIGSEGV), info)
+ return c.fault(int32(unix.SIGSEGV), info)
case ring0.El0ErrNMI:
- return c.fault(int32(syscall.SIGBUS), info)
+ return c.fault(int32(unix.SIGBUS), info)
case ring0.Vector(bounce): // ring0.VirtualizationException.
return usermem.NoAccess, platform.ErrContextInterrupt
case ring0.El0SyncUndef:
- return c.fault(int32(syscall.SIGILL), info)
+ return c.fault(int32(unix.SIGILL), info)
case ring0.El0SyncDbg:
*info = arch.SignalInfo{
- Signo: int32(syscall.SIGTRAP),
+ Signo: int32(unix.SIGTRAP),
Code: 1, // TRAP_BRKPT (breakpoint).
}
info.SetAddr(switchOpts.Registers.Pc) // Include address.
return usermem.AccessType{}, platform.ErrContextSignal
case ring0.El0SyncSpPc:
*info = arch.SignalInfo{
- Signo: int32(syscall.SIGBUS),
+ Signo: int32(unix.SIGBUS),
Code: 2, // BUS_ADRERR (physical address does not exist).
}
return usermem.NoAccess, platform.ErrContextSignal
diff --git a/pkg/sentry/platform/kvm/machine_unsafe.go b/pkg/sentry/platform/kvm/machine_unsafe.go
index c322551d2..49e1c7136 100644
--- a/pkg/sentry/platform/kvm/machine_unsafe.go
+++ b/pkg/sentry/platform/kvm/machine_unsafe.go
@@ -23,9 +23,9 @@ import (
"fmt"
"math"
"sync/atomic"
- "syscall"
"unsafe"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
)
@@ -41,7 +41,7 @@ func exitsyscall()
// directly (instead of wrapping in an error) to avoid allocations.
//
//go:nosplit
-func (m *machine) setMemoryRegion(slot int, physical, length, virtual uintptr, flags uint32) syscall.Errno {
+func (m *machine) setMemoryRegion(slot int, physical, length, virtual uintptr, flags uint32) unix.Errno {
userRegion := userMemoryRegion{
slot: uint32(slot),
flags: uint32(flags),
@@ -51,8 +51,8 @@ func (m *machine) setMemoryRegion(slot int, physical, length, virtual uintptr, f
}
// Set the region.
- _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(m.fd),
_KVM_SET_USER_MEMORY_REGION,
uintptr(unsafe.Pointer(&userRegion)))
@@ -61,12 +61,12 @@ func (m *machine) setMemoryRegion(slot int, physical, length, virtual uintptr, f
// mapRunData maps the vCPU run data.
func mapRunData(fd int) (*runData, error) {
- r, _, errno := syscall.RawSyscall6(
- syscall.SYS_MMAP,
+ r, _, errno := unix.RawSyscall6(
+ unix.SYS_MMAP,
0,
uintptr(runDataSize),
- syscall.PROT_READ|syscall.PROT_WRITE,
- syscall.MAP_SHARED,
+ unix.PROT_READ|unix.PROT_WRITE,
+ unix.MAP_SHARED,
uintptr(fd),
0)
if errno != 0 {
@@ -77,8 +77,8 @@ func mapRunData(fd int) (*runData, error) {
// unmapRunData unmaps the vCPU run data.
func unmapRunData(r *runData) error {
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_MUNMAP,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_MUNMAP,
uintptr(unsafe.Pointer(r)),
uintptr(runDataSize),
0); errno != 0 {
@@ -115,8 +115,8 @@ func (a *atomicAddressSpace) get() *addressSpace {
//
//go:nosplit
func (c *vCPU) notify() {
- _, _, errno := syscall.RawSyscall6( // escapes: no.
- syscall.SYS_FUTEX,
+ _, _, errno := unix.RawSyscall6( // escapes: no.
+ unix.SYS_FUTEX,
uintptr(unsafe.Pointer(&c.state)),
linux.FUTEX_WAKE|linux.FUTEX_PRIVATE_FLAG,
math.MaxInt32, // Number of waiters.
@@ -133,13 +133,13 @@ func (c *vCPU) notify() {
//
// This panics on error.
func (c *vCPU) waitUntilNot(state uint32) {
- _, _, errno := syscall.Syscall6(
- syscall.SYS_FUTEX,
+ _, _, errno := unix.Syscall6(
+ unix.SYS_FUTEX,
uintptr(unsafe.Pointer(&c.state)),
linux.FUTEX_WAIT|linux.FUTEX_PRIVATE_FLAG,
uintptr(state),
0, 0, 0)
- if errno != 0 && errno != syscall.EINTR && errno != syscall.EAGAIN {
+ if errno != 0 && errno != unix.EINTR && errno != unix.EAGAIN {
panic("futex wait error")
}
}
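notify and waitUntilNot above keep gVisor's abi/linux futex constants and only change the syscall entry point. golang.org/x/sys/unix also exports FUTEX_* constants, so the wake half can be sketched standalone (a hedged illustration, not the gVisor code):

package main

import (
	"fmt"
	"math"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	var state uint32

	// Wake up to MaxInt32 waiters on &state. With no waiters this returns 0;
	// the point is the shape of the six-argument raw futex call.
	n, _, errno := unix.RawSyscall6(
		unix.SYS_FUTEX,
		uintptr(unsafe.Pointer(&state)),
		unix.FUTEX_WAKE|unix.FUTEX_PRIVATE_FLAG,
		math.MaxInt32, // Number of waiters.
		0, 0, 0)
	if errno != 0 {
		fmt.Println("futex wake failed:", errno)
		return
	}
	fmt.Println("woke", n, "waiters")
}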
@@ -159,8 +159,8 @@ func (c *vCPU) setSignalMask() error {
data.length = 8 // Fixed sigset size.
data.mask1 = ^uint32(bounceSignalMask & 0xffffffff)
data.mask2 = ^uint32(bounceSignalMask >> 32)
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_IOCTL,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_SET_SIGNAL_MASK,
uintptr(unsafe.Pointer(&data))); errno != 0 {
diff --git a/pkg/sentry/platform/kvm/physical_map.go b/pkg/sentry/platform/kvm/physical_map.go
index 8bdec93ae..7376d8b8d 100644
--- a/pkg/sentry/platform/kvm/physical_map.go
+++ b/pkg/sentry/platform/kvm/physical_map.go
@@ -17,8 +17,8 @@ package kvm
import (
"fmt"
"sort"
- "syscall"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/ring0"
"gvisor.dev/gvisor/pkg/usermem"
@@ -90,12 +90,12 @@ func fillAddressSpace() (excludedRegions []region) {
required := uintptr(requiredAddr)
current := required // Attempted mmap size.
for filled := uintptr(0); filled < required && current > 0; {
- addr, _, errno := syscall.RawSyscall6(
- syscall.SYS_MMAP,
+ addr, _, errno := unix.RawSyscall6(
+ unix.SYS_MMAP,
0, // Suggested address.
current,
- syscall.PROT_NONE,
- syscall.MAP_ANONYMOUS|syscall.MAP_PRIVATE|syscall.MAP_NORESERVE,
+ unix.PROT_NONE,
+ unix.MAP_ANONYMOUS|unix.MAP_PRIVATE|unix.MAP_NORESERVE,
0, 0)
if errno != 0 {
// Attempt half the size; overflow not possible.
diff --git a/pkg/sentry/platform/kvm/virtual_map_test.go b/pkg/sentry/platform/kvm/virtual_map_test.go
index 327e2be4f..9b4545fdd 100644
--- a/pkg/sentry/platform/kvm/virtual_map_test.go
+++ b/pkg/sentry/platform/kvm/virtual_map_test.go
@@ -15,9 +15,9 @@
package kvm
import (
- "syscall"
"testing"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -45,31 +45,31 @@ func TestParseMaps(t *testing.T) {
}
// MMap a new page.
- addr, _, errno := syscall.RawSyscall6(
- syscall.SYS_MMAP, 0, usermem.PageSize,
- syscall.PROT_READ|syscall.PROT_WRITE,
- syscall.MAP_ANONYMOUS|syscall.MAP_PRIVATE, 0, 0)
+ addr, _, errno := unix.RawSyscall6(
+ unix.SYS_MMAP, 0, usermem.PageSize,
+ unix.PROT_READ|unix.PROT_WRITE,
+ unix.MAP_ANONYMOUS|unix.MAP_PRIVATE, 0, 0)
if errno != 0 {
t.Fatalf("unexpected map error: %v", errno)
}
// Re-parse maps.
if err := applyVirtualRegions(c.Containing(addr)); err != nil {
- syscall.RawSyscall(syscall.SYS_MUNMAP, addr, usermem.PageSize, 0)
+ unix.RawSyscall(unix.SYS_MUNMAP, addr, usermem.PageSize, 0)
t.Fatalf("unexpected error: %v", err)
}
// Assert that it now does contain the region.
if !c.ok {
- syscall.RawSyscall(syscall.SYS_MUNMAP, addr, usermem.PageSize, 0)
+ unix.RawSyscall(unix.SYS_MUNMAP, addr, usermem.PageSize, 0)
t.Fatalf("updated map does not contain 0x%08x, expected true", addr)
}
// Map the region as PROT_NONE.
- newAddr, _, errno := syscall.RawSyscall6(
- syscall.SYS_MMAP, addr, usermem.PageSize,
- syscall.PROT_NONE,
- syscall.MAP_ANONYMOUS|syscall.MAP_FIXED|syscall.MAP_PRIVATE, 0, 0)
+ newAddr, _, errno := unix.RawSyscall6(
+ unix.SYS_MMAP, addr, usermem.PageSize,
+ unix.PROT_NONE,
+ unix.MAP_ANONYMOUS|unix.MAP_FIXED|unix.MAP_PRIVATE, 0, 0)
if errno != 0 {
t.Fatalf("unexpected map error: %v", errno)
}
@@ -89,5 +89,5 @@ func TestParseMaps(t *testing.T) {
}
// Unmap the region.
- syscall.RawSyscall(syscall.SYS_MUNMAP, addr, usermem.PageSize, 0)
+ unix.RawSyscall(unix.SYS_MUNMAP, addr, usermem.PageSize, 0)
}
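Outside of //go:nosplit paths, test code like this could also use the golang.org/x/sys/unix wrappers, which return ordinary errors and a []byte instead of a raw address. A hedged sketch of the equivalent map/unmap pair (page size hard-coded to 4096 for simplicity):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	const pageSize = 4096

	// Anonymous private mapping, equivalent to the raw SYS_MMAP call in the test.
	b, err := unix.Mmap(-1, 0, pageSize,
		unix.PROT_READ|unix.PROT_WRITE,
		unix.MAP_ANONYMOUS|unix.MAP_PRIVATE)
	if err != nil {
		fmt.Println("mmap failed:", err)
		return
	}
	b[0] = 42 // The mapping is readable and writable.

	if err := unix.Munmap(b); err != nil {
		fmt.Println("munmap failed:", err)
		return
	}
	fmt.Println("mapped and unmapped one page")
}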
diff --git a/pkg/sentry/platform/ptrace/filters.go b/pkg/sentry/platform/ptrace/filters.go
index 20fc62acb..ba4503b0d 100644
--- a/pkg/sentry/platform/ptrace/filters.go
+++ b/pkg/sentry/platform/ptrace/filters.go
@@ -15,16 +15,15 @@
package ptrace
import (
- "syscall"
-
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/seccomp"
)
// SyscallFilters returns syscalls made exclusively by the ptrace platform.
func (*PTrace) SyscallFilters() seccomp.SyscallRules {
return seccomp.SyscallRules{
- syscall.SYS_PTRACE: {},
- syscall.SYS_TGKILL: {},
- syscall.SYS_WAIT4: {},
+ unix.SYS_PTRACE: {},
+ unix.SYS_TGKILL: {},
+ unix.SYS_WAIT4: {},
}
}
diff --git a/pkg/sentry/platform/ptrace/ptrace_arm64_unsafe.go b/pkg/sentry/platform/ptrace/ptrace_arm64_unsafe.go
index 32b8a6be9..4f7fe993a 100644
--- a/pkg/sentry/platform/ptrace/ptrace_arm64_unsafe.go
+++ b/pkg/sentry/platform/ptrace/ptrace_arm64_unsafe.go
@@ -17,21 +17,21 @@
package ptrace
import (
- "syscall"
"unsafe"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
)
// getTLS gets the thread local storage register.
func (t *thread) getTLS(tls *uint64) error {
- iovec := syscall.Iovec{
+ iovec := unix.Iovec{
Base: (*byte)(unsafe.Pointer(tls)),
Len: uint64(unsafe.Sizeof(*tls)),
}
- _, _, errno := syscall.RawSyscall6(
- syscall.SYS_PTRACE,
- syscall.PTRACE_GETREGSET,
+ _, _, errno := unix.RawSyscall6(
+ unix.SYS_PTRACE,
+ unix.PTRACE_GETREGSET,
uintptr(t.tid),
linux.NT_ARM_TLS,
uintptr(unsafe.Pointer(&iovec)),
@@ -44,13 +44,13 @@ func (t *thread) getTLS(tls *uint64) error {
// setTLS sets the thread local storage register.
func (t *thread) setTLS(tls *uint64) error {
- iovec := syscall.Iovec{
+ iovec := unix.Iovec{
Base: (*byte)(unsafe.Pointer(tls)),
Len: uint64(unsafe.Sizeof(*tls)),
}
- _, _, errno := syscall.RawSyscall6(
- syscall.SYS_PTRACE,
- syscall.PTRACE_SETREGSET,
+ _, _, errno := unix.RawSyscall6(
+ unix.SYS_PTRACE,
+ unix.PTRACE_SETREGSET,
uintptr(t.tid),
linux.NT_ARM_TLS,
uintptr(unsafe.Pointer(&iovec)),
diff --git a/pkg/sentry/platform/ptrace/ptrace_unsafe.go b/pkg/sentry/platform/ptrace/ptrace_unsafe.go
index 8b72d24e8..2c21f946e 100644
--- a/pkg/sentry/platform/ptrace/ptrace_unsafe.go
+++ b/pkg/sentry/platform/ptrace/ptrace_unsafe.go
@@ -15,9 +15,9 @@
package ptrace
import (
- "syscall"
"unsafe"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/usermem"
@@ -25,13 +25,13 @@ import (
// getRegs gets the general purpose register set.
func (t *thread) getRegs(regs *arch.Registers) error {
- iovec := syscall.Iovec{
+ iovec := unix.Iovec{
Base: (*byte)(unsafe.Pointer(regs)),
Len: uint64(unsafe.Sizeof(*regs)),
}
- _, _, errno := syscall.RawSyscall6(
- syscall.SYS_PTRACE,
- syscall.PTRACE_GETREGSET,
+ _, _, errno := unix.RawSyscall6(
+ unix.SYS_PTRACE,
+ unix.PTRACE_GETREGSET,
uintptr(t.tid),
linux.NT_PRSTATUS,
uintptr(unsafe.Pointer(&iovec)),
@@ -44,13 +44,13 @@ func (t *thread) getRegs(regs *arch.Registers) error {
// setRegs sets the general purpose register set.
func (t *thread) setRegs(regs *arch.Registers) error {
- iovec := syscall.Iovec{
+ iovec := unix.Iovec{
Base: (*byte)(unsafe.Pointer(regs)),
Len: uint64(unsafe.Sizeof(*regs)),
}
- _, _, errno := syscall.RawSyscall6(
- syscall.SYS_PTRACE,
- syscall.PTRACE_SETREGSET,
+ _, _, errno := unix.RawSyscall6(
+ unix.SYS_PTRACE,
+ unix.PTRACE_SETREGSET,
uintptr(t.tid),
linux.NT_PRSTATUS,
uintptr(unsafe.Pointer(&iovec)),
@@ -61,15 +61,15 @@ func (t *thread) setRegs(regs *arch.Registers) error {
return nil
}
-// getFPRegs gets the floating-point data via the GETREGSET ptrace syscall.
+// getFPRegs gets the floating-point data via the GETREGSET ptrace unix.
func (t *thread) getFPRegs(fpState *arch.FloatingPointData, fpLen uint64, useXsave bool) error {
- iovec := syscall.Iovec{
+ iovec := unix.Iovec{
Base: (*byte)(fpState),
Len: fpLen,
}
- _, _, errno := syscall.RawSyscall6(
- syscall.SYS_PTRACE,
- syscall.PTRACE_GETREGSET,
+ _, _, errno := unix.RawSyscall6(
+ unix.SYS_PTRACE,
+ unix.PTRACE_GETREGSET,
uintptr(t.tid),
fpRegSet(useXsave),
uintptr(unsafe.Pointer(&iovec)),
@@ -80,15 +80,15 @@ func (t *thread) getFPRegs(fpState *arch.FloatingPointData, fpLen uint64, useXsa
return nil
}
-// setFPRegs sets the floating-point data via the SETREGSET ptrace syscall.
+// setFPRegs sets the floating-point data via the SETREGSET ptrace unix.
func (t *thread) setFPRegs(fpState *arch.FloatingPointData, fpLen uint64, useXsave bool) error {
- iovec := syscall.Iovec{
+ iovec := unix.Iovec{
Base: (*byte)(fpState),
Len: fpLen,
}
- _, _, errno := syscall.RawSyscall6(
- syscall.SYS_PTRACE,
- syscall.PTRACE_SETREGSET,
+ _, _, errno := unix.RawSyscall6(
+ unix.SYS_PTRACE,
+ unix.PTRACE_SETREGSET,
uintptr(t.tid),
fpRegSet(useXsave),
uintptr(unsafe.Pointer(&iovec)),
@@ -101,9 +101,9 @@ func (t *thread) setFPRegs(fpState *arch.FloatingPointData, fpLen uint64, useXsa
// getSignalInfo retrieves information about the signal that caused the stop.
func (t *thread) getSignalInfo(si *arch.SignalInfo) error {
- _, _, errno := syscall.RawSyscall6(
- syscall.SYS_PTRACE,
- syscall.PTRACE_GETSIGINFO,
+ _, _, errno := unix.RawSyscall6(
+ unix.SYS_PTRACE,
+ unix.PTRACE_GETSIGINFO,
uintptr(t.tid),
0,
uintptr(unsafe.Pointer(si)),
@@ -123,18 +123,18 @@ func (t *thread) getSignalInfo(si *arch.SignalInfo) error {
func (t *thread) clone() (*thread, error) {
r, ok := usermem.Addr(stackPointer(&t.initRegs)).RoundUp()
if !ok {
- return nil, syscall.EINVAL
+ return nil, unix.EINVAL
}
rval, err := t.syscallIgnoreInterrupt(
&t.initRegs,
- syscall.SYS_CLONE,
+ unix.SYS_CLONE,
arch.SyscallArgument{Value: uintptr(
- syscall.CLONE_FILES |
- syscall.CLONE_FS |
- syscall.CLONE_SIGHAND |
- syscall.CLONE_THREAD |
- syscall.CLONE_PTRACE |
- syscall.CLONE_VM)},
+ unix.CLONE_FILES |
+ unix.CLONE_FS |
+ unix.CLONE_SIGHAND |
+ unix.CLONE_THREAD |
+ unix.CLONE_PTRACE |
+ unix.CLONE_VM)},
// The stack pointer is just made up, but we have it be
// something sensible so the kernel doesn't think we're
// up to no good. Which we are.
@@ -158,9 +158,9 @@ func (t *thread) clone() (*thread, error) {
// getEventMessage retrieves a message about the ptrace event that just happened.
func (t *thread) getEventMessage() (uintptr, error) {
var msg uintptr
- _, _, errno := syscall.RawSyscall6(
- syscall.SYS_PTRACE,
- syscall.PTRACE_GETEVENTMSG,
+ _, _, errno := unix.RawSyscall6(
+ unix.SYS_PTRACE,
+ unix.PTRACE_GETEVENTMSG,
uintptr(t.tid),
0,
uintptr(unsafe.Pointer(&msg)),
diff --git a/pkg/sentry/platform/ptrace/stub_unsafe.go b/pkg/sentry/platform/ptrace/stub_unsafe.go
index 341dde143..780227248 100644
--- a/pkg/sentry/platform/ptrace/stub_unsafe.go
+++ b/pkg/sentry/platform/ptrace/stub_unsafe.go
@@ -16,9 +16,9 @@ package ptrace
import (
"reflect"
- "syscall"
"unsafe"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/safecopy"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -56,17 +56,17 @@ func stubInit() {
// something that may have been there already. We just walk
// down the address space until we find a place where the stub
// can be placed.
- addr, _, errno := syscall.RawSyscall6(
- syscall.SYS_MMAP,
+ addr, _, errno := unix.RawSyscall6(
+ unix.SYS_MMAP,
stubStart,
mapLen,
- syscall.PROT_WRITE|syscall.PROT_READ,
- syscall.MAP_PRIVATE|syscall.MAP_ANONYMOUS,
+ unix.PROT_WRITE|unix.PROT_READ,
+ unix.MAP_PRIVATE|unix.MAP_ANONYMOUS,
0 /* fd */, 0 /* offset */)
if addr != stubStart || errno != 0 {
if addr != 0 {
// Unmap the region we've mapped accidentally.
- syscall.RawSyscall(syscall.SYS_MUNMAP, addr, mapLen, 0)
+ unix.RawSyscall(unix.SYS_MUNMAP, addr, mapLen, 0)
}
// Attempt to begin at a lower address.
@@ -79,11 +79,11 @@ func stubInit() {
copy(targetSlice, stubSlice)
// Make the stub executable.
- if _, _, errno := syscall.RawSyscall(
- syscall.SYS_MPROTECT,
+ if _, _, errno := unix.RawSyscall(
+ unix.SYS_MPROTECT,
stubStart,
mapLen,
- syscall.PROT_EXEC|syscall.PROT_READ); errno != 0 {
+ unix.PROT_EXEC|unix.PROT_READ); errno != 0 {
panic("mprotect failed: " + errno.Error())
}
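The stub-mapping logic above requests a writable region, copies the stub bytes in, and then flips the pages to read+execute. A hedged standalone sketch of the mprotect step using the unix constants (operating on an ordinary anonymous mapping, not the real stub):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	const pageSize = 4096

	// Start with a writable anonymous page, as stubInit does before copying.
	b, err := unix.Mmap(-1, 0, pageSize,
		unix.PROT_WRITE|unix.PROT_READ,
		unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
	if err != nil {
		fmt.Println("mmap failed:", err)
		return
	}
	defer unix.Munmap(b)

	// ... code bytes would be copied into b here ...

	// Then drop write and add execute, mirroring the mprotect call above.
	if err := unix.Mprotect(b, unix.PROT_EXEC|unix.PROT_READ); err != nil {
		fmt.Println("mprotect failed:", err)
		return
	}
	fmt.Println("page is now read+execute")
}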
diff --git a/pkg/sentry/platform/ptrace/subprocess.go b/pkg/sentry/platform/ptrace/subprocess.go
index 17fb0a0d8..acccbfe2e 100644
--- a/pkg/sentry/platform/ptrace/subprocess.go
+++ b/pkg/sentry/platform/ptrace/subprocess.go
@@ -18,7 +18,6 @@ import (
"fmt"
"os"
"runtime"
- "syscall"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
@@ -35,9 +34,9 @@ import (
//
// These constants are only used in subprocess.go.
const (
- ERESTARTSYS = syscall.Errno(512)
- ERESTARTNOINTR = syscall.Errno(513)
- ERESTARTNOHAND = syscall.Errno(514)
+ ERESTARTSYS = unix.Errno(512)
+ ERESTARTNOINTR = unix.Errno(513)
+ ERESTARTNOHAND = unix.Errno(514)
)
// globalPool exists to solve two distinct problems:
@@ -96,7 +95,7 @@ func (tp *threadPool) lookupOrCreate(currentTID int32, newThread func() *thread)
// threads never exiting.
for origTID, t := range tp.threads {
// Signal zero is an easy existence check.
- if err := syscall.Tgkill(syscall.Getpid(), int(origTID), 0); err != nil {
+ if err := unix.Tgkill(unix.Getpid(), int(origTID), 0); err != nil {
// This thread has been abandoned; reuse it.
delete(tp.threads, origTID)
tp.threads[currentTID] = t
@@ -186,7 +185,7 @@ func newSubprocess(create func() (*thread, error)) (*subprocess, error) {
// (Hopefully nobody tgkilled it with a signal <
// SIGSTOP before the SIGSTOP was delivered, in which
// case that signal would be delivered before SIGSTOP.)
- if sig := t.wait(stopped); sig != syscall.SIGSTOP {
+ if sig := t.wait(stopped); sig != unix.SIGSTOP {
panic(fmt.Sprintf("error waiting for new clone: expected SIGSTOP, got %v", sig))
}
@@ -269,7 +268,7 @@ func (s *subprocess) newThread() *thread {
// attach attaches to the thread.
func (t *thread) attach() {
- if _, _, errno := syscall.RawSyscall6(syscall.SYS_PTRACE, syscall.PTRACE_ATTACH, uintptr(t.tid), 0, 0, 0, 0); errno != 0 {
+ if _, _, errno := unix.RawSyscall6(unix.SYS_PTRACE, unix.PTRACE_ATTACH, uintptr(t.tid), 0, 0, 0, 0); errno != 0 {
panic(fmt.Sprintf("unable to attach: %v", errno))
}
@@ -277,7 +276,7 @@ func (t *thread) attach() {
// stopped from the SIGSTOP queued by CLONE_PTRACE (see inner loop of
// newSubprocess), so we always expect to see signal-delivery-stop with
// SIGSTOP.
- if sig := t.wait(stopped); sig != syscall.SIGSTOP {
+ if sig := t.wait(stopped); sig != unix.SIGSTOP {
panic(fmt.Sprintf("wait failed: expected SIGSTOP, got %v", sig))
}
@@ -301,7 +300,7 @@ func (t *thread) grabInitRegs() {
//
// Because the SIGSTOP is not suppressed, the thread will enter group-stop.
func (t *thread) detach() {
- if _, _, errno := syscall.RawSyscall6(syscall.SYS_PTRACE, syscall.PTRACE_DETACH, uintptr(t.tid), 0, uintptr(syscall.SIGSTOP), 0, 0); errno != 0 {
+ if _, _, errno := unix.RawSyscall6(unix.SYS_PTRACE, unix.PTRACE_DETACH, uintptr(t.tid), 0, uintptr(unix.SIGSTOP), 0, 0); errno != 0 {
panic(fmt.Sprintf("can't detach new clone: %v", errno))
}
}
@@ -331,14 +330,14 @@ func (t *thread) dumpAndPanic(message string) {
func (t *thread) unexpectedStubExit() {
msg, err := t.getEventMessage()
- status := syscall.WaitStatus(msg)
- if status.Signaled() && status.Signal() == syscall.SIGKILL {
+ status := unix.WaitStatus(msg)
+ if status.Signaled() && status.Signal() == unix.SIGKILL {
// SIGKILL can be only sent by a user or OOM-killer. In both
// these cases, we don't need to panic. There is no reasons to
// think that something wrong in gVisor.
log.Warningf("The ptrace stub process %v has been killed by SIGKILL.", t.tgid)
pid := os.Getpid()
- syscall.Tgkill(pid, pid, syscall.Signal(syscall.SIGKILL))
+ unix.Tgkill(pid, pid, unix.Signal(unix.SIGKILL))
}
t.dumpAndPanic(fmt.Sprintf("wait failed: the process %d:%d exited: %x (err %v)", t.tgid, t.tid, msg, err))
}
@@ -346,12 +345,12 @@ func (t *thread) unexpectedStubExit() {
// wait waits for a stop event.
//
// Precondition: outcome is a valid waitOutcome.
-func (t *thread) wait(outcome waitOutcome) syscall.Signal {
- var status syscall.WaitStatus
+func (t *thread) wait(outcome waitOutcome) unix.Signal {
+ var status unix.WaitStatus
for {
- r, err := syscall.Wait4(int(t.tid), &status, syscall.WALL|syscall.WUNTRACED, nil)
- if err == syscall.EINTR || err == syscall.EAGAIN {
+ r, err := unix.Wait4(int(t.tid), &status, unix.WALL|unix.WUNTRACED, nil)
+ if err == unix.EINTR || err == unix.EAGAIN {
// Wait was interrupted; wait again.
continue
} else if err != nil {
@@ -369,12 +368,12 @@ func (t *thread) wait(outcome waitOutcome) syscall.Signal {
if stopSig == 0 {
continue // Spurious stop.
}
- if stopSig == syscall.SIGTRAP {
- if status.TrapCause() == syscall.PTRACE_EVENT_EXIT {
+ if stopSig == unix.SIGTRAP {
+ if status.TrapCause() == unix.PTRACE_EVENT_EXIT {
t.unexpectedStubExit()
}
// Re-encode the trap cause the way it's expected.
- return stopSig | syscall.Signal(status.TrapCause()<<8)
+ return stopSig | unix.Signal(status.TrapCause()<<8)
}
// Not a trap signal.
return stopSig
@@ -382,7 +381,7 @@ func (t *thread) wait(outcome waitOutcome) syscall.Signal {
if !status.Exited() && !status.Signaled() {
t.dumpAndPanic(fmt.Sprintf("ptrace status unexpected: got %v, wanted exited", status))
}
- return syscall.Signal(status.ExitStatus())
+ return unix.Signal(status.ExitStatus())
default:
// Should not happen.
t.dumpAndPanic(fmt.Sprintf("unknown outcome: %v", outcome))
@@ -397,7 +396,7 @@ func (t *thread) wait(outcome waitOutcome) syscall.Signal {
// manually created threads.
func (t *thread) destroy() {
t.detach()
- syscall.Tgkill(int(t.tgid), int(t.tid), syscall.Signal(syscall.SIGKILL))
+ unix.Tgkill(int(t.tgid), int(t.tid), unix.Signal(unix.SIGKILL))
t.wait(killed)
}
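unix.Wait4 and unix.WaitStatus are drop-in replacements for their syscall counterparts, including the WALL/WUNTRACED options and the Signaled/TrapCause/ExitStatus accessors used in wait() above. A hedged standalone sketch reaping a short-lived child with the same call shape (assumes /bin/true exists on the host):

package main

import (
	"fmt"
	"os/exec"

	"golang.org/x/sys/unix"
)

func main() {
	// Start a child that exits immediately so there is something to reap.
	cmd := exec.Command("/bin/true")
	if err := cmd.Start(); err != nil {
		fmt.Println("start failed:", err)
		return
	}

	var status unix.WaitStatus
	for {
		pid, err := unix.Wait4(cmd.Process.Pid, &status, 0, nil)
		if err == unix.EINTR {
			continue // Interrupted; wait again, as in thread.wait above.
		}
		if err != nil {
			fmt.Println("wait4 failed:", err)
			return
		}
		fmt.Printf("pid %d exited=%v status=%d\n", pid, status.Exited(), status.ExitStatus())
		return
	}
}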
@@ -407,12 +406,12 @@ func (t *thread) init() {
// set PTRACE_O_EXITKILL to ensure that the unexpected exit of the
// sentry will immediately kill the associated stubs.
const PTRACE_O_EXITKILL = 0x100000
- _, _, errno := syscall.RawSyscall6(
- syscall.SYS_PTRACE,
- syscall.PTRACE_SETOPTIONS,
+ _, _, errno := unix.RawSyscall6(
+ unix.SYS_PTRACE,
+ unix.PTRACE_SETOPTIONS,
uintptr(t.tid),
0,
- syscall.PTRACE_O_TRACESYSGOOD|syscall.PTRACE_O_TRACEEXIT|PTRACE_O_EXITKILL,
+ unix.PTRACE_O_TRACESYSGOOD|unix.PTRACE_O_TRACEEXIT|PTRACE_O_EXITKILL,
0, 0)
if errno != 0 {
panic(fmt.Sprintf("ptrace set options failed: %v", errno))
@@ -434,17 +433,17 @@ func (t *thread) syscall(regs *arch.Registers) (uintptr, error) {
// Execute the syscall instruction. The task has to stop on the
// trap instruction which is right after the syscall
// instruction.
- if _, _, errno := syscall.RawSyscall6(syscall.SYS_PTRACE, syscall.PTRACE_CONT, uintptr(t.tid), 0, 0, 0, 0); errno != 0 {
+ if _, _, errno := unix.RawSyscall6(unix.SYS_PTRACE, unix.PTRACE_CONT, uintptr(t.tid), 0, 0, 0, 0); errno != 0 {
panic(fmt.Sprintf("ptrace syscall-enter failed: %v", errno))
}
sig := t.wait(stopped)
- if sig == syscall.SIGTRAP {
+ if sig == unix.SIGTRAP {
// Reached syscall-enter-stop.
break
} else {
// Some other signal caused a thread stop; ignore.
- if sig != syscall.SIGSTOP && sig != syscall.SIGCHLD {
+ if sig != unix.SIGSTOP && sig != unix.SIGCHLD {
log.Warningf("The thread %d:%d has been interrupted by %d", t.tgid, t.tid, sig)
}
continue
@@ -483,7 +482,7 @@ func (t *thread) syscallIgnoreInterrupt(
// NotifyInterrupt implements interrupt.Receiver.NotifyInterrupt.
func (t *thread) NotifyInterrupt() {
- syscall.Tgkill(int(t.tgid), int(t.tid), syscall.Signal(platform.SignalInterrupt))
+ unix.Tgkill(int(t.tgid), int(t.tid), unix.Signal(platform.SignalInterrupt))
}
// switchToApp is called from the main SwitchToApp entrypoint.
@@ -532,15 +531,15 @@ func (s *subprocess) switchToApp(c *context, ac arch.Context) bool {
for {
// Start running until the next system call.
if isSingleStepping(regs) {
- if _, _, errno := syscall.RawSyscall6(
- syscall.SYS_PTRACE,
+ if _, _, errno := unix.RawSyscall6(
+ unix.SYS_PTRACE,
unix.PTRACE_SYSEMU_SINGLESTEP,
uintptr(t.tid), 0, 0, 0, 0); errno != 0 {
panic(fmt.Sprintf("ptrace sysemu failed: %v", errno))
}
} else {
- if _, _, errno := syscall.RawSyscall6(
- syscall.SYS_PTRACE,
+ if _, _, errno := unix.RawSyscall6(
+ unix.SYS_PTRACE,
unix.PTRACE_SYSEMU,
uintptr(t.tid), 0, 0, 0, 0); errno != 0 {
panic(fmt.Sprintf("ptrace sysemu failed: %v", errno))
@@ -550,7 +549,7 @@ func (s *subprocess) switchToApp(c *context, ac arch.Context) bool {
// Wait for the syscall-enter stop.
sig := t.wait(stopped)
- if sig == syscall.SIGSTOP {
+ if sig == unix.SIGSTOP {
// SIGSTOP was delivered to another thread in the same thread
// group, which initiated another group stop. Just ignore it.
continue
@@ -571,7 +570,7 @@ func (s *subprocess) switchToApp(c *context, ac arch.Context) bool {
}
// Is it a system call?
- if sig == (syscallEvent | syscall.SIGTRAP) {
+ if sig == (syscallEvent | unix.SIGTRAP) {
s.arm64SyscallWorkaround(t, regs)
// Ensure registers are sane.
@@ -619,14 +618,14 @@ func (s *subprocess) syscall(sysno uintptr, args ...arch.SyscallArgument) (uintp
func (s *subprocess) MapFile(addr usermem.Addr, f memmap.File, fr memmap.FileRange, at usermem.AccessType, precommit bool) error {
var flags int
if precommit {
- flags |= syscall.MAP_POPULATE
+ flags |= unix.MAP_POPULATE
}
_, err := s.syscall(
- syscall.SYS_MMAP,
+ unix.SYS_MMAP,
arch.SyscallArgument{Value: uintptr(addr)},
arch.SyscallArgument{Value: uintptr(fr.Length())},
arch.SyscallArgument{Value: uintptr(at.Prot())},
- arch.SyscallArgument{Value: uintptr(flags | syscall.MAP_SHARED | syscall.MAP_FIXED)},
+ arch.SyscallArgument{Value: uintptr(flags | unix.MAP_SHARED | unix.MAP_FIXED)},
arch.SyscallArgument{Value: uintptr(f.FD())},
arch.SyscallArgument{Value: uintptr(fr.Start)})
return err
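Because the mmap above is executed inside the stub via ptrace, the flag combination is easiest to read in an ordinary local call. A rough sketch, assuming fd is an open shared-memory file and length is page-aligned; the real call in the stub additionally passes MAP_FIXED with an explicit address, which the high-level unix.Mmap helper does not expose.

package ptracedemo // hypothetical package name for this sketch

import "golang.org/x/sys/unix"

// mapShared maps fd shared and writable. MAP_POPULATE asks the kernel to
// pre-fault the pages when precommit is requested.
func mapShared(fd, length int, precommit bool) ([]byte, error) {
	flags := unix.MAP_SHARED
	if precommit {
		flags |= unix.MAP_POPULATE
	}
	return unix.Mmap(fd, 0, length, unix.PROT_READ|unix.PROT_WRITE, flags)
}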
@@ -653,7 +652,7 @@ func (s *subprocess) Unmap(addr usermem.Addr, length uint64) {
}
s.mu.Unlock()
_, err := s.syscall(
- syscall.SYS_MUNMAP,
+ unix.SYS_MUNMAP,
arch.SyscallArgument{Value: uintptr(addr)},
arch.SyscallArgument{Value: uintptr(length)})
if err != nil {
diff --git a/pkg/sentry/platform/ptrace/subprocess_amd64.go b/pkg/sentry/platform/ptrace/subprocess_amd64.go
index 04815282b..9252c0bd7 100644
--- a/pkg/sentry/platform/ptrace/subprocess_amd64.go
+++ b/pkg/sentry/platform/ptrace/subprocess_amd64.go
@@ -19,7 +19,6 @@ package ptrace
import (
"fmt"
"strings"
- "syscall"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
@@ -96,7 +95,7 @@ func updateSyscallRegs(regs *arch.Registers) {
func syscallReturnValue(regs *arch.Registers) (uintptr, error) {
rval := int64(regs.Rax)
if rval < 0 {
- return 0, syscall.Errno(-rval)
+ return 0, unix.Errno(-rval)
}
return uintptr(rval), nil
}
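unix.Errno is likewise a type alias for syscall.Errno, so this hunk changes nothing at runtime. The convention being decoded is that the raw syscall ABI reports errors as small negative values in the result register; a minimal sketch of that translation:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// decodeReturn converts a raw syscall return register into the Go
// (value, error) convention used above.
func decodeReturn(ret int64) (uintptr, error) {
	if ret < 0 {
		return 0, unix.Errno(-ret)
	}
	return uintptr(ret), nil
}

func main() {
	_, err := decodeReturn(-2) // -ENOENT
	fmt.Println(err)           // "no such file or directory"
}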
@@ -179,7 +178,7 @@ func patchSignalInfo(regs *arch.Registers, signalInfo *arch.SignalInfo) {
//
//go:nosplit
func enableCpuidFault() {
- syscall.RawSyscall6(syscall.SYS_ARCH_PRCTL, linux.ARCH_SET_CPUID, 0, 0, 0, 0, 0)
+ unix.RawSyscall6(unix.SYS_ARCH_PRCTL, linux.ARCH_SET_CPUID, 0, 0, 0, 0, 0)
}
// appendArchSeccompRules appends architecture-specific seccomp rules when creating the BPF program.
@@ -189,9 +188,9 @@ func appendArchSeccompRules(rules []seccomp.RuleSet, defaultAction linux.BPFActi
// Rules for trapping vsyscall access.
seccomp.RuleSet{
Rules: seccomp.SyscallRules{
- syscall.SYS_GETTIMEOFDAY: {},
- syscall.SYS_TIME: {},
- unix.SYS_GETCPU: {}, // SYS_GETCPU was not defined in package syscall on amd64.
+ unix.SYS_GETTIMEOFDAY: {},
+ unix.SYS_TIME: {},
+ unix.SYS_GETCPU: {}, // SYS_GETCPU was not defined in package syscall on amd64.
},
Action: linux.SECCOMP_RET_TRAP,
Vsyscall: true,
@@ -200,7 +199,7 @@ func appendArchSeccompRules(rules []seccomp.RuleSet, defaultAction linux.BPFActi
rules = append(rules,
seccomp.RuleSet{
Rules: seccomp.SyscallRules{
- syscall.SYS_ARCH_PRCTL: []seccomp.Rule{
+ unix.SYS_ARCH_PRCTL: []seccomp.Rule{
{seccomp.EqualTo(linux.ARCH_SET_CPUID), seccomp.EqualTo(0)},
},
},
@@ -227,19 +226,19 @@ func probeSeccomp() bool {
// Set registers to the yield system call. This call is not allowed
// by the filters specified in the attachedThread function.
- regs := createSyscallRegs(&t.initRegs, syscall.SYS_SCHED_YIELD)
+ regs := createSyscallRegs(&t.initRegs, unix.SYS_SCHED_YIELD)
if err := t.setRegs(&regs); err != nil {
panic(fmt.Sprintf("ptrace set regs failed: %v", err))
}
for {
// Attempt an emulation.
- if _, _, errno := syscall.RawSyscall6(syscall.SYS_PTRACE, unix.PTRACE_SYSEMU, uintptr(t.tid), 0, 0, 0, 0); errno != 0 {
+ if _, _, errno := unix.RawSyscall6(unix.SYS_PTRACE, unix.PTRACE_SYSEMU, uintptr(t.tid), 0, 0, 0, 0); errno != 0 {
panic(fmt.Sprintf("ptrace syscall-enter failed: %v", errno))
}
sig := t.wait(stopped)
- if sig == (syscallEvent | syscall.SIGTRAP) {
+ if sig == (syscallEvent | unix.SIGTRAP) {
// Did the seccomp errno hook already run? This would
// indicate that seccomp is first in line and we're
// less than 4.8.
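The 4.8 reference is to the kernel release that changed the ordering so that seccomp runs after the ptrace syscall-entry hooks, which is what this probe detects empirically. A hedged alternative sketch that simply reads the kernel release string via uname(2); version parsing is left out, and the runtime probe above is more robust on patched or vendor kernels.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// kernelRelease returns the running kernel's release string, e.g.
// "5.15.0-105-generic", which could be compared against 4.8.
func kernelRelease() (string, error) {
	var uts unix.Utsname
	if err := unix.Uname(&uts); err != nil {
		return "", err
	}
	// Release is a NUL-padded byte array.
	return unix.ByteSliceToString(uts.Release[:]), nil
}

func main() {
	r, err := kernelRelease()
	fmt.Println(r, err)
}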
diff --git a/pkg/sentry/platform/ptrace/subprocess_arm64.go b/pkg/sentry/platform/ptrace/subprocess_arm64.go
index 416132967..c0cbc0686 100644
--- a/pkg/sentry/platform/ptrace/subprocess_arm64.go
+++ b/pkg/sentry/platform/ptrace/subprocess_arm64.go
@@ -19,7 +19,6 @@ package ptrace
import (
"fmt"
"strings"
- "syscall"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
@@ -99,7 +98,7 @@ func updateSyscallRegs(regs *arch.Registers) {
func syscallReturnValue(regs *arch.Registers) (uintptr, error) {
rval := int64(regs.Regs[0])
if rval < 0 {
- return 0, syscall.Errno(-rval)
+ return 0, unix.Errno(-rval)
}
return uintptr(rval), nil
}
@@ -185,8 +184,8 @@ func (s *subprocess) arm64SyscallWorkaround(t *thread, regs *arch.Registers) {
// signal, resume a stub thread and catch it on a signal handling.
t.NotifyInterrupt()
for {
- if _, _, errno := syscall.RawSyscall6(
- syscall.SYS_PTRACE,
+ if _, _, errno := unix.RawSyscall6(
+ unix.SYS_PTRACE,
unix.PTRACE_SYSEMU,
uintptr(t.tid), 0, 0, 0, 0); errno != 0 {
panic(fmt.Sprintf("ptrace sysemu failed: %v", errno))
@@ -194,12 +193,12 @@ func (s *subprocess) arm64SyscallWorkaround(t *thread, regs *arch.Registers) {
// Wait for the syscall-enter stop.
sig := t.wait(stopped)
- if sig == syscall.SIGSTOP {
+ if sig == unix.SIGSTOP {
// SIGSTOP was delivered to another thread in the same thread
// group, which initiated another group stop. Just ignore it.
continue
}
- if sig == (syscallEvent | syscall.SIGTRAP) {
+ if sig == (syscallEvent | unix.SIGTRAP) {
t.dumpAndPanic(fmt.Sprintf("unexpected syscall event"))
}
break
diff --git a/pkg/sentry/platform/ptrace/subprocess_linux.go b/pkg/sentry/platform/ptrace/subprocess_linux.go
index 8548853da..4f0260432 100644
--- a/pkg/sentry/platform/ptrace/subprocess_linux.go
+++ b/pkg/sentry/platform/ptrace/subprocess_linux.go
@@ -18,8 +18,8 @@ package ptrace
import (
"fmt"
- "syscall"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/procid"
@@ -27,7 +27,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/arch"
)
-const syscallEvent syscall.Signal = 0x80
+const syscallEvent unix.Signal = 0x80
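The 0x80 value is the PTRACE_O_TRACESYSGOOD convention: with that option set (see the PTRACE_SETOPTIONS hunk earlier in this diff), syscall stops are reported as SIGTRAP with bit 0x80 ORed into the stop signal, which is how comparisons like (syscallEvent | SIGTRAP) elsewhere in the diff arise. A small illustrative helper, not part of the commit and simplified relative to the real wait logic:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

const syscallEvent = unix.Signal(0x80)

// classifyStop interprets a stop signal extracted from a wait(2) status,
// assuming PTRACE_O_TRACESYSGOOD is set on the tracee.
func classifyStop(sig unix.Signal) string {
	switch sig {
	case syscallEvent | unix.SIGTRAP:
		return "syscall-enter or syscall-exit stop"
	case unix.SIGTRAP:
		return "ordinary trap"
	case unix.SIGSTOP:
		return "group stop"
	default:
		return "signal-delivery stop"
	}
}

func main() {
	fmt.Println(classifyStop(syscallEvent | unix.SIGTRAP))
}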
// createStub creates a fresh stub process.
//
@@ -63,7 +63,7 @@ func createStub() (*thread, error) {
//
// In addition, we set the PTRACE_O_TRACEEXIT option to log more
// information about a stub process when it receives a fatal signal.
- return attachedThread(uintptr(syscall.SIGKILL)|syscall.CLONE_FILES, defaultAction)
+ return attachedThread(uintptr(unix.SIGKILL)|unix.CLONE_FILES, defaultAction)
}
// attachedThread returns a new attached thread.
@@ -78,38 +78,38 @@ func attachedThread(flags uintptr, defaultAction linux.BPFAction) (*thread, erro
if defaultAction != linux.SECCOMP_RET_ALLOW {
rules = append(rules, seccomp.RuleSet{
Rules: seccomp.SyscallRules{
- syscall.SYS_CLONE: []seccomp.Rule{
+ unix.SYS_CLONE: []seccomp.Rule{
// Allow creation of new subprocesses (used by the master).
- {seccomp.EqualTo(syscall.CLONE_FILES | syscall.SIGKILL)},
+ {seccomp.EqualTo(unix.CLONE_FILES | unix.SIGKILL)},
// Allow creation of new threads within a single address space (used by address spaces).
{seccomp.EqualTo(
- syscall.CLONE_FILES |
- syscall.CLONE_FS |
- syscall.CLONE_SIGHAND |
- syscall.CLONE_THREAD |
- syscall.CLONE_PTRACE |
- syscall.CLONE_VM)},
+ unix.CLONE_FILES |
+ unix.CLONE_FS |
+ unix.CLONE_SIGHAND |
+ unix.CLONE_THREAD |
+ unix.CLONE_PTRACE |
+ unix.CLONE_VM)},
},
// For the initial process creation.
- syscall.SYS_WAIT4: {},
- syscall.SYS_EXIT: {},
+ unix.SYS_WAIT4: {},
+ unix.SYS_EXIT: {},
// For the stub prctl dance (all).
- syscall.SYS_PRCTL: []seccomp.Rule{
- {seccomp.EqualTo(syscall.PR_SET_PDEATHSIG), seccomp.EqualTo(syscall.SIGKILL)},
+ unix.SYS_PRCTL: []seccomp.Rule{
+ {seccomp.EqualTo(unix.PR_SET_PDEATHSIG), seccomp.EqualTo(unix.SIGKILL)},
},
- syscall.SYS_GETPPID: {},
+ unix.SYS_GETPPID: {},
// For the stub to stop itself (all).
- syscall.SYS_GETPID: {},
- syscall.SYS_KILL: []seccomp.Rule{
- {seccomp.MatchAny{}, seccomp.EqualTo(syscall.SIGSTOP)},
+ unix.SYS_GETPID: {},
+ unix.SYS_KILL: []seccomp.Rule{
+ {seccomp.MatchAny{}, seccomp.EqualTo(unix.SIGSTOP)},
},
// Injected to support the address space operations.
- syscall.SYS_MMAP: {},
- syscall.SYS_MUNMAP: {},
+ unix.SYS_MMAP: {},
+ unix.SYS_MUNMAP: {},
},
Action: linux.SECCOMP_RET_ALLOW,
})
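Two clone(2) argument patterns are allowed above. The low byte of the flags word is the termination signal the parent receives when the child exits, so uintptr(SIGKILL) in the first pattern is not an ordinary flag bit; the second pattern is the conventional flag set for a thread that shares the creator's address space. A constants-only sketch:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// New stub subprocess: fresh address space, shared file table, and
	// SIGKILL (rather than the usual SIGCHLD) as the termination signal
	// reported to the parent.
	newProcess := uintptr(unix.SIGKILL) | unix.CLONE_FILES

	// New thread inside an existing stub: shares memory, files, FS state
	// and signal handlers, joins the creator's thread group, and is
	// automatically traced because the creator is traced (CLONE_PTRACE).
	newThread := uintptr(unix.CLONE_FILES | unix.CLONE_FS | unix.CLONE_SIGHAND |
		unix.CLONE_THREAD | unix.CLONE_PTRACE | unix.CLONE_VM)

	fmt.Printf("subprocess flags %#x, thread flags %#x\n", newProcess, newThread)
}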
@@ -125,17 +125,17 @@ func attachedThread(flags uintptr, defaultAction linux.BPFAction) (*thread, erro
var (
pid uintptr
ppid uintptr
- errno syscall.Errno
+ errno unix.Errno
)
// Remember the current ppid for the pdeathsig race.
- ppid, _, _ = syscall.RawSyscall(syscall.SYS_GETPID, 0, 0, 0)
+ ppid, _, _ = unix.RawSyscall(unix.SYS_GETPID, 0, 0, 0)
// Among other things, beforeFork masks all signals.
beforeFork()
// Do the clone.
- pid, _, errno = syscall.RawSyscall6(syscall.SYS_CLONE, flags, 0, 0, 0, 0, 0)
+ pid, _, errno = unix.RawSyscall6(unix.SYS_CLONE, flags, 0, 0, 0, 0, 0)
if errno != 0 {
afterFork()
return nil, errno
@@ -152,7 +152,7 @@ func attachedThread(flags uintptr, defaultAction linux.BPFAction) (*thread, erro
tid: int32(pid),
cpu: ^uint32(0),
}
- if sig := t.wait(stopped); sig != syscall.SIGSTOP {
+ if sig := t.wait(stopped); sig != unix.SIGSTOP {
return nil, fmt.Errorf("wait failed: expected SIGSTOP, got %v", sig)
}
t.attach()
@@ -165,8 +165,8 @@ func attachedThread(flags uintptr, defaultAction linux.BPFAction) (*thread, erro
// prevents the stub from getting PTY job control signals intended only
// for the sentry process. We must call this before restoring signal
// mask.
- if _, _, errno := syscall.RawSyscall(syscall.SYS_SETSID, 0, 0, 0); errno != 0 {
- syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0)
+ if _, _, errno := unix.RawSyscall(unix.SYS_SETSID, 0, 0, 0); errno != 0 {
+ unix.RawSyscall(unix.SYS_EXIT, uintptr(errno), 0, 0)
}
// afterForkInChild resets all signals to their default dispositions
@@ -176,13 +176,13 @@ func attachedThread(flags uintptr, defaultAction linux.BPFAction) (*thread, erro
// Explicitly unmask all signals to ensure that the tracer can see
// them.
if errno := unmaskAllSignals(); errno != 0 {
- syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0)
+ unix.RawSyscall(unix.SYS_EXIT, uintptr(errno), 0, 0)
}
// Set an aggressive BPF filter for the stub and all its children. See
// the description of the BPF program built above.
if errno := seccomp.SetFilter(instrs); errno != 0 {
- syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0)
+ unix.RawSyscall(unix.SYS_EXIT, uintptr(errno), 0, 0)
}
// Enable cpuid-faulting.
@@ -218,8 +218,8 @@ func (s *subprocess) createStub() (*thread, error) {
// See above re: SIGKILL.
pid, err := t.syscallIgnoreInterrupt(
&regs,
- syscall.SYS_CLONE,
- arch.SyscallArgument{Value: uintptr(syscall.SIGKILL | syscall.CLONE_FILES)},
+ unix.SYS_CLONE,
+ arch.SyscallArgument{Value: uintptr(unix.SIGKILL | unix.CLONE_FILES)},
arch.SyscallArgument{Value: 0},
arch.SyscallArgument{Value: 0},
arch.SyscallArgument{Value: 0},
@@ -237,10 +237,10 @@ func (s *subprocess) createStub() (*thread, error) {
// If the child actually exited, the attach below will fail.
_, err = t.syscallIgnoreInterrupt(
&t.initRegs,
- syscall.SYS_WAIT4,
+ unix.SYS_WAIT4,
arch.SyscallArgument{Value: uintptr(pid)},
arch.SyscallArgument{Value: 0},
- arch.SyscallArgument{Value: syscall.WALL | syscall.WUNTRACED},
+ arch.SyscallArgument{Value: unix.WALL | unix.WUNTRACED},
arch.SyscallArgument{Value: 0},
arch.SyscallArgument{Value: 0},
arch.SyscallArgument{Value: 0})
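Because the stubs are cloned with a termination signal other than SIGCHLD, they count as "clone children" and a plain wait would not report them; __WALL covers them, and WUNTRACED additionally reports stopped (not just exited) children. A minimal local sketch of the injected wait4, with pid as a hypothetical stub PID:

package ptracedemo // hypothetical package name for this sketch

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// waitForStub waits for an exit or stop of the given stub process,
// mirroring the wait4 arguments injected above.
func waitForStub(pid int) (unix.WaitStatus, error) {
	var status unix.WaitStatus
	if _, err := unix.Wait4(pid, &status, unix.WALL|unix.WUNTRACED, nil); err != nil {
		return 0, fmt.Errorf("wait4(%d): %w", pid, err)
	}
	return status, nil
}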
diff --git a/pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go b/pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go
index 533e45497..9c342c59b 100644
--- a/pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go
+++ b/pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go
@@ -18,17 +18,17 @@
package ptrace
import (
- "syscall"
"unsafe"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
)
// unmaskAllSignals unmasks all signals on the current thread.
//
//go:nosplit
-func unmaskAllSignals() syscall.Errno {
+func unmaskAllSignals() unix.Errno {
var set linux.SignalSet
- _, _, errno := syscall.RawSyscall6(syscall.SYS_RT_SIGPROCMASK, linux.SIG_SETMASK, uintptr(unsafe.Pointer(&set)), 0, linux.SignalSetSize, 0, 0)
+ _, _, errno := unix.RawSyscall6(unix.SYS_RT_SIGPROCMASK, linux.SIG_SETMASK, uintptr(unsafe.Pointer(&set)), 0, linux.SignalSetSize, 0, 0)
return errno
}
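A self-contained version of the same call, with a plain uint64 standing in for gVisor's linux.SignalSet and the Linux sigsetsize of 8 written out explicitly. In the stub this runs right after fork, before any Go runtime concerns apply; an ordinary Go program normally leaves its signal mask to the runtime.

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// unmaskAll clears the calling thread's signal mask via rt_sigprocmask(2).
func unmaskAll() unix.Errno {
	var set uint64 // empty set: block nothing
	_, _, errno := unix.RawSyscall6(unix.SYS_RT_SIGPROCMASK,
		unix.SIG_SETMASK,
		uintptr(unsafe.Pointer(&set)),
		0, // no old set requested
		8, // sigsetsize: _NSIG/8 on Linux
		0, 0)
	return errno
}

func main() {
	if errno := unmaskAll(); errno != 0 {
		fmt.Println("rt_sigprocmask failed:", errno)
	}
}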