Diffstat (limited to 'pkg/sentry/mm/syscalls.go')
-rw-r--r--  pkg/sentry/mm/syscalls.go  |  90
1 file changed, 46 insertions(+), 44 deletions(-)
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
index 7ad6b7c21..256eb4afb 100644
--- a/pkg/sentry/mm/syscalls.go
+++ b/pkg/sentry/mm/syscalls.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/kernel/futex"
@@ -36,7 +37,7 @@ import (
func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr hostarch.Addr, at hostarch.AccessType, sp hostarch.Addr) error {
ar, ok := addr.RoundDown().ToRange(hostarch.PageSize)
if !ok {
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
// Don't bother trying existingPMAsLocked; in most cases, if we did have
@@ -74,7 +75,7 @@ func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr hostarch.Addr
// MMap establishes a memory mapping.
func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (hostarch.Addr, error) {
if opts.Length == 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
length, ok := hostarch.Addr(opts.Length).RoundUp()
if !ok {
@@ -85,7 +86,7 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (hostar
if opts.Mappable != nil {
// Offset must be aligned.
if hostarch.Addr(opts.Offset).RoundDown() != hostarch.Addr(opts.Offset) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Offset + length must not overflow.
if end := opts.Offset + opts.Length; end < opts.Offset {
@@ -99,19 +100,19 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (hostar
// MAP_FIXED requires addr to be page-aligned; non-fixed mappings
// don't.
if opts.Fixed {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
opts.Addr = opts.Addr.RoundDown()
}
if !opts.MaxPerms.SupersetOf(opts.Perms) {
- return 0, syserror.EACCES
+ return 0, linuxerr.EACCES
}
if opts.Unmap && !opts.Fixed {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if opts.GrowsDown && opts.Mappable != nil {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Get the new vma.
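
Aside on the EACCES case above: hostarch.AccessType.SupersetOf, used verbatim in this hunk, ties a mapping's requested permissions to its maximum permissions, e.g. those implied by how the backing file was opened. A small illustrative program; the permission values are invented:

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/pkg/hostarch"
    )

    func main() {
        // Hypothetical request: a writable mapping of a read-only file.
        perms := hostarch.AccessType{Read: true, Write: true}
        maxPerms := hostarch.AccessType{Read: true}
        // false is exactly the case where MMap returns EACCES.
        fmt.Println(maxPerms.SupersetOf(perms))
    }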
@@ -203,6 +204,7 @@ func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar h
// * vseg.Range().IsSupersetOf(ar).
//
// Postconditions: mm.mappingMu will be unlocked.
+// +checklocksrelease:mm.mappingMu
func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, precommit bool) {
// See populateVMA above for commentary.
if !vseg.ValuePtr().effectivePerms.Any() {
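
The only non-mechanical change in this hunk is the +checklocksrelease annotation, which tells gVisor's checklocks static analyzer what the prose postcondition already says: populateVMAAndUnlock returns with mm.mappingMu unlocked. A hedged sketch of the annotation on an invented type (counter and its lock are illustrative, not from the source):

    package main

    import "sync"

    type counter struct {
        mu sync.Mutex
        n  int
    }

    // incrementAndUnlock must be entered with c.mu held; the annotation
    // promises the analyzer that c.mu is released before returning.
    // +checklocksrelease:c.mu
    func (c *counter) incrementAndUnlock() int {
        c.n++
        c.mu.Unlock()
        return c.n
    }

    func main() {
        c := &counter{}
        c.mu.Lock()
        _ = c.incrementAndUnlock()
    }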
@@ -281,18 +283,18 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (hostarch.AddrRange, erro
// MUnmap implements the semantics of Linux's munmap(2).
func (mm *MemoryManager) MUnmap(ctx context.Context, addr hostarch.Addr, length uint64) error {
if addr != addr.RoundDown() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if length == 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
la, ok := hostarch.Addr(length).RoundUp()
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
ar, ok := addr.ToRange(uint64(la))
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
mm.mappingMu.Lock()
@@ -331,7 +333,7 @@ const (
func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldSize uint64, newSize uint64, opts MRemapOpts) (hostarch.Addr, error) {
// "Note that old_address has to be page aligned." - mremap(2)
if oldAddr.RoundDown() != oldAddr {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Linux treats an old_size that rounds up to 0 as 0, which is otherwise a
@@ -340,13 +342,13 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
oldSize = uint64(oldSizeAddr)
newSizeAddr, ok := hostarch.Addr(newSize).RoundUp()
if !ok || newSizeAddr == 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
newSize = uint64(newSizeAddr)
oldEnd, ok := oldAddr.AddLength(oldSize)
if !ok {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
mm.mappingMu.Lock()
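
The rounding above is subtle: an old_size whose round-up wraps is treated as 0 (the ok result is discarded), a new_size that is 0 after rounding is EINVAL, and oldAddr+oldSize must not overflow. A self-contained sketch of the (value, ok) contract these RoundUp calls rely on, using plain integers rather than hostarch.Addr:

    package main

    import "fmt"

    const pageSize = 4096

    // roundUp mirrors the (addr, ok) contract used above: ok is false
    // when rounding to the next page boundary overflows, in which case
    // the rounded value is 0.
    func roundUp(v uint64) (uint64, bool) {
        r := v + pageSize - 1
        if r < v {
            return 0, false
        }
        return r &^ (pageSize - 1), true
    }

    func main() {
        fmt.Println(roundUp(1))         // 4096 true
        fmt.Println(roundUp(1<<64 - 1)) // 0 false (wrapped)
    }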
@@ -355,7 +357,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
// All cases require that a vma exists at oldAddr.
vseg := mm.vmas.FindSegment(oldAddr)
if !vseg.Ok() {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// Behavior matrix:
@@ -379,7 +381,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
mlockLimit := limits.FromContext(ctx).Get(limits.MemoryLocked).Cur
if creds := auth.CredentialsFromContext(ctx); !creds.HasCapabilityIn(linux.CAP_IPC_LOCK, creds.UserNamespace.Root()) {
if newLockedAS := mm.lockedAS - oldSize + newSize; newLockedAS > mlockLimit {
- return 0, syserror.EAGAIN
+ return 0, linuxerr.EAGAIN
}
}
}
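
To make the locked-address-space check concrete, a worked example with invented sizes (only a task holding CAP_IPC_LOCK in the root user namespace skips the check):

    package main

    import "fmt"

    func main() {
        // Illustrative values, not from the source.
        lockedAS := uint64(64 << 10)   // 64 KiB currently mlocked
        oldSize := uint64(16 << 10)    // remapping a locked 16 KiB vma...
        newSize := uint64(64 << 10)    // ...to 64 KiB
        mlockLimit := uint64(64 << 10) // RLIMIT_MEMLOCK soft limit

        if newLockedAS := lockedAS - oldSize + newSize; newLockedAS > mlockLimit {
            fmt.Println("EAGAIN: would lock", newLockedAS>>10, "KiB") // 112 KiB
        }
    }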
@@ -402,7 +404,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
// Check that oldEnd maps to the same vma as oldAddr.
if vseg.End() < oldEnd {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// "Grow" the existing vma by creating a new mergeable one.
vma := vseg.ValuePtr()
@@ -450,15 +452,15 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
case MRemapMustMove:
newAddr := opts.NewAddr
if newAddr.RoundDown() != newAddr {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
var ok bool
newAR, ok = newAddr.ToRange(newSize)
if !ok {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if (hostarch.AddrRange{oldAddr, oldEnd}).Overlaps(newAR) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Check that the new region is valid.
@@ -492,7 +494,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
// Check that oldEnd maps to the same vma as oldAddr.
if vseg.End() < oldEnd {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// Check against RLIMIT_AS.
@@ -504,7 +506,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
if vma := vseg.ValuePtr(); vma.mappable != nil {
// Check that offset+length does not overflow.
if vma.off+uint64(newAR.Length()) < vma.off {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Inform the Mappable, if any, of the new mapping.
if err := vma.mappable.CopyMapping(ctx, mm, oldAR, newAR, vseg.mappableOffsetAt(oldAR.Start), vma.canWriteMappableLocked()); err != nil {
@@ -590,7 +592,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
// MProtect implements the semantics of Linux's mprotect(2).
func (mm *MemoryManager) MProtect(addr hostarch.Addr, length uint64, realPerms hostarch.AccessType, growsDown bool) error {
if addr.RoundDown() != addr {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if length == 0 {
return nil
@@ -618,7 +620,7 @@ func (mm *MemoryManager) MProtect(addr hostarch.Addr, length uint64, realPerms h
}
if growsDown {
if !vseg.ValuePtr().growsDown {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if ar.End <= vseg.Start() {
return syserror.ENOMEM
@@ -644,7 +646,7 @@ func (mm *MemoryManager) MProtect(addr hostarch.Addr, length uint64, realPerms h
// Check for permission validity before splitting vmas, for consistency
// with Linux.
if !vseg.ValuePtr().maxPerms.SupersetOf(effectivePerms) {
- return syserror.EACCES
+ return linuxerr.EACCES
}
vseg = mm.vmas.Isolate(vseg, ar)
@@ -711,7 +713,7 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr hostarch.Addr) (hostarch.
if addr < mm.brk.Start {
addr = mm.brk.End
mm.mappingMu.Unlock()
- return addr, syserror.EINVAL
+ return addr, linuxerr.EINVAL
}
// TODO(gvisor.dev/issue/156): This enforces RLIMIT_DATA, but is
@@ -730,7 +732,7 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr hostarch.Addr) (hostarch.
if !ok {
addr = mm.brk.End
mm.mappingMu.Unlock()
- return addr, syserror.EFAULT
+ return addr, linuxerr.EFAULT
}
switch {
@@ -780,7 +782,7 @@ func (mm *MemoryManager) MLock(ctx context.Context, addr hostarch.Addr, length u
la, _ := hostarch.Addr(length + addr.PageOffset()).RoundUp()
ar, ok := addr.RoundDown().ToRange(uint64(la))
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
mm.mappingMu.Lock()
@@ -792,7 +794,7 @@ func (mm *MemoryManager) MLock(ctx context.Context, addr hostarch.Addr, length u
mlockLimit := limits.FromContext(ctx).Get(limits.MemoryLocked).Cur
if mlockLimit == 0 {
mm.mappingMu.Unlock()
- return syserror.EPERM
+ return linuxerr.EPERM
}
if newLockedAS := mm.lockedAS + uint64(ar.Length()) - mm.mlockedBytesRangeLocked(ar); newLockedAS > mlockLimit {
mm.mappingMu.Unlock()
@@ -855,11 +857,11 @@ func (mm *MemoryManager) MLock(ctx context.Context, addr hostarch.Addr, length u
mm.activeMu.Unlock()
mm.mappingMu.RUnlock()
// Linux: mm/mlock.c:__mlock_posix_error_return()
- if err == syserror.EFAULT {
+ if linuxerr.Equals(linuxerr.EFAULT, err) {
return syserror.ENOMEM
}
- if err == syserror.ENOMEM {
- return syserror.EAGAIN
+ if linuxerr.Equals(linuxerr.ENOMEM, err) {
+ return linuxerr.EAGAIN
}
return err
}
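
The remapping above mirrors Linux's mm/mlock.c:__mlock_posix_error_return(): a fault on an unmapped range surfaces to userspace as ENOMEM, and failure to fault the pages in surfaces as EAGAIN. Restated as a standalone helper (a sketch; note the diff itself still returns syserror.ENOMEM at this stage of the migration):

    package mlock // hypothetical package, for illustration only

    import (
        "gvisor.dev/gvisor/pkg/errors/linuxerr"
        "gvisor.dev/gvisor/pkg/syserror"
    )

    // mlockPosixError restates the translation done at the end of MLock.
    func mlockPosixError(err error) error {
        if linuxerr.Equals(linuxerr.EFAULT, err) {
            return syserror.ENOMEM // unmapped hole in the requested range
        }
        if linuxerr.Equals(linuxerr.ENOMEM, err) {
            return linuxerr.EAGAIN // could not fault the pages in
        }
        return err
    }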
@@ -898,7 +900,7 @@ type MLockAllOpts struct {
// depending on opts.
func (mm *MemoryManager) MLockAll(ctx context.Context, opts MLockAllOpts) error {
if !opts.Current && !opts.Future {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
mm.mappingMu.Lock()
@@ -911,7 +913,7 @@ func (mm *MemoryManager) MLockAll(ctx context.Context, opts MLockAllOpts) error
mlockLimit := limits.FromContext(ctx).Get(limits.MemoryLocked).Cur
if mlockLimit == 0 {
mm.mappingMu.Unlock()
- return syserror.EPERM
+ return linuxerr.EPERM
}
if uint64(mm.vmas.Span()) > mlockLimit {
mm.mappingMu.Unlock()
@@ -970,7 +972,7 @@ func (mm *MemoryManager) NumaPolicy(addr hostarch.Addr) (linux.NumaPolicy, uint6
defer mm.mappingMu.RUnlock()
vseg := mm.vmas.FindSegment(addr)
if !vseg.Ok() {
- return 0, 0, syserror.EFAULT
+ return 0, 0, linuxerr.EFAULT
}
vma := vseg.ValuePtr()
return vma.numaPolicy, vma.numaNodemask, nil
@@ -979,13 +981,13 @@ func (mm *MemoryManager) NumaPolicy(addr hostarch.Addr) (linux.NumaPolicy, uint6
// SetNumaPolicy implements the semantics of Linux's mbind().
func (mm *MemoryManager) SetNumaPolicy(addr hostarch.Addr, length uint64, policy linux.NumaPolicy, nodemask uint64) error {
if !addr.IsPageAligned() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// Linux allows this to overflow.
la, _ := hostarch.Addr(length).RoundUp()
ar, ok := addr.ToRange(uint64(la))
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if ar.Length() == 0 {
return nil
@@ -1003,7 +1005,7 @@ func (mm *MemoryManager) SetNumaPolicy(addr hostarch.Addr, length uint64, policy
if !vseg.Ok() || lastEnd < vseg.Start() {
// "EFAULT: ... there was an unmapped hole in the specified memory
// range specified [sic] by addr and len." - mbind(2)
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
vseg = mm.vmas.Isolate(vseg, ar)
vma := vseg.ValuePtr()
@@ -1021,7 +1023,7 @@ func (mm *MemoryManager) SetNumaPolicy(addr hostarch.Addr, length uint64, policy
func (mm *MemoryManager) SetDontFork(addr hostarch.Addr, length uint64, dontfork bool) error {
ar, ok := addr.ToRange(length)
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
mm.mappingMu.Lock()
@@ -1047,7 +1049,7 @@ func (mm *MemoryManager) SetDontFork(addr hostarch.Addr, length uint64, dontfork
func (mm *MemoryManager) Decommit(addr hostarch.Addr, length uint64) error {
ar, ok := addr.ToRange(length)
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
mm.mappingMu.RLock()
@@ -1063,7 +1065,7 @@ func (mm *MemoryManager) Decommit(addr hostarch.Addr, length uint64) error {
for vseg := mm.vmas.LowerBoundSegment(ar.Start); vseg.Ok() && vseg.Start() < ar.End; vseg = vseg.NextSegment() {
vma := vseg.ValuePtr()
if vma.mlockMode != memmap.MLockNone {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
vsegAR := vseg.Range().Intersect(ar)
// pseg should already correspond to either this vma or a later one,
@@ -1114,7 +1116,7 @@ type MSyncOpts struct {
// MSync implements the semantics of Linux's msync().
func (mm *MemoryManager) MSync(ctx context.Context, addr hostarch.Addr, length uint64, opts MSyncOpts) error {
if addr != addr.RoundDown() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if length == 0 {
return nil
@@ -1150,7 +1152,7 @@ func (mm *MemoryManager) MSync(ctx context.Context, addr hostarch.Addr, length u
vma := vseg.ValuePtr()
if opts.Invalidate && vma.mlockMode != memmap.MLockNone {
mm.mappingMu.RUnlock()
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
// It's only possible to have dirtied the Mappable through a shared
// mapping. Don't check if the mapping is writable, because mprotect
@@ -1191,7 +1193,7 @@ func (mm *MemoryManager) MSync(ctx context.Context, addr hostarch.Addr, length u
func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr hostarch.Addr) (futex.Key, error) {
ar, ok := addr.ToRange(4) // sizeof(int32).
if !ok {
- return futex.Key{}, syserror.EFAULT
+ return futex.Key{}, linuxerr.EFAULT
}
mm.mappingMu.RLock()