author     Zach Koopmans <zkoopmans@google.com>   2021-03-29 13:28:32 -0700
committer  gVisor bot <gvisor-bot@google.com>     2021-03-29 13:30:21 -0700
commit     8a2f7e716dcc62f04d2808e8ade34941c94fc956
tree       b2195d5728dcbc4f4e59c23ad95d7486ef744371 /pkg/sentry/mm
parent     b125afba416ebeba906ea595a44a55afe4729d64
[syserror] Split usermem package
Split usermem package to help remove syserror dependency in go_marshal. New hostarch package contains code not dependent on syserror.

PiperOrigin-RevId: 365651233
Diffstat (limited to 'pkg/sentry/mm')
-rw-r--r--  pkg/sentry/mm/BUILD               |  14
-rw-r--r--  pkg/sentry/mm/address_space.go    |  10
-rw-r--r--  pkg/sentry/mm/aio_context.go      |  21
-rw-r--r--  pkg/sentry/mm/io.go               |  75
-rw-r--r--  pkg/sentry/mm/lifecycle.go        |   4
-rw-r--r--  pkg/sentry/mm/metadata.go         |  18
-rw-r--r--  pkg/sentry/mm/mm.go               |  24
-rw-r--r--  pkg/sentry/mm/mm_test.go          |  35
-rw-r--r--  pkg/sentry/mm/pma.go              |  74
-rw-r--r--  pkg/sentry/mm/procfs.go           |  20
-rw-r--r--  pkg/sentry/mm/shm.go              |   6
-rw-r--r--  pkg/sentry/mm/special_mappable.go |  14
-rw-r--r--  pkg/sentry/mm/syscalls.go         | 106
-rw-r--r--  pkg/sentry/mm/vma.go              |  86
14 files changed, 256 insertions, 251 deletions
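
For readers making the same migration in other packages, the pattern is mechanical: address types (Addr, AddrRange, AccessType, PageSize, and friends) now come from pkg/hostarch instead of pkg/usermem. A minimal, hypothetical helper showing the new spelling (it only uses rounding methods that this diff itself exercises):

package example

import "gvisor.dev/gvisor/pkg/hostarch"

// pageAlign widens ar to page boundaries using the relocated address
// types; before this change the same code would have been written with
// usermem.Addr and usermem.AddrRange.
func pageAlign(ar hostarch.AddrRange) (hostarch.AddrRange, bool) {
	end, ok := ar.End.RoundUp()
	if !ok {
		return hostarch.AddrRange{}, false
	}
	return hostarch.AddrRange{ar.Start.RoundDown(), end}, true
}
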
diff --git a/pkg/sentry/mm/BUILD b/pkg/sentry/mm/BUILD
index 6dbeccfe2..b417c2da7 100644
--- a/pkg/sentry/mm/BUILD
+++ b/pkg/sentry/mm/BUILD
@@ -28,14 +28,14 @@ go_template_instance(
"trackGaps": "1",
},
imports = {
- "usermem": "gvisor.dev/gvisor/pkg/usermem",
+ "hostarch": "gvisor.dev/gvisor/pkg/hostarch",
},
package = "mm",
prefix = "vma",
template = "//pkg/segment:generic_set",
types = {
- "Key": "usermem.Addr",
- "Range": "usermem.AddrRange",
+ "Key": "hostarch.Addr",
+ "Range": "hostarch.AddrRange",
"Value": "vma",
"Functions": "vmaSetFunctions",
},
@@ -48,14 +48,14 @@ go_template_instance(
"minDegree": "8",
},
imports = {
- "usermem": "gvisor.dev/gvisor/pkg/usermem",
+ "hostarch": "gvisor.dev/gvisor/pkg/hostarch",
},
package = "mm",
prefix = "pma",
template = "//pkg/segment:generic_set",
types = {
- "Key": "usermem.Addr",
- "Range": "usermem.AddrRange",
+ "Key": "hostarch.Addr",
+ "Range": "hostarch.AddrRange",
"Value": "pma",
"Functions": "pmaSetFunctions",
},
@@ -125,6 +125,7 @@ go_library(
"//pkg/abi/linux",
"//pkg/atomicbitops",
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/log",
"//pkg/refs",
"//pkg/refsvfs2",
@@ -155,6 +156,7 @@ go_test(
library = ":mm",
deps = [
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/sentry/arch",
"//pkg/sentry/contexttest",
"//pkg/sentry/limits",
diff --git a/pkg/sentry/mm/address_space.go b/pkg/sentry/mm/address_space.go
index a93e76c75..534e0e957 100644
--- a/pkg/sentry/mm/address_space.go
+++ b/pkg/sentry/mm/address_space.go
@@ -19,8 +19,8 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/usermem"
)
// AddressSpace returns the platform.AddressSpace bound to mm.
@@ -172,17 +172,17 @@ func (mm *MemoryManager) Deactivate() {
// * ar.Length() != 0.
// * ar must be page-aligned.
// * pseg == mm.pmas.LowerBoundSegment(ar.Start).
-func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar usermem.AddrRange, precommit bool) error {
+func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar hostarch.AddrRange, precommit bool) error {
// By default, map entire pmas at a time, under the assumption that there
// is no cost to mapping more of a pma than necessary.
- mapAR := usermem.AddrRange{0, ^usermem.Addr(usermem.PageSize - 1)}
+ mapAR := hostarch.AddrRange{0, ^hostarch.Addr(hostarch.PageSize - 1)}
if precommit {
// When explicitly precommitting, only map ar, since overmapping may
// incur unexpected resource usage.
mapAR = ar
} else if mapUnit := mm.p.MapUnit(); mapUnit != 0 {
// Limit the range we map to ar, aligned to mapUnit.
- mapMask := usermem.Addr(mapUnit - 1)
+ mapMask := hostarch.Addr(mapUnit - 1)
mapAR.Start = ar.Start &^ mapMask
// If rounding ar.End up overflows, just keep the existing mapAR.End.
if end := (ar.End + mapMask) &^ mapMask; end >= ar.End {
@@ -218,7 +218,7 @@ func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar usermem.AddrRange, pre
// unmapASLocked removes all AddressSpace mappings for addresses in ar.
//
// Preconditions: mm.activeMu must be locked.
-func (mm *MemoryManager) unmapASLocked(ar usermem.AddrRange) {
+func (mm *MemoryManager) unmapASLocked(ar hostarch.AddrRange) {
if mm.as == nil {
// No AddressSpace? Force all mappings to be unmapped on the next
// Activate.
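
The mapUnit handling in mapASLocked above relies on mask arithmetic that only works for power-of-two units. A standalone, hypothetical sketch of the same rounding (the function name is illustrative, not part of the package):

package example

import "gvisor.dev/gvisor/pkg/hostarch"

// expandToMapUnit mirrors the rounding in mapASLocked: it widens ar to
// mapUnit-aligned boundaries, but leaves the end unchanged if rounding up
// would overflow. mapUnit must be a power of two.
func expandToMapUnit(ar hostarch.AddrRange, mapUnit uint64) hostarch.AddrRange {
	mask := hostarch.Addr(mapUnit - 1)
	out := ar
	out.Start = ar.Start &^ mask
	if end := (ar.End + mask) &^ mask; end >= ar.End {
		out.End = end
	}
	return out
}
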
diff --git a/pkg/sentry/mm/aio_context.go b/pkg/sentry/mm/aio_context.go
index 5ab2ef79f..346866d3c 100644
--- a/pkg/sentry/mm/aio_context.go
+++ b/pkg/sentry/mm/aio_context.go
@@ -17,6 +17,7 @@ package mm
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
@@ -83,7 +84,7 @@ func (mm *MemoryManager) destroyAIOContextLocked(ctx context.Context, id uint64)
// the same address. Then it would be unmapping memory that it doesn't own.
// This is, however, the way Linux implements AIO. Keeps the same [weird]
// semantics in case anyone relies on it.
- mm.MUnmap(ctx, usermem.Addr(id), aioRingBufferSize)
+ mm.MUnmap(ctx, hostarch.Addr(id), aioRingBufferSize)
delete(mm.aioManager.contexts, id)
aioCtx.destroy()
@@ -259,7 +260,7 @@ type aioMappable struct {
fr memmap.FileRange
}
-var aioRingBufferSize = uint64(usermem.Addr(linux.AIORingSize).MustRoundUp())
+var aioRingBufferSize = uint64(hostarch.Addr(linux.AIORingSize).MustRoundUp())
func newAIOMappable(mfp pgalloc.MemoryFileProvider) (*aioMappable, error) {
fr, err := mfp.MemoryFile().Allocate(aioRingBufferSize, usage.Anonymous)
@@ -300,7 +301,7 @@ func (m *aioMappable) Msync(ctx context.Context, mr memmap.MappableRange) error
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (m *aioMappable) AddMapping(_ context.Context, _ memmap.MappingSpace, ar usermem.AddrRange, offset uint64, _ bool) error {
+func (m *aioMappable) AddMapping(_ context.Context, _ memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, _ bool) error {
// Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap()
// sets VM_DONTEXPAND).
if offset != 0 || uint64(ar.Length()) != aioRingBufferSize {
@@ -310,11 +311,11 @@ func (m *aioMappable) AddMapping(_ context.Context, _ memmap.MappingSpace, ar us
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (m *aioMappable) RemoveMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, uint64, bool) {
+func (m *aioMappable) RemoveMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, uint64, bool) {
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, _ bool) error {
+func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, _ bool) error {
// Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap()
// sets VM_DONTEXPAND).
if offset != 0 || uint64(dstAR.Length()) != aioRingBufferSize {
@@ -346,7 +347,7 @@ func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, s
}
// Translate implements memmap.Mappable.Translate.
-func (m *aioMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (m *aioMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
var err error
if required.End > m.fr.Length() {
err = &memmap.BusError{syserror.EFAULT}
@@ -357,7 +358,7 @@ func (m *aioMappable) Translate(ctx context.Context, required, optional memmap.M
Source: source,
File: m.mfp.MemoryFile(),
Offset: m.fr.Start + source.Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
},
}, err
}
@@ -389,8 +390,8 @@ func (mm *MemoryManager) NewAIOContext(ctx context.Context, events uint32) (uint
// Linux uses "do_mmap_pgoff(..., PROT_READ | PROT_WRITE, ...)" in
// fs/aio.c:aio_setup_ring(). Since we don't implement AIO_RING_MAGIC,
// user mode should not write to this page.
- Perms: usermem.Read,
- MaxPerms: usermem.Read,
+ Perms: hostarch.Read,
+ MaxPerms: hostarch.Read,
})
if err != nil {
return 0, err
@@ -435,6 +436,6 @@ func (mm *MemoryManager) LookupAIOContext(ctx context.Context, id uint64) (*AIOC
// bytes from id).
func (mm *MemoryManager) isValidAddr(ctx context.Context, id uint64) bool {
var buf [4]byte
- _, err := mm.CopyIn(ctx, usermem.Addr(id), buf[:], usermem.IOOpts{})
+ _, err := mm.CopyIn(ctx, hostarch.Addr(id), buf[:], usermem.IOOpts{})
return err == nil
}
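
Only the address and access types move in this change: the CopyIn call in isValidAddr above still passes usermem.IOOpts, so callers typically end up importing both packages. A hypothetical caller outside this package:

package example

import (
	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/sentry/mm"
	"gvisor.dev/gvisor/pkg/usermem"
)

// probe reports whether 4 bytes are readable at addr: the address uses the
// new hostarch type, while the IO options remain in usermem.
func probe(ctx context.Context, m *mm.MemoryManager, addr hostarch.Addr) bool {
	var buf [4]byte
	_, err := m.CopyIn(ctx, addr, buf[:], usermem.IOOpts{})
	return err == nil
}
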
diff --git a/pkg/sentry/mm/io.go b/pkg/sentry/mm/io.go
index a8ac48080..16f318ab3 100644
--- a/pkg/sentry/mm/io.go
+++ b/pkg/sentry/mm/io.go
@@ -16,6 +16,7 @@ package mm
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/syserror"
@@ -60,11 +61,11 @@ const (
rwMapMinBytes = 512
)
-// CheckIORange is similar to usermem.Addr.ToRange, but applies bounds checks
+// CheckIORange is similar to hostarch.Addr.ToRange, but applies bounds checks
// consistent with Linux's arch/x86/include/asm/uaccess.h:access_ok().
//
// Preconditions: length >= 0.
-func (mm *MemoryManager) CheckIORange(addr usermem.Addr, length int64) (usermem.AddrRange, bool) {
+func (mm *MemoryManager) CheckIORange(addr hostarch.Addr, length int64) (hostarch.AddrRange, bool) {
// Note that access_ok() constrains end even if length == 0.
ar, ok := addr.ToRange(uint64(length))
return ar, (ok && ar.End <= mm.layout.MaxAddr)
@@ -72,7 +73,7 @@ func (mm *MemoryManager) CheckIORange(addr usermem.Addr, length int64) (usermem.
// checkIOVec applies bound checks consistent with Linux's
// arch/x86/include/asm/uaccess.h:access_ok() to ars.
-func (mm *MemoryManager) checkIOVec(ars usermem.AddrRangeSeq) bool {
+func (mm *MemoryManager) checkIOVec(ars hostarch.AddrRangeSeq) bool {
for !ars.IsEmpty() {
ar := ars.Head()
if _, ok := mm.CheckIORange(ar.Start, int64(ar.Length())); !ok {
@@ -100,7 +101,7 @@ func translateIOError(ctx context.Context, err error) error {
}
// CopyOut implements usermem.IO.CopyOut.
-func (mm *MemoryManager) CopyOut(ctx context.Context, addr usermem.Addr, src []byte, opts usermem.IOOpts) (int, error) {
+func (mm *MemoryManager) CopyOut(ctx context.Context, addr hostarch.Addr, src []byte, opts usermem.IOOpts) (int, error) {
ar, ok := mm.CheckIORange(addr, int64(len(src)))
if !ok {
return 0, syserror.EFAULT
@@ -116,24 +117,24 @@ func (mm *MemoryManager) CopyOut(ctx context.Context, addr usermem.Addr, src []b
}
// Go through internal mappings.
- n64, err := mm.withInternalMappings(ctx, ar, usermem.Write, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
+ n64, err := mm.withInternalMappings(ctx, ar, hostarch.Write, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
n, err := safemem.CopySeq(ims, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(src)))
return n, translateIOError(ctx, err)
})
return int(n64), err
}
-func (mm *MemoryManager) asCopyOut(ctx context.Context, addr usermem.Addr, src []byte) (int, error) {
+func (mm *MemoryManager) asCopyOut(ctx context.Context, addr hostarch.Addr, src []byte) (int, error) {
var done int
for {
- n, err := mm.as.CopyOut(addr+usermem.Addr(done), src[done:])
+ n, err := mm.as.CopyOut(addr+hostarch.Addr(done), src[done:])
done += n
if err == nil {
return done, nil
}
if f, ok := err.(platform.SegmentationFault); ok {
ar, _ := addr.ToRange(uint64(len(src)))
- if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Write); err != nil {
+ if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Write); err != nil {
return done, err
}
continue
@@ -143,7 +144,7 @@ func (mm *MemoryManager) asCopyOut(ctx context.Context, addr usermem.Addr, src [
}
// CopyIn implements usermem.IO.CopyIn.
-func (mm *MemoryManager) CopyIn(ctx context.Context, addr usermem.Addr, dst []byte, opts usermem.IOOpts) (int, error) {
+func (mm *MemoryManager) CopyIn(ctx context.Context, addr hostarch.Addr, dst []byte, opts usermem.IOOpts) (int, error) {
ar, ok := mm.CheckIORange(addr, int64(len(dst)))
if !ok {
return 0, syserror.EFAULT
@@ -159,24 +160,24 @@ func (mm *MemoryManager) CopyIn(ctx context.Context, addr usermem.Addr, dst []by
}
// Go through internal mappings.
- n64, err := mm.withInternalMappings(ctx, ar, usermem.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
+ n64, err := mm.withInternalMappings(ctx, ar, hostarch.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
n, err := safemem.CopySeq(safemem.BlockSeqOf(safemem.BlockFromSafeSlice(dst)), ims)
return n, translateIOError(ctx, err)
})
return int(n64), err
}
-func (mm *MemoryManager) asCopyIn(ctx context.Context, addr usermem.Addr, dst []byte) (int, error) {
+func (mm *MemoryManager) asCopyIn(ctx context.Context, addr hostarch.Addr, dst []byte) (int, error) {
var done int
for {
- n, err := mm.as.CopyIn(addr+usermem.Addr(done), dst[done:])
+ n, err := mm.as.CopyIn(addr+hostarch.Addr(done), dst[done:])
done += n
if err == nil {
return done, nil
}
if f, ok := err.(platform.SegmentationFault); ok {
ar, _ := addr.ToRange(uint64(len(dst)))
- if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Read); err != nil {
+ if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Read); err != nil {
return done, err
}
continue
@@ -186,7 +187,7 @@ func (mm *MemoryManager) asCopyIn(ctx context.Context, addr usermem.Addr, dst []
}
// ZeroOut implements usermem.IO.ZeroOut.
-func (mm *MemoryManager) ZeroOut(ctx context.Context, addr usermem.Addr, toZero int64, opts usermem.IOOpts) (int64, error) {
+func (mm *MemoryManager) ZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64, opts usermem.IOOpts) (int64, error) {
ar, ok := mm.CheckIORange(addr, toZero)
if !ok {
return 0, syserror.EFAULT
@@ -202,23 +203,23 @@ func (mm *MemoryManager) ZeroOut(ctx context.Context, addr usermem.Addr, toZero
}
// Go through internal mappings.
- return mm.withInternalMappings(ctx, ar, usermem.Write, opts.IgnorePermissions, func(dsts safemem.BlockSeq) (uint64, error) {
+ return mm.withInternalMappings(ctx, ar, hostarch.Write, opts.IgnorePermissions, func(dsts safemem.BlockSeq) (uint64, error) {
n, err := safemem.ZeroSeq(dsts)
return n, translateIOError(ctx, err)
})
}
-func (mm *MemoryManager) asZeroOut(ctx context.Context, addr usermem.Addr, toZero int64) (int64, error) {
+func (mm *MemoryManager) asZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64) (int64, error) {
var done int64
for {
- n, err := mm.as.ZeroOut(addr+usermem.Addr(done), uintptr(toZero-done))
+ n, err := mm.as.ZeroOut(addr+hostarch.Addr(done), uintptr(toZero-done))
done += int64(n)
if err == nil {
return done, nil
}
if f, ok := err.(platform.SegmentationFault); ok {
ar, _ := addr.ToRange(uint64(toZero))
- if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Write); err != nil {
+ if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Write); err != nil {
return done, err
}
continue
@@ -228,7 +229,7 @@ func (mm *MemoryManager) asZeroOut(ctx context.Context, addr usermem.Addr, toZer
}
// CopyOutFrom implements usermem.IO.CopyOutFrom.
-func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars usermem.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) {
+func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars hostarch.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) {
if !mm.checkIOVec(ars) {
return 0, syserror.EFAULT
}
@@ -269,11 +270,11 @@ func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars usermem.AddrRangeS
}
// Go through internal mappings.
- return mm.withVecInternalMappings(ctx, ars, usermem.Write, opts.IgnorePermissions, src.ReadToBlocks)
+ return mm.withVecInternalMappings(ctx, ars, hostarch.Write, opts.IgnorePermissions, src.ReadToBlocks)
}
// CopyInTo implements usermem.IO.CopyInTo.
-func (mm *MemoryManager) CopyInTo(ctx context.Context, ars usermem.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) {
+func (mm *MemoryManager) CopyInTo(ctx context.Context, ars hostarch.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) {
if !mm.checkIOVec(ars) {
return 0, syserror.EFAULT
}
@@ -306,11 +307,11 @@ func (mm *MemoryManager) CopyInTo(ctx context.Context, ars usermem.AddrRangeSeq,
}
// Go through internal mappings.
- return mm.withVecInternalMappings(ctx, ars, usermem.Read, opts.IgnorePermissions, dst.WriteFromBlocks)
+ return mm.withVecInternalMappings(ctx, ars, hostarch.Read, opts.IgnorePermissions, dst.WriteFromBlocks)
}
// SwapUint32 implements usermem.IO.SwapUint32.
-func (mm *MemoryManager) SwapUint32(ctx context.Context, addr usermem.Addr, new uint32, opts usermem.IOOpts) (uint32, error) {
+func (mm *MemoryManager) SwapUint32(ctx context.Context, addr hostarch.Addr, new uint32, opts usermem.IOOpts) (uint32, error) {
ar, ok := mm.CheckIORange(addr, 4)
if !ok {
return 0, syserror.EFAULT
@@ -324,7 +325,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr usermem.Addr, new
return old, nil
}
if f, ok := err.(platform.SegmentationFault); ok {
- if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.ReadWrite); err != nil {
+ if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.ReadWrite); err != nil {
return 0, err
}
continue
@@ -335,7 +336,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr usermem.Addr, new
// Go through internal mappings.
var old uint32
- _, err := mm.withInternalMappings(ctx, ar, usermem.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
+ _, err := mm.withInternalMappings(ctx, ar, hostarch.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
// Atomicity is unachievable across mappings.
return 0, syserror.EFAULT
@@ -353,7 +354,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr usermem.Addr, new
}
// CompareAndSwapUint32 implements usermem.IO.CompareAndSwapUint32.
-func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr usermem.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) {
+func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr hostarch.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) {
ar, ok := mm.CheckIORange(addr, 4)
if !ok {
return 0, syserror.EFAULT
@@ -367,7 +368,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr usermem.
return prev, nil
}
if f, ok := err.(platform.SegmentationFault); ok {
- if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.ReadWrite); err != nil {
+ if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.ReadWrite); err != nil {
return 0, err
}
continue
@@ -378,7 +379,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr usermem.
// Go through internal mappings.
var prev uint32
- _, err := mm.withInternalMappings(ctx, ar, usermem.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
+ _, err := mm.withInternalMappings(ctx, ar, hostarch.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
// Atomicity is unachievable across mappings.
return 0, syserror.EFAULT
@@ -396,7 +397,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr usermem.
}
// LoadUint32 implements usermem.IO.LoadUint32.
-func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts usermem.IOOpts) (uint32, error) {
+func (mm *MemoryManager) LoadUint32(ctx context.Context, addr hostarch.Addr, opts usermem.IOOpts) (uint32, error) {
ar, ok := mm.CheckIORange(addr, 4)
if !ok {
return 0, syserror.EFAULT
@@ -410,7 +411,7 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts
return val, nil
}
if f, ok := err.(platform.SegmentationFault); ok {
- if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Read); err != nil {
+ if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Read); err != nil {
return 0, err
}
continue
@@ -421,7 +422,7 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts
// Go through internal mappings.
var val uint32
- _, err := mm.withInternalMappings(ctx, ar, usermem.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
+ _, err := mm.withInternalMappings(ctx, ar, hostarch.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
// Atomicity is unachievable across mappings.
return 0, syserror.EFAULT
@@ -445,11 +446,11 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts
// * mm.as != nil.
// * ioar.Length() != 0.
// * ioar.Contains(addr).
-func (mm *MemoryManager) handleASIOFault(ctx context.Context, addr usermem.Addr, ioar usermem.AddrRange, at usermem.AccessType) error {
+func (mm *MemoryManager) handleASIOFault(ctx context.Context, addr hostarch.Addr, ioar hostarch.AddrRange, at hostarch.AccessType) error {
// Try to map all remaining pages in the I/O operation. This RoundUp can't
// overflow because otherwise it would have been caught by CheckIORange.
end, _ := ioar.End.RoundUp()
- ar := usermem.AddrRange{addr.RoundDown(), end}
+ ar := hostarch.AddrRange{addr.RoundDown(), end}
// Don't bother trying existingPMAsLocked; in most cases, if we did have
// existing pmas, we wouldn't have faulted.
@@ -498,7 +499,7 @@ func (mm *MemoryManager) handleASIOFault(ctx context.Context, addr usermem.Addr,
// more useful for usermem.IO methods.
//
// Preconditions: 0 < ar.Length() <= math.MaxInt64.
-func (mm *MemoryManager) withInternalMappings(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {
+func (mm *MemoryManager) withInternalMappings(ctx context.Context, ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {
// If pmas are already available, we can do IO without touching mm.vmas or
// mm.mappingMu.
mm.activeMu.RLock()
@@ -567,7 +568,7 @@ func (mm *MemoryManager) withInternalMappings(ctx context.Context, ar usermem.Ad
// internal mappings for the subset of ars for which this property holds.
//
// Preconditions: !ars.IsEmpty().
-func (mm *MemoryManager) withVecInternalMappings(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {
+func (mm *MemoryManager) withVecInternalMappings(ctx context.Context, ars hostarch.AddrRangeSeq, at hostarch.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {
// withInternalMappings is faster than withVecInternalMappings because of
// iterator plumbing (this isn't generally practical in the vector case due
// to iterator invalidation between AddrRanges). Use it if possible.
@@ -630,12 +631,12 @@ func (mm *MemoryManager) withVecInternalMappings(ctx context.Context, ars userme
// truncatedAddrRangeSeq returns a copy of ars, but with the end truncated to
// at most address end on AddrRange arsit.Head(). It is used in vector I/O paths to
-// truncate usermem.AddrRangeSeq when errors occur.
+// truncate hostarch.AddrRangeSeq when errors occur.
//
// Preconditions:
// * !arsit.IsEmpty().
// * end <= arsit.Head().End.
-func truncatedAddrRangeSeq(ars, arsit usermem.AddrRangeSeq, end usermem.Addr) usermem.AddrRangeSeq {
+func truncatedAddrRangeSeq(ars, arsit hostarch.AddrRangeSeq, end hostarch.Addr) hostarch.AddrRangeSeq {
ar := arsit.Head()
if end <= ar.Start {
return ars.TakeFirst64(ars.NumBytes() - arsit.NumBytes())
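
The asCopyOut, asCopyIn, and asZeroOut changes above all share one loop shape: attempt the AddressSpace operation, and if it stops on a platform.SegmentationFault, fault the page in and retry. A condensed, hypothetical sketch of that shape (the real methods also accumulate the number of bytes completed):

package example

import (
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/sentry/platform"
)

// retryOnFault runs one step of a copy; if the step fails with a
// platform.SegmentationFault, handleFault maps the faulting address in and
// the step is retried. Any other outcome ends the loop.
func retryOnFault(step func() error, handleFault func(addr hostarch.Addr) error) error {
	for {
		err := step()
		if err == nil {
			return nil
		}
		if f, ok := err.(platform.SegmentationFault); ok {
			if err := handleFault(f.Addr); err != nil {
				return err
			}
			continue
		}
		return err
	}
}
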
diff --git a/pkg/sentry/mm/lifecycle.go b/pkg/sentry/mm/lifecycle.go
index 120707429..a79ef9223 100644
--- a/pkg/sentry/mm/lifecycle.go
+++ b/pkg/sentry/mm/lifecycle.go
@@ -19,12 +19,12 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/usermem"
)
// NewMemoryManager returns a new MemoryManager with no mappings and 1 user.
@@ -139,7 +139,7 @@ func (mm *MemoryManager) Fork(ctx context.Context) (*MemoryManager, error) {
}
srcvseg := mm.vmas.FirstSegment()
dstpgap := mm2.pmas.FirstGap()
- var unmapAR usermem.AddrRange
+ var unmapAR hostarch.AddrRange
for srcpseg := mm.pmas.FirstSegment(); srcpseg.Ok(); srcpseg = srcpseg.NextSegment() {
pma := srcpseg.ValuePtr()
if !pma.private {
diff --git a/pkg/sentry/mm/metadata.go b/pkg/sentry/mm/metadata.go
index 0cfd60f6c..28c5fead9 100644
--- a/pkg/sentry/mm/metadata.go
+++ b/pkg/sentry/mm/metadata.go
@@ -16,9 +16,9 @@ package mm
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsbridge"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Dumpability describes if and how core dumps should be created.
@@ -54,14 +54,14 @@ func (mm *MemoryManager) SetDumpability(d Dumpability) {
// ArgvStart returns the start of the application argument vector.
//
// There is no guarantee that this value is sensible w.r.t. ArgvEnd.
-func (mm *MemoryManager) ArgvStart() usermem.Addr {
+func (mm *MemoryManager) ArgvStart() hostarch.Addr {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
return mm.argv.Start
}
// SetArgvStart sets the start of the application argument vector.
-func (mm *MemoryManager) SetArgvStart(a usermem.Addr) {
+func (mm *MemoryManager) SetArgvStart(a hostarch.Addr) {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
mm.argv.Start = a
@@ -70,14 +70,14 @@ func (mm *MemoryManager) SetArgvStart(a usermem.Addr) {
// ArgvEnd returns the end of the application argument vector.
//
// There is no guarantee that this value is sensible w.r.t. ArgvStart.
-func (mm *MemoryManager) ArgvEnd() usermem.Addr {
+func (mm *MemoryManager) ArgvEnd() hostarch.Addr {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
return mm.argv.End
}
// SetArgvEnd sets the end of the application argument vector.
-func (mm *MemoryManager) SetArgvEnd(a usermem.Addr) {
+func (mm *MemoryManager) SetArgvEnd(a hostarch.Addr) {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
mm.argv.End = a
@@ -86,14 +86,14 @@ func (mm *MemoryManager) SetArgvEnd(a usermem.Addr) {
// EnvvStart returns the start of the application environment vector.
//
// There is no guarantee that this value is sensible w.r.t. EnvvEnd.
-func (mm *MemoryManager) EnvvStart() usermem.Addr {
+func (mm *MemoryManager) EnvvStart() hostarch.Addr {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
return mm.envv.Start
}
// SetEnvvStart sets the start of the application environment vector.
-func (mm *MemoryManager) SetEnvvStart(a usermem.Addr) {
+func (mm *MemoryManager) SetEnvvStart(a hostarch.Addr) {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
mm.envv.Start = a
@@ -102,14 +102,14 @@ func (mm *MemoryManager) SetEnvvStart(a usermem.Addr) {
// EnvvEnd returns the end of the application environment vector.
//
// There is no guarantee that this value is sensible w.r.t. EnvvStart.
-func (mm *MemoryManager) EnvvEnd() usermem.Addr {
+func (mm *MemoryManager) EnvvEnd() hostarch.Addr {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
return mm.envv.End
}
// SetEnvvEnd sets the end of the application environment vector.
-func (mm *MemoryManager) SetEnvvEnd(a usermem.Addr) {
+func (mm *MemoryManager) SetEnvvEnd(a hostarch.Addr) {
mm.metadataMu.Lock()
defer mm.metadataMu.Unlock()
mm.envv.End = a
diff --git a/pkg/sentry/mm/mm.go b/pkg/sentry/mm/mm.go
index 92cc87d84..57969b26c 100644
--- a/pkg/sentry/mm/mm.go
+++ b/pkg/sentry/mm/mm.go
@@ -36,6 +36,7 @@ package mm
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsbridge"
@@ -43,7 +44,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
)
// MemoryManager implements a virtual address space.
@@ -97,7 +97,7 @@ type MemoryManager struct {
// binary into the mm.
//
// brk is protected by mappingMu.
- brk usermem.AddrRange
+ brk hostarch.AddrRange
// usageAS is vmas.Span(), cached to accelerate RLIMIT_AS checks.
//
@@ -198,14 +198,14 @@ type MemoryManager struct {
// requirements apply to argv; we do not require that argv.WellFormed().
//
// argv is protected by metadataMu.
- argv usermem.AddrRange
+ argv hostarch.AddrRange
// envv is the application envv. This is set up by the loader and may be
// modified by prctl(PR_SET_MM_ENV_START/PR_SET_MM_ENV_END). No
// requirements apply to envv; we do not require that envv.WellFormed().
//
// envv is protected by metadataMu.
- envv usermem.AddrRange
+ envv hostarch.AddrRange
// auxv is the ELF's auxiliary vector.
//
@@ -268,20 +268,20 @@ type vma struct {
// realPerms are the memory permissions on this vma, as defined by the
// application.
- realPerms usermem.AccessType `state:".(int)"`
+ realPerms hostarch.AccessType `state:".(int)"`
// effectivePerms are the memory permissions on this vma which are
// actually used to control access.
//
// Invariant: effectivePerms == realPerms.Effective().
- effectivePerms usermem.AccessType `state:"manual"`
+ effectivePerms hostarch.AccessType `state:"manual"`
// maxPerms limits the set of permissions that may ever apply to this
// memory, as well as accesses for which usermem.IOOpts.IgnorePermissions
// is true (e.g. ptrace(PTRACE_POKEDATA)).
//
// Invariant: maxPerms == maxPerms.Effective().
- maxPerms usermem.AccessType `state:"manual"`
+ maxPerms hostarch.AccessType `state:"manual"`
// private is true if this is a MAP_PRIVATE mapping, such that writes to
// the mapping are propagated to a copy.
@@ -421,8 +421,8 @@ type pma struct {
off uint64
// translatePerms is the permissions returned by memmap.Mappable.Translate.
- // If private is true, translatePerms is usermem.AnyAccess.
- translatePerms usermem.AccessType
+ // If private is true, translatePerms is hostarch.AnyAccess.
+ translatePerms hostarch.AccessType
// effectivePerms is the permissions allowed for non-ignorePermissions
// accesses. maxPerms is the permissions allowed for ignorePermissions
@@ -432,8 +432,8 @@ type pma struct {
//
// These are stored in the pma so that the IO implementation can avoid
// iterating mm.vmas when pmas already exist.
- effectivePerms usermem.AccessType
- maxPerms usermem.AccessType
+ effectivePerms hostarch.AccessType
+ maxPerms hostarch.AccessType
// needCOW is true if writes to the mapping must be propagated to a copy.
needCOW bool
@@ -465,7 +465,7 @@ type privateRefs struct {
}
type invalidateArgs struct {
- ar usermem.AddrRange
+ ar hostarch.AddrRange
opts memmap.InvalidateOpts
}
diff --git a/pkg/sentry/mm/mm_test.go b/pkg/sentry/mm/mm_test.go
index bc53bd41e..1304b0a2f 100644
--- a/pkg/sentry/mm/mm_test.go
+++ b/pkg/sentry/mm/mm_test.go
@@ -18,6 +18,7 @@ import (
"testing"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
"gvisor.dev/gvisor/pkg/sentry/limits"
@@ -51,7 +52,7 @@ func TestUsageASUpdates(t *testing.T) {
defer mm.DecUsers(ctx)
addr, err := mm.MMap(ctx, memmap.MMapOpts{
- Length: 2 * usermem.PageSize,
+ Length: 2 * hostarch.PageSize,
Private: true,
})
if err != nil {
@@ -62,7 +63,7 @@ func TestUsageASUpdates(t *testing.T) {
t.Fatalf("usageAS believes %v bytes are mapped; %v bytes are actually mapped", mm.usageAS, realUsage)
}
- mm.MUnmap(ctx, addr, usermem.PageSize)
+ mm.MUnmap(ctx, addr, hostarch.PageSize)
realUsage = mm.realUsageAS()
if mm.usageAS != realUsage {
t.Fatalf("usageAS believes %v bytes are mapped; %v bytes are actually mapped", mm.usageAS, realUsage)
@@ -86,10 +87,10 @@ func TestDataASUpdates(t *testing.T) {
defer mm.DecUsers(ctx)
addr, err := mm.MMap(ctx, memmap.MMapOpts{
- Length: 3 * usermem.PageSize,
+ Length: 3 * hostarch.PageSize,
Private: true,
- Perms: usermem.Write,
- MaxPerms: usermem.AnyAccess,
+ Perms: hostarch.Write,
+ MaxPerms: hostarch.AnyAccess,
})
if err != nil {
t.Fatalf("MMap got err %v want nil", err)
@@ -102,19 +103,19 @@ func TestDataASUpdates(t *testing.T) {
t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
}
- mm.MUnmap(ctx, addr, usermem.PageSize)
+ mm.MUnmap(ctx, addr, hostarch.PageSize)
realDataAS = mm.realDataAS()
if mm.dataAS != realDataAS {
t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
}
- mm.MProtect(addr+usermem.PageSize, usermem.PageSize, usermem.Read, false)
+ mm.MProtect(addr+hostarch.PageSize, hostarch.PageSize, hostarch.Read, false)
realDataAS = mm.realDataAS()
if mm.dataAS != realDataAS {
t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
}
- mm.MRemap(ctx, addr+2*usermem.PageSize, usermem.PageSize, 2*usermem.PageSize, MRemapOpts{
+ mm.MRemap(ctx, addr+2*hostarch.PageSize, hostarch.PageSize, 2*hostarch.PageSize, MRemapOpts{
Move: MRemapMayMove,
})
realDataAS = mm.realDataAS()
@@ -133,7 +134,7 @@ func TestBrkDataLimitUpdates(t *testing.T) {
// Try to extend the brk by one page and expect doing so to fail.
oldBrk, _ := mm.Brk(ctx, 0)
- if newBrk, _ := mm.Brk(ctx, oldBrk+usermem.PageSize); newBrk != oldBrk {
+ if newBrk, _ := mm.Brk(ctx, oldBrk+hostarch.PageSize); newBrk != oldBrk {
t.Errorf("brk() increased data segment above RLIMIT_DATA (old brk = %#x, new brk = %#x", oldBrk, newBrk)
}
}
@@ -145,10 +146,10 @@ func TestIOAfterUnmap(t *testing.T) {
defer mm.DecUsers(ctx)
addr, err := mm.MMap(ctx, memmap.MMapOpts{
- Length: usermem.PageSize,
+ Length: hostarch.PageSize,
Private: true,
- Perms: usermem.Read,
- MaxPerms: usermem.AnyAccess,
+ Perms: hostarch.Read,
+ MaxPerms: hostarch.AnyAccess,
})
if err != nil {
t.Fatalf("MMap got err %v want nil", err)
@@ -164,7 +165,7 @@ func TestIOAfterUnmap(t *testing.T) {
t.Errorf("CopyIn got %d want 1", n)
}
- err = mm.MUnmap(ctx, addr, usermem.PageSize)
+ err = mm.MUnmap(ctx, addr, hostarch.PageSize)
if err != nil {
t.Fatalf("MUnmap got err %v want nil", err)
}
@@ -185,10 +186,10 @@ func TestIOAfterMProtect(t *testing.T) {
defer mm.DecUsers(ctx)
addr, err := mm.MMap(ctx, memmap.MMapOpts{
- Length: usermem.PageSize,
+ Length: hostarch.PageSize,
Private: true,
- Perms: usermem.ReadWrite,
- MaxPerms: usermem.AnyAccess,
+ Perms: hostarch.ReadWrite,
+ MaxPerms: hostarch.AnyAccess,
})
if err != nil {
t.Fatalf("MMap got err %v want nil", err)
@@ -204,7 +205,7 @@ func TestIOAfterMProtect(t *testing.T) {
t.Errorf("CopyOut got %d want 1", n)
}
- err = mm.MProtect(addr, usermem.PageSize, usermem.Read, false)
+ err = mm.MProtect(addr, hostarch.PageSize, hostarch.Read, false)
if err != nil {
t.Errorf("MProtect got err %v want nil", err)
}
diff --git a/pkg/sentry/mm/pma.go b/pkg/sentry/mm/pma.go
index 7e5f7de64..5583f62b2 100644
--- a/pkg/sentry/mm/pma.go
+++ b/pkg/sentry/mm/pma.go
@@ -18,12 +18,12 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safecopy"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// existingPMAsLocked checks that pmas exist for all addresses in ar, and
@@ -34,7 +34,7 @@ import (
// Preconditions:
// * mm.activeMu must be locked.
// * ar.Length() != 0.
-func (mm *MemoryManager) existingPMAsLocked(ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool, needInternalMappings bool) pmaIterator {
+func (mm *MemoryManager) existingPMAsLocked(ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool, needInternalMappings bool) pmaIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -70,7 +70,7 @@ func (mm *MemoryManager) existingPMAsLocked(ar usermem.AddrRange, at usermem.Acc
// and support access of type (at, ignorePermissions).
//
// Preconditions: mm.activeMu must be locked.
-func (mm *MemoryManager) existingVecPMAsLocked(ars usermem.AddrRangeSeq, at usermem.AccessType, ignorePermissions bool, needInternalMappings bool) bool {
+func (mm *MemoryManager) existingVecPMAsLocked(ars hostarch.AddrRangeSeq, at hostarch.AccessType, ignorePermissions bool, needInternalMappings bool) bool {
for ; !ars.IsEmpty(); ars = ars.Tail() {
if ar := ars.Head(); ar.Length() != 0 && !mm.existingPMAsLocked(ar, at, ignorePermissions, needInternalMappings).Ok() {
return false
@@ -98,7 +98,7 @@ func (mm *MemoryManager) existingVecPMAsLocked(ars usermem.AddrRangeSeq, at user
// * vseg.Range().Contains(ar.Start).
// * vmas must exist for all addresses in ar, and support accesses of type at
// (i.e. permission checks must have been performed against vmas).
-func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, at usermem.AccessType) (pmaIterator, pmaGapIterator, error) {
+func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, at hostarch.AccessType) (pmaIterator, pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -118,7 +118,7 @@ func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar
end = ar.End.RoundDown()
alignerr = syserror.EFAULT
}
- ar = usermem.AddrRange{ar.Start.RoundDown(), end}
+ ar = hostarch.AddrRange{ar.Start.RoundDown(), end}
pstart, pend, perr := mm.getPMAsInternalLocked(ctx, vseg, ar, at)
if pend.Start() <= ar.Start {
@@ -145,7 +145,7 @@ func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar
// * mm.activeMu must be locked for writing.
// * vmas must exist for all addresses in ars, and support accesses of type at
// (i.e. permission checks must have been performed against vmas).
-func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType) (usermem.AddrRangeSeq, error) {
+func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars hostarch.AddrRangeSeq, at hostarch.AccessType) (hostarch.AddrRangeSeq, error) {
for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
ar := arsit.Head()
if ar.Length() == 0 {
@@ -164,7 +164,7 @@ func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrR
end = ar.End.RoundDown()
alignerr = syserror.EFAULT
}
- ar = usermem.AddrRange{ar.Start.RoundDown(), end}
+ ar = hostarch.AddrRange{ar.Start.RoundDown(), end}
_, pend, perr := mm.getPMAsInternalLocked(ctx, mm.vmas.FindSegment(ar.Start), ar, at)
if perr != nil {
@@ -191,7 +191,7 @@ func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrR
//
// getPMAsInternalLocked is an implementation helper for getPMAsLocked and
// getVecPMAsLocked; other clients should call one of those instead.
-func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, at usermem.AccessType) (pmaIterator, pmaGapIterator, error) {
+func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, at hostarch.AccessType) (pmaIterator, pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -245,7 +245,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
pseg, pgap = mm.pmas.Insert(pgap, allocAR, pma{
file: mf,
off: fr.Start,
- translatePerms: usermem.AnyAccess,
+ translatePerms: hostarch.AnyAccess,
effectivePerms: vma.effectivePerms,
maxPerms: vma.maxPerms,
// Since we just allocated this memory and have the
@@ -335,7 +335,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
// Neither of these cases has enough spatial locality to
// benefit from copying nearby pages, so if the vma is
// executable, only copy the pages required.
- var copyAR usermem.AddrRange
+ var copyAR hostarch.AddrRange
if vseg.ValuePtr().effectivePerms.Execute {
copyAR = pseg.Range().Intersect(ar)
} else {
@@ -366,7 +366,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
// Replace the pma with a copy in the part of the address
// range where copying was successful. This doesn't change
// RSS.
- copyAR.End = copyAR.Start + usermem.Addr(fr.Length())
+ copyAR.End = copyAR.Start + hostarch.Addr(fr.Length())
if copyAR != pseg.Range() {
pseg = mm.pmas.Isolate(pseg, copyAR)
pstart = pmaIterator{} // iterators invalidated
@@ -380,7 +380,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
mf.IncRef(fr)
oldpma.file = mf
oldpma.off = fr.Start
- oldpma.translatePerms = usermem.AnyAccess
+ oldpma.translatePerms = hostarch.AnyAccess
oldpma.effectivePerms = vma.effectivePerms
oldpma.maxPerms = vma.maxPerms
oldpma.needCOW = false
@@ -499,14 +499,14 @@ const (
// privateAllocUnit may reduce page faults by allowing fewer, larger pmas
// to be mapped, but may result in larger amounts of wasted memory in the
// presence of fragmentation. privateAllocUnit must be a power-of-2
- // multiple of usermem.PageSize.
- privateAllocUnit = usermem.HugePageSize
+ // multiple of hostarch.PageSize.
+ privateAllocUnit = hostarch.HugePageSize
privateAllocMask = privateAllocUnit - 1
)
-func privateAligned(ar usermem.AddrRange) usermem.AddrRange {
- aligned := usermem.AddrRange{ar.Start &^ privateAllocMask, ar.End}
+func privateAligned(ar hostarch.AddrRange) hostarch.AddrRange {
+ aligned := hostarch.AddrRange{ar.Start &^ privateAllocMask, ar.End}
if end := (ar.End + privateAllocMask) &^ privateAllocMask; end >= ar.End {
aligned.End = end
}
@@ -548,7 +548,7 @@ func (mm *MemoryManager) isPMACopyOnWriteLocked(vseg vmaIterator, pseg pmaIterat
rseg := mm.privateRefs.refs.FindSegment(fr.Start)
if rseg.Ok() && rseg.Value() == 1 && fr.End <= rseg.End() {
pma.needCOW = false
- // pma.private => pma.translatePerms == usermem.AnyAccess
+ // pma.private => pma.translatePerms == hostarch.AnyAccess
vma := vseg.ValuePtr()
pma.effectivePerms = vma.effectivePerms
pma.maxPerms = vma.maxPerms
@@ -558,7 +558,7 @@ func (mm *MemoryManager) isPMACopyOnWriteLocked(vseg vmaIterator, pseg pmaIterat
}
// Invalidate implements memmap.MappingSpace.Invalidate.
-func (mm *MemoryManager) Invalidate(ar usermem.AddrRange, opts memmap.InvalidateOpts) {
+func (mm *MemoryManager) Invalidate(ar hostarch.AddrRange, opts memmap.InvalidateOpts) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -581,7 +581,7 @@ func (mm *MemoryManager) Invalidate(ar usermem.AddrRange, opts memmap.Invalidate
// * mm.activeMu must be locked for writing.
// * ar.Length() != 0.
// * ar must be page-aligned.
-func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivate, invalidateShared bool) {
+func (mm *MemoryManager) invalidateLocked(ar hostarch.AddrRange, invalidatePrivate, invalidateShared bool) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -627,7 +627,7 @@ func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivat
// Preconditions:
// * ar.Length() != 0.
// * ar must be page-aligned.
-func (mm *MemoryManager) Pin(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool) ([]PinnedRange, error) {
+func (mm *MemoryManager) Pin(ctx context.Context, ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool) ([]PinnedRange, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -683,7 +683,7 @@ func (mm *MemoryManager) Pin(ctx context.Context, ar usermem.AddrRange, at userm
// PinnedRanges are returned by MemoryManager.Pin.
type PinnedRange struct {
// Source is the corresponding range of addresses.
- Source usermem.AddrRange
+ Source hostarch.AddrRange
// File is the mapped file.
File memmap.File
@@ -713,7 +713,7 @@ func Unpin(prs []PinnedRange) {
// * !oldAR.Overlaps(newAR).
// * mm.pmas.IsEmptyRange(newAR).
// * oldAR and newAR must be page-aligned.
-func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
+func (mm *MemoryManager) movePMAsLocked(oldAR, newAR hostarch.AddrRange) {
if checkInvariants {
if !oldAR.WellFormed() || oldAR.Length() == 0 || !oldAR.IsPageAligned() {
panic(fmt.Sprintf("invalid oldAR: %v", oldAR))
@@ -731,7 +731,7 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
}
type movedPMA struct {
- oldAR usermem.AddrRange
+ oldAR hostarch.AddrRange
pma pma
}
var movedPMAs []movedPMA
@@ -751,7 +751,7 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
pgap := mm.pmas.FindGap(newAR.Start)
for i := range movedPMAs {
mpma := &movedPMAs[i]
- pmaNewAR := usermem.AddrRange{mpma.oldAR.Start + off, mpma.oldAR.End + off}
+ pmaNewAR := hostarch.AddrRange{mpma.oldAR.Start + off, mpma.oldAR.End + off}
pgap = mm.pmas.Insert(pgap, pmaNewAR, mpma.pma).NextGap()
}
@@ -776,7 +776,7 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
//
// Postconditions: getPMAInternalMappingsLocked does not invalidate iterators
// into mm.pmas.
-func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar usermem.AddrRange) (pmaGapIterator, error) {
+func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar hostarch.AddrRange) (pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -808,7 +808,7 @@ func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar userm
//
// Postconditions: getVecPMAInternalMappingsLocked does not invalidate iterators
// into mm.pmas.
-func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars usermem.AddrRangeSeq) (usermem.AddrRangeSeq, error) {
+func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars hostarch.AddrRangeSeq) (hostarch.AddrRangeSeq, error) {
for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
ar := arsit.Head()
if ar.Length() == 0 {
@@ -829,7 +829,7 @@ func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars usermem.AddrRangeSe
// in ar.
// * ar.Length() != 0.
// * pseg.Range().Contains(ar.Start).
-func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar usermem.AddrRange) safemem.BlockSeq {
+func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar hostarch.AddrRange) safemem.BlockSeq {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -866,7 +866,7 @@ func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar usermem.Add
// * mm.activeMu must be locked.
// * Internal mappings must have been previously established for all addresses
// in ars.
-func (mm *MemoryManager) vecInternalMappingsLocked(ars usermem.AddrRangeSeq) safemem.BlockSeq {
+func (mm *MemoryManager) vecInternalMappingsLocked(ars hostarch.AddrRangeSeq) safemem.BlockSeq {
var ims []safemem.Block
for ; !ars.IsEmpty(); ars = ars.Tail() {
ar := ars.Head()
@@ -931,7 +931,7 @@ func (mm *MemoryManager) decPrivateRef(fr memmap.FileRange) {
// MemoryManager to reflect the insertion of a pma at ar.
//
// Preconditions: mm.activeMu must be locked for writing.
-func (mm *MemoryManager) addRSSLocked(ar usermem.AddrRange) {
+func (mm *MemoryManager) addRSSLocked(ar hostarch.AddrRange) {
mm.curRSS += uint64(ar.Length())
if mm.curRSS > mm.maxRSS {
mm.maxRSS = mm.curRSS
@@ -942,19 +942,19 @@ func (mm *MemoryManager) addRSSLocked(ar usermem.AddrRange) {
// reflect the removal of a pma at ar.
//
// Preconditions: mm.activeMu must be locked for writing.
-func (mm *MemoryManager) removeRSSLocked(ar usermem.AddrRange) {
+func (mm *MemoryManager) removeRSSLocked(ar hostarch.AddrRange) {
mm.curRSS -= uint64(ar.Length())
}
// pmaSetFunctions implements segment.Functions for pmaSet.
type pmaSetFunctions struct{}
-func (pmaSetFunctions) MinKey() usermem.Addr {
+func (pmaSetFunctions) MinKey() hostarch.Addr {
return 0
}
-func (pmaSetFunctions) MaxKey() usermem.Addr {
- return ^usermem.Addr(0)
+func (pmaSetFunctions) MaxKey() hostarch.Addr {
+ return ^hostarch.Addr(0)
}
func (pmaSetFunctions) ClearValue(pma *pma) {
@@ -962,7 +962,7 @@ func (pmaSetFunctions) ClearValue(pma *pma) {
pma.internalMappings = safemem.BlockSeq{}
}
-func (pmaSetFunctions) Merge(ar1 usermem.AddrRange, pma1 pma, ar2 usermem.AddrRange, pma2 pma) (pma, bool) {
+func (pmaSetFunctions) Merge(ar1 hostarch.AddrRange, pma1 pma, ar2 hostarch.AddrRange, pma2 pma) (pma, bool) {
if pma1.file != pma2.file ||
pma1.off+uint64(ar1.Length()) != pma2.off ||
pma1.translatePerms != pma2.translatePerms ||
@@ -980,7 +980,7 @@ func (pmaSetFunctions) Merge(ar1 usermem.AddrRange, pma1 pma, ar2 usermem.AddrRa
return pma1, true
}
-func (pmaSetFunctions) Split(ar usermem.AddrRange, p pma, split usermem.Addr) (pma, pma) {
+func (pmaSetFunctions) Split(ar hostarch.AddrRange, p pma, split hostarch.Addr) (pma, pma) {
newlen1 := uint64(split - ar.Start)
p2 := p
p2.off += newlen1
@@ -997,7 +997,7 @@ func (pmaSetFunctions) Split(ar usermem.AddrRange, p pma, split usermem.Addr) (p
// Preconditions:
// * mm.activeMu must be locked.
// * addr <= pgap.Start().
-func (mm *MemoryManager) findOrSeekPrevUpperBoundPMA(addr usermem.Addr, pgap pmaGapIterator) pmaIterator {
+func (mm *MemoryManager) findOrSeekPrevUpperBoundPMA(addr hostarch.Addr, pgap pmaGapIterator) pmaIterator {
if checkInvariants {
if !pgap.Ok() {
panic("terminal pma iterator")
@@ -1045,7 +1045,7 @@ func (pseg pmaIterator) fileRange() memmap.FileRange {
// Preconditions:
// * pseg.Range().IsSupersetOf(ar).
// * ar.Length != 0.
-func (pseg pmaIterator) fileRangeOf(ar usermem.AddrRange) memmap.FileRange {
+func (pseg pmaIterator) fileRangeOf(ar hostarch.AddrRange) memmap.FileRange {
if checkInvariants {
if !pseg.Ok() {
panic("terminal pma iterator")
diff --git a/pkg/sentry/mm/procfs.go b/pkg/sentry/mm/procfs.go
index 73bfbea49..f1440e884 100644
--- a/pkg/sentry/mm/procfs.go
+++ b/pkg/sentry/mm/procfs.go
@@ -19,9 +19,9 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/usermem"
)
const (
@@ -29,7 +29,7 @@ const (
// include/linux/kdev_t.h:MINORBITS
devMinorBits = 20
- vsyscallEnd = usermem.Addr(0xffffffffff601000)
+ vsyscallEnd = hostarch.Addr(0xffffffffff601000)
vsyscallMapsEntry = "ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n"
vsyscallSmapsEntry = vsyscallMapsEntry +
"Size: 4 kB\n" +
@@ -62,7 +62,7 @@ func (mm *MemoryManager) NeedsUpdate(generation int64) bool {
func (mm *MemoryManager) ReadMapsDataInto(ctx context.Context, buf *bytes.Buffer) {
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
- var start usermem.Addr
+ var start hostarch.Addr
for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() {
mm.appendVMAMapsEntryLocked(ctx, vseg, buf)
@@ -88,9 +88,9 @@ func (mm *MemoryManager) ReadMapsSeqFileData(ctx context.Context, handle seqfile
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
var data []seqfile.SeqData
- var start usermem.Addr
+ var start hostarch.Addr
if handle != nil {
- start = *handle.(*usermem.Addr)
+ start = *handle.(*hostarch.Addr)
}
for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() {
vmaAddr := vseg.End()
@@ -177,7 +177,7 @@ func (mm *MemoryManager) appendVMAMapsEntryLocked(ctx context.Context, vseg vmaI
func (mm *MemoryManager) ReadSmapsDataInto(ctx context.Context, buf *bytes.Buffer) {
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
- var start usermem.Addr
+ var start hostarch.Addr
for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() {
mm.vmaSmapsEntryIntoLocked(ctx, vseg, buf)
@@ -196,9 +196,9 @@ func (mm *MemoryManager) ReadSmapsSeqFileData(ctx context.Context, handle seqfil
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
var data []seqfile.SeqData
- var start usermem.Addr
+ var start hostarch.Addr
if handle != nil {
- start = *handle.(*usermem.Addr)
+ start = *handle.(*hostarch.Addr)
}
for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() {
vmaAddr := vseg.End()
@@ -279,8 +279,8 @@ func (mm *MemoryManager) vmaSmapsEntryIntoLocked(ctx context.Context, vseg vmaIt
// Swap is not implemented.
fmt.Fprintf(b, "Swap: %8d kB\n", 0)
fmt.Fprintf(b, "SwapPss: %8d kB\n", 0)
- fmt.Fprintf(b, "KernelPageSize: %8d kB\n", usermem.PageSize/1024)
- fmt.Fprintf(b, "MMUPageSize: %8d kB\n", usermem.PageSize/1024)
+ fmt.Fprintf(b, "KernelPageSize: %8d kB\n", hostarch.PageSize/1024)
+ fmt.Fprintf(b, "MMUPageSize: %8d kB\n", hostarch.PageSize/1024)
locked := rss
if vma.mlockMode == memmap.MLockNone {
locked = 0
diff --git a/pkg/sentry/mm/shm.go b/pkg/sentry/mm/shm.go
index 6432731d4..3130be80c 100644
--- a/pkg/sentry/mm/shm.go
+++ b/pkg/sentry/mm/shm.go
@@ -16,13 +16,13 @@ package mm
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/shm"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// DetachShm unmaps a sysv shared memory segment.
-func (mm *MemoryManager) DetachShm(ctx context.Context, addr usermem.Addr) error {
+func (mm *MemoryManager) DetachShm(ctx context.Context, addr hostarch.Addr) error {
if addr != addr.RoundDown() {
// "... shmaddr is not aligned on a page boundary." - man shmdt(2)
return syserror.EINVAL
@@ -52,7 +52,7 @@ func (mm *MemoryManager) DetachShm(ctx context.Context, addr usermem.Addr) error
}
// Remove all vmas that could have been created by the same attach.
- end := addr + usermem.Addr(detached.EffectiveSize())
+ end := addr + hostarch.Addr(detached.EffectiveSize())
for vseg.Ok() && vseg.End() <= end {
vma := vseg.ValuePtr()
if vma.mappable == detached && uint64(vseg.Start()-addr) == vma.off {
diff --git a/pkg/sentry/mm/special_mappable.go b/pkg/sentry/mm/special_mappable.go
index 48d8b6a2b..e748b7ff8 100644
--- a/pkg/sentry/mm/special_mappable.go
+++ b/pkg/sentry/mm/special_mappable.go
@@ -16,11 +16,11 @@ package mm
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// SpecialMappable implements memmap.MappingIdentity and memmap.Mappable with
@@ -77,21 +77,21 @@ func (m *SpecialMappable) Msync(ctx context.Context, mr memmap.MappableRange) er
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (*SpecialMappable) AddMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, uint64, bool) error {
+func (*SpecialMappable) AddMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, uint64, bool) error {
return nil
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (*SpecialMappable) RemoveMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, uint64, bool) {
+func (*SpecialMappable) RemoveMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, uint64, bool) {
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (*SpecialMappable) CopyMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, usermem.AddrRange, uint64, bool) error {
+func (*SpecialMappable) CopyMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, hostarch.AddrRange, uint64, bool) error {
return nil
}
// Translate implements memmap.Mappable.Translate.
-func (m *SpecialMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (m *SpecialMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
var err error
if required.End > m.fr.Length() {
err = &memmap.BusError{syserror.EFAULT}
@@ -102,7 +102,7 @@ func (m *SpecialMappable) Translate(ctx context.Context, required, optional memm
Source: source,
File: m.mfp.MemoryFile(),
Offset: m.fr.Start + source.Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
},
}, err
}
@@ -146,7 +146,7 @@ func NewSharedAnonMappable(length uint64, mfp pgalloc.MemoryFileProvider) (*Spec
if length == 0 {
return nil, syserror.EINVAL
}
- alignedLen, ok := usermem.Addr(length).RoundUp()
+ alignedLen, ok := hostarch.Addr(length).RoundUp()
if !ok {
return nil, syserror.EINVAL
}
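
NewSharedAnonMappable's only behavioral dependency on the address package is RoundUp's overflow reporting. The hypothetical alignedLength helper below sketches that validation in isolation, under the same gvisor module assumption as the earlier sketch; the lengths fed to it in main are illustrative.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// alignedLength mirrors the length validation above: a zero length and a
// length whose page-rounded value overflows are both rejected with EINVAL.
func alignedLength(length uint64) (uint64, error) {
	if length == 0 {
		return 0, fmt.Errorf("EINVAL: zero length")
	}
	alignedLen, ok := hostarch.Addr(length).RoundUp()
	if !ok {
		return 0, fmt.Errorf("EINVAL: %#x overflows when rounded up", length)
	}
	return uint64(alignedLen), nil
}

func main() {
	fmt.Println(alignedLength(1))         // rounds up to a full page
	fmt.Println(alignedLength(1<<64 - 1)) // rounding past the top of the address space fails
}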
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
index 69e37330b..7ad6b7c21 100644
--- a/pkg/sentry/mm/syscalls.go
+++ b/pkg/sentry/mm/syscalls.go
@@ -21,20 +21,20 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/kernel/futex"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// HandleUserFault handles an application page fault. sp is the faulting
// application thread's stack pointer.
//
// Preconditions: mm.as != nil.
-func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr usermem.Addr, at usermem.AccessType, sp usermem.Addr) error {
- ar, ok := addr.RoundDown().ToRange(usermem.PageSize)
+func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr hostarch.Addr, at hostarch.AccessType, sp hostarch.Addr) error {
+ ar, ok := addr.RoundDown().ToRange(hostarch.PageSize)
if !ok {
return syserror.EFAULT
}
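
HandleUserFault's first step, shown above, is the one place this file builds a single-page range from a raw fault address. A small sketch of just that conversion, using a made-up fault address and the same gvisor module assumption; faultRange is an illustrative name, not part of the package.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// faultRange rounds a faulting address down to its page and builds a
// one-page range, as HandleUserFault does before any vma lookup. ToRange
// only fails if the range would wrap past the end of the address space.
func faultRange(addr hostarch.Addr) (hostarch.AddrRange, error) {
	ar, ok := addr.RoundDown().ToRange(hostarch.PageSize)
	if !ok {
		return hostarch.AddrRange{}, fmt.Errorf("EFAULT at %#x", addr)
	}
	return ar, nil
}

func main() {
	ar, _ := faultRange(0x400123) // illustrative faulting address
	fmt.Printf("fault page: [%#x, %#x)\n", ar.Start, ar.End)
}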
@@ -72,11 +72,11 @@ func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr usermem.Addr,
}
// MMap establishes a memory mapping.
-func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (usermem.Addr, error) {
+func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (hostarch.Addr, error) {
if opts.Length == 0 {
return 0, syserror.EINVAL
}
- length, ok := usermem.Addr(opts.Length).RoundUp()
+ length, ok := hostarch.Addr(opts.Length).RoundUp()
if !ok {
return 0, syserror.ENOMEM
}
@@ -84,7 +84,7 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (userme
if opts.Mappable != nil {
// Offset must be aligned.
- if usermem.Addr(opts.Offset).RoundDown() != usermem.Addr(opts.Offset) {
+ if hostarch.Addr(opts.Offset).RoundDown() != hostarch.Addr(opts.Offset) {
return 0, syserror.EINVAL
}
// Offset + length must not overflow.
@@ -157,7 +157,7 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (userme
// Preconditions:
// * mm.mappingMu must be locked.
// * vseg.Range().IsSupersetOf(ar).
-func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, precommit bool) {
+func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, precommit bool) {
if !vseg.ValuePtr().effectivePerms.Any() {
// Linux doesn't populate inaccessible pages. See
// mm/gup.c:populate_vma_page_range.
@@ -175,7 +175,7 @@ func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar u
}
// Ensure that we have usable pmas.
- pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, usermem.NoAccess)
+ pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, hostarch.NoAccess)
if err != nil {
// mm/util.c:vm_mmap_pgoff() ignores the error, if any, from
// mm/gup.c:mm_populate(). If it matters, we'll get it again when
@@ -203,7 +203,7 @@ func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar u
// * vseg.Range().IsSupersetOf(ar).
//
// Postconditions: mm.mappingMu will be unlocked.
-func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, precommit bool) {
+func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, precommit bool) {
// See populateVMA above for commentary.
if !vseg.ValuePtr().effectivePerms.Any() {
mm.mappingMu.Unlock()
@@ -221,7 +221,7 @@ func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaItera
// mm.mappingMu doesn't need to be write-locked for getPMAsLocked, and it
// isn't needed at all for mapASLocked.
mm.mappingMu.DowngradeLock()
- pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, usermem.NoAccess)
+ pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, hostarch.NoAccess)
mm.mappingMu.RUnlock()
if err != nil {
mm.activeMu.Unlock()
@@ -234,7 +234,7 @@ func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaItera
}
// MapStack allocates the initial process stack.
-func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error) {
+func (mm *MemoryManager) MapStack(ctx context.Context) (hostarch.AddrRange, error) {
// maxStackSize is the maximum supported process stack size in bytes.
//
// This limit exists because stack growing isn't implemented, so the entire
@@ -242,7 +242,7 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error
const maxStackSize = 128 << 20
stackSize := limits.FromContext(ctx).Get(limits.Stack)
- r, ok := usermem.Addr(stackSize.Cur).RoundUp()
+ r, ok := hostarch.Addr(stackSize.Cur).RoundUp()
sz := uint64(r)
if !ok {
// RLIM_INFINITY rounds up to 0.
@@ -251,16 +251,16 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error
ctx.Warningf("Capping stack size from RLIMIT_STACK of %v down to %v.", sz, maxStackSize)
sz = maxStackSize
} else if sz == 0 {
- return usermem.AddrRange{}, syserror.ENOMEM
+ return hostarch.AddrRange{}, syserror.ENOMEM
}
- szaddr := usermem.Addr(sz)
+ szaddr := hostarch.Addr(sz)
ctx.Debugf("Allocating stack with size of %v bytes", sz)
// Determine the stack's desired location. Unlike Linux, address
// randomization can't be disabled.
- stackEnd := mm.layout.MaxAddr - usermem.Addr(mrand.Int63n(int64(mm.layout.MaxStackRand))).RoundDown()
+ stackEnd := mm.layout.MaxAddr - hostarch.Addr(mrand.Int63n(int64(mm.layout.MaxStackRand))).RoundDown()
if stackEnd < szaddr {
- return usermem.AddrRange{}, syserror.ENOMEM
+ return hostarch.AddrRange{}, syserror.ENOMEM
}
stackStart := stackEnd - szaddr
mm.mappingMu.Lock()
@@ -268,8 +268,8 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error
_, ar, err := mm.createVMALocked(ctx, memmap.MMapOpts{
Length: sz,
Addr: stackStart,
- Perms: usermem.ReadWrite,
- MaxPerms: usermem.AnyAccess,
+ Perms: hostarch.ReadWrite,
+ MaxPerms: hostarch.AnyAccess,
Private: true,
GrowsDown: true,
MLockMode: mm.defMLockMode,
@@ -279,14 +279,14 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error
}
// MUnmap implements the semantics of Linux's munmap(2).
-func (mm *MemoryManager) MUnmap(ctx context.Context, addr usermem.Addr, length uint64) error {
+func (mm *MemoryManager) MUnmap(ctx context.Context, addr hostarch.Addr, length uint64) error {
if addr != addr.RoundDown() {
return syserror.EINVAL
}
if length == 0 {
return syserror.EINVAL
}
- la, ok := usermem.Addr(length).RoundUp()
+ la, ok := hostarch.Addr(length).RoundUp()
if !ok {
return syserror.EINVAL
}
@@ -308,7 +308,7 @@ type MRemapOpts struct {
// NewAddr is the new address for the remapping. NewAddr is ignored unless
// Move is MMRemapMustMove.
- NewAddr usermem.Addr
+ NewAddr hostarch.Addr
}
// MRemapMoveMode controls MRemap's moving behavior.
@@ -328,7 +328,7 @@ const (
)
// MRemap implements the semantics of Linux's mremap(2).
-func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSize uint64, newSize uint64, opts MRemapOpts) (usermem.Addr, error) {
+func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldSize uint64, newSize uint64, opts MRemapOpts) (hostarch.Addr, error) {
// "Note that old_address has to be page aligned." - mremap(2)
if oldAddr.RoundDown() != oldAddr {
return 0, syserror.EINVAL
@@ -336,9 +336,9 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
// Linux treats an old_size that rounds up to 0 as 0, which is otherwise a
// valid size. However, new_size can't be 0 after rounding.
- oldSizeAddr, _ := usermem.Addr(oldSize).RoundUp()
+ oldSizeAddr, _ := hostarch.Addr(oldSize).RoundUp()
oldSize = uint64(oldSizeAddr)
- newSizeAddr, ok := usermem.Addr(newSize).RoundUp()
+ newSizeAddr, ok := hostarch.Addr(newSize).RoundUp()
if !ok || newSizeAddr == 0 {
return 0, syserror.EINVAL
}
@@ -392,8 +392,8 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
if newSize < oldSize {
// If oldAddr+oldSize didn't overflow, oldAddr+newSize can't
// either.
- newEnd := oldAddr + usermem.Addr(newSize)
- mm.unmapLocked(ctx, usermem.AddrRange{newEnd, oldEnd})
+ newEnd := oldAddr + hostarch.Addr(newSize)
+ mm.unmapLocked(ctx, hostarch.AddrRange{newEnd, oldEnd})
}
return oldAddr, nil
}
@@ -438,7 +438,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
}
// Find a location for the new mapping.
- var newAR usermem.AddrRange
+ var newAR hostarch.AddrRange
switch opts.Move {
case MRemapMayMove:
newAddr, err := mm.findAvailableLocked(newSize, findAvailableOpts{})
@@ -457,7 +457,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
if !ok {
return 0, syserror.EINVAL
}
- if (usermem.AddrRange{oldAddr, oldEnd}).Overlaps(newAR) {
+ if (hostarch.AddrRange{oldAddr, oldEnd}).Overlaps(newAR) {
return 0, syserror.EINVAL
}
@@ -479,8 +479,8 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
// correct: compare Linux's mm/mremap.c:mremap_to() => do_munmap(),
// vma_to_resize().
if newSize < oldSize {
- oldNewEnd := oldAddr + usermem.Addr(newSize)
- mm.unmapLocked(ctx, usermem.AddrRange{oldNewEnd, oldEnd})
+ oldNewEnd := oldAddr + hostarch.Addr(newSize)
+ mm.unmapLocked(ctx, hostarch.AddrRange{oldNewEnd, oldEnd})
oldEnd = oldNewEnd
}
@@ -488,7 +488,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
vseg = mm.vmas.FindSegment(oldAddr)
}
- oldAR := usermem.AddrRange{oldAddr, oldEnd}
+ oldAR := hostarch.AddrRange{oldAddr, oldEnd}
// Check that oldEnd maps to the same vma as oldAddr.
if vseg.End() < oldEnd {
@@ -588,14 +588,14 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
}
// MProtect implements the semantics of Linux's mprotect(2).
-func (mm *MemoryManager) MProtect(addr usermem.Addr, length uint64, realPerms usermem.AccessType, growsDown bool) error {
+func (mm *MemoryManager) MProtect(addr hostarch.Addr, length uint64, realPerms hostarch.AccessType, growsDown bool) error {
if addr.RoundDown() != addr {
return syserror.EINVAL
}
if length == 0 {
return nil
}
- rlength, ok := usermem.Addr(length).RoundUp()
+ rlength, ok := hostarch.Addr(length).RoundUp()
if !ok {
return syserror.ENOMEM
}
@@ -692,19 +692,19 @@ func (mm *MemoryManager) MProtect(addr usermem.Addr, length uint64, realPerms us
}
// BrkSetup sets mm's brk address to addr and its brk size to 0.
-func (mm *MemoryManager) BrkSetup(ctx context.Context, addr usermem.Addr) {
+func (mm *MemoryManager) BrkSetup(ctx context.Context, addr hostarch.Addr) {
mm.mappingMu.Lock()
defer mm.mappingMu.Unlock()
// Unmap the existing brk.
if mm.brk.Length() != 0 {
mm.unmapLocked(ctx, mm.brk)
}
- mm.brk = usermem.AddrRange{addr, addr}
+ mm.brk = hostarch.AddrRange{addr, addr}
}
// Brk implements the semantics of Linux's brk(2), except that it returns an
// error on failure.
-func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Addr, error) {
+func (mm *MemoryManager) Brk(ctx context.Context, addr hostarch.Addr) (hostarch.Addr, error) {
mm.mappingMu.Lock()
// Can't defer mm.mappingMu.Unlock(); see below.
@@ -741,8 +741,8 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Ad
Fixed: true,
// Compare Linux's
// arch/x86/include/asm/page_types.h:VM_DATA_DEFAULT_FLAGS.
- Perms: usermem.ReadWrite,
- MaxPerms: usermem.AnyAccess,
+ Perms: hostarch.ReadWrite,
+ MaxPerms: hostarch.AnyAccess,
Private: true,
// Linux: mm/mmap.c:sys_brk() => do_brk_flags() includes
// mm->def_flags.
@@ -762,7 +762,7 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Ad
}
case newbrkpg < oldbrkpg:
- mm.unmapLocked(ctx, usermem.AddrRange{newbrkpg, oldbrkpg})
+ mm.unmapLocked(ctx, hostarch.AddrRange{newbrkpg, oldbrkpg})
fallthrough
default:
@@ -775,9 +775,9 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Ad
// MLock implements the semantics of Linux's mlock()/mlock2()/munlock(),
// depending on mode.
-func (mm *MemoryManager) MLock(ctx context.Context, addr usermem.Addr, length uint64, mode memmap.MLockMode) error {
+func (mm *MemoryManager) MLock(ctx context.Context, addr hostarch.Addr, length uint64, mode memmap.MLockMode) error {
// Linux allows this to overflow.
- la, _ := usermem.Addr(length + addr.PageOffset()).RoundUp()
+ la, _ := hostarch.Addr(length + addr.PageOffset()).RoundUp()
ar, ok := addr.RoundDown().ToRange(uint64(la))
if !ok {
return syserror.EINVAL
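
The la computation above is the general idiom for widening an arbitrary (addr, length) pair to the pages it touches: fold the sub-page offset into the length, round up, then anchor at the rounded-down address. The hypothetical lockRange helper below sketches that arithmetic with illustrative inputs, under the same gvisor module assumption.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// lockRange widens the byte range [addr, addr+length) to whole pages the
// way MLock does. As in MLock, RoundUp's overflow flag is deliberately
// ignored; Linux allows this computation to overflow.
func lockRange(addr hostarch.Addr, length uint64) (hostarch.AddrRange, bool) {
	la, _ := hostarch.Addr(length + addr.PageOffset()).RoundUp()
	return addr.RoundDown().ToRange(uint64(la))
}

func main() {
	// 0x1234 is 0x234 bytes into its page, so locking 0x100 bytes there
	// still covers the whole page [0x1000, 0x2000).
	ar, ok := lockRange(0x1234, 0x100)
	fmt.Printf("[%#x, %#x) ok=%v\n", ar.Start, ar.End, ok)
}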
@@ -850,7 +850,7 @@ func (mm *MemoryManager) MLock(ctx context.Context, addr usermem.Addr, length ui
mm.mappingMu.RUnlock()
return syserror.ENOMEM
}
- _, _, err := mm.getPMAsLocked(ctx, vseg, vseg.Range().Intersect(ar), usermem.NoAccess)
+ _, _, err := mm.getPMAsLocked(ctx, vseg, vseg.Range().Intersect(ar), hostarch.NoAccess)
if err != nil {
mm.activeMu.Unlock()
mm.mappingMu.RUnlock()
@@ -945,7 +945,7 @@ func (mm *MemoryManager) MLockAll(ctx context.Context, opts MLockAllOpts) error
mm.mappingMu.DowngradeLock()
for vseg := mm.vmas.FirstSegment(); vseg.Ok(); vseg = vseg.NextSegment() {
if vseg.ValuePtr().effectivePerms.Any() {
- mm.getPMAsLocked(ctx, vseg, vseg.Range(), usermem.NoAccess)
+ mm.getPMAsLocked(ctx, vseg, vseg.Range(), hostarch.NoAccess)
}
}
@@ -965,7 +965,7 @@ func (mm *MemoryManager) MLockAll(ctx context.Context, opts MLockAllOpts) error
}
// NumaPolicy implements the semantics of Linux's get_mempolicy(MPOL_F_ADDR).
-func (mm *MemoryManager) NumaPolicy(addr usermem.Addr) (linux.NumaPolicy, uint64, error) {
+func (mm *MemoryManager) NumaPolicy(addr hostarch.Addr) (linux.NumaPolicy, uint64, error) {
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
vseg := mm.vmas.FindSegment(addr)
@@ -977,12 +977,12 @@ func (mm *MemoryManager) NumaPolicy(addr usermem.Addr) (linux.NumaPolicy, uint64
}
// SetNumaPolicy implements the semantics of Linux's mbind().
-func (mm *MemoryManager) SetNumaPolicy(addr usermem.Addr, length uint64, policy linux.NumaPolicy, nodemask uint64) error {
+func (mm *MemoryManager) SetNumaPolicy(addr hostarch.Addr, length uint64, policy linux.NumaPolicy, nodemask uint64) error {
if !addr.IsPageAligned() {
return syserror.EINVAL
}
// Linux allows this to overflow.
- la, _ := usermem.Addr(length).RoundUp()
+ la, _ := hostarch.Addr(length).RoundUp()
ar, ok := addr.ToRange(uint64(la))
if !ok {
return syserror.EINVAL
@@ -1018,7 +1018,7 @@ func (mm *MemoryManager) SetNumaPolicy(addr usermem.Addr, length uint64, policy
}
// SetDontFork implements the semantics of madvise MADV_DONTFORK.
-func (mm *MemoryManager) SetDontFork(addr usermem.Addr, length uint64, dontfork bool) error {
+func (mm *MemoryManager) SetDontFork(addr hostarch.Addr, length uint64, dontfork bool) error {
ar, ok := addr.ToRange(length)
if !ok {
return syserror.EINVAL
@@ -1044,7 +1044,7 @@ func (mm *MemoryManager) SetDontFork(addr usermem.Addr, length uint64, dontfork
}
// Decommit implements the semantics of Linux's madvise(MADV_DONTNEED).
-func (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error {
+func (mm *MemoryManager) Decommit(addr hostarch.Addr, length uint64) error {
ar, ok := addr.ToRange(length)
if !ok {
return syserror.EINVAL
@@ -1112,14 +1112,14 @@ type MSyncOpts struct {
}
// MSync implements the semantics of Linux's msync().
-func (mm *MemoryManager) MSync(ctx context.Context, addr usermem.Addr, length uint64, opts MSyncOpts) error {
+func (mm *MemoryManager) MSync(ctx context.Context, addr hostarch.Addr, length uint64, opts MSyncOpts) error {
if addr != addr.RoundDown() {
return syserror.EINVAL
}
if length == 0 {
return nil
}
- la, ok := usermem.Addr(length).RoundUp()
+ la, ok := hostarch.Addr(length).RoundUp()
if !ok {
return syserror.ENOMEM
}
@@ -1188,7 +1188,7 @@ func (mm *MemoryManager) MSync(ctx context.Context, addr usermem.Addr, length ui
}
// GetSharedFutexKey is used by kernel.Task.GetSharedKey.
-func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr usermem.Addr) (futex.Key, error) {
+func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr hostarch.Addr) (futex.Key, error) {
ar, ok := addr.ToRange(4) // sizeof(int32).
if !ok {
return futex.Key{}, syserror.EFAULT
@@ -1196,7 +1196,7 @@ func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr usermem.Add
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
- vseg, _, err := mm.getVMAsLocked(ctx, ar, usermem.Read, false)
+ vseg, _, err := mm.getVMAsLocked(ctx, ar, hostarch.Read, false)
if err != nil {
return futex.Key{}, err
}
@@ -1230,7 +1230,7 @@ func (mm *MemoryManager) VirtualMemorySize() uint64 {
// VirtualMemorySizeRange returns the combined length in bytes of all mappings
// in ar in mm.
-func (mm *MemoryManager) VirtualMemorySizeRange(ar usermem.AddrRange) uint64 {
+func (mm *MemoryManager) VirtualMemorySizeRange(ar hostarch.AddrRange) uint64 {
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
return uint64(mm.vmas.SpanRange(ar))
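
Most of the remaining changes in this file swap usermem's permission values for their hostarch equivalents. The short sketch below only exercises pieces visible in this diff (the Read/Write fields, the named constants, and the Any() check populateVMA uses); it assumes the gvisor module as before.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	// AccessType is a plain value type, so permissions can be built and
	// compared directly.
	perms := hostarch.AccessType{Read: true, Write: true}
	fmt.Println(perms == hostarch.ReadWrite) // true

	// populateVMA skips vmas with no effective permissions; Any() is the
	// check it uses.
	fmt.Println(hostarch.NoAccess.Any())  // false
	fmt.Println(hostarch.AnyAccess.Any()) // true
}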
diff --git a/pkg/sentry/mm/vma.go b/pkg/sentry/mm/vma.go
index b8df72813..0d019e41d 100644
--- a/pkg/sentry/mm/vma.go
+++ b/pkg/sentry/mm/vma.go
@@ -19,18 +19,18 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Preconditions:
// * mm.mappingMu must be locked for writing.
// * opts must be valid as defined by the checks in MMap.
-func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOpts) (vmaIterator, usermem.AddrRange, error) {
+func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOpts) (vmaIterator, hostarch.AddrRange, error) {
if opts.MaxPerms != opts.MaxPerms.Effective() {
panic(fmt.Sprintf("Non-effective MaxPerms %s cannot be enforced", opts.MaxPerms))
}
@@ -47,7 +47,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp
if opts.Force && opts.Unmap && opts.Fixed {
addr = opts.Addr
} else {
- return vmaIterator{}, usermem.AddrRange{}, err
+ return vmaIterator{}, hostarch.AddrRange{}, err
}
}
ar, _ := addr.ToRange(opts.Length)
@@ -58,7 +58,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp
newUsageAS -= uint64(mm.vmas.SpanRange(ar))
}
if limitAS := limits.FromContext(ctx).Get(limits.AS).Cur; newUsageAS > limitAS {
- return vmaIterator{}, usermem.AddrRange{}, syserror.ENOMEM
+ return vmaIterator{}, hostarch.AddrRange{}, syserror.ENOMEM
}
if opts.MLockMode != memmap.MLockNone {
@@ -66,14 +66,14 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp
if creds := auth.CredentialsFromContext(ctx); !creds.HasCapabilityIn(linux.CAP_IPC_LOCK, creds.UserNamespace.Root()) {
mlockLimit := limits.FromContext(ctx).Get(limits.MemoryLocked).Cur
if mlockLimit == 0 {
- return vmaIterator{}, usermem.AddrRange{}, syserror.EPERM
+ return vmaIterator{}, hostarch.AddrRange{}, syserror.EPERM
}
newLockedAS := mm.lockedAS + opts.Length
if opts.Unmap {
newLockedAS -= mm.mlockedBytesRangeLocked(ar)
}
if newLockedAS > mlockLimit {
- return vmaIterator{}, usermem.AddrRange{}, syserror.EAGAIN
+ return vmaIterator{}, hostarch.AddrRange{}, syserror.EAGAIN
}
}
}
@@ -93,7 +93,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp
// The expression for writable is vma.canWriteMappableLocked(), but we
// don't yet have a vma.
if err := opts.Mappable.AddMapping(ctx, mm, ar, opts.Offset, !opts.Private && opts.MaxPerms.Write); err != nil {
- return vmaIterator{}, usermem.AddrRange{}, err
+ return vmaIterator{}, hostarch.AddrRange{}, err
}
}
@@ -137,7 +137,7 @@ type findAvailableOpts struct {
//
// - Unmap allows existing guard pages in the returned range.
- Addr usermem.Addr
+ Addr hostarch.Addr
Fixed bool
Unmap bool
Map32Bit bool
@@ -153,13 +153,13 @@ const (
// findAvailableLocked finds an allocatable range.
//
// Preconditions: mm.mappingMu must be locked.
-func (mm *MemoryManager) findAvailableLocked(length uint64, opts findAvailableOpts) (usermem.Addr, error) {
+func (mm *MemoryManager) findAvailableLocked(length uint64, opts findAvailableOpts) (hostarch.Addr, error) {
if opts.Fixed {
opts.Map32Bit = false
}
allowedAR := mm.applicationAddrRange()
if opts.Map32Bit {
- allowedAR = allowedAR.Intersect(usermem.AddrRange{map32Start, map32End})
+ allowedAR = allowedAR.Intersect(hostarch.AddrRange{map32Start, map32End})
}
// Does the provided suggestion work?
@@ -181,33 +181,33 @@ func (mm *MemoryManager) findAvailableLocked(length uint64, opts findAvailableOp
}
// Prefer hugepage alignment if a hugepage or more is requested.
- alignment := uint64(usermem.PageSize)
- if length >= usermem.HugePageSize {
- alignment = usermem.HugePageSize
+ alignment := uint64(hostarch.PageSize)
+ if length >= hostarch.HugePageSize {
+ alignment = hostarch.HugePageSize
}
if opts.Map32Bit {
return mm.findLowestAvailableLocked(length, alignment, allowedAR)
}
if mm.layout.DefaultDirection == arch.MmapBottomUp {
- return mm.findLowestAvailableLocked(length, alignment, usermem.AddrRange{mm.layout.BottomUpBase, mm.layout.MaxAddr})
+ return mm.findLowestAvailableLocked(length, alignment, hostarch.AddrRange{mm.layout.BottomUpBase, mm.layout.MaxAddr})
}
- return mm.findHighestAvailableLocked(length, alignment, usermem.AddrRange{mm.layout.MinAddr, mm.layout.TopDownBase})
+ return mm.findHighestAvailableLocked(length, alignment, hostarch.AddrRange{mm.layout.MinAddr, mm.layout.TopDownBase})
}
-func (mm *MemoryManager) applicationAddrRange() usermem.AddrRange {
- return usermem.AddrRange{mm.layout.MinAddr, mm.layout.MaxAddr}
+func (mm *MemoryManager) applicationAddrRange() hostarch.AddrRange {
+ return hostarch.AddrRange{mm.layout.MinAddr, mm.layout.MaxAddr}
}
// Preconditions: mm.mappingMu must be locked.
-func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bounds usermem.AddrRange) (usermem.Addr, error) {
- for gap := mm.vmas.LowerBoundGap(bounds.Start); gap.Ok() && gap.Start() < bounds.End; gap = gap.NextLargeEnoughGap(usermem.Addr(length)) {
+func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bounds hostarch.AddrRange) (hostarch.Addr, error) {
+ for gap := mm.vmas.LowerBoundGap(bounds.Start); gap.Ok() && gap.Start() < bounds.End; gap = gap.NextLargeEnoughGap(hostarch.Addr(length)) {
if gr := gap.availableRange().Intersect(bounds); uint64(gr.Length()) >= length {
// Can we shift up to match the alignment?
if offset := uint64(gr.Start) % alignment; offset != 0 {
if uint64(gr.Length()) >= length+alignment-offset {
// Yes, we're aligned.
- return gr.Start + usermem.Addr(alignment-offset), nil
+ return gr.Start + hostarch.Addr(alignment-offset), nil
}
}
@@ -219,15 +219,15 @@ func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bou
}
// Preconditions: mm.mappingMu must be locked.
-func (mm *MemoryManager) findHighestAvailableLocked(length, alignment uint64, bounds usermem.AddrRange) (usermem.Addr, error) {
- for gap := mm.vmas.UpperBoundGap(bounds.End); gap.Ok() && gap.End() > bounds.Start; gap = gap.PrevLargeEnoughGap(usermem.Addr(length)) {
+func (mm *MemoryManager) findHighestAvailableLocked(length, alignment uint64, bounds hostarch.AddrRange) (hostarch.Addr, error) {
+ for gap := mm.vmas.UpperBoundGap(bounds.End); gap.Ok() && gap.End() > bounds.Start; gap = gap.PrevLargeEnoughGap(hostarch.Addr(length)) {
if gr := gap.availableRange().Intersect(bounds); uint64(gr.Length()) >= length {
// Can we shift down to match the alignment?
- start := gr.End - usermem.Addr(length)
+ start := gr.End - hostarch.Addr(length)
if offset := uint64(start) % alignment; offset != 0 {
- if gr.Start <= start-usermem.Addr(offset) {
+ if gr.Start <= start-hostarch.Addr(offset) {
// Yes, we're aligned.
- return start - usermem.Addr(offset), nil
+ return start - hostarch.Addr(offset), nil
}
}
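
The two hunks above contain the only non-trivial arithmetic in this change: choosing page or hugepage alignment by request size and shifting a candidate gap to satisfy it. The sketch below re-implements the shift-up case over a plain [start, end) pair rather than the real gap iterator, so it is a simplification; alignIn and the addresses in main are illustrative.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// alignIn is a simplified findLowestAvailableLocked: given a free gap
// [start, end), return the lowest suitably aligned address that still
// leaves room for length bytes. Hugepage alignment is preferred once a
// hugepage or more is requested.
func alignIn(start, end hostarch.Addr, length uint64) (hostarch.Addr, bool) {
	alignment := uint64(hostarch.PageSize)
	if length >= hostarch.HugePageSize {
		alignment = hostarch.HugePageSize
	}
	if offset := uint64(start) % alignment; offset != 0 {
		if uint64(end-start) < length+alignment-offset {
			return 0, false
		}
		start += hostarch.Addr(alignment - offset)
	}
	if uint64(end-start) < length {
		return 0, false
	}
	return start, true
}

func main() {
	// A page-aligned but not hugepage-aligned gap: a 2 MiB request is
	// shifted up to the next hugepage boundary (addresses are illustrative).
	addr, ok := alignIn(0x201000, 0x800000, 2<<20)
	fmt.Printf("%#x ok=%v\n", addr, ok)
}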
@@ -239,7 +239,7 @@ func (mm *MemoryManager) findHighestAvailableLocked(length, alignment uint64, bo
}
// Preconditions: mm.mappingMu must be locked.
-func (mm *MemoryManager) mlockedBytesRangeLocked(ar usermem.AddrRange) uint64 {
+func (mm *MemoryManager) mlockedBytesRangeLocked(ar hostarch.AddrRange) uint64 {
var total uint64
for vseg := mm.vmas.LowerBoundSegment(ar.Start); vseg.Ok() && vseg.Start() < ar.End; vseg = vseg.NextSegment() {
if vseg.ValuePtr().mlockMode != memmap.MLockNone {
@@ -264,7 +264,7 @@ func (mm *MemoryManager) mlockedBytesRangeLocked(ar usermem.AddrRange) uint64 {
// Preconditions:
// * mm.mappingMu must be locked for reading; it may be temporarily unlocked.
// * ar.Length() != 0.
-func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool) (vmaIterator, vmaGapIterator, error) {
+func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool) (vmaIterator, vmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -320,7 +320,7 @@ func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar usermem.AddrRange
// temporarily unlocked.
//
// Postconditions: ars is not mutated.
-func (mm *MemoryManager) getVecVMAsLocked(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType, ignorePermissions bool) (usermem.AddrRangeSeq, error) {
+func (mm *MemoryManager) getVecVMAsLocked(ctx context.Context, ars hostarch.AddrRangeSeq, at hostarch.AccessType, ignorePermissions bool) (hostarch.AddrRangeSeq, error) {
for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
ar := arsit.Head()
if ar.Length() == 0 {
@@ -339,7 +339,7 @@ func (mm *MemoryManager) getVecVMAsLocked(ctx context.Context, ars usermem.AddrR
//
// guardBytes is equivalent to Linux's stack_guard_gap after upstream
// 1be7107fbe18 "mm: larger stack guard gap, between vmas".
-const guardBytes = 256 * usermem.PageSize
+const guardBytes = 256 * hostarch.PageSize
// unmapLocked unmaps all addresses in ar and returns the resulting gap in
// mm.vmas.
@@ -348,7 +348,7 @@ const guardBytes = 256 * usermem.PageSize
// * mm.mappingMu must be locked for writing.
// * ar.Length() != 0.
// * ar must be page-aligned.
-func (mm *MemoryManager) unmapLocked(ctx context.Context, ar usermem.AddrRange) vmaGapIterator {
+func (mm *MemoryManager) unmapLocked(ctx context.Context, ar hostarch.AddrRange) vmaGapIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -369,7 +369,7 @@ func (mm *MemoryManager) unmapLocked(ctx context.Context, ar usermem.AddrRange)
// * mm.mappingMu must be locked for writing.
// * ar.Length() != 0.
// * ar must be page-aligned.
-func (mm *MemoryManager) removeVMAsLocked(ctx context.Context, ar usermem.AddrRange) vmaGapIterator {
+func (mm *MemoryManager) removeVMAsLocked(ctx context.Context, ar hostarch.AddrRange) vmaGapIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -426,12 +426,12 @@ func (vma *vma) isPrivateDataLocked() bool {
// vmaSetFunctions implements segment.Functions for vmaSet.
type vmaSetFunctions struct{}
-func (vmaSetFunctions) MinKey() usermem.Addr {
+func (vmaSetFunctions) MinKey() hostarch.Addr {
return 0
}
-func (vmaSetFunctions) MaxKey() usermem.Addr {
- return ^usermem.Addr(0)
+func (vmaSetFunctions) MaxKey() hostarch.Addr {
+ return ^hostarch.Addr(0)
}
func (vmaSetFunctions) ClearValue(vma *vma) {
@@ -440,7 +440,7 @@ func (vmaSetFunctions) ClearValue(vma *vma) {
vma.hint = ""
}
-func (vmaSetFunctions) Merge(ar1 usermem.AddrRange, vma1 vma, ar2 usermem.AddrRange, vma2 vma) (vma, bool) {
+func (vmaSetFunctions) Merge(ar1 hostarch.AddrRange, vma1 vma, ar2 hostarch.AddrRange, vma2 vma) (vma, bool) {
if vma1.mappable != vma2.mappable ||
(vma1.mappable != nil && vma1.off+uint64(ar1.Length()) != vma2.off) ||
vma1.realPerms != vma2.realPerms ||
@@ -462,7 +462,7 @@ func (vmaSetFunctions) Merge(ar1 usermem.AddrRange, vma1 vma, ar2 usermem.AddrRa
return vma1, true
}
-func (vmaSetFunctions) Split(ar usermem.AddrRange, v vma, split usermem.Addr) (vma, vma) {
+func (vmaSetFunctions) Split(ar hostarch.AddrRange, v vma, split hostarch.Addr) (vma, vma) {
v2 := v
if v2.mappable != nil {
v2.off += uint64(split - ar.Start)
@@ -476,7 +476,7 @@ func (vmaSetFunctions) Split(ar usermem.AddrRange, v vma, split usermem.Addr) (v
// Preconditions:
// * vseg.ValuePtr().mappable != nil.
// * vseg.Range().Contains(addr).
-func (vseg vmaIterator) mappableOffsetAt(addr usermem.Addr) uint64 {
+func (vseg vmaIterator) mappableOffsetAt(addr hostarch.Addr) uint64 {
if checkInvariants {
if !vseg.Ok() {
panic("terminal vma iterator")
@@ -503,7 +503,7 @@ func (vseg vmaIterator) mappableRange() memmap.MappableRange {
// * vseg.ValuePtr().mappable != nil.
// * vseg.Range().IsSupersetOf(ar).
// * ar.Length() != 0.
-func (vseg vmaIterator) mappableRangeOf(ar usermem.AddrRange) memmap.MappableRange {
+func (vseg vmaIterator) mappableRangeOf(ar hostarch.AddrRange) memmap.MappableRange {
if checkInvariants {
if !vseg.Ok() {
panic("terminal vma iterator")
@@ -528,7 +528,7 @@ func (vseg vmaIterator) mappableRangeOf(ar usermem.AddrRange) memmap.MappableRan
// * vseg.ValuePtr().mappable != nil.
// * vseg.mappableRange().IsSupersetOf(mr).
// * mr.Length() != 0.
-func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange {
+func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) hostarch.AddrRange {
if checkInvariants {
if !vseg.Ok() {
panic("terminal vma iterator")
@@ -546,7 +546,7 @@ func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange {
vma := vseg.ValuePtr()
vstart := vseg.Start()
- return usermem.AddrRange{vstart + usermem.Addr(mr.Start-vma.off), vstart + usermem.Addr(mr.End-vma.off)}
+ return hostarch.AddrRange{vstart + hostarch.Addr(mr.Start-vma.off), vstart + hostarch.Addr(mr.End-vma.off)}
}
// seekNextLowerBound returns mm.vmas.LowerBoundSegment(addr), but does so by
@@ -555,7 +555,7 @@ func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange {
// Preconditions:
// * mm.mappingMu must be locked.
// * addr >= vseg.Start().
-func (vseg vmaIterator) seekNextLowerBound(addr usermem.Addr) vmaIterator {
+func (vseg vmaIterator) seekNextLowerBound(addr hostarch.Addr) vmaIterator {
if checkInvariants {
if !vseg.Ok() {
panic("terminal vma iterator")
@@ -572,7 +572,7 @@ func (vseg vmaIterator) seekNextLowerBound(addr usermem.Addr) vmaIterator {
// availableRange returns the subset of vgap.Range() in which new vmas may be
// created without MMapOpts.Unmap == true.
-func (vgap vmaGapIterator) availableRange() usermem.AddrRange {
+func (vgap vmaGapIterator) availableRange() hostarch.AddrRange {
ar := vgap.Range()
next := vgap.NextSegment()
if !next.Ok() || !next.ValuePtr().growsDown {
@@ -580,7 +580,7 @@ func (vgap vmaGapIterator) availableRange() usermem.AddrRange {
}
// Exclude guard pages.
if ar.Length() < guardBytes {
- return usermem.AddrRange{ar.Start, ar.Start}
+ return hostarch.AddrRange{ar.Start, ar.Start}
}
ar.End -= guardBytes
return ar
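
availableRange, shown last, is where the guard gap constant actually takes effect: a gap that precedes a grows-down vma loses its top guardBytes, and a gap smaller than the guard collapses to empty. The sketch below reproduces just that trimming over a plain hostarch.AddrRange, with growsDown passed explicitly instead of read from the next vma; the gap in main is illustrative.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// guardBytes matches the constant above: the stack guard gap kept below a
// grows-down vma (256 pages).
const guardBytes = 256 * hostarch.PageSize

// available trims the guard gap the way availableRange does: if the next
// vma grows down, the top guardBytes of the gap are unusable, and a gap
// shorter than the guard collapses to an empty range at its start.
func available(gap hostarch.AddrRange, nextGrowsDown bool) hostarch.AddrRange {
	if !nextGrowsDown {
		return gap
	}
	if gap.Length() < guardBytes {
		return hostarch.AddrRange{gap.Start, gap.Start}
	}
	gap.End -= guardBytes
	return gap
}

func main() {
	gap := hostarch.AddrRange{0x40000000, 0x40200000} // illustrative 2 MiB gap
	ar := available(gap, true)
	fmt.Printf("usable: [%#x, %#x)\n", ar.Start, ar.End)
}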