Diffstat (limited to 'pkg/sentry/mm')
-rw-r--r--  pkg/sentry/mm/BUILD                |  2
-rw-r--r--  pkg/sentry/mm/README.md            |  4
-rw-r--r--  pkg/sentry/mm/aio_context.go       | 17
-rw-r--r--  pkg/sentry/mm/lifecycle.go         |  5
-rw-r--r--  pkg/sentry/mm/mm.go                | 20
-rw-r--r--  pkg/sentry/mm/mm_test.go           |  4
-rw-r--r--  pkg/sentry/mm/pma.go               | 20
-rw-r--r--  pkg/sentry/mm/save_restore.go      | 10
-rw-r--r--  pkg/sentry/mm/special_mappable.go  | 36
-rw-r--r--  pkg/sentry/mm/syscalls.go          |  8
10 files changed, 67 insertions(+), 59 deletions(-)
diff --git a/pkg/sentry/mm/BUILD b/pkg/sentry/mm/BUILD
index a85ffdef8..c78cb4280 100644
--- a/pkg/sentry/mm/BUILD
+++ b/pkg/sentry/mm/BUILD
@@ -111,6 +111,7 @@ go_library(
"//pkg/sentry/kernel/shm",
"//pkg/sentry/limits",
"//pkg/sentry/memmap",
+ "//pkg/sentry/pgalloc",
"//pkg/sentry/platform",
"//pkg/sentry/platform/safecopy",
"//pkg/sentry/safemem",
@@ -133,6 +134,7 @@ go_test(
"//pkg/sentry/context/contexttest",
"//pkg/sentry/limits",
"//pkg/sentry/memmap",
+ "//pkg/sentry/pgalloc",
"//pkg/sentry/platform",
"//pkg/sentry/usermem",
"//pkg/syserror",
diff --git a/pkg/sentry/mm/README.md b/pkg/sentry/mm/README.md
index e485a5ca5..e6efbf565 100644
--- a/pkg/sentry/mm/README.md
+++ b/pkg/sentry/mm/README.md
@@ -153,7 +153,7 @@ manner, and the sentry handles the fault:
represented by a host file descriptor and offset, since (as noted in
"Background") this is the memory mapping primitive provided by the host
kernel. In general, memory is allocated from a temporary host file using the
- `filemem` package. Supposing that the sentry allocates offset 0x3000 from
+ `pgalloc` package. Supposing that the sentry allocates offset 0x3000 from
host file "memory-file", the resulting state is:
Sentry VMA: VA:0x400000 -> /tmp/foo:0x0
@@ -274,7 +274,7 @@ In the sentry:
methods
[`platform.AddressSpace.MapFile` and `platform.AddressSpace.Unmap`][platform].
-[filemem]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/platform/filemem/filemem.go
[memmap]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/memmap/memmap.go
[mm]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/mm/mm.go
+[pgalloc]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/pgalloc/pgalloc.go
[platform]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/platform/platform.go
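
Note: the README hunk above describes allocating sentry memory from a temporary host file via the pgalloc package. As a hedged illustration of that flow, using only the pgalloc/platform calls that appear in this commit (the helper name allocPages and the standalone example package are assumptions, not part of the change):

package example

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usage"
)

// allocPages reserves length bytes of anonymous memory in the sentry's
// memory file and returns the allocated range, e.g. [0x3000, 0x4000) in the
// README's example above.
func allocPages(mfp pgalloc.MemoryFileProvider, length uint64) (platform.FileRange, error) {
	return mfp.MemoryFile().Allocate(length, usage.Anonymous)
}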
diff --git a/pkg/sentry/mm/aio_context.go b/pkg/sentry/mm/aio_context.go
index 5e86d3b49..6cec6387a 100644
--- a/pkg/sentry/mm/aio_context.go
+++ b/pkg/sentry/mm/aio_context.go
@@ -21,6 +21,7 @@ import (
"gvisor.googlesource.com/gvisor/pkg/refs"
"gvisor.googlesource.com/gvisor/pkg/sentry/context"
"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
"gvisor.googlesource.com/gvisor/pkg/sentry/usage"
"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
@@ -201,24 +202,24 @@ func (ctx *AIOContext) WaitChannel() (chan struct{}, bool) {
type aioMappable struct {
refs.AtomicRefCount
- p platform.Platform
- fr platform.FileRange
+ mfp pgalloc.MemoryFileProvider
+ fr platform.FileRange
}
var aioRingBufferSize = uint64(usermem.Addr(linux.AIORingSize).MustRoundUp())
-func newAIOMappable(p platform.Platform) (*aioMappable, error) {
- fr, err := p.Memory().Allocate(aioRingBufferSize, usage.Anonymous)
+func newAIOMappable(mfp pgalloc.MemoryFileProvider) (*aioMappable, error) {
+ fr, err := mfp.MemoryFile().Allocate(aioRingBufferSize, usage.Anonymous)
if err != nil {
return nil, err
}
- return &aioMappable{p: p, fr: fr}, nil
+ return &aioMappable{mfp: mfp, fr: fr}, nil
}
// DecRef implements refs.RefCounter.DecRef.
func (m *aioMappable) DecRef() {
m.AtomicRefCount.DecRefWithDestructor(func() {
- m.p.Memory().DecRef(m.fr)
+ m.mfp.MemoryFile().DecRef(m.fr)
})
}
@@ -299,7 +300,7 @@ func (m *aioMappable) Translate(ctx context.Context, required, optional memmap.M
return []memmap.Translation{
{
Source: source,
- File: m.p.Memory(),
+ File: m.mfp.MemoryFile(),
Offset: m.fr.Start + source.Start,
},
}, err
@@ -320,7 +321,7 @@ func (mm *MemoryManager) NewAIOContext(ctx context.Context, events uint32) (uint
// libaio peeks inside looking for a magic number. This function allocates
// a page per context and keeps it set to zeroes to ensure it will not
// match AIO_RING_MAGIC and make libaio happy.
- m, err := newAIOMappable(mm.p)
+ m, err := newAIOMappable(mm.mfp)
if err != nil {
return 0, err
}
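
Note: the aio_context.go change moves the AIO ring buffer's backing memory from platform.Platform.Memory() to the MemoryFile obtained from a MemoryFileProvider. A minimal sketch of the resulting allocate-then-release lifecycle, assuming only the calls visible in the hunk (the helper withRingBuffer is hypothetical):

package example

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usage"
)

// withRingBuffer allocates a ring-buffer-sized anonymous range, hands it to
// fn, and drops the allocation's reference afterwards, mirroring what
// aioMappable does in newAIOMappable and its DecRef destructor.
func withRingBuffer(mfp pgalloc.MemoryFileProvider, size uint64, fn func(platform.FileRange) error) error {
	mf := mfp.MemoryFile()
	fr, err := mf.Allocate(size, usage.Anonymous)
	if err != nil {
		return err
	}
	defer mf.DecRef(fr)
	return fn(fr)
}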
diff --git a/pkg/sentry/mm/lifecycle.go b/pkg/sentry/mm/lifecycle.go
index 1ee8ae74e..a71286f14 100644
--- a/pkg/sentry/mm/lifecycle.go
+++ b/pkg/sentry/mm/lifecycle.go
@@ -23,14 +23,16 @@ import (
"gvisor.googlesource.com/gvisor/pkg/sentry/context"
"gvisor.googlesource.com/gvisor/pkg/sentry/limits"
"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
)
// NewMemoryManager returns a new MemoryManager with no mappings and 1 user.
-func NewMemoryManager(p platform.Platform) *MemoryManager {
+func NewMemoryManager(p platform.Platform, mfp pgalloc.MemoryFileProvider) *MemoryManager {
return &MemoryManager{
p: p,
+ mfp: mfp,
haveASIO: p.SupportsAddressSpaceIO(),
privateRefs: &privateRefs{},
users: 1,
@@ -60,6 +62,7 @@ func (mm *MemoryManager) Fork(ctx context.Context) (*MemoryManager, error) {
defer mm.mappingMu.RUnlock()
mm2 := &MemoryManager{
p: mm.p,
+ mfp: mm.mfp,
haveASIO: mm.haveASIO,
layout: mm.layout,
privateRefs: mm.privateRefs,
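
Note: with the signature change above, callers now supply a MemoryFileProvider alongside the Platform. A sketch of the new construction path, assuming a context that carries both (as the mm_test.go hunk further down does); newMM is illustrative:

package example

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/mm"
	"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
)

// newMM builds a MemoryManager from a context that provides both a Platform
// and a MemoryFileProvider.
func newMM(ctx context.Context) *mm.MemoryManager {
	p := platform.FromContext(ctx)
	mfp := pgalloc.MemoryFileProviderFromContext(ctx)
	return mm.NewMemoryManager(p, mfp)
}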
diff --git a/pkg/sentry/mm/mm.go b/pkg/sentry/mm/mm.go
index e2c636f38..6ed838d64 100644
--- a/pkg/sentry/mm/mm.go
+++ b/pkg/sentry/mm/mm.go
@@ -40,6 +40,7 @@ import (
"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
"gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
@@ -50,10 +51,9 @@ import (
//
// +stateify savable
type MemoryManager struct {
- // p is the platform.
- //
- // p is immutable.
- p platform.Platform
+ // p and mfp are immutable.
+ p platform.Platform
+ mfp pgalloc.MemoryFileProvider
// haveASIO is the cached result of p.SupportsAddressSpaceIO(). Aside from
// eliminating an indirect call in the hot I/O path, this makes
@@ -369,8 +369,8 @@ func (v *vma) loadRealPerms(b int) {
// +stateify savable
type pma struct {
// file is the file mapped by this pma. Only pmas for which file ==
- // platform.Platform.Memory() may be saved. pmas hold a reference to the
- // corresponding file range while they exist.
+ // MemoryManager.mfp.MemoryFile() may be saved. pmas hold a reference to
+ // the corresponding file range while they exist.
file platform.File `state:"nosave"`
// off is the offset into file at which this pma begins.
@@ -387,7 +387,7 @@ type pma struct {
// private is true if this pma represents private memory.
//
- // If private is true, file must be platform.Platform.Memory(), the pma
+ // If private is true, file must be MemoryManager.mfp.MemoryFile(), the pma
// holds a reference on the mapped memory that is tracked in privateRefs,
// and calls to Invalidate for which
// memmap.InvalidateOpts.InvalidatePrivate is false should ignore the pma.
@@ -405,9 +405,9 @@ type pma struct {
type privateRefs struct {
mu sync.Mutex `state:"nosave"`
- // refs maps offsets into Platform.Memory() to the number of pmas (or,
- // equivalently, MemoryManagers) that share ownership of the memory at that
- // offset.
+ // refs maps offsets into MemoryManager.mfp.MemoryFile() to the number of
+ // pmas (or, equivalently, MemoryManagers) that share ownership of the
+ // memory at that offset.
refs fileRefcountSet
}
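
Note: the comment updates above restate the key invariant in terms of the new API: a private (and hence saveable) pma must be backed by the MemoryManager's own MemoryFile. Expressed as a tiny predicate assumed to live in package mm (isSaveable is hypothetical; the real check is the comparison in save_restore.go below):

// isSaveable reports whether a pma backed by file satisfies the invariant
// documented above, i.e. whether it is backed by mm.mfp.MemoryFile().
func isSaveable(mm *MemoryManager, file platform.File) bool {
	return file == mm.mfp.MemoryFile()
}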
diff --git a/pkg/sentry/mm/mm_test.go b/pkg/sentry/mm/mm_test.go
index f2db43196..e12cb3bd1 100644
--- a/pkg/sentry/mm/mm_test.go
+++ b/pkg/sentry/mm/mm_test.go
@@ -22,6 +22,7 @@ import (
"gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest"
"gvisor.googlesource.com/gvisor/pkg/sentry/limits"
"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
"gvisor.googlesource.com/gvisor/pkg/syserror"
@@ -29,7 +30,8 @@ import (
func testMemoryManager(ctx context.Context) *MemoryManager {
p := platform.FromContext(ctx)
- mm := NewMemoryManager(p)
+ mfp := pgalloc.MemoryFileProviderFromContext(ctx)
+ mm := NewMemoryManager(p, mfp)
mm.layout = arch.MmapLayout{
MinAddr: p.MinUserAddress(),
MaxAddr: p.MaxUserAddress(),
diff --git a/pkg/sentry/mm/pma.go b/pkg/sentry/mm/pma.go
index d102035d8..bb779a45b 100644
--- a/pkg/sentry/mm/pma.go
+++ b/pkg/sentry/mm/pma.go
@@ -328,8 +328,8 @@ func (mm *MemoryManager) insertPMAsLocked(ctx context.Context, vseg vmaIterator,
// Limit the range we allocate to ar, aligned to privateAllocUnit.
maskAR := privateAligned(ar)
allocAR := optAR.Intersect(maskAR)
- mem := mm.p.Memory()
- fr, err := mem.Allocate(uint64(allocAR.Length()), usage.Anonymous)
+ mf := mm.mfp.MemoryFile()
+ fr, err := mf.Allocate(uint64(allocAR.Length()), usage.Anonymous)
if err != nil {
return pgap, err
}
@@ -342,10 +342,10 @@ func (mm *MemoryManager) insertPMAsLocked(ctx context.Context, vseg vmaIterator,
}
mm.addRSSLocked(allocAR)
- mem.IncRef(fr)
+ mf.IncRef(fr)
return mm.pmas.Insert(pgap, allocAR, pma{
- file: mem,
+ file: mf,
off: fr.Start,
vmaEffectivePerms: vma.effectivePerms,
vmaMaxPerms: vma.maxPerms,
@@ -426,7 +426,7 @@ func (mm *MemoryManager) breakCopyOnWriteLocked(pseg pmaIterator, ar usermem.Add
// Limit the range we copy to ar, aligned to privateAllocUnit.
maskAR := privateAligned(ar)
var invalidatedIterators, didUnmapAS bool
- mem := mm.p.Memory()
+ mf := mm.mfp.MemoryFile()
for {
if mm.isPMACopyOnWriteLocked(pseg) {
// Determine the range to copy.
@@ -438,7 +438,7 @@ func (mm *MemoryManager) breakCopyOnWriteLocked(pseg pmaIterator, ar usermem.Add
}
// Copy contents.
- fr, err := platform.AllocateAndFill(mem, uint64(copyAR.Length()), usage.Anonymous, &safemem.BlockSeqReader{mm.internalMappingsLocked(pseg, copyAR)})
+ fr, err := mf.AllocateAndFill(uint64(copyAR.Length()), usage.Anonymous, &safemem.BlockSeqReader{mm.internalMappingsLocked(pseg, copyAR)})
if _, ok := err.(safecopy.BusError); ok {
// If we got SIGBUS during the copy, deliver SIGBUS to
// userspace (instead of SIGSEGV) if we're breaking
@@ -449,7 +449,7 @@ func (mm *MemoryManager) breakCopyOnWriteLocked(pseg pmaIterator, ar usermem.Add
return pseg.PrevGap(), invalidatedIterators, err
}
mm.incPrivateRef(fr)
- mem.IncRef(fr)
+ mf.IncRef(fr)
// Unmap all of maskAR, not just copyAR, to minimize host syscalls.
// AddressSpace mappings must be removed before mm.decPrivateRef().
@@ -471,7 +471,7 @@ func (mm *MemoryManager) breakCopyOnWriteLocked(pseg pmaIterator, ar usermem.Add
}
pma.file.DecRef(pseg.fileRange())
- pma.file = mem
+ pma.file = mf
pma.off = fr.Start
pma.private = true
pma.needCOW = false
@@ -881,9 +881,9 @@ func (mm *MemoryManager) decPrivateRef(fr platform.FileRange) {
refSet.MergeAdjacent(fr)
mm.privateRefs.mu.Unlock()
- mem := mm.p.Memory()
+ mf := mm.mfp.MemoryFile()
for _, fr := range freed {
- mem.DecRef(fr)
+ mf.DecRef(fr)
}
}
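
Note: the pma.go hunks switch copy-on-write breaking to MemoryFile.AllocateAndFill. A hedged sketch of that pattern, using only calls visible in the hunk (copyPrivate and src are illustrative):

package example

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
	"gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usage"
)

// copyPrivate allocates a fresh anonymous range, fills it from src, and takes
// the extra reference that the adopting pma will own, as
// breakCopyOnWriteLocked does above.
func copyPrivate(mf *pgalloc.MemoryFile, src safemem.BlockSeq, length uint64) (platform.FileRange, error) {
	fr, err := mf.AllocateAndFill(length, usage.Anonymous, &safemem.BlockSeqReader{src})
	if err != nil {
		return platform.FileRange{}, err
	}
	mf.IncRef(fr)
	return fr, nil
}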
diff --git a/pkg/sentry/mm/save_restore.go b/pkg/sentry/mm/save_restore.go
index 6e7080a84..46e0e0754 100644
--- a/pkg/sentry/mm/save_restore.go
+++ b/pkg/sentry/mm/save_restore.go
@@ -37,12 +37,12 @@ func (mm *MemoryManager) InvalidateUnsavable(ctx context.Context) error {
// beforeSave is invoked by stateify.
func (mm *MemoryManager) beforeSave() {
- mem := mm.p.Memory()
+ mf := mm.mfp.MemoryFile()
for pseg := mm.pmas.FirstSegment(); pseg.Ok(); pseg = pseg.NextSegment() {
- if pma := pseg.ValuePtr(); pma.file != mem {
+ if pma := pseg.ValuePtr(); pma.file != mf {
// InvalidateUnsavable should have caused all such pmas to be
// invalidated.
- panic(fmt.Sprintf("Can't save pma %#v with non-Memory file of type %T:\n%s", pseg.Range(), pma.file, mm))
+ panic(fmt.Sprintf("Can't save pma %#v with non-MemoryFile of type %T:\n%s", pseg.Range(), pma.file, mm))
}
}
}
@@ -50,8 +50,8 @@ func (mm *MemoryManager) beforeSave() {
// afterLoad is invoked by stateify.
func (mm *MemoryManager) afterLoad() {
mm.haveASIO = mm.p.SupportsAddressSpaceIO()
- mem := mm.p.Memory()
+ mf := mm.mfp.MemoryFile()
for pseg := mm.pmas.FirstSegment(); pseg.Ok(); pseg = pseg.NextSegment() {
- pseg.ValuePtr().file = mem
+ pseg.ValuePtr().file = mf
}
}
diff --git a/pkg/sentry/mm/special_mappable.go b/pkg/sentry/mm/special_mappable.go
index 64d0dd3f6..aa94d7d6a 100644
--- a/pkg/sentry/mm/special_mappable.go
+++ b/pkg/sentry/mm/special_mappable.go
@@ -18,6 +18,7 @@ import (
"gvisor.googlesource.com/gvisor/pkg/refs"
"gvisor.googlesource.com/gvisor/pkg/sentry/context"
"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
"gvisor.googlesource.com/gvisor/pkg/sentry/usage"
"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
@@ -33,24 +34,24 @@ import (
type SpecialMappable struct {
refs.AtomicRefCount
- p platform.Platform
+ mfp pgalloc.MemoryFileProvider
fr platform.FileRange
name string
}
// NewSpecialMappable returns a SpecialMappable that owns fr, which represents
-// offsets in p.Memory() that contain the SpecialMappable's data. The
+// offsets in mfp.MemoryFile() that contain the SpecialMappable's data. The
// SpecialMappable will use the given name in /proc/[pid]/maps.
//
// Preconditions: fr.Length() != 0.
-func NewSpecialMappable(name string, p platform.Platform, fr platform.FileRange) *SpecialMappable {
- return &SpecialMappable{p: p, fr: fr, name: name}
+func NewSpecialMappable(name string, mfp pgalloc.MemoryFileProvider, fr platform.FileRange) *SpecialMappable {
+ return &SpecialMappable{mfp: mfp, fr: fr, name: name}
}
// DecRef implements refs.RefCounter.DecRef.
func (m *SpecialMappable) DecRef() {
m.AtomicRefCount.DecRefWithDestructor(func() {
- m.p.Memory().DecRef(m.fr)
+ m.mfp.MemoryFile().DecRef(m.fr)
})
}
@@ -99,7 +100,7 @@ func (m *SpecialMappable) Translate(ctx context.Context, required, optional memm
return []memmap.Translation{
{
Source: source,
- File: m.p.Memory(),
+ File: m.mfp.MemoryFile(),
Offset: m.fr.Start + source.Start,
},
}, err
@@ -109,19 +110,19 @@ func (m *SpecialMappable) Translate(ctx context.Context, required, optional memm
// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable.
func (m *SpecialMappable) InvalidateUnsavable(ctx context.Context) error {
- // Since data is stored in platform.Platform.Memory(), the contents of
- // which are preserved across save/restore, we don't need to do anything.
+ // Since data is stored in pgalloc.MemoryFile, the contents of which are
+ // preserved across save/restore, we don't need to do anything.
return nil
}
-// Platform returns the Platform whose Memory stores the SpecialMappable's
-// contents.
-func (m *SpecialMappable) Platform() platform.Platform {
- return m.p
+// MemoryFileProvider returns the MemoryFileProvider whose MemoryFile stores
+// the SpecialMappable's contents.
+func (m *SpecialMappable) MemoryFileProvider() pgalloc.MemoryFileProvider {
+ return m.mfp
}
-// FileRange returns the offsets into Platform().Memory() that store the
-// SpecialMappable's contents.
+// FileRange returns the offsets into MemoryFileProvider().MemoryFile() that
+// store the SpecialMappable's contents.
func (m *SpecialMappable) FileRange() platform.FileRange {
return m.fr
}
@@ -137,7 +138,7 @@ func (m *SpecialMappable) Length() uint64 {
// TODO: The use of SpecialMappable is a lazy code reuse hack. Linux
// uses an ephemeral file created by mm/shmem.c:shmem_zero_setup(); we should
// do the same to get non-zero device and inode IDs.
-func NewSharedAnonMappable(length uint64, p platform.Platform) (*SpecialMappable, error) {
+func NewSharedAnonMappable(length uint64, mfp pgalloc.MemoryFileProvider) (*SpecialMappable, error) {
if length == 0 {
return nil, syserror.EINVAL
}
@@ -145,10 +146,9 @@ func NewSharedAnonMappable(length uint64, p platform.Platform) (*SpecialMappable
if !ok {
return nil, syserror.EINVAL
}
-
- fr, err := p.Memory().Allocate(uint64(alignedLen), usage.Anonymous)
+ fr, err := mfp.MemoryFile().Allocate(uint64(alignedLen), usage.Anonymous)
if err != nil {
return nil, err
}
- return NewSpecialMappable("/dev/zero (deleted)", p, fr), nil
+ return NewSpecialMappable("/dev/zero (deleted)", mfp, fr), nil
}
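
Note: NewSharedAnonMappable now takes a MemoryFileProvider, which MMap obtains from the context (see the syscalls.go hunk below). A small usage sketch under that assumption; makeSharedAnon is illustrative:

package example

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/mm"
	"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
)

// makeSharedAnon creates the shared anonymous mappable that backs
// MAP_SHARED|MAP_ANONYMOUS mappings, pulling the MemoryFileProvider out of
// the context the same way the updated MMap does.
func makeSharedAnon(ctx context.Context, length uint64) (*mm.SpecialMappable, error) {
	mfp := pgalloc.MemoryFileProviderFromContext(ctx)
	return mm.NewSharedAnonMappable(length, mfp)
}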
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
index fd6929e08..b56e0d3b9 100644
--- a/pkg/sentry/mm/syscalls.go
+++ b/pkg/sentry/mm/syscalls.go
@@ -24,7 +24,7 @@ import (
"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/futex"
"gvisor.googlesource.com/gvisor/pkg/sentry/limits"
"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
- "gvisor.googlesource.com/gvisor/pkg/sentry/platform"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
"gvisor.googlesource.com/gvisor/pkg/syserror"
)
@@ -99,7 +99,7 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (userme
if opts.MappingIdentity != nil {
return 0, syserror.EINVAL
}
- m, err := NewSharedAnonMappable(opts.Length, platform.FromContext(ctx))
+ m, err := NewSharedAnonMappable(opts.Length, pgalloc.MemoryFileProviderFromContext(ctx))
if err != nil {
return 0, err
}
@@ -965,7 +965,7 @@ func (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error {
// ensures that Decommit immediately reduces host memory usage.
var didUnmapAS bool
pseg := mm.pmas.LowerBoundSegment(ar.Start)
- mem := mm.p.Memory()
+ mf := mm.mfp.MemoryFile()
for vseg := mm.vmas.LowerBoundSegment(ar.Start); vseg.Ok() && vseg.Start() < ar.End; vseg = vseg.NextSegment() {
vma := vseg.ValuePtr()
if vma.mlockMode != memmap.MLockNone {
@@ -984,7 +984,7 @@ func (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error {
if pma.private && !mm.isPMACopyOnWriteLocked(pseg) {
psegAR := pseg.Range().Intersect(ar)
if vsegAR.IsSupersetOf(psegAR) && vma.mappable == nil {
- if err := mem.Decommit(pseg.fileRangeOf(psegAR)); err == nil {
+ if err := mf.Decommit(pseg.fileRangeOf(psegAR)); err == nil {
pseg = pseg.NextSegment()
continue
}
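
Note: Decommit now releases host pages through the MemoryFile rather than through the platform's memory. A minimal sketch assuming only the Decommit call visible above (releaseRange is hypothetical):

package example

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
)

// releaseRange asks the memory file to release the host pages backing fr,
// which is what the updated Decommit path does for eligible private pmas.
func releaseRange(mfp pgalloc.MemoryFileProvider, fr platform.FileRange) error {
	return mfp.MemoryFile().Decommit(fr)
}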