author     Jamie Liu <jamieliu@google.com>     2019-03-14 08:11:36 -0700
committer  Shentubot <shentubot@google.com>    2019-03-14 08:12:48 -0700
commit     8f4634997bd97810a85a70b71f000378d9db2e55 (patch)
tree       903096f91ee8f201fa622296e0f04cf7c7cd9013 /pkg/sentry/fs
parent     fb9919881c7dc98eaf97cad2a70d187bd78f1566 (diff)
Decouple filemem from platform and move it to pgalloc.MemoryFile.
This is in preparation for improved page cache reclaim, which requires
greater integration between the page cache and page allocator.
PiperOrigin-RevId: 238444706
Change-Id: Id24141b3678d96c7d7dc24baddd9be555bffafe4
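The migration follows the same pattern in all twelve files: code that previously reached memory through platform.Platform now goes through a pgalloc.MemoryFileProvider. The sketch below is illustrative only and not part of this change; the helper name allocatePage is hypothetical, while the pgalloc, usage, usermem, and platform identifiers are the ones that appear in the diff that follows.

package example

import (
    "gvisor.googlesource.com/gvisor/pkg/sentry/context"
    "gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
    "gvisor.googlesource.com/gvisor/pkg/sentry/platform"
    "gvisor.googlesource.com/gvisor/pkg/sentry/usage"
    "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
)

// allocatePage (hypothetical) allocates one anonymous page, mirroring what
// binder.Proc.AddMapping does after this change. Before the change, the
// equivalent call was platform.FromContext(ctx).Memory().Allocate(...).
func allocatePage(ctx context.Context) (platform.FileRange, error) {
    mfp := pgalloc.MemoryFileProviderFromContext(ctx)
    if mfp == nil {
        // As in fsutil.NewCachingInodeOperations, a missing provider is a
        // programming error rather than a recoverable condition.
        panic("context does not contain a pgalloc.MemoryFileProvider")
    }
    mf := mfp.MemoryFile()
    fr, err := mf.Allocate(usermem.PageSize, usage.Anonymous)
    if err != nil {
        return platform.FileRange{}, err
    }
    // The caller now owns a reference on fr and must eventually release it
    // with mf.DecRef(fr), as binder's Proc.Release does.
    return fr, nil
}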
Diffstat (limited to 'pkg/sentry/fs')
-rw-r--r--  pkg/sentry/fs/ashmem/BUILD             |  1
-rw-r--r--  pkg/sentry/fs/binder/BUILD             |  1
-rw-r--r--  pkg/sentry/fs/binder/binder.go         | 21
-rw-r--r--  pkg/sentry/fs/dev/BUILD                |  2
-rw-r--r--  pkg/sentry/fs/dev/null.go              |  4
-rw-r--r--  pkg/sentry/fs/fsutil/BUILD             |  1
-rw-r--r--  pkg/sentry/fs/fsutil/README.md         | 11
-rw-r--r--  pkg/sentry/fs/fsutil/file_range_set.go | 23
-rw-r--r--  pkg/sentry/fs/fsutil/inode_cached.go   | 42
-rw-r--r--  pkg/sentry/fs/proc/meminfo.go          |  6
-rw-r--r--  pkg/sentry/fs/tmpfs/inode_file.go      | 24
-rw-r--r--  pkg/sentry/fs/tmpfs/tmpfs.go           |  2
12 files changed, 72 insertions, 66 deletions
diff --git a/pkg/sentry/fs/ashmem/BUILD b/pkg/sentry/fs/ashmem/BUILD
index dcf620dca..ef1c31a3e 100644
--- a/pkg/sentry/fs/ashmem/BUILD
+++ b/pkg/sentry/fs/ashmem/BUILD
@@ -23,7 +23,6 @@ go_library(
         "//pkg/sentry/fs/tmpfs",
         "//pkg/sentry/kernel/time",
         "//pkg/sentry/memmap",
-        "//pkg/sentry/platform",
         "//pkg/sentry/usage",
         "//pkg/sentry/usermem",
         "//pkg/syserror",
diff --git a/pkg/sentry/fs/binder/BUILD b/pkg/sentry/fs/binder/BUILD
index 8a448175f..3710664d3 100644
--- a/pkg/sentry/fs/binder/BUILD
+++ b/pkg/sentry/fs/binder/BUILD
@@ -17,6 +17,7 @@ go_library(
         "//pkg/sentry/fs/fsutil",
         "//pkg/sentry/kernel",
         "//pkg/sentry/memmap",
+        "//pkg/sentry/pgalloc",
         "//pkg/sentry/platform",
         "//pkg/sentry/usage",
         "//pkg/sentry/usermem",
diff --git a/pkg/sentry/fs/binder/binder.go b/pkg/sentry/fs/binder/binder.go
index 19cd55e65..16fb4806f 100644
--- a/pkg/sentry/fs/binder/binder.go
+++ b/pkg/sentry/fs/binder/binder.go
@@ -25,6 +25,7 @@ import (
     "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil"
     "gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
     "gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+    "gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
     "gvisor.googlesource.com/gvisor/pkg/sentry/platform"
     "gvisor.googlesource.com/gvisor/pkg/sentry/usage"
     "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
@@ -74,9 +75,9 @@ func NewDevice(ctx context.Context, owner fs.FileOwner, fp fs.FilePermissions) *
 // ioctl.
 func (bd *Device) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
     return fs.NewFile(ctx, d, flags, &Proc{
-        bd:       bd,
-        task:     kernel.TaskFromContext(ctx),
-        platform: platform.FromContext(ctx),
+        bd:   bd,
+        task: kernel.TaskFromContext(ctx),
+        mfp:  pgalloc.MemoryFileProviderFromContext(ctx),
     }), nil
 }
 
@@ -88,14 +89,14 @@ type Proc struct {
     fsutil.FileNoFsync       `state:"nosave"`
     fsutil.FileNotDirReaddir `state:"nosave"`
 
-    bd       *Device
-    task     *kernel.Task
-    platform platform.Platform
+    bd   *Device
+    task *kernel.Task
+    mfp  pgalloc.MemoryFileProvider
 
     // mu protects fr.
     mu sync.Mutex `state:"nosave"`
 
-    // mapped is memory allocated from platform.Memory() by AddMapping.
+    // mapped is memory allocated from mfp.MemoryFile() by AddMapping.
     mapped platform.FileRange
 }
@@ -104,7 +105,7 @@ func (bp *Proc) Release() {
     bp.mu.Lock()
     defer bp.mu.Unlock()
     if bp.mapped.Length() != 0 {
-        bp.platform.Memory().DecRef(bp.mapped)
+        bp.mfp.MemoryFile().DecRef(bp.mapped)
     }
 }
@@ -204,7 +205,7 @@ func (bp *Proc) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar userm
     }
     // Binder only allocates and maps a single page up-front
     // (drivers/android/binder.c:binder_mmap() => binder_update_page_range()).
-    fr, err := bp.platform.Memory().Allocate(usermem.PageSize, usage.Anonymous)
+    fr, err := bp.mfp.MemoryFile().Allocate(usermem.PageSize, usage.Anonymous)
     if err != nil {
         return err
     }
@@ -241,7 +242,7 @@ func (bp *Proc) Translate(ctx context.Context, required, optional memmap.Mappabl
     return []memmap.Translation{
         {
             Source: memmap.MappableRange{0, usermem.PageSize},
-            File:   bp.platform.Memory(),
+            File:   bp.mfp.MemoryFile(),
             Offset: bp.mapped.Start,
         },
     }, err
diff --git a/pkg/sentry/fs/dev/BUILD b/pkg/sentry/fs/dev/BUILD
index e5b962c8c..6c4fdaba9 100644
--- a/pkg/sentry/fs/dev/BUILD
+++ b/pkg/sentry/fs/dev/BUILD
@@ -27,7 +27,7 @@ go_library(
         "//pkg/sentry/fs/tmpfs",
         "//pkg/sentry/memmap",
         "//pkg/sentry/mm",
-        "//pkg/sentry/platform",
+        "//pkg/sentry/pgalloc",
         "//pkg/sentry/safemem",
         "//pkg/sentry/usermem",
         "//pkg/syserror",
diff --git a/pkg/sentry/fs/dev/null.go b/pkg/sentry/fs/dev/null.go
index 73fd09058..83f43c203 100644
--- a/pkg/sentry/fs/dev/null.go
+++ b/pkg/sentry/fs/dev/null.go
@@ -21,7 +21,7 @@ import (
     "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil"
     "gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
     "gvisor.googlesource.com/gvisor/pkg/sentry/mm"
-    "gvisor.googlesource.com/gvisor/pkg/sentry/platform"
+    "gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
     "gvisor.googlesource.com/gvisor/pkg/waiter"
 )
@@ -115,7 +115,7 @@ var _ fs.FileOperations = (*zeroFileOperations)(nil)
 
 // ConfigureMMap implements fs.FileOperations.ConfigureMMap.
 func (*zeroFileOperations) ConfigureMMap(ctx context.Context, file *fs.File, opts *memmap.MMapOpts) error {
-    m, err := mm.NewSharedAnonMappable(opts.Length, platform.FromContext(ctx))
+    m, err := mm.NewSharedAnonMappable(opts.Length, pgalloc.MemoryFileProviderFromContext(ctx))
     if err != nil {
         return err
     }
diff --git a/pkg/sentry/fs/fsutil/BUILD b/pkg/sentry/fs/fsutil/BUILD
index d41fc17cc..01098675d 100644
--- a/pkg/sentry/fs/fsutil/BUILD
+++ b/pkg/sentry/fs/fsutil/BUILD
@@ -85,6 +85,7 @@ go_library(
         "//pkg/sentry/fs",
         "//pkg/sentry/kernel/time",
         "//pkg/sentry/memmap",
+        "//pkg/sentry/pgalloc",
         "//pkg/sentry/platform",
         "//pkg/sentry/safemem",
         "//pkg/sentry/socket/unix/transport",
diff --git a/pkg/sentry/fs/fsutil/README.md b/pkg/sentry/fs/fsutil/README.md
index 6e677890c..8be367334 100644
--- a/pkg/sentry/fs/fsutil/README.md
+++ b/pkg/sentry/fs/fsutil/README.md
@@ -112,11 +112,12 @@ finds the file that was mapped and its `CachingInodeOperations`. It then calls
 It may choose to allocate more memory (i.e. do "readahead") to minimize
 subsequent faults.
 
-Memory that is allocated comes from a host tmpfs file (see `filemem.FileMem`).
-The host tmpfs file memory is brought up to date with the contents of the mapped
-file on its filesystem. The region of the host tmpfs file that reflects the
-mapped file is then mapped into the host address space of the application so
-that subsequent memory accesses do not repeatedly generate a `SIGSEGV`.
+Memory that is allocated comes from a host tmpfs file (see
+`pgalloc.MemoryFile`). The host tmpfs file memory is brought up to date with the
+contents of the mapped file on its filesystem. The region of the host tmpfs file
+that reflects the mapped file is then mapped into the host address space of the
+application so that subsequent memory accesses do not repeatedly generate a
+`SIGSEGV`.
 
 The range that was allocated, including any extra memory allocation to minimize
 faults, is marked dirty due to the write fault. This overcounts dirty memory if
diff --git a/pkg/sentry/fs/fsutil/file_range_set.go b/pkg/sentry/fs/fsutil/file_range_set.go
index dd7ab4b4a..32ebf64ff 100644
--- a/pkg/sentry/fs/fsutil/file_range_set.go
+++ b/pkg/sentry/fs/fsutil/file_range_set.go
@@ -21,6 +21,7 @@ import (
 
     "gvisor.googlesource.com/gvisor/pkg/sentry/context"
     "gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+    "gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
     "gvisor.googlesource.com/gvisor/pkg/sentry/platform"
     "gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
     "gvisor.googlesource.com/gvisor/pkg/sentry/usage"
@@ -77,7 +78,7 @@ func (seg FileRangeIterator) FileRangeOf(mr memmap.MappableRange) platform.FileR
 }
 
 // Fill attempts to ensure that all memmap.Mappable offsets in required are
-// mapped to a platform.File offset, by allocating from mem with the given
+// mapped to a platform.File offset, by allocating from mf with the given
 // memory usage kind and invoking readAt to store data into memory. (If readAt
 // returns a successful partial read, Fill will call it repeatedly until all
 // bytes have been read.) EOF is handled consistently with the requirements of
@@ -90,7 +91,7 @@ func (seg FileRangeIterator) FileRangeOf(mr memmap.MappableRange) platform.FileR
 //
 // Preconditions: required.Length() > 0. optional.IsSupersetOf(required).
 // required and optional must be page-aligned.
-func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.MappableRange, mem platform.Memory, kind usage.MemoryKind, readAt func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) error {
+func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.MappableRange, mf *pgalloc.MemoryFile, kind usage.MemoryKind, readAt func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) error {
     gap := frs.LowerBoundGap(required.Start)
     for gap.Ok() && gap.Start() < required.End {
         if gap.Range().Length() == 0 {
@@ -100,7 +101,7 @@ func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.Map
         gr := gap.Range().Intersect(optional)
 
         // Read data into the gap.
-        fr, err := platform.AllocateAndFill(mem, gr.Length(), kind, safemem.ReaderFunc(func(dsts safemem.BlockSeq) (uint64, error) {
+        fr, err := mf.AllocateAndFill(gr.Length(), kind, safemem.ReaderFunc(func(dsts safemem.BlockSeq) (uint64, error) {
             var done uint64
             for !dsts.IsEmpty() {
                 n, err := readAt(ctx, dsts, gr.Start+done)
@@ -108,7 +109,7 @@ func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.Map
                 dsts = dsts.DropFirst64(n)
                 if err != nil {
                     if err == io.EOF {
-                        // platform.AllocateAndFill truncates down to a page
+                        // MemoryFile.AllocateAndFill truncates down to a page
                         // boundary, but FileRangeSet.Fill is supposed to
                         // zero-fill to the end of the page in this case.
                         donepgaddr, ok := usermem.Addr(done).RoundUp()
@@ -143,20 +144,20 @@ func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.Map
 // corresponding platform.FileRanges.
 //
 // Preconditions: mr must be page-aligned.
-func (frs *FileRangeSet) Drop(mr memmap.MappableRange, mem platform.Memory) {
+func (frs *FileRangeSet) Drop(mr memmap.MappableRange, mf *pgalloc.MemoryFile) {
     seg := frs.LowerBoundSegment(mr.Start)
     for seg.Ok() && seg.Start() < mr.End {
         seg = frs.Isolate(seg, mr)
-        mem.DecRef(seg.FileRange())
+        mf.DecRef(seg.FileRange())
         seg = frs.Remove(seg).NextSegment()
     }
 }
 
 // DropAll removes all segments in mr, freeing the corresponding
 // platform.FileRanges.
-func (frs *FileRangeSet) DropAll(mem platform.Memory) {
+func (frs *FileRangeSet) DropAll(mf *pgalloc.MemoryFile) {
     for seg := frs.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
-        mem.DecRef(seg.FileRange())
+        mf.DecRef(seg.FileRange())
     }
     frs.RemoveAll()
 }
@@ -164,7 +165,7 @@ func (frs *FileRangeSet) DropAll(mem platform.Memory) {
 // Truncate updates frs to reflect Mappable truncation to the given length:
 // bytes after the new EOF on the same page are zeroed, and pages after the new
 // EOF are freed.
-func (frs *FileRangeSet) Truncate(end uint64, mem platform.Memory) {
+func (frs *FileRangeSet) Truncate(end uint64, mf *pgalloc.MemoryFile) {
     pgendaddr, ok := usermem.Addr(end).RoundUp()
     if ok {
         pgend := uint64(pgendaddr)
@@ -173,7 +174,7 @@ func (frs *FileRangeSet) Truncate(end uint64, mem platform.Memory) {
         frs.SplitAt(pgend)
         seg := frs.LowerBoundSegment(pgend)
         for seg.Ok() {
-            mem.DecRef(seg.FileRange())
+            mf.DecRef(seg.FileRange())
             seg = frs.Remove(seg).NextSegment()
         }
 
@@ -189,7 +190,7 @@ func (frs *FileRangeSet) Truncate(end uint64, mem platform.Memory) {
     if seg.Ok() {
         fr := seg.FileRange()
         fr.Start += end - seg.Start()
-        ims, err := mem.MapInternal(fr, usermem.Write)
+        ims, err := mf.MapInternal(fr, usermem.Write)
         if err != nil {
             // There's no good recourse from here. This means
             // that we can't keep cached memory consistent with
diff --git a/pkg/sentry/fs/fsutil/inode_cached.go b/pkg/sentry/fs/fsutil/inode_cached.go
index ef11676b8..9bd923678 100644
--- a/pkg/sentry/fs/fsutil/inode_cached.go
+++ b/pkg/sentry/fs/fsutil/inode_cached.go
@@ -25,6 +25,7 @@ import (
     "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
     ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
     "gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+    "gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
     "gvisor.googlesource.com/gvisor/pkg/sentry/platform"
     "gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
     "gvisor.googlesource.com/gvisor/pkg/sentry/usage"
@@ -62,8 +63,8 @@ type CachingInodeOperations struct {
     // backingFile is a handle to a cached file object.
     backingFile CachedFileObject
 
-    // platform is used to allocate memory that caches backingFile's contents.
-    platform platform.Platform
+    // mfp is used to allocate memory that caches backingFile's contents.
+    mfp pgalloc.MemoryFileProvider
 
     // forcePageCache indicates the sentry page cache should be used regardless
     // of whether the platform supports host mapped I/O or not. This must not be
@@ -96,7 +97,7 @@ type CachingInodeOperations struct {
     dataMu sync.RWMutex `state:"nosave"`
 
     // cache maps offsets into the cached file to offsets into
-    // platform.Memory() that store the file's data.
+    // mfp.MemoryFile() that store the file's data.
     //
     // cache is protected by dataMu.
     cache FileRangeSet
@@ -148,13 +149,13 @@ type CachedFileObject interface {
 // NewCachingInodeOperations returns a new CachingInodeOperations backed by
 // a CachedFileObject and its initial unstable attributes.
 func NewCachingInodeOperations(ctx context.Context, backingFile CachedFileObject, uattr fs.UnstableAttr, forcePageCache bool) *CachingInodeOperations {
-    p := platform.FromContext(ctx)
-    if p == nil {
-        panic(fmt.Sprintf("context.Context %T lacks non-nil value for key %T", ctx, platform.CtxPlatform))
+    mfp := pgalloc.MemoryFileProviderFromContext(ctx)
+    if mfp == nil {
+        panic(fmt.Sprintf("context.Context %T lacks non-nil value for key %T", ctx, pgalloc.CtxMemoryFileProvider))
     }
     return &CachingInodeOperations{
         backingFile:    backingFile,
-        platform:       p,
+        mfp:            mfp,
         forcePageCache: forcePageCache,
         attr:           uattr,
         hostFileMapper: NewHostFileMapper(),
@@ -311,7 +312,7 @@ func (c *CachingInodeOperations) Truncate(ctx context.Context, inode *fs.Inode,
     // written back.
     c.dataMu.Lock()
     defer c.dataMu.Unlock()
-    c.cache.Truncate(uint64(size), c.platform.Memory())
+    c.cache.Truncate(uint64(size), c.mfp.MemoryFile())
     c.dirty.KeepClean(memmap.MappableRange{uint64(size), oldpgend})
 
     return nil
@@ -323,7 +324,7 @@ func (c *CachingInodeOperations) WriteOut(ctx context.Context, inode *fs.Inode)
 
     // Write dirty pages back.
     c.dataMu.Lock()
-    err := SyncDirtyAll(ctx, &c.cache, &c.dirty, uint64(c.attr.Size), c.platform.Memory(), c.backingFile.WriteFromBlocksAt)
+    err := SyncDirtyAll(ctx, &c.cache, &c.dirty, uint64(c.attr.Size), c.mfp.MemoryFile(), c.backingFile.WriteFromBlocksAt)
     c.dataMu.Unlock()
     if err != nil {
         c.attrMu.Unlock()
@@ -527,7 +528,7 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
         return 0, nil
     }
 
-    mem := rw.c.platform.Memory()
+    mem := rw.c.mfp.MemoryFile()
     var done uint64
     seg, gap := rw.c.cache.Find(uint64(rw.offset))
     for rw.offset < end {
@@ -613,7 +614,7 @@ func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error
         return 0, nil
     }
 
-    mem := rw.c.platform.Memory()
+    mf := rw.c.mfp.MemoryFile()
     var done uint64
     seg, gap := rw.c.cache.Find(uint64(rw.offset))
     for rw.offset < end {
@@ -622,7 +623,7 @@ func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error
         case seg.Ok() && seg.Start() < mr.End:
             // Get internal mappings from the cache.
             segMR := seg.Range().Intersect(mr)
-            ims, err := mem.MapInternal(seg.FileRangeOf(segMR), usermem.Write)
+            ims, err := mf.MapInternal(seg.FileRangeOf(segMR), usermem.Write)
             if err != nil {
                 rw.maybeGrowFile()
                 rw.c.dataMu.Unlock()
@@ -711,13 +712,13 @@ func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Ma
     // Writeback dirty mapped memory now that there are no longer any
     // mappings that reference it. This is our naive memory eviction
     // strategy.
-    mem := c.platform.Memory()
+    mf := c.mfp.MemoryFile()
     c.dataMu.Lock()
     for _, r := range unmapped {
-        if err := SyncDirty(ctx, r, &c.cache, &c.dirty, uint64(c.attr.Size), c.platform.Memory(), c.backingFile.WriteFromBlocksAt); err != nil {
+        if err := SyncDirty(ctx, r, &c.cache, &c.dirty, uint64(c.attr.Size), mf, c.backingFile.WriteFromBlocksAt); err != nil {
             log.Warningf("Failed to writeback cached data %v: %v", r, err)
         }
-        c.cache.Drop(r, mem)
+        c.cache.Drop(r, mf)
         c.dirty.KeepClean(r)
     }
     c.dataMu.Unlock()
@@ -760,8 +761,8 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option
         optional.End = pgend
     }
 
-    mem := c.platform.Memory()
-    cerr := c.cache.Fill(ctx, required, maxFillRange(required, optional), mem, usage.PageCache, c.backingFile.ReadToBlocksAt)
+    mf := c.mfp.MemoryFile()
+    cerr := c.cache.Fill(ctx, required, maxFillRange(required, optional), mf, usage.PageCache, c.backingFile.ReadToBlocksAt)
 
     var ts []memmap.Translation
     var translatedEnd uint64
@@ -769,7 +770,7 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option
         segMR := seg.Range().Intersect(optional)
         ts = append(ts, memmap.Translation{
             Source: segMR,
-            File:   mem,
+            File:   mf,
             Offset: seg.FileRangeOf(segMR).Start,
         })
         if at.Write {
@@ -820,16 +821,17 @@ func (c *CachingInodeOperations) InvalidateUnsavable(ctx context.Context) error
 
     // Sync the cache's contents so that if we have a host fd after restore,
     // the remote file's contents are coherent.
+    mf := c.mfp.MemoryFile()
     c.dataMu.Lock()
     defer c.dataMu.Unlock()
-    if err := SyncDirtyAll(ctx, &c.cache, &c.dirty, uint64(c.attr.Size), c.platform.Memory(), c.backingFile.WriteFromBlocksAt); err != nil {
+    if err := SyncDirtyAll(ctx, &c.cache, &c.dirty, uint64(c.attr.Size), mf, c.backingFile.WriteFromBlocksAt); err != nil {
         return err
     }
 
     // Discard the cache so that it's not stored in saved state. This is safe
     // because per InvalidateUnsavable invariants, no new translations can have
     // been returned after we invalidated all existing translations above.
-    c.cache.DropAll(c.platform.Memory())
+    c.cache.DropAll(mf)
     c.dirty.RemoveAll()
 
     return nil
diff --git a/pkg/sentry/fs/proc/meminfo.go b/pkg/sentry/fs/proc/meminfo.go
index b31258eed..620e93ce3 100644
--- a/pkg/sentry/fs/proc/meminfo.go
+++ b/pkg/sentry/fs/proc/meminfo.go
@@ -44,10 +44,10 @@ func (d *meminfoData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle)
         return nil, 0
     }
 
-    mem := d.k.Platform.Memory()
-    mem.UpdateUsage()
+    mf := d.k.MemoryFile()
+    mf.UpdateUsage()
     snapshot, totalUsage := usage.MemoryAccounting.Copy()
-    totalSize := usage.TotalMemory(mem.TotalSize(), totalUsage)
+    totalSize := usage.TotalMemory(mf.TotalSize(), totalUsage)
     anon := snapshot.Anonymous + snapshot.Tmpfs
     file := snapshot.PageCache + snapshot.Mapped
     // We don't actually have active/inactive LRUs, so just make up numbers.
diff --git a/pkg/sentry/fs/tmpfs/inode_file.go b/pkg/sentry/fs/tmpfs/inode_file.go
index 13d06684d..a98fbf0f1 100644
--- a/pkg/sentry/fs/tmpfs/inode_file.go
+++ b/pkg/sentry/fs/tmpfs/inode_file.go
@@ -52,7 +52,7 @@ type fileInodeOperations struct {
 
     fsutil.InodeSimpleExtendedAttributes
 
-    // kernel is used to allocate platform memory that stores the file's contents.
+    // kernel is used to allocate memory that stores the file's contents.
     kernel *kernel.Kernel
 
     // memUsage is the default memory usage that will be reported by this file.
@@ -85,7 +85,7 @@ type fileInodeOperations struct {
 
 var _ fs.InodeOperations = (*fileInodeOperations)(nil)
 
-// NewInMemoryFile returns a new file backed by p.Memory().
+// NewInMemoryFile returns a new file backed by Kernel.MemoryFile().
 func NewInMemoryFile(ctx context.Context, usage usage.MemoryKind, uattr fs.UnstableAttr) fs.InodeOperations {
     return &fileInodeOperations{
         attr: uattr,
@@ -98,7 +98,7 @@ func NewInMemoryFile(ctx context.Context, usage usage.MemoryKind, uattr fs.Unsta
 func (f *fileInodeOperations) Release(context.Context) {
     f.dataMu.Lock()
     defer f.dataMu.Unlock()
-    f.data.DropAll(f.kernel.Platform.Memory())
+    f.data.DropAll(f.kernel.MemoryFile())
 }
 
 // Mappable implements fs.InodeOperations.Mappable.
@@ -202,7 +202,7 @@ func (f *fileInodeOperations) Truncate(ctx context.Context, _ *fs.Inode, size in
     // and can remove them.
     f.dataMu.Lock()
     defer f.dataMu.Unlock()
-    f.data.Truncate(uint64(size), f.kernel.Platform.Memory())
+    f.data.Truncate(uint64(size), f.kernel.MemoryFile())
 
     return nil
 }
@@ -312,7 +312,7 @@ func (rw *fileReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
         return 0, nil
     }
 
-    mem := rw.f.kernel.Platform.Memory()
+    mf := rw.f.kernel.MemoryFile()
     var done uint64
     seg, gap := rw.f.data.Find(uint64(rw.offset))
     for rw.offset < end {
@@ -320,7 +320,7 @@ func (rw *fileReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
         switch {
         case seg.Ok():
             // Get internal mappings.
-            ims, err := mem.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read)
+            ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read)
             if err != nil {
                 return done, err
             }
@@ -378,7 +378,7 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
         }
     }()
 
-    mem := rw.f.kernel.Platform.Memory()
+    mf := rw.f.kernel.MemoryFile()
     // Page-aligned mr for when we need to allocate memory. RoundUp can't
     // overflow since end is an int64.
     pgstartaddr := usermem.Addr(rw.offset).RoundDown()
@@ -392,7 +392,7 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
         switch {
         case seg.Ok():
             // Get internal mappings.
-            ims, err := mem.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Write)
+            ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Write)
             if err != nil {
                 return done, err
             }
@@ -412,7 +412,7 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
         case gap.Ok():
             // Allocate memory for the write.
             gapMR := gap.Range().Intersect(pgMR)
-            fr, err := mem.Allocate(gapMR.Length(), rw.f.memUsage)
+            fr, err := mf.Allocate(gapMR.Length(), rw.f.memUsage)
             if err != nil {
                 return done, err
             }
@@ -467,8 +467,8 @@ func (f *fileInodeOperations) Translate(ctx context.Context, required, optional
         optional.End = pgend
     }
 
-    mem := f.kernel.Platform.Memory()
-    cerr := f.data.Fill(ctx, required, optional, mem, f.memUsage, func(_ context.Context, dsts safemem.BlockSeq, _ uint64) (uint64, error) {
+    mf := f.kernel.MemoryFile()
+    cerr := f.data.Fill(ctx, required, optional, mf, f.memUsage, func(_ context.Context, dsts safemem.BlockSeq, _ uint64) (uint64, error) {
         // Newly-allocated pages are zeroed, so we don't need to do anything.
         return dsts.NumBytes(), nil
     })
@@ -479,7 +479,7 @@ func (f *fileInodeOperations) Translate(ctx context.Context, required, optional
         segMR := seg.Range().Intersect(optional)
         ts = append(ts, memmap.Translation{
             Source: segMR,
-            File:   mem,
+            File:   mf,
             Offset: seg.FileRangeOf(segMR).Start,
         })
         translatedEnd = segMR.End
diff --git a/pkg/sentry/fs/tmpfs/tmpfs.go b/pkg/sentry/fs/tmpfs/tmpfs.go
index 4b1762ce4..1a9d12c0b 100644
--- a/pkg/sentry/fs/tmpfs/tmpfs.go
+++ b/pkg/sentry/fs/tmpfs/tmpfs.go
@@ -74,7 +74,7 @@ type Dir struct {
     // InodeOperation methods to it.
     ramfsDir *ramfs.Dir
 
-    // kernel is used to allocate platform memory as storage for tmpfs Files.
+    // kernel is used to allocate memory as storage for tmpfs Files.
     kernel *kernel.Kernel
 }
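For callers outside this diff, the most visible API change is the signature of fsutil.FileRangeSet.Fill, which now takes a *pgalloc.MemoryFile instead of a platform.Memory. The sketch below is a hypothetical helper, not part of this change; it drives Fill the same way tmpfs's fileInodeOperations.Translate does above, with a readAt callback that claims every byte without writing so the newly allocated pages stay zero-filled.

package example

import (
    "gvisor.googlesource.com/gvisor/pkg/sentry/context"
    "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil"
    "gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
    "gvisor.googlesource.com/gvisor/pkg/sentry/pgalloc"
    "gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
    "gvisor.googlesource.com/gvisor/pkg/sentry/usage"
)

// fillZeroed (hypothetical) backs required..optional with zeroed pages.
// Per Fill's preconditions, required and optional must be page-aligned,
// required.Length() > 0, and optional must be a superset of required.
func fillZeroed(ctx context.Context, frs *fsutil.FileRangeSet, required, optional memmap.MappableRange, mf *pgalloc.MemoryFile) error {
    return frs.Fill(ctx, required, optional, mf, usage.Anonymous,
        func(_ context.Context, dsts safemem.BlockSeq, _ uint64) (uint64, error) {
            // Newly-allocated pages are zeroed, so report the whole
            // destination as read without copying anything.
            return dsts.NumBytes(), nil
        })
}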