Diffstat (limited to 'pkg')
-rw-r--r--  pkg/sentry/fs/proc/task.go  |  4
-rw-r--r--  pkg/sentry/mm/lifecycle.go  |  1
-rw-r--r--  pkg/sentry/mm/mm.go         |  6
-rw-r--r--  pkg/sentry/mm/mm_test.go    | 54
-rw-r--r--  pkg/sentry/mm/syscalls.go   | 27
-rw-r--r--  pkg/sentry/mm/vma.go        | 19
6 files changed, 105 insertions(+), 6 deletions(-)
diff --git a/pkg/sentry/fs/proc/task.go b/pkg/sentry/fs/proc/task.go
index 494b195cd..77e03d349 100644
--- a/pkg/sentry/fs/proc/task.go
+++ b/pkg/sentry/fs/proc/task.go
@@ -578,7 +578,7 @@ func (s *statusData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) (
}
fmt.Fprintf(&buf, "TracerPid:\t%d\n", tpid)
var fds int
- var vss, rss uint64
+ var vss, rss, data uint64
s.t.WithMuLocked(func(t *kernel.Task) {
if fdm := t.FDMap(); fdm != nil {
fds = fdm.Size()
@@ -586,11 +586,13 @@ func (s *statusData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) (
if mm := t.MemoryManager(); mm != nil {
vss = mm.VirtualMemorySize()
rss = mm.ResidentSetSize()
+ data = mm.VirtualDataSize()
}
})
fmt.Fprintf(&buf, "FDSize:\t%d\n", fds)
fmt.Fprintf(&buf, "VmSize:\t%d kB\n", vss>>10)
fmt.Fprintf(&buf, "VmRSS:\t%d kB\n", rss>>10)
+ fmt.Fprintf(&buf, "VmData:\t%d kB\n", data>>10)
fmt.Fprintf(&buf, "Threads:\t%d\n", s.t.ThreadGroup().Count())
creds := s.t.Credentials()
fmt.Fprintf(&buf, "CapInh:\t%016x\n", creds.InheritableCaps)
diff --git a/pkg/sentry/mm/lifecycle.go b/pkg/sentry/mm/lifecycle.go
index e6aa6f9ef..7a65a62a2 100644
--- a/pkg/sentry/mm/lifecycle.go
+++ b/pkg/sentry/mm/lifecycle.go
@@ -69,6 +69,7 @@ func (mm *MemoryManager) Fork(ctx context.Context) (*MemoryManager, error) {
users: 1,
brk: mm.brk,
usageAS: mm.usageAS,
+ dataAS: mm.dataAS,
// "The child does not inherit its parent's memory locks (mlock(2),
// mlockall(2))." - fork(2). So lockedAS is 0 and defMLockMode is
// MLockNone, both of which are zero values. vma.mlockMode is reset
diff --git a/pkg/sentry/mm/mm.go b/pkg/sentry/mm/mm.go
index d25aa5136..eb6defa2b 100644
--- a/pkg/sentry/mm/mm.go
+++ b/pkg/sentry/mm/mm.go
@@ -111,6 +111,12 @@ type MemoryManager struct {
// lockedAS is protected by mappingMu.
lockedAS uint64
+ // dataAS is the size in bytes of private data segments, analogous to
+ // mm_struct->data_vm in Linux: the total length of vmas that are private,
+ // writable, and not stack.
+ //
+ // dataAS is protected by mappingMu.
+ dataAS uint64
+
// New VMAs created by MMap use whichever of memmap.MMapOpts.MLockMode or
// defMLockMode is greater.
//
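One unit detail worth noting: Linux keeps mm_struct->data_vm in pages and reports it in /proc as data_vm << (PAGE_SHIFT - 10) kB, while dataAS here is kept in bytes, which is why the proc hunk shifts right by 10. A minimal sketch of the two conversions, assuming 4 KiB pages (pageShift is defined only for this example):

package main

import "fmt"

const pageShift = 12 // assumed 4 KiB pages, for illustration only

// kbFromPages mirrors Linux's reporting of data_vm (pages -> kB).
func kbFromPages(pages uint64) uint64 { return pages << (pageShift - 10) }

// kbFromBytes mirrors the sentry's reporting of dataAS (bytes -> kB).
func kbFromBytes(bytes uint64) uint64 { return bytes >> 10 }

func main() {
	// Three 4 KiB pages of private, writable, non-stack mappings.
	fmt.Println(kbFromPages(3))        // 12
	fmt.Println(kbFromBytes(3 * 4096)) // 12
}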
diff --git a/pkg/sentry/mm/mm_test.go b/pkg/sentry/mm/mm_test.go
index f4917419f..7209c73ce 100644
--- a/pkg/sentry/mm/mm_test.go
+++ b/pkg/sentry/mm/mm_test.go
@@ -68,6 +68,60 @@ func TestUsageASUpdates(t *testing.T) {
}
}
+func (mm *MemoryManager) realDataAS() uint64 {
+ var sz uint64
+ for seg := mm.vmas.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ vma := seg.Value()
+ if vma.isPrivateDataLocked() {
+ sz += uint64(seg.Range().Length())
+ }
+ }
+ return sz
+}
+
+func TestDataASUpdates(t *testing.T) {
+ ctx := contexttest.Context(t)
+ mm := testMemoryManager(ctx)
+ defer mm.DecUsers(ctx)
+
+ addr, err := mm.MMap(ctx, memmap.MMapOpts{
+ Length: 3 * usermem.PageSize,
+ Private: true,
+ Perms: usermem.Write,
+ MaxPerms: usermem.AnyAccess,
+ })
+ if err != nil {
+ t.Fatalf("MMap got err %v want nil", err)
+ }
+ if mm.dataAS == 0 {
+ t.Fatalf("dataAS is 0, wanted not 0")
+ }
+ realDataAS := mm.realDataAS()
+ if mm.dataAS != realDataAS {
+ t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
+ }
+
+ mm.MUnmap(ctx, addr, usermem.PageSize)
+ realDataAS = mm.realDataAS()
+ if mm.dataAS != realDataAS {
+ t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
+ }
+
+ mm.MProtect(addr+usermem.PageSize, usermem.PageSize, usermem.Read, false)
+ realDataAS = mm.realDataAS()
+ if mm.dataAS != realDataAS {
+ t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
+ }
+
+ mm.MRemap(ctx, addr+2*usermem.PageSize, usermem.PageSize, 2*usermem.PageSize, MRemapOpts{
+ Move: MRemapMayMove,
+ })
+ realDataAS = mm.realDataAS()
+ if mm.dataAS != realDataAS {
+ t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
+ }
+}
+
func TestBrkDataLimitUpdates(t *testing.T) {
limitSet := limits.NewLimitSet()
limitSet.Set(limits.Data, limits.Limit{}, true /* privileged */) // zero RLIMIT_DATA
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
index 70c9aa7f6..0368c6794 100644
--- a/pkg/sentry/mm/syscalls.go
+++ b/pkg/sentry/mm/syscalls.go
@@ -527,6 +527,9 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
}
vseg := mm.vmas.Insert(mm.vmas.FindGap(newAR.Start), newAR, vma)
mm.usageAS += uint64(newAR.Length())
+ if vma.isPrivateDataLocked() {
+ mm.dataAS += uint64(newAR.Length())
+ }
if vma.mlockMode != memmap.MLockNone {
mm.lockedAS += uint64(newAR.Length())
if vma.mlockMode == memmap.MLockEager {
@@ -556,6 +559,9 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi
mm.vmas.Remove(vseg)
vseg = mm.vmas.Insert(mm.vmas.FindGap(newAR.Start), newAR, vma)
mm.usageAS = mm.usageAS - uint64(oldAR.Length()) + uint64(newAR.Length())
+ if vma.isPrivateDataLocked() {
+ mm.dataAS = mm.dataAS - uint64(oldAR.Length()) + uint64(newAR.Length())
+ }
if vma.mlockMode != memmap.MLockNone {
mm.lockedAS = mm.lockedAS - uint64(oldAR.Length()) + uint64(newAR.Length())
}
@@ -643,8 +649,16 @@ func (mm *MemoryManager) MProtect(addr usermem.Addr, length uint64, realPerms us
// Update vma permissions.
vma := vseg.ValuePtr()
+ vmaLength := vseg.Range().Length()
+ if vma.isPrivateDataLocked() {
+ mm.dataAS -= uint64(vmaLength)
+ }
+
vma.realPerms = realPerms
vma.effectivePerms = effectivePerms
+ if vma.isPrivateDataLocked() {
+ mm.dataAS += uint64(vmaLength)
+ }
// Propagate vma permission changes to pmas.
for pseg.Ok() && pseg.Start() < vseg.End() {
@@ -1150,7 +1164,7 @@ func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr usermem.Add
func (mm *MemoryManager) VirtualMemorySize() uint64 {
mm.mappingMu.RLock()
defer mm.mappingMu.RUnlock()
- return uint64(mm.usageAS)
+ return mm.usageAS
}
// VirtualMemorySizeRange returns the combined length in bytes of all mappings
@@ -1165,12 +1179,19 @@ func (mm *MemoryManager) VirtualMemorySizeRange(ar usermem.AddrRange) uint64 {
func (mm *MemoryManager) ResidentSetSize() uint64 {
mm.activeMu.RLock()
defer mm.activeMu.RUnlock()
- return uint64(mm.curRSS)
+ return mm.curRSS
}
// MaxResidentSetSize returns the value advertised as mm's max RSS in bytes.
func (mm *MemoryManager) MaxResidentSetSize() uint64 {
mm.activeMu.RLock()
defer mm.activeMu.RUnlock()
- return uint64(mm.maxRSS)
+ return mm.maxRSS
+}
+
+// VirtualDataSize returns the size of private data segments in mm.
+func (mm *MemoryManager) VirtualDataSize() uint64 {
+ mm.mappingMu.RLock()
+ defer mm.mappingMu.RUnlock()
+ return mm.dataAS
}
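The MProtect hunk above keeps dataAS consistent by subtracting the vma's length while the vma still has its old permissions and re-adding it after they change; the counter then stays correct whether the write bit was added, dropped, or left alone. A standalone sketch of that subtract/re-add pattern (the mapping type and setWritable helper are invented for illustration, not sentry types):

package main

import "fmt"

// mapping is a toy stand-in for a vma, reduced to what the rule needs.
type mapping struct {
	length    uint64
	private   bool
	writable  bool
	growsDown bool
}

func (m *mapping) isPrivateData() bool {
	return m.writable && m.private && !m.growsDown
}

// setWritable changes the permission and keeps dataAS consistent by removing
// the old contribution before the change and adding the new one after it.
func setWritable(dataAS *uint64, m *mapping, writable bool) {
	if m.isPrivateData() {
		*dataAS -= m.length
	}
	m.writable = writable
	if m.isPrivateData() {
		*dataAS += m.length
	}
}

func main() {
	m := &mapping{length: 4096, private: true, writable: true}
	dataAS := m.length // the mapping is already counted

	setWritable(&dataAS, m, false) // write bit dropped: no longer data
	fmt.Println(dataAS)            // 0

	setWritable(&dataAS, m, true) // write bit restored: counted again
	fmt.Println(dataAS)           // 4096
}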
diff --git a/pkg/sentry/mm/vma.go b/pkg/sentry/mm/vma.go
index ad901344b..02203f79f 100644
--- a/pkg/sentry/mm/vma.go
+++ b/pkg/sentry/mm/vma.go
@@ -98,7 +98,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp
}
// Finally insert the vma.
- vseg := mm.vmas.Insert(vgap, ar, vma{
+ v := vma{
mappable: opts.Mappable,
off: opts.Offset,
realPerms: opts.Perms,
@@ -109,8 +109,13 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp
mlockMode: opts.MLockMode,
id: opts.MappingIdentity,
hint: opts.Hint,
- })
+ }
+
+ vseg := mm.vmas.Insert(vgap, ar, v)
mm.usageAS += opts.Length
+ if v.isPrivateDataLocked() {
+ mm.dataAS += opts.Length
+ }
if opts.MLockMode != memmap.MLockNone {
mm.lockedAS += opts.Length
}
@@ -374,6 +379,9 @@ func (mm *MemoryManager) removeVMAsLocked(ctx context.Context, ar usermem.AddrRa
vma.id.DecRef()
}
mm.usageAS -= uint64(vmaAR.Length())
+ if vma.isPrivateDataLocked() {
+ mm.dataAS -= uint64(vmaAR.Length())
+ }
if vma.mlockMode != memmap.MLockNone {
mm.lockedAS -= uint64(vmaAR.Length())
}
@@ -396,6 +404,13 @@ func (vma *vma) canWriteMappableLocked() bool {
return !vma.private && vma.maxPerms.Write
}
+// isPrivateDataLocked reports whether vma is a private data segment, i.e. a
+// vma that is private, writable, and not stack.
+//
+// Preconditions: mm.mappingMu must be locked.
+func (vma *vma) isPrivateDataLocked() bool {
+ return vma.realPerms.Write && vma.private && !vma.growsDown
+}
+
// vmaSetFunctions implements segment.Functions for vmaSet.
type vmaSetFunctions struct{}
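To make the new classification concrete, here is a small table of typical mappings and whether they would count toward dataAS under the private && writable && !growsDown rule; counts is a hypothetical restatement of isPrivateDataLocked over plain booleans, not the sentry's API:

package main

import "fmt"

// counts restates the isPrivateDataLocked rule over plain booleans.
func counts(private, writable, growsDown bool) bool {
	return private && writable && !growsDown
}

func main() {
	cases := []struct {
		desc                         string
		private, writable, growsDown bool
	}{
		{"anonymous private rw (heap, malloc arenas)", true, true, false},
		{"file-backed private rw (writable data segment)", true, true, false},
		{"file-backed private r-x (program text)", true, false, false},
		{"shared rw (MAP_SHARED memory)", false, true, false},
		{"stack (private rw, grows down)", true, true, true},
	}
	for _, c := range cases {
		fmt.Printf("%-48s counted=%v\n", c.desc, counts(c.private, c.writable, c.growsDown))
	}
}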