// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mm

import (
	"testing"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/sentry/arch"
	"gvisor.dev/gvisor/pkg/sentry/contexttest"
	"gvisor.dev/gvisor/pkg/sentry/limits"
	"gvisor.dev/gvisor/pkg/sentry/memmap"
	"gvisor.dev/gvisor/pkg/sentry/pgalloc"
	"gvisor.dev/gvisor/pkg/sentry/platform"
	"gvisor.dev/gvisor/pkg/usermem"
)

// testMemoryManager returns a MemoryManager with a mmap layout spanning the
// platform's full user address range, suitable for tests.
func testMemoryManager(ctx context.Context) *MemoryManager {
	p := platform.FromContext(ctx)
	mfp := pgalloc.MemoryFileProviderFromContext(ctx)
	mm := NewMemoryManager(p, mfp, false)
	mm.layout = arch.MmapLayout{
		MinAddr:      p.MinUserAddress(),
		MaxAddr:      p.MaxUserAddress(),
		BottomUpBase: p.MinUserAddress(),
		TopDownBase:  p.MaxUserAddress(),
	}
	return mm
}

// realUsageAS returns the total size of all vmas, which mm.usageAS is
// expected to track.
func (mm *MemoryManager) realUsageAS() uint64 {
	return uint64(mm.vmas.Span())
}

// TestUsageASUpdates tests that usageAS stays consistent with the vma set
// across MMap and MUnmap.
func TestUsageASUpdates(t *testing.T) {
	ctx := contexttest.Context(t)
	mm := testMemoryManager(ctx)
	defer mm.DecUsers(ctx)

	addr, err := mm.MMap(ctx, memmap.MMapOpts{
		Length:  2 * hostarch.PageSize,
		Private: true,
	})
	if err != nil {
		t.Fatalf("MMap got err %v want nil", err)
	}
	realUsage := mm.realUsageAS()
	if mm.usageAS != realUsage {
		t.Fatalf("usageAS believes %v bytes are mapped; %v bytes are actually mapped", mm.usageAS, realUsage)
	}

	mm.MUnmap(ctx, addr, hostarch.PageSize)
	realUsage = mm.realUsageAS()
	if mm.usageAS != realUsage {
		t.Fatalf("usageAS believes %v bytes are mapped; %v bytes are actually mapped", mm.usageAS, realUsage)
	}
}

// realDataAS returns the total size of vmas that count as private data (per
// isPrivateDataLocked), which mm.dataAS is expected to track.
func (mm *MemoryManager) realDataAS() uint64 {
	var sz uint64
	for seg := mm.vmas.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
		vma := seg.Value()
		if vma.isPrivateDataLocked() {
			sz += uint64(seg.Range().Length())
		}
	}
	return sz
}

// TestDataASUpdates tests that dataAS stays consistent with the vma set
// across MMap, MUnmap, MProtect, and MRemap.
func TestDataASUpdates(t *testing.T) {
	ctx := contexttest.Context(t)
	mm := testMemoryManager(ctx)
	defer mm.DecUsers(ctx)

	addr, err := mm.MMap(ctx, memmap.MMapOpts{
		Length:   3 * hostarch.PageSize,
		Private:  true,
		Perms:    hostarch.Write,
		MaxPerms: hostarch.AnyAccess,
	})
	if err != nil {
		t.Fatalf("MMap got err %v want nil", err)
	}
	if mm.dataAS == 0 {
		t.Fatalf("dataAS is 0, wanted not 0")
	}
	realDataAS := mm.realDataAS()
	if mm.dataAS != realDataAS {
		t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
	}

	mm.MUnmap(ctx, addr, hostarch.PageSize)
	realDataAS = mm.realDataAS()
	if mm.dataAS != realDataAS {
		t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
	}

	mm.MProtect(addr+hostarch.PageSize, hostarch.PageSize, hostarch.Read, false)
	realDataAS = mm.realDataAS()
	if mm.dataAS != realDataAS {
		t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
	}

	mm.MRemap(ctx, addr+2*hostarch.PageSize, hostarch.PageSize, 2*hostarch.PageSize, MRemapOpts{
		Move: MRemapMayMove,
	})
	realDataAS = mm.realDataAS()
	if mm.dataAS != realDataAS {
		t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS)
	}
}

// TestBrkDataLimitUpdates tests that the data segment cannot be extended past
// RLIMIT_DATA via brk.
func TestBrkDataLimitUpdates(t *testing.T) {
	limitSet := limits.NewLimitSet()
	limitSet.Set(limits.Data, limits.Limit{}, true /* privileged */) // zero RLIMIT_DATA
	ctx := contexttest.WithLimitSet(contexttest.Context(t), limitSet)

	mm := testMemoryManager(ctx)
	defer mm.DecUsers(ctx)

	// Try to extend the brk by one page and expect doing so to fail.
	oldBrk, _ := mm.Brk(ctx, 0)
	if newBrk, _ := mm.Brk(ctx, oldBrk+hostarch.PageSize); newBrk != oldBrk {
		t.Errorf("brk() increased data segment above RLIMIT_DATA (old brk = %#x, new brk = %#x)", oldBrk, newBrk)
	}
}

// TestIOAfterUnmap ensures that IO fails after unmap.
func TestIOAfterUnmap(t *testing.T) {
	ctx := contexttest.Context(t)
	mm := testMemoryManager(ctx)
	defer mm.DecUsers(ctx)

	addr, err := mm.MMap(ctx, memmap.MMapOpts{
		Length:   hostarch.PageSize,
		Private:  true,
		Perms:    hostarch.Read,
		MaxPerms: hostarch.AnyAccess,
	})
	if err != nil {
		t.Fatalf("MMap got err %v want nil", err)
	}

	// IO works before munmap.
	b := make([]byte, 1)
	n, err := mm.CopyIn(ctx, addr, b, usermem.IOOpts{})
	if err != nil {
		t.Errorf("CopyIn got err %v want nil", err)
	}
	if n != 1 {
		t.Errorf("CopyIn got %d want 1", n)
	}

	err = mm.MUnmap(ctx, addr, hostarch.PageSize)
	if err != nil {
		t.Fatalf("MUnmap got err %v want nil", err)
	}

	n, err = mm.CopyIn(ctx, addr, b, usermem.IOOpts{})
	if !linuxerr.Equals(linuxerr.EFAULT, err) {
		t.Errorf("CopyIn got err %v want EFAULT", err)
	}
	if n != 0 {
		t.Errorf("CopyIn got %d want 0", n)
	}
}

// TestIOAfterMProtect tests IO interaction with mprotect permissions.
func TestIOAfterMProtect(t *testing.T) {
	ctx := contexttest.Context(t)
	mm := testMemoryManager(ctx)
	defer mm.DecUsers(ctx)

	addr, err := mm.MMap(ctx, memmap.MMapOpts{
		Length:   hostarch.PageSize,
		Private:  true,
		Perms:    hostarch.ReadWrite,
		MaxPerms: hostarch.AnyAccess,
	})
	if err != nil {
		t.Fatalf("MMap got err %v want nil", err)
	}

	// Writing works before mprotect.
	b := make([]byte, 1)
	n, err := mm.CopyOut(ctx, addr, b, usermem.IOOpts{})
	if err != nil {
		t.Errorf("CopyOut got err %v want nil", err)
	}
	if n != 1 {
		t.Errorf("CopyOut got %d want 1", n)
	}

	err = mm.MProtect(addr, hostarch.PageSize, hostarch.Read, false)
	if err != nil {
		t.Errorf("MProtect got err %v want nil", err)
	}

	// Without IgnorePermissions, CopyOut should no longer succeed.
	n, err = mm.CopyOut(ctx, addr, b, usermem.IOOpts{})
	if !linuxerr.Equals(linuxerr.EFAULT, err) {
		t.Errorf("CopyOut got err %v want EFAULT", err)
	}
	if n != 0 {
		t.Errorf("CopyOut got %d want 0", n)
	}

	// With IgnorePermissions, CopyOut should succeed despite mprotect.
	n, err = mm.CopyOut(ctx, addr, b, usermem.IOOpts{
		IgnorePermissions: true,
	})
	if err != nil {
		t.Errorf("CopyOut got err %v want nil", err)
	}
	if n != 1 {
		t.Errorf("CopyOut got %d want 1", n)
	}
}

// TestAIOPrepareAfterDestroy tests that AIOContext should not be able to be
// prepared after destruction.
func TestAIOPrepareAfterDestroy(t *testing.T) {
	ctx := contexttest.Context(t)
	mm := testMemoryManager(ctx)
	defer mm.DecUsers(ctx)

	id, err := mm.NewAIOContext(ctx, 1)
	if err != nil {
		t.Fatalf("mm.NewAIOContext got err %v want nil", err)
	}
	aioCtx, ok := mm.LookupAIOContext(ctx, id)
	if !ok {
		t.Fatalf("AIOContext not found")
	}
	mm.DestroyAIOContext(ctx, id)

	// Prepare should fail because aioCtx should be destroyed.
	if err := aioCtx.Prepare(); !linuxerr.Equals(linuxerr.EINVAL, err) {
		t.Errorf("aioCtx.Prepare got err %v want EINVAL", err)
		if err == nil {
			// Prepare unexpectedly succeeded; cancel the request it
			// registered so the context's pending-request count stays
			// balanced.
			aioCtx.CancelPendingRequest()
		}
	}
}

// TestAIOLookupAfterDestroy tests that AIOContext should not be able to be
// looked up after memory manager is destroyed.
func TestAIOLookupAfterDestroy(t *testing.T) {
	ctx := contexttest.Context(t)
	mm := testMemoryManager(ctx)

	id, err := mm.NewAIOContext(ctx, 1)
	if err != nil {
		mm.DecUsers(ctx)
		t.Fatalf("mm.NewAIOContext got err %v want nil", err)
	}
	mm.DecUsers(ctx) // This destroys the AIOContext manager.

	if _, ok := mm.LookupAIOContext(ctx, id); ok {
		t.Errorf("AIOContext found even after AIOContext manager is destroyed")
	}
}
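
// TestUsageASAfterMRemap is a minimal additional sketch, not part of the
// original suite: it mirrors TestUsageASUpdates but grows a mapping via
// MRemap (called the same way as in TestDataASUpdates above), on the
// assumption that usageAS is also kept consistent across remapping. The
// test name is new; every call it makes already appears elsewhere in this
// file.
func TestUsageASAfterMRemap(t *testing.T) {
	ctx := contexttest.Context(t)
	mm := testMemoryManager(ctx)
	defer mm.DecUsers(ctx)

	// Map a single private page.
	addr, err := mm.MMap(ctx, memmap.MMapOpts{
		Length:  hostarch.PageSize,
		Private: true,
	})
	if err != nil {
		t.Fatalf("MMap got err %v want nil", err)
	}

	// Grow the mapping to two pages, allowing it to move if necessary.
	mm.MRemap(ctx, addr, hostarch.PageSize, 2*hostarch.PageSize, MRemapOpts{
		Move: MRemapMayMove,
	})
	realUsage := mm.realUsageAS()
	if mm.usageAS != realUsage {
		t.Errorf("usageAS believes %v bytes are mapped; %v bytes are actually mapped", mm.usageAS, realUsage)
	}
}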