path: root/pkg/sentry/mm
author     gVisor bot <gvisor-bot@google.com>  2020-08-20 20:36:02 +0000
committer  gVisor bot <gvisor-bot@google.com>  2020-08-20 20:36:02 +0000
commit     8c844b60d240b3b00a45620044be39e8797294f7 (patch)
tree       0961bd86800ea4a7f39f348ce57a3d012a72cc20 /pkg/sentry/mm
parent     38234fef22552b4b44bbac542abd68212690378e (diff)
parent     129018ab3d945758b800aa5e6fe1a01d0e99446c (diff)
Merge release-20200810.0-74-g129018ab3 (automated)
Diffstat (limited to 'pkg/sentry/mm')
-rw-r--r--  pkg/sentry/mm/address_space.go       8
-rw-r--r--  pkg/sentry/mm/file_refcount_set.go  32
-rw-r--r--  pkg/sentry/mm/io.go                  9
-rw-r--r--  pkg/sentry/mm/pma.go                85
-rw-r--r--  pkg/sentry/mm/pma_set.go            32
-rw-r--r--  pkg/sentry/mm/syscalls.go            9
-rw-r--r--  pkg/sentry/mm/vma.go                42
-rw-r--r--  pkg/sentry/mm/vma_set.go            32
8 files changed, 158 insertions(+), 91 deletions(-)
diff --git a/pkg/sentry/mm/address_space.go b/pkg/sentry/mm/address_space.go
index 5c667117c..a93e76c75 100644
--- a/pkg/sentry/mm/address_space.go
+++ b/pkg/sentry/mm/address_space.go
@@ -166,8 +166,12 @@ func (mm *MemoryManager) Deactivate() {
// mapASLocked maps addresses in ar into mm.as. If precommit is true, mappings
// for all addresses in ar should be precommitted.
//
-// Preconditions: mm.activeMu must be locked. mm.as != nil. ar.Length() != 0.
-// ar must be page-aligned. pseg == mm.pmas.LowerBoundSegment(ar.Start).
+// Preconditions:
+// * mm.activeMu must be locked.
+// * mm.as != nil.
+// * ar.Length() != 0.
+// * ar must be page-aligned.
+// * pseg == mm.pmas.LowerBoundSegment(ar.Start).
func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar usermem.AddrRange, precommit bool) error {
// By default, map entire pmas at a time, under the assumption that there
// is no cost to mapping more of a pma than necessary.
diff --git a/pkg/sentry/mm/file_refcount_set.go b/pkg/sentry/mm/file_refcount_set.go
index b47c74391..1d956b4b4 100644
--- a/pkg/sentry/mm/file_refcount_set.go
+++ b/pkg/sentry/mm/file_refcount_set.go
@@ -347,7 +347,9 @@ func (s *fileRefcountSet) InsertWithoutMerging(gap fileRefcountGapIterator, r __
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *fileRefcountSet) InsertWithoutMergingUnchecked(gap fileRefcountGapIterator, r __generics_imported0.FileRange, val int32) fileRefcountIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := fileRefcounttrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1101,12 +1103,10 @@ func (seg fileRefcountIterator) End() uint64 {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg fileRefcountIterator) SetRangeUnchecked(r __generics_imported0.FileRange) {
seg.node.keys[seg.index] = r
}
@@ -1131,8 +1131,9 @@ func (seg fileRefcountIterator) SetRange(r __generics_imported0.FileRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End()
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg fileRefcountIterator) SetStartUnchecked(start uint64) {
seg.node.keys[seg.index].Start = start
}
@@ -1154,8 +1155,9 @@ func (seg fileRefcountIterator) SetStart(start uint64) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg fileRefcountIterator) SetEndUnchecked(end uint64) {
seg.node.keys[seg.index].End = end
}
@@ -1575,9 +1577,11 @@ func (s *fileRefcountSet) ExportSortedSlices() *fileRefcountSegmentDataSlices {
// ImportSortedSlice initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *fileRefcountSet) ImportSortedSlices(sds *fileRefcountSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
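
Note (not part of the commit): the preconditions above spell out the contract between the checked and unchecked insertion paths of the generated segment set. A minimal Go sketch of that pattern, using simplified stand-in types (a FileRange struct and a flat slice) rather than the generated gvisor set:

```go
package main

import "fmt"

// FileRange is a stand-in for the half-open [Start, End) ranges stored in the
// set; the real code uses memmap.FileRange and a generated B-tree.
type FileRange struct {
	Start, End uint64
}

// insertIntoGap validates the documented preconditions of the unchecked
// insertion path (r.Start >= gap start, r.End <= gap end, non-empty r) and
// only then performs the insertion, mirroring the checked/unchecked split.
func insertIntoGap(set []FileRange, gapStart, gapEnd uint64, r FileRange) ([]FileRange, error) {
	if r.Start >= r.End {
		return set, fmt.Errorf("empty range %+v", r)
	}
	if r.Start < gapStart || r.End > gapEnd {
		return set, fmt.Errorf("range %+v does not fit in gap [%#x, %#x)", r, gapStart, gapEnd)
	}
	// Preconditions hold; an unchecked fast path could now skip revalidation.
	return append(set, r), nil
}

func main() {
	set, err := insertIntoGap(nil, 0, 0x1000, FileRange{Start: 0x200, End: 0x400})
	fmt.Println(set, err) // [{512 1024}] <nil>

	_, err = insertIntoGap(set, 0x400, 0x800, FileRange{Start: 0x300, End: 0x500})
	fmt.Println(err) // range does not fit in gap
}
```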
diff --git a/pkg/sentry/mm/io.go b/pkg/sentry/mm/io.go
index fa776f9c6..a8ac48080 100644
--- a/pkg/sentry/mm/io.go
+++ b/pkg/sentry/mm/io.go
@@ -441,7 +441,10 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts
// handleASIOFault handles a page fault at address addr for an AddressSpaceIO
// operation spanning ioar.
//
-// Preconditions: mm.as != nil. ioar.Length() != 0. ioar.Contains(addr).
+// Preconditions:
+// * mm.as != nil.
+// * ioar.Length() != 0.
+// * ioar.Contains(addr).
func (mm *MemoryManager) handleASIOFault(ctx context.Context, addr usermem.Addr, ioar usermem.AddrRange, at usermem.AccessType) error {
// Try to map all remaining pages in the I/O operation. This RoundUp can't
// overflow because otherwise it would have been caught by CheckIORange.
@@ -629,7 +632,9 @@ func (mm *MemoryManager) withVecInternalMappings(ctx context.Context, ars userme
// at most address end on AddrRange arsit.Head(). It is used in vector I/O paths to
// truncate usermem.AddrRangeSeq when errors occur.
//
-// Preconditions: !arsit.IsEmpty(). end <= arsit.Head().End.
+// Preconditions:
+// * !arsit.IsEmpty().
+// * end <= arsit.Head().End.
func truncatedAddrRangeSeq(ars, arsit usermem.AddrRangeSeq, end usermem.Addr) usermem.AddrRangeSeq {
ar := arsit.Head()
if end <= ar.Start {
diff --git a/pkg/sentry/mm/pma.go b/pkg/sentry/mm/pma.go
index 930ec895f..30facebf7 100644
--- a/pkg/sentry/mm/pma.go
+++ b/pkg/sentry/mm/pma.go
@@ -31,7 +31,9 @@ import (
// iterator to the pma containing ar.Start. Otherwise it returns a terminal
// iterator.
//
-// Preconditions: mm.activeMu must be locked. ar.Length() != 0.
+// Preconditions:
+// * mm.activeMu must be locked.
+// * ar.Length() != 0.
func (mm *MemoryManager) existingPMAsLocked(ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool, needInternalMappings bool) pmaIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 {
@@ -89,10 +91,13 @@ func (mm *MemoryManager) existingVecPMAsLocked(ars usermem.AddrRangeSeq, at user
//
// - An error that is non-nil if pmas exist for only a subset of ar.
//
-// Preconditions: mm.mappingMu must be locked. mm.activeMu must be locked for
-// writing. ar.Length() != 0. vseg.Range().Contains(ar.Start). vmas must exist
-// for all addresses in ar, and support accesses of type at (i.e. permission
-// checks must have been performed against vmas).
+// Preconditions:
+// * mm.mappingMu must be locked.
+// * mm.activeMu must be locked for writing.
+// * ar.Length() != 0.
+// * vseg.Range().Contains(ar.Start).
+// * vmas must exist for all addresses in ar, and support accesses of type at
+// (i.e. permission checks must have been performed against vmas).
func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, at usermem.AccessType) (pmaIterator, pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 {
@@ -135,9 +140,11 @@ func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar
// exist. If this is not equal to ars, it returns a non-nil error explaining
// why.
//
-// Preconditions: mm.mappingMu must be locked. mm.activeMu must be locked for
-// writing. vmas must exist for all addresses in ars, and support accesses of
-// type at (i.e. permission checks must have been performed against vmas).
+// Preconditions:
+// * mm.mappingMu must be locked.
+// * mm.activeMu must be locked for writing.
+// * vmas must exist for all addresses in ars, and support accesses of type at
+// (i.e. permission checks must have been performed against vmas).
func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType) (usermem.AddrRangeSeq, error) {
for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
ar := arsit.Head()
@@ -518,8 +525,10 @@ func privateAligned(ar usermem.AddrRange) usermem.AddrRange {
// the memory it maps, isPMACopyOnWriteLocked will take ownership of the memory
// and update the pma to indicate that it does not require copy-on-write.
//
-// Preconditions: vseg.Range().IsSupersetOf(pseg.Range()). mm.mappingMu must be
-// locked. mm.activeMu must be locked for writing.
+// Preconditions:
+// * vseg.Range().IsSupersetOf(pseg.Range()).
+// * mm.mappingMu must be locked.
+// * mm.activeMu must be locked for writing.
func (mm *MemoryManager) isPMACopyOnWriteLocked(vseg vmaIterator, pseg pmaIterator) bool {
pma := pseg.ValuePtr()
if !pma.needCOW {
@@ -568,8 +577,10 @@ func (mm *MemoryManager) Invalidate(ar usermem.AddrRange, opts memmap.Invalidate
// invalidateLocked removes pmas and AddressSpace mappings of those pmas for
// addresses in ar.
//
-// Preconditions: mm.activeMu must be locked for writing. ar.Length() != 0. ar
-// must be page-aligned.
+// Preconditions:
+// * mm.activeMu must be locked for writing.
+// * ar.Length() != 0.
+// * ar must be page-aligned.
func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivate, invalidateShared bool) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() {
@@ -613,7 +624,9 @@ func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivat
// most I/O. It should only be used in contexts that would use get_user_pages()
// in the Linux kernel.
//
-// Preconditions: ar.Length() != 0. ar must be page-aligned.
+// Preconditions:
+// * ar.Length() != 0.
+// * ar must be page-aligned.
func (mm *MemoryManager) Pin(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool) ([]PinnedRange, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() {
@@ -693,9 +706,13 @@ func Unpin(prs []PinnedRange) {
// movePMAsLocked moves all pmas in oldAR to newAR.
//
-// Preconditions: mm.activeMu must be locked for writing. oldAR.Length() != 0.
-// oldAR.Length() <= newAR.Length(). !oldAR.Overlaps(newAR).
-// mm.pmas.IsEmptyRange(newAR). oldAR and newAR must be page-aligned.
+// Preconditions:
+// * mm.activeMu must be locked for writing.
+// * oldAR.Length() != 0.
+// * oldAR.Length() <= newAR.Length().
+// * !oldAR.Overlaps(newAR).
+// * mm.pmas.IsEmptyRange(newAR).
+// * oldAR and newAR must be page-aligned.
func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
if checkInvariants {
if !oldAR.WellFormed() || oldAR.Length() <= 0 || !oldAR.IsPageAligned() {
@@ -751,9 +768,11 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
// - An error that is non-nil if internal mappings exist for only a subset of
// ar.
//
-// Preconditions: mm.activeMu must be locked for writing.
-// pseg.Range().Contains(ar.Start). pmas must exist for all addresses in ar.
-// ar.Length() != 0.
+// Preconditions:
+// * mm.activeMu must be locked for writing.
+// * pseg.Range().Contains(ar.Start).
+// * pmas must exist for all addresses in ar.
+// * ar.Length() != 0.
//
// Postconditions: getPMAInternalMappingsLocked does not invalidate iterators
// into mm.pmas.
@@ -783,8 +802,9 @@ func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar userm
// internal mappings exist. If this is not equal to ars, it returns a non-nil
// error explaining why.
//
-// Preconditions: mm.activeMu must be locked for writing. pmas must exist for
-// all addresses in ar.
+// Preconditions:
+// * mm.activeMu must be locked for writing.
+// * pmas must exist for all addresses in ar.
//
// Postconditions: getVecPMAInternalMappingsLocked does not invalidate iterators
// into mm.pmas.
@@ -803,9 +823,12 @@ func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars usermem.AddrRangeSe
// internalMappingsLocked returns internal mappings for addresses in ar.
//
-// Preconditions: mm.activeMu must be locked. Internal mappings must have been
-// previously established for all addresses in ar. ar.Length() != 0.
-// pseg.Range().Contains(ar.Start).
+// Preconditions:
+// * mm.activeMu must be locked.
+// * Internal mappings must have been previously established for all addresses
+// in ar.
+// * ar.Length() != 0.
+// * pseg.Range().Contains(ar.Start).
func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar usermem.AddrRange) safemem.BlockSeq {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 {
@@ -839,8 +862,10 @@ func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar usermem.Add
// vecInternalMappingsLocked returns internal mappings for addresses in ars.
//
-// Preconditions: mm.activeMu must be locked. Internal mappings must have been
-// previously established for all addresses in ars.
+// Preconditions:
+// * mm.activeMu must be locked.
+// * Internal mappings must have been previously established for all addresses
+// in ars.
func (mm *MemoryManager) vecInternalMappingsLocked(ars usermem.AddrRangeSeq) safemem.BlockSeq {
var ims []safemem.Block
for ; !ars.IsEmpty(); ars = ars.Tail() {
@@ -969,7 +994,9 @@ func (pmaSetFunctions) Split(ar usermem.AddrRange, p pma, split usermem.Addr) (p
// findOrSeekPrevUpperBoundPMA returns mm.pmas.UpperBoundSegment(addr), but may do
// so by scanning linearly backward from pgap.
//
-// Preconditions: mm.activeMu must be locked. addr <= pgap.Start().
+// Preconditions:
+// * mm.activeMu must be locked.
+// * addr <= pgap.Start().
func (mm *MemoryManager) findOrSeekPrevUpperBoundPMA(addr usermem.Addr, pgap pmaGapIterator) pmaIterator {
if checkInvariants {
if !pgap.Ok() {
@@ -1015,7 +1042,9 @@ func (pseg pmaIterator) fileRange() memmap.FileRange {
return pseg.fileRangeOf(pseg.Range())
}
-// Preconditions: pseg.Range().IsSupersetOf(ar). ar.Length != 0.
+// Preconditions:
+// * pseg.Range().IsSupersetOf(ar).
+// * ar.Length != 0.
func (pseg pmaIterator) fileRangeOf(ar usermem.AddrRange) memmap.FileRange {
if checkInvariants {
if !pseg.Ok() {
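
Note (not part of the commit): functions such as Pin, invalidateLocked, and movePMAsLocked guard these documented preconditions behind the checkInvariants flag, as the context lines above show. A self-contained sketch of that guard, with stand-in types and an assumed 4 KiB page size:

```go
package main

import "fmt"

// pageSize is assumed here; the real code uses usermem.PageSize.
const pageSize = 4096

// addrRange is a stand-in for usermem.AddrRange (half-open [Start, End)).
type addrRange struct {
	Start, End uint64
}

func (ar addrRange) wellFormed() bool    { return ar.Start <= ar.End }
func (ar addrRange) length() uint64      { return ar.End - ar.Start }
func (ar addrRange) isPageAligned() bool { return ar.Start%pageSize == 0 && ar.End%pageSize == 0 }

// checkPageAlignedRange mirrors the checkInvariants guards seen above: the
// range must be well-formed, non-empty, and page-aligned before any pmas are
// touched.
func checkPageAlignedRange(ar addrRange) error {
	if !ar.wellFormed() || ar.length() == 0 || !ar.isPageAligned() {
		return fmt.Errorf("invalid range %+v", ar)
	}
	return nil
}

func main() {
	fmt.Println(checkPageAlignedRange(addrRange{Start: 0x1000, End: 0x3000})) // <nil>
	fmt.Println(checkPageAlignedRange(addrRange{Start: 0x1000, End: 0x1800})) // invalid: not page-aligned
}
```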
diff --git a/pkg/sentry/mm/pma_set.go b/pkg/sentry/mm/pma_set.go
index d0cc1f9d3..09fe6b05f 100644
--- a/pkg/sentry/mm/pma_set.go
+++ b/pkg/sentry/mm/pma_set.go
@@ -347,7 +347,9 @@ func (s *pmaSet) InsertWithoutMerging(gap pmaGapIterator, r __generics_imported0
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *pmaSet) InsertWithoutMergingUnchecked(gap pmaGapIterator, r __generics_imported0.AddrRange, val pma) pmaIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := pmatrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1101,12 +1103,10 @@ func (seg pmaIterator) End() __generics_imported0.Addr {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg pmaIterator) SetRangeUnchecked(r __generics_imported0.AddrRange) {
seg.node.keys[seg.index] = r
}
@@ -1131,8 +1131,9 @@ func (seg pmaIterator) SetRange(r __generics_imported0.AddrRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End()
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg pmaIterator) SetStartUnchecked(start __generics_imported0.Addr) {
seg.node.keys[seg.index].Start = start
}
@@ -1154,8 +1155,9 @@ func (seg pmaIterator) SetStart(start __generics_imported0.Addr) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg pmaIterator) SetEndUnchecked(end __generics_imported0.Addr) {
seg.node.keys[seg.index].End = end
}
@@ -1575,9 +1577,11 @@ func (s *pmaSet) ExportSortedSlices() *pmaSegmentDataSlices {
// ImportSortedSlice initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *pmaSet) ImportSortedSlices(sds *pmaSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
index e74d4e1c1..4c9a575e7 100644
--- a/pkg/sentry/mm/syscalls.go
+++ b/pkg/sentry/mm/syscalls.go
@@ -166,7 +166,9 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (userme
// populateVMA obtains pmas for addresses in ar in the given vma, and maps them
// into mm.as if it is active.
//
-// Preconditions: mm.mappingMu must be locked. vseg.Range().IsSupersetOf(ar).
+// Preconditions:
+// * mm.mappingMu must be locked.
+// * vseg.Range().IsSupersetOf(ar).
func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, precommit bool) {
if !vseg.ValuePtr().effectivePerms.Any() {
// Linux doesn't populate inaccessible pages. See
@@ -208,8 +210,9 @@ func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar u
// preferable to populateVMA since it unlocks mm.mappingMu before performing
// expensive operations that don't require it to be locked.
//
-// Preconditions: mm.mappingMu must be locked for writing.
-// vseg.Range().IsSupersetOf(ar).
+// Preconditions:
+// * mm.mappingMu must be locked for writing.
+// * vseg.Range().IsSupersetOf(ar).
//
// Postconditions: mm.mappingMu will be unlocked.
func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, precommit bool) {
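
Note (not part of the commit): populateVMAAndUnlock's postcondition ("mm.mappingMu will be unlocked") describes a lock handoff, where the callee drops a lock taken by the caller before doing expensive work that does not need it. A hedged sketch of that pattern with generic names and a plain sync.Mutex:

```go
package main

import (
	"fmt"
	"sync"
)

// populateAndUnlock illustrates a "mu will be unlocked" postcondition: the
// caller passes mu locked, and the callee releases it before running work
// that does not need the lock. Names and types are illustrative, not gvisor's.
func populateAndUnlock(mu *sync.Mutex, expensiveWork func()) {
	// Copy whatever state actually requires the lock here ...
	mu.Unlock() // ... then drop it before the expensive part.
	expensiveWork()
}

func main() {
	var mu sync.Mutex
	mu.Lock()
	populateAndUnlock(&mu, func() { fmt.Println("populating without holding the lock") })
	// mu is unlocked here, matching the documented postcondition.
}
```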
diff --git a/pkg/sentry/mm/vma.go b/pkg/sentry/mm/vma.go
index c4e1989ed..f769d8294 100644
--- a/pkg/sentry/mm/vma.go
+++ b/pkg/sentry/mm/vma.go
@@ -27,8 +27,9 @@ import (
"gvisor.dev/gvisor/pkg/usermem"
)
-// Preconditions: mm.mappingMu must be locked for writing. opts must be valid
-// as defined by the checks in MMap.
+// Preconditions:
+// * mm.mappingMu must be locked for writing.
+// * opts must be valid as defined by the checks in MMap.
func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOpts) (vmaIterator, usermem.AddrRange, error) {
if opts.MaxPerms != opts.MaxPerms.Effective() {
panic(fmt.Sprintf("Non-effective MaxPerms %s cannot be enforced", opts.MaxPerms))
@@ -260,8 +261,9 @@ func (mm *MemoryManager) mlockedBytesRangeLocked(ar usermem.AddrRange) uint64 {
//
// - An error that is non-nil if vmas exist for only a subset of ar.
//
-// Preconditions: mm.mappingMu must be locked for reading; it may be
-// temporarily unlocked. ar.Length() != 0.
+// Preconditions:
+// * mm.mappingMu must be locked for reading; it may be temporarily unlocked.
+// * ar.Length() != 0.
func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool) (vmaIterator, vmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 {
@@ -342,8 +344,10 @@ const guardBytes = 256 * usermem.PageSize
// unmapLocked unmaps all addresses in ar and returns the resulting gap in
// mm.vmas.
//
-// Preconditions: mm.mappingMu must be locked for writing. ar.Length() != 0.
-// ar must be page-aligned.
+// Preconditions:
+// * mm.mappingMu must be locked for writing.
+// * ar.Length() != 0.
+// * ar must be page-aligned.
func (mm *MemoryManager) unmapLocked(ctx context.Context, ar usermem.AddrRange) vmaGapIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() {
@@ -361,8 +365,10 @@ func (mm *MemoryManager) unmapLocked(ctx context.Context, ar usermem.AddrRange)
// gap in mm.vmas. It does not remove pmas or AddressSpace mappings; clients
// must do so before calling removeVMAsLocked.
//
-// Preconditions: mm.mappingMu must be locked for writing. ar.Length() != 0. ar
-// must be page-aligned.
+// Preconditions:
+// * mm.mappingMu must be locked for writing.
+// * ar.Length() != 0.
+// * ar must be page-aligned.
func (mm *MemoryManager) removeVMAsLocked(ctx context.Context, ar usermem.AddrRange) vmaGapIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() {
@@ -467,7 +473,9 @@ func (vmaSetFunctions) Split(ar usermem.AddrRange, v vma, split usermem.Addr) (v
return v, v2
}
-// Preconditions: vseg.ValuePtr().mappable != nil. vseg.Range().Contains(addr).
+// Preconditions:
+// * vseg.ValuePtr().mappable != nil.
+// * vseg.Range().Contains(addr).
func (vseg vmaIterator) mappableOffsetAt(addr usermem.Addr) uint64 {
if checkInvariants {
if !vseg.Ok() {
@@ -491,8 +499,10 @@ func (vseg vmaIterator) mappableRange() memmap.MappableRange {
return vseg.mappableRangeOf(vseg.Range())
}
-// Preconditions: vseg.ValuePtr().mappable != nil.
-// vseg.Range().IsSupersetOf(ar). ar.Length() != 0.
+// Preconditions:
+// * vseg.ValuePtr().mappable != nil.
+// * vseg.Range().IsSupersetOf(ar).
+// * ar.Length() != 0.
func (vseg vmaIterator) mappableRangeOf(ar usermem.AddrRange) memmap.MappableRange {
if checkInvariants {
if !vseg.Ok() {
@@ -514,8 +524,10 @@ func (vseg vmaIterator) mappableRangeOf(ar usermem.AddrRange) memmap.MappableRan
return memmap.MappableRange{vma.off + uint64(ar.Start-vstart), vma.off + uint64(ar.End-vstart)}
}
-// Preconditions: vseg.ValuePtr().mappable != nil.
-// vseg.mappableRange().IsSupersetOf(mr). mr.Length() != 0.
+// Preconditions:
+// * vseg.ValuePtr().mappable != nil.
+// * vseg.mappableRange().IsSupersetOf(mr).
+// * mr.Length() != 0.
func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange {
if checkInvariants {
if !vseg.Ok() {
@@ -540,7 +552,9 @@ func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange {
// seekNextLowerBound returns mm.vmas.LowerBoundSegment(addr), but does so by
// scanning linearly forward from vseg.
//
-// Preconditions: mm.mappingMu must be locked. addr >= vseg.Start().
+// Preconditions:
+// * mm.mappingMu must be locked.
+// * addr >= vseg.Start().
func (vseg vmaIterator) seekNextLowerBound(addr usermem.Addr) vmaIterator {
if checkInvariants {
if !vseg.Ok() {
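
Note (not part of the commit): mappableRangeOf's preconditions exist because the translation in the context line above subtracts the vma's start address before adding its mappable offset. A simplified sketch of that arithmetic with stand-in types; vstart, vend, and off are hypothetical parameters standing in for the vma's fields:

```go
package main

import "fmt"

// Stand-ins for usermem.AddrRange and memmap.MappableRange (illustrative).
type addrRange struct{ Start, End uint64 }
type mappableRange struct{ Start, End uint64 }

// mappableRangeOf follows the arithmetic shown in the vma.go context above:
// mr = {off + (ar.Start - vstart), off + (ar.End - vstart)}, where vstart/vend
// bound the vma and off is its mappable offset. The documented preconditions
// (ar non-empty and contained in the vma) keep the subtractions from
// underflowing, so this checked wrapper enforces them first.
func mappableRangeOf(vstart, vend, off uint64, ar addrRange) (mappableRange, error) {
	if ar.Start >= ar.End || ar.Start < vstart || ar.End > vend {
		return mappableRange{}, fmt.Errorf("range %+v violates the preconditions", ar)
	}
	return mappableRange{
		Start: off + (ar.Start - vstart),
		End:   off + (ar.End - vstart),
	}, nil
}

func main() {
	mr, err := mappableRangeOf(0x4000_0000, 0x4001_0000, 0x2000, addrRange{0x4000_1000, 0x4000_3000})
	fmt.Printf("%#x-%#x %v\n", mr.Start, mr.End, err) // 0x3000-0x5000 <nil>
}
```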
diff --git a/pkg/sentry/mm/vma_set.go b/pkg/sentry/mm/vma_set.go
index e515ef105..3b32d3cb5 100644
--- a/pkg/sentry/mm/vma_set.go
+++ b/pkg/sentry/mm/vma_set.go
@@ -347,7 +347,9 @@ func (s *vmaSet) InsertWithoutMerging(gap vmaGapIterator, r __generics_imported0
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *vmaSet) InsertWithoutMergingUnchecked(gap vmaGapIterator, r __generics_imported0.AddrRange, val vma) vmaIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := vmatrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1101,12 +1103,10 @@ func (seg vmaIterator) End() __generics_imported0.Addr {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg vmaIterator) SetRangeUnchecked(r __generics_imported0.AddrRange) {
seg.node.keys[seg.index] = r
}
@@ -1131,8 +1131,9 @@ func (seg vmaIterator) SetRange(r __generics_imported0.AddrRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End()
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg vmaIterator) SetStartUnchecked(start __generics_imported0.Addr) {
seg.node.keys[seg.index].Start = start
}
@@ -1154,8 +1155,9 @@ func (seg vmaIterator) SetStart(start __generics_imported0.Addr) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg vmaIterator) SetEndUnchecked(end __generics_imported0.Addr) {
seg.node.keys[seg.index].End = end
}
@@ -1575,9 +1577,11 @@ func (s *vmaSet) ExportSortedSlices() *vmaSegmentDataSlices {
// ImportSortedSlice initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *vmaSet) ImportSortedSlices(sds *vmaSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
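
Note (not part of the commit): the SetStart/SetEnd preconditions reformatted above are what keep the set sorted and non-overlapping; the checked setters verify them before delegating to the unchecked mutators. A minimal sketch of the SetStart contract with stand-in types:

```go
package main

import "fmt"

// segment is an illustrative stand-in for one entry in a sorted,
// non-overlapping segment set such as vmaSet.
type segment struct{ Start, End uint64 }

// setStartChecked mirrors the SetStart/SetStartUnchecked contract documented
// above: the new start must stay below the segment's end and must not run
// into the previous segment; prev is nil when there is no previous segment.
func setStartChecked(seg, prev *segment, start uint64) error {
	if start >= seg.End {
		return fmt.Errorf("new start %#x is not below seg.End %#x", start, seg.End)
	}
	if prev != nil && start < prev.End {
		return fmt.Errorf("new start %#x overlaps previous segment ending at %#x", start, prev.End)
	}
	seg.Start = start // preconditions hold; the unchecked mutation is safe
	return nil
}

func main() {
	prev := &segment{Start: 0x0, End: 0x1000}
	seg := &segment{Start: 0x3000, End: 0x4000}
	fmt.Println(setStartChecked(seg, prev, 0x2000), *seg) // <nil> {8192 16384}
	fmt.Println(setStartChecked(seg, prev, 0x800))        // error: overlaps previous segment
}
```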