Diffstat (limited to 'pkg/sentry/mm/pma.go')
 pkg/sentry/mm/pma.go | 74 ++++++++++++++++++++++++++---------------------------
 1 file changed, 37 insertions(+), 37 deletions(-)
diff --git a/pkg/sentry/mm/pma.go b/pkg/sentry/mm/pma.go
index 7e5f7de64..5583f62b2 100644
--- a/pkg/sentry/mm/pma.go
+++ b/pkg/sentry/mm/pma.go
@@ -18,12 +18,12 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safecopy"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// existingPMAsLocked checks that pmas exist for all addresses in ar, and
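For callers making the same migration, the address and permission types move from usermem to hostarch with their shapes unchanged; only the import path differs. A minimal sketch of the replacement types in use (the addresses are illustrative; Length and Any are assumed to keep their usermem signatures):

package main

import (
        "fmt"

        "gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
        // hostarch.AddrRange and hostarch.AccessType are drop-in
        // replacements for the usermem types removed above.
        ar := hostarch.AddrRange{Start: 0x400000, End: 0x401000}
        at := hostarch.AccessType{Read: true, Write: true}
        fmt.Println(ar.Length(), at.Any()) // 4096 true
}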
@@ -34,7 +34,7 @@ import (
// Preconditions:
// * mm.activeMu must be locked.
// * ar.Length() != 0.
-func (mm *MemoryManager) existingPMAsLocked(ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool, needInternalMappings bool) pmaIterator {
+func (mm *MemoryManager) existingPMAsLocked(ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool, needInternalMappings bool) pmaIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -70,7 +70,7 @@ func (mm *MemoryManager) existingPMAsLocked(ar usermem.AddrRange, at usermem.Acc
// and support access of type (at, ignorePermissions).
//
// Preconditions: mm.activeMu must be locked.
-func (mm *MemoryManager) existingVecPMAsLocked(ars usermem.AddrRangeSeq, at usermem.AccessType, ignorePermissions bool, needInternalMappings bool) bool {
+func (mm *MemoryManager) existingVecPMAsLocked(ars hostarch.AddrRangeSeq, at hostarch.AccessType, ignorePermissions bool, needInternalMappings bool) bool {
for ; !ars.IsEmpty(); ars = ars.Tail() {
if ar := ars.Head(); ar.Length() != 0 && !mm.existingPMAsLocked(ar, at, ignorePermissions, needInternalMappings).Ok() {
return false
@@ -98,7 +98,7 @@ func (mm *MemoryManager) existingVecPMAsLocked(ars usermem.AddrRangeSeq, at user
// * vseg.Range().Contains(ar.Start).
// * vmas must exist for all addresses in ar, and support accesses of type at
// (i.e. permission checks must have been performed against vmas).
-func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, at usermem.AccessType) (pmaIterator, pmaGapIterator, error) {
+func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, at hostarch.AccessType) (pmaIterator, pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -118,7 +118,7 @@ func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar
end = ar.End.RoundDown()
alignerr = syserror.EFAULT
}
- ar = usermem.AddrRange{ar.Start.RoundDown(), end}
+ ar = hostarch.AddrRange{ar.Start.RoundDown(), end}
pstart, pend, perr := mm.getPMAsInternalLocked(ctx, vseg, ar, at)
if pend.Start() <= ar.Start {
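The rounding above widens ar to page boundaries: the start always rounds down, and when rounding the end up would overflow the address space, the end rounds down instead and the unalignable tail is reported as EFAULT. A sketch of just that widening step (alignToPage is a hypothetical helper; Addr.RoundUp returning an ok flag is assumed from its usermem predecessor):

package example

import "gvisor.dev/gvisor/pkg/hostarch"

// alignToPage widens ar outward to page boundaries as getPMAsLocked
// does. ok is false when the end could not be rounded up, in which
// case the caller owes the truncated tail an EFAULT.
func alignToPage(ar hostarch.AddrRange) (hostarch.AddrRange, bool) {
        end, ok := ar.End.RoundUp()
        if !ok {
                end = ar.End.RoundDown()
        }
        return hostarch.AddrRange{ar.Start.RoundDown(), end}, ok
}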
@@ -145,7 +145,7 @@ func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar
// * mm.activeMu must be locked for writing.
// * vmas must exist for all addresses in ars, and support accesses of type at
// (i.e. permission checks must have been performed against vmas).
-func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType) (usermem.AddrRangeSeq, error) {
+func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars hostarch.AddrRangeSeq, at hostarch.AccessType) (hostarch.AddrRangeSeq, error) {
for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
ar := arsit.Head()
if ar.Length() == 0 {
@@ -164,7 +164,7 @@ func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrR
end = ar.End.RoundDown()
alignerr = syserror.EFAULT
}
- ar = usermem.AddrRange{ar.Start.RoundDown(), end}
+ ar = hostarch.AddrRange{ar.Start.RoundDown(), end}
_, pend, perr := mm.getPMAsInternalLocked(ctx, mm.vmas.FindSegment(ar.Start), ar, at)
if perr != nil {
@@ -191,7 +191,7 @@ func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrR
//
// getPMAsInternalLocked is an implementation helper for getPMAsLocked and
// getVecPMAsLocked; other clients should call one of those instead.
-func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, at usermem.AccessType) (pmaIterator, pmaGapIterator, error) {
+func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, at hostarch.AccessType) (pmaIterator, pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -245,7 +245,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
pseg, pgap = mm.pmas.Insert(pgap, allocAR, pma{
file: mf,
off: fr.Start,
- translatePerms: usermem.AnyAccess,
+ translatePerms: hostarch.AnyAccess,
effectivePerms: vma.effectivePerms,
maxPerms: vma.maxPerms,
// Since we just allocated this memory and have the
@@ -335,7 +335,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
// Neither of these cases has enough spatial locality to
// benefit from copying nearby pages, so if the vma is
// executable, only copy the pages required.
- var copyAR usermem.AddrRange
+ var copyAR hostarch.AddrRange
if vseg.ValuePtr().effectivePerms.Execute {
copyAR = pseg.Range().Intersect(ar)
} else {
@@ -366,7 +366,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
// Replace the pma with a copy in the part of the address
// range where copying was successful. This doesn't change
// RSS.
- copyAR.End = copyAR.Start + usermem.Addr(fr.Length())
+ copyAR.End = copyAR.Start + hostarch.Addr(fr.Length())
if copyAR != pseg.Range() {
pseg = mm.pmas.Isolate(pseg, copyAR)
pstart = pmaIterator{} // iterators invalidated
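The End adjustment above handles a short copy: if fewer bytes were copied than copyAR covers, only the successfully copied prefix becomes the new private pma, and Isolate splits it out. The arithmetic in isolation (copiedPrefix is a hypothetical helper; n stands in for fr.Length()):

package example

import "gvisor.dev/gvisor/pkg/hostarch"

// copiedPrefix trims ar to its first n bytes, mirroring
// copyAR.End = copyAR.Start + hostarch.Addr(fr.Length()) above.
func copiedPrefix(ar hostarch.AddrRange, n uint64) hostarch.AddrRange {
        return hostarch.AddrRange{ar.Start, ar.Start + hostarch.Addr(n)}
}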
@@ -380,7 +380,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
mf.IncRef(fr)
oldpma.file = mf
oldpma.off = fr.Start
- oldpma.translatePerms = usermem.AnyAccess
+ oldpma.translatePerms = hostarch.AnyAccess
oldpma.effectivePerms = vma.effectivePerms
oldpma.maxPerms = vma.maxPerms
oldpma.needCOW = false
@@ -499,14 +499,14 @@ const (
// privateAllocUnit may reduce page faults by allowing fewer, larger pmas
// to be mapped, but may result in larger amounts of wasted memory in the
// presence of fragmentation. privateAllocUnit must be a power-of-2
- // multiple of usermem.PageSize.
- privateAllocUnit = usermem.HugePageSize
+ // multiple of hostarch.PageSize.
+ privateAllocUnit = hostarch.HugePageSize
privateAllocMask = privateAllocUnit - 1
)
-func privateAligned(ar usermem.AddrRange) usermem.AddrRange {
- aligned := usermem.AddrRange{ar.Start &^ privateAllocMask, ar.End}
+func privateAligned(ar hostarch.AddrRange) hostarch.AddrRange {
+ aligned := hostarch.AddrRange{ar.Start &^ privateAllocMask, ar.End}
if end := (ar.End + privateAllocMask) &^ privateAllocMask; end >= ar.End {
aligned.End = end
}
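The end-rounding guard in privateAligned is an overflow check: for ranges ending in the last privateAllocUnit of the address space, (ar.End + privateAllocMask) wraps around and comes out below ar.End, so aligned.End keeps its unrounded value. A worked illustration (hostarch.HugePageSize is 2MB on amd64; the first address is illustrative):

package main

import (
        "fmt"

        "gvisor.dev/gvisor/pkg/hostarch"
)

const mask = hostarch.HugePageSize - 1

func main() {
        // Normal case: the end rounds up to the next 2MB boundary.
        end := (hostarch.Addr(0x201000) + mask) &^ mask
        fmt.Printf("%#x\n", end) // 0x400000

        // Overflow case: near the top of the address space the sum
        // wraps, so the rounded "end" is smaller than the original and
        // the guard in privateAligned leaves aligned.End alone.
        top := ^hostarch.Addr(0)
        fmt.Println((top+mask)&^mask >= top) // false
}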
@@ -548,7 +548,7 @@ func (mm *MemoryManager) isPMACopyOnWriteLocked(vseg vmaIterator, pseg pmaIterat
rseg := mm.privateRefs.refs.FindSegment(fr.Start)
if rseg.Ok() && rseg.Value() == 1 && fr.End <= rseg.End() {
pma.needCOW = false
- // pma.private => pma.translatePerms == usermem.AnyAccess
+ // pma.private => pma.translatePerms == hostarch.AnyAccess
vma := vseg.ValuePtr()
pma.effectivePerms = vma.effectivePerms
pma.maxPerms = vma.maxPerms
@@ -558,7 +558,7 @@ func (mm *MemoryManager) isPMACopyOnWriteLocked(vseg vmaIterator, pseg pmaIterat
}
// Invalidate implements memmap.MappingSpace.Invalidate.
-func (mm *MemoryManager) Invalidate(ar usermem.AddrRange, opts memmap.InvalidateOpts) {
+func (mm *MemoryManager) Invalidate(ar hostarch.AddrRange, opts memmap.InvalidateOpts) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -581,7 +581,7 @@ func (mm *MemoryManager) Invalidate(ar usermem.AddrRange, opts memmap.Invalidate
// * mm.activeMu must be locked for writing.
// * ar.Length() != 0.
// * ar must be page-aligned.
-func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivate, invalidateShared bool) {
+func (mm *MemoryManager) invalidateLocked(ar hostarch.AddrRange, invalidatePrivate, invalidateShared bool) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -627,7 +627,7 @@ func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivat
// Preconditions:
// * ar.Length() != 0.
// * ar must be page-aligned.
-func (mm *MemoryManager) Pin(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool) ([]PinnedRange, error) {
+func (mm *MemoryManager) Pin(ctx context.Context, ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool) ([]PinnedRange, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -683,7 +683,7 @@ func (mm *MemoryManager) Pin(ctx context.Context, ar usermem.AddrRange, at userm
// PinnedRanges are returned by MemoryManager.Pin.
type PinnedRange struct {
// Source is the corresponding range of addresses.
- Source usermem.AddrRange
+ Source hostarch.AddrRange
// File is the mapped file.
File memmap.File
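Pin pairs with the package-level Unpin seen in the next hunk, and it can return pinned ranges together with an error (pinning stops at the first failure, but everything before it is still pinned), so the returned slice must be unpinned even on failure. A hedged usage sketch, assuming a *MemoryManager memMgr, a context ctx, and a page-aligned ar from the surrounding sentry code:

package example

import (
        "gvisor.dev/gvisor/pkg/context"
        "gvisor.dev/gvisor/pkg/hostarch"
        "gvisor.dev/gvisor/pkg/sentry/mm"
)

// pinned demonstrates the Pin/Unpin contract: Pin may return pinned
// ranges alongside an error, so Unpin must see the returned slice
// even on failure.
func pinned(ctx context.Context, memMgr *mm.MemoryManager, ar hostarch.AddrRange) error {
        prs, err := memMgr.Pin(ctx, ar, hostarch.Read, false /* ignorePermissions */)
        defer mm.Unpin(prs)
        if err != nil {
                return err
        }
        for _, pr := range prs {
                // Each pr.File reference pins the pages backing
                // pr.Source until mm.Unpin releases it.
                _ = pr
        }
        return nil
}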
@@ -713,7 +713,7 @@ func Unpin(prs []PinnedRange) {
// * !oldAR.Overlaps(newAR).
// * mm.pmas.IsEmptyRange(newAR).
// * oldAR and newAR must be page-aligned.
-func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
+func (mm *MemoryManager) movePMAsLocked(oldAR, newAR hostarch.AddrRange) {
if checkInvariants {
if !oldAR.WellFormed() || oldAR.Length() == 0 || !oldAR.IsPageAligned() {
panic(fmt.Sprintf("invalid oldAR: %v", oldAR))
@@ -731,7 +731,7 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
}
type movedPMA struct {
- oldAR usermem.AddrRange
+ oldAR hostarch.AddrRange
pma pma
}
var movedPMAs []movedPMA
@@ -751,7 +751,7 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
pgap := mm.pmas.FindGap(newAR.Start)
for i := range movedPMAs {
mpma := &movedPMAs[i]
- pmaNewAR := usermem.AddrRange{mpma.oldAR.Start + off, mpma.oldAR.End + off}
+ pmaNewAR := hostarch.AddrRange{mpma.oldAR.Start + off, mpma.oldAR.End + off}
pgap = mm.pmas.Insert(pgap, pmaNewAR, mpma.pma).NextGap()
}
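The translation above preserves each pma's length and relative position: off is the constant delta between newAR.Start and oldAR.Start (its definition falls in an elided part of the diff), so adding it to both bounds slides the range without resizing it. The arithmetic in isolation (translate is a hypothetical helper):

package example

import "gvisor.dev/gvisor/pkg/hostarch"

// translate slides ar by the mremap delta off, as the loop above does
// for each moved pma. Addr arithmetic is modular, so a "negative"
// delta (moving down) also works.
func translate(ar hostarch.AddrRange, off hostarch.Addr) hostarch.AddrRange {
        return hostarch.AddrRange{ar.Start + off, ar.End + off}
}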
@@ -776,7 +776,7 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
//
// Postconditions: getPMAInternalMappingsLocked does not invalidate iterators
// into mm.pmas.
-func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar usermem.AddrRange) (pmaGapIterator, error) {
+func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar hostarch.AddrRange) (pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -808,7 +808,7 @@ func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar userm
//
// Postconditions: getVecPMAInternalMappingsLocked does not invalidate iterators
// into mm.pmas.
-func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars usermem.AddrRangeSeq) (usermem.AddrRangeSeq, error) {
+func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars hostarch.AddrRangeSeq) (hostarch.AddrRangeSeq, error) {
for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
ar := arsit.Head()
if ar.Length() == 0 {
@@ -829,7 +829,7 @@ func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars usermem.AddrRangeSe
// in ar.
// * ar.Length() != 0.
// * pseg.Range().Contains(ar.Start).
-func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar usermem.AddrRange) safemem.BlockSeq {
+func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar hostarch.AddrRange) safemem.BlockSeq {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
@@ -866,7 +866,7 @@ func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar usermem.Add
// * mm.activeMu must be locked.
// * Internal mappings must have been previously established for all addresses
// in ars.
-func (mm *MemoryManager) vecInternalMappingsLocked(ars usermem.AddrRangeSeq) safemem.BlockSeq {
+func (mm *MemoryManager) vecInternalMappingsLocked(ars hostarch.AddrRangeSeq) safemem.BlockSeq {
var ims []safemem.Block
for ; !ars.IsEmpty(); ars = ars.Tail() {
ar := ars.Head()
@@ -931,7 +931,7 @@ func (mm *MemoryManager) decPrivateRef(fr memmap.FileRange) {
// MemoryManager to reflect the insertion of a pma at ar.
//
// Preconditions: mm.activeMu must be locked for writing.
-func (mm *MemoryManager) addRSSLocked(ar usermem.AddrRange) {
+func (mm *MemoryManager) addRSSLocked(ar hostarch.AddrRange) {
mm.curRSS += uint64(ar.Length())
if mm.curRSS > mm.maxRSS {
mm.maxRSS = mm.curRSS
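addRSSLocked maintains a running total plus a high-water mark that only grows; removeRSSLocked, in the next hunk, just decrements the total and deliberately leaves the maximum alone. The bookkeeping pattern in isolation:

package example

// rss mirrors the curRSS/maxRSS fields above: a running total and a
// monotone high-water mark updated only when the total grows.
type rss struct {
        cur, max uint64
}

func (r *rss) add(n uint64) {
        r.cur += n
        if r.cur > r.max {
                r.max = r.cur
        }
}

// remove shrinks the total but never the high-water mark.
func (r *rss) remove(n uint64) { r.cur -= n }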
@@ -942,19 +942,19 @@ func (mm *MemoryManager) addRSSLocked(ar usermem.AddrRange) {
// reflect the removal of a pma at ar.
//
// Preconditions: mm.activeMu must be locked for writing.
-func (mm *MemoryManager) removeRSSLocked(ar usermem.AddrRange) {
+func (mm *MemoryManager) removeRSSLocked(ar hostarch.AddrRange) {
mm.curRSS -= uint64(ar.Length())
}
// pmaSetFunctions implements segment.Functions for pmaSet.
type pmaSetFunctions struct{}
-func (pmaSetFunctions) MinKey() usermem.Addr {
+func (pmaSetFunctions) MinKey() hostarch.Addr {
return 0
}
-func (pmaSetFunctions) MaxKey() usermem.Addr {
- return ^usermem.Addr(0)
+func (pmaSetFunctions) MaxKey() hostarch.Addr {
+ return ^hostarch.Addr(0)
}
func (pmaSetFunctions) ClearValue(pma *pma) {
@@ -962,7 +962,7 @@ func (pmaSetFunctions) ClearValue(pma *pma) {
pma.internalMappings = safemem.BlockSeq{}
}
-func (pmaSetFunctions) Merge(ar1 usermem.AddrRange, pma1 pma, ar2 usermem.AddrRange, pma2 pma) (pma, bool) {
+func (pmaSetFunctions) Merge(ar1 hostarch.AddrRange, pma1 pma, ar2 hostarch.AddrRange, pma2 pma) (pma, bool) {
if pma1.file != pma2.file ||
pma1.off+uint64(ar1.Length()) != pma2.off ||
pma1.translatePerms != pma2.translatePerms ||
@@ -980,7 +980,7 @@ func (pmaSetFunctions) Merge(ar1 usermem.AddrRange, pma1 pma, ar2 usermem.AddrRa
return pma1, true
}
-func (pmaSetFunctions) Split(ar usermem.AddrRange, p pma, split usermem.Addr) (pma, pma) {
+func (pmaSetFunctions) Split(ar hostarch.AddrRange, p pma, split hostarch.Addr) (pma, pma) {
newlen1 := uint64(split - ar.Start)
p2 := p
p2.off += newlen1
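Split's real work is the file-offset adjustment: the right-hand piece begins newlen1 = split - ar.Start bytes into the original mapping, so its file offset advances by the same amount. A worked case (splitOff is a hypothetical helper; the numbers are illustrative):

package example

import "gvisor.dev/gvisor/pkg/hostarch"

// splitOff returns the file offset of the right-hand pma when a
// mapping of ar at file offset off is split at split. For ar =
// [0x400000, 0x404000) at off 0x10000, splitting at 0x402000 gives
// 0x10000 + 0x2000 = 0x12000.
func splitOff(ar hostarch.AddrRange, off uint64, split hostarch.Addr) uint64 {
        return off + uint64(split-ar.Start)
}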
@@ -997,7 +997,7 @@ func (pmaSetFunctions) Split(ar usermem.AddrRange, p pma, split usermem.Addr) (p
// Preconditions:
// * mm.activeMu must be locked.
// * addr <= pgap.Start().
-func (mm *MemoryManager) findOrSeekPrevUpperBoundPMA(addr usermem.Addr, pgap pmaGapIterator) pmaIterator {
+func (mm *MemoryManager) findOrSeekPrevUpperBoundPMA(addr hostarch.Addr, pgap pmaGapIterator) pmaIterator {
if checkInvariants {
if !pgap.Ok() {
panic("terminal pma iterator")
@@ -1045,7 +1045,7 @@ func (pseg pmaIterator) fileRange() memmap.FileRange {
// Preconditions:
// * pseg.Range().IsSupersetOf(ar).
// * ar.Length() != 0.
-func (pseg pmaIterator) fileRangeOf(ar usermem.AddrRange) memmap.FileRange {
+func (pseg pmaIterator) fileRangeOf(ar hostarch.AddrRange) memmap.FileRange {
if checkInvariants {
if !pseg.Ok() {
panic("terminal pma iterator")