author     gVisor bot <gvisor-bot@google.com>  2021-03-29 20:35:44 +0000
committer  gVisor bot <gvisor-bot@google.com>  2021-03-29 20:35:44 +0000
commit     08cc017c088017546ed712cce700bf4374c864c0 (patch)
tree       af024e69d8855f4f867ef435ced35532b368a981 /pkg/sentry/loader
parent     6a422755602daeaef4be60969c1acddc8b7b3041 (diff)
parent     8a2f7e716dcc62f04d2808e8ade34941c94fc956 (diff)

Merge release-20210322.0-29-g8a2f7e716 (automated)
Diffstat (limited to 'pkg/sentry/loader')
-rw-r--r--  pkg/sentry/loader/elf.go     | 49
-rw-r--r--  pkg/sentry/loader/loader.go  | 11
-rw-r--r--  pkg/sentry/loader/vdso.go    | 25
3 files changed, 44 insertions(+), 41 deletions(-)
diff --git a/pkg/sentry/loader/elf.go b/pkg/sentry/loader/elf.go
index cd9fa4031..e92d9fdc3 100644
--- a/pkg/sentry/loader/elf.go
+++ b/pkg/sentry/loader/elf.go
@@ -25,6 +25,7 @@ import (
"gvisor.dev/gvisor/pkg/binary"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsbridge"
@@ -41,7 +42,7 @@ const (
// maxTotalPhdrSize is the maximum combined size of all program
// headers. Linux limits this to one page.
- maxTotalPhdrSize = usermem.PageSize
+ maxTotalPhdrSize = hostarch.PageSize
)
var (
@@ -52,8 +53,8 @@ var (
prog64Size = int(binary.Size(elf.Prog64{}))
)
-func progFlagsAsPerms(f elf.ProgFlag) usermem.AccessType {
- var p usermem.AccessType
+func progFlagsAsPerms(f elf.ProgFlag) hostarch.AccessType {
+ var p hostarch.AccessType
if f&elf.PF_R == elf.PF_R {
p.Read = true
}
@@ -75,7 +76,7 @@ type elfInfo struct {
arch arch.Arch
// entry is the program entry point.
- entry usermem.Addr
+ entry hostarch.Addr
// phdrs are the program headers.
phdrs []elf.ProgHeader
@@ -230,7 +231,7 @@ func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) {
return elfInfo{
os: os,
arch: a,
- entry: usermem.Addr(hdr.Entry),
+ entry: hostarch.Addr(hdr.Entry),
phdrs: phdrs,
phdrOff: hdr.Phoff,
phdrSize: prog64Size,
@@ -240,9 +241,9 @@ func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) {
// mapSegment maps a phdr into the Task. offset is the offset to apply to
// phdr.Vaddr.
-func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr *elf.ProgHeader, offset usermem.Addr) error {
+func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr *elf.ProgHeader, offset hostarch.Addr) error {
// We must make a page-aligned mapping.
- adjust := usermem.Addr(phdr.Vaddr).PageOffset()
+ adjust := hostarch.Addr(phdr.Vaddr).PageOffset()
addr, ok := offset.AddLength(phdr.Vaddr)
if !ok {
@@ -250,14 +251,14 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
ctx.Warningf("Computed segment load address overflows: %#x + %#x", phdr.Vaddr, offset)
return syserror.ENOEXEC
}
- addr -= usermem.Addr(adjust)
+ addr -= hostarch.Addr(adjust)
fileSize := phdr.Filesz + adjust
if fileSize < phdr.Filesz {
ctx.Infof("Computed segment file size overflows: %#x + %#x", phdr.Filesz, adjust)
return syserror.ENOEXEC
}
- ms, ok := usermem.Addr(fileSize).RoundUp()
+ ms, ok := hostarch.Addr(fileSize).RoundUp()
if !ok {
ctx.Infof("fileSize %#x too large", fileSize)
return syserror.ENOEXEC
@@ -281,7 +282,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
Unmap: true,
Private: true,
Perms: prot,
- MaxPerms: usermem.AnyAccess,
+ MaxPerms: hostarch.AnyAccess,
}
defer func() {
if mopts.MappingIdentity != nil {
@@ -312,7 +313,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
panic(fmt.Sprintf("zeroSize too big? %#x", uint64(zeroSize)))
}
if _, err := m.ZeroOut(ctx, zeroAddr, zeroSize, usermem.IOOpts{IgnorePermissions: true}); err != nil {
- ctx.Warningf("Failed to zero end of page [%#x, %#x): %v", zeroAddr, zeroAddr+usermem.Addr(zeroSize), err)
+ ctx.Warningf("Failed to zero end of page [%#x, %#x): %v", zeroAddr, zeroAddr+hostarch.Addr(zeroSize), err)
return err
}
}
@@ -330,7 +331,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
if !ok {
panic(fmt.Sprintf("anonymous memory doesn't fit in pre-sized range? %#x + %#x", addr, mapSize))
}
- anonSize, ok := usermem.Addr(memSize - mapSize).RoundUp()
+ anonSize, ok := hostarch.Addr(memSize - mapSize).RoundUp()
if !ok {
ctx.Infof("extra anon pages too large: %#x", memSize-mapSize)
return syserror.ENOEXEC
@@ -339,7 +340,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
// N.B. Linux uses vm_brk_flags to map these pages, which only
// honors the X bit (ignoring the others), always mapping at least
// RW. These pages are not included in the final brk region.
- prot := usermem.ReadWrite
+ prot := hostarch.ReadWrite
if phdr.Flags&elf.PF_X == elf.PF_X {
prot.Execute = true
}
@@ -352,7 +353,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
Fixed: true,
Private: true,
Perms: prot,
- MaxPerms: usermem.AnyAccess,
+ MaxPerms: hostarch.AnyAccess,
}); err != nil {
ctx.Infof("Error mapping PT_LOAD segment %v anonymous memory: %v", phdr, err)
return err
@@ -371,19 +372,19 @@ type loadedELF struct {
arch arch.Arch
// entry is the entry point of the ELF.
- entry usermem.Addr
+ entry hostarch.Addr
// start is the start of the ELF.
- start usermem.Addr
+ start hostarch.Addr
// end is the end of the ELF.
- end usermem.Addr
+ end hostarch.Addr
// interpreter is the path to the ELF interpreter.
interpreter string
// phdrAddr is the address of the ELF program headers.
- phdrAddr usermem.Addr
+ phdrAddr hostarch.Addr
// phdrSize is the size of a single program header in the ELF.
phdrSize int
@@ -407,14 +408,14 @@ type loadedELF struct {
// It does not load the ELF interpreter, or return any auxv entries.
//
// Preconditions: f is an ELF file.
-func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, info elfInfo, sharedLoadOffset usermem.Addr) (loadedELF, error) {
+func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, info elfInfo, sharedLoadOffset hostarch.Addr) (loadedELF, error) {
first := true
- var start, end usermem.Addr
+ var start, end hostarch.Addr
var interpreter string
for _, phdr := range info.phdrs {
switch phdr.Type {
case elf.PT_LOAD:
- vaddr := usermem.Addr(phdr.Vaddr)
+ vaddr := hostarch.Addr(phdr.Vaddr)
if first {
first = false
start = vaddr
@@ -492,7 +493,7 @@ func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, in
// Note that the vaddr of the first PT_LOAD segment is ignored when
// choosing the load address (even if it is non-zero). The vaddr does
// become an offset from that load address.
- var offset usermem.Addr
+ var offset hostarch.Addr
if info.sharedObject {
totalSize := end - start
totalSize, ok := totalSize.RoundUp()
@@ -688,8 +689,8 @@ func loadELF(ctx context.Context, args LoadArgs) (loadedELF, arch.Context, error
// ELF-specific auxv entries.
bin.auxv = arch.Auxv{
arch.AuxEntry{linux.AT_PHDR, bin.phdrAddr},
- arch.AuxEntry{linux.AT_PHENT, usermem.Addr(bin.phdrSize)},
- arch.AuxEntry{linux.AT_PHNUM, usermem.Addr(bin.phdrNum)},
+ arch.AuxEntry{linux.AT_PHENT, hostarch.Addr(bin.phdrSize)},
+ arch.AuxEntry{linux.AT_PHNUM, hostarch.Addr(bin.phdrNum)},
arch.AuxEntry{linux.AT_ENTRY, bin.entry},
}
if bin.interpreter != "" {
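
The elf.go hunks above are a mechanical rename, but they run through the loader's page-alignment arithmetic, which is done entirely with hostarch.Addr helpers (PageOffset, AddLength, RoundUp). Below is a minimal standalone sketch of that alignment step, assuming the gvisor.dev/gvisor module is importable; the addresses and sizes are illustrative placeholders, not values from a real ELF.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	var (
		offset hostarch.Addr = 0x400000 // chosen load bias for the segment
		vaddr  uint64        = 0x1234   // phdr.Vaddr, not page-aligned
		filesz uint64        = 0x5678   // phdr.Filesz
	)

	// Mappings must be page-aligned, so shift the start down by the
	// segment's offset within its first page, as mapSegment does.
	adjust := hostarch.Addr(vaddr).PageOffset()
	addr, ok := offset.AddLength(vaddr)
	if !ok {
		panic("segment load address overflows")
	}
	addr -= hostarch.Addr(adjust)

	// Grow the file size by the same amount and round up to whole pages.
	mapSize, ok := hostarch.Addr(filesz + adjust).RoundUp()
	if !ok {
		panic("fileSize too large")
	}
	fmt.Printf("map [%#x, %#x)\n", addr, addr+mapSize)
}

The same helpers previously lived on usermem.Addr; only the package moved, which is why every hunk in this merge is a one-line substitution.
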
diff --git a/pkg/sentry/loader/loader.go b/pkg/sentry/loader/loader.go
index c69b62db9..47e3775a3 100644
--- a/pkg/sentry/loader/loader.go
+++ b/pkg/sentry/loader/loader.go
@@ -25,6 +25,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/rand"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsbridge"
@@ -266,17 +267,17 @@ func Load(ctx context.Context, args LoadArgs, extraAuxv []arch.AuxEntry, vdso *V
// Add generic auxv entries.
auxv := append(loaded.auxv, arch.Auxv{
- arch.AuxEntry{linux.AT_UID, usermem.Addr(c.RealKUID.In(c.UserNamespace).OrOverflow())},
- arch.AuxEntry{linux.AT_EUID, usermem.Addr(c.EffectiveKUID.In(c.UserNamespace).OrOverflow())},
- arch.AuxEntry{linux.AT_GID, usermem.Addr(c.RealKGID.In(c.UserNamespace).OrOverflow())},
- arch.AuxEntry{linux.AT_EGID, usermem.Addr(c.EffectiveKGID.In(c.UserNamespace).OrOverflow())},
+ arch.AuxEntry{linux.AT_UID, hostarch.Addr(c.RealKUID.In(c.UserNamespace).OrOverflow())},
+ arch.AuxEntry{linux.AT_EUID, hostarch.Addr(c.EffectiveKUID.In(c.UserNamespace).OrOverflow())},
+ arch.AuxEntry{linux.AT_GID, hostarch.Addr(c.RealKGID.In(c.UserNamespace).OrOverflow())},
+ arch.AuxEntry{linux.AT_EGID, hostarch.Addr(c.EffectiveKGID.In(c.UserNamespace).OrOverflow())},
// The conditions that require AT_SECURE = 1 never arise. See
// kernel.Task.updateCredsForExecLocked.
arch.AuxEntry{linux.AT_SECURE, 0},
arch.AuxEntry{linux.AT_CLKTCK, linux.CLOCKS_PER_SEC},
arch.AuxEntry{linux.AT_EXECFN, execfn},
arch.AuxEntry{linux.AT_RANDOM, random},
- arch.AuxEntry{linux.AT_PAGESZ, usermem.PageSize},
+ arch.AuxEntry{linux.AT_PAGESZ, hostarch.PageSize},
arch.AuxEntry{linux.AT_SYSINFO_EHDR, vdsoAddr},
}...)
auxv = append(auxv, extraAuxv...)
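
loader.go's hunks touch only the auxiliary vector, the key/value table handed to a new process at exec time. A minimal sketch of assembling entries with hostarch.Addr values follows, assuming the gvisor packages used in this diff (arch.AuxEntry holds a Key and a Value, both hostarch.Addr after this change); the entries mirror the generic ones built in Load above.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/sentry/arch"
)

func main() {
	auxv := arch.Auxv{
		arch.AuxEntry{linux.AT_PAGESZ, hostarch.PageSize},
		arch.AuxEntry{linux.AT_CLKTCK, linux.CLOCKS_PER_SEC},
		arch.AuxEntry{linux.AT_SECURE, 0}, // see kernel.Task.updateCredsForExecLocked
	}
	for _, e := range auxv {
		fmt.Printf("key=%d value=%#x\n", e.Key, e.Value)
	}
}
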
diff --git a/pkg/sentry/loader/vdso.go b/pkg/sentry/loader/vdso.go
index a32d37d62..fd54261fd 100644
--- a/pkg/sentry/loader/vdso.go
+++ b/pkg/sentry/loader/vdso.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -90,7 +91,7 @@ func validateVDSO(ctx context.Context, f fullReader, size uint64) (elfInfo, erro
var first *elf.ProgHeader
var prev *elf.ProgHeader
- var prevEnd usermem.Addr
+ var prevEnd hostarch.Addr
for i, phdr := range info.phdrs {
if phdr.Type != elf.PT_LOAD {
continue
@@ -119,7 +120,7 @@ func validateVDSO(ctx context.Context, f fullReader, size uint64) (elfInfo, erro
return elfInfo{}, syserror.ENOEXEC
}
- start := usermem.Addr(memoryOffset)
+ start := hostarch.Addr(memoryOffset)
end, ok := start.AddLength(phdr.Memsz)
if !ok {
log.Warningf("PT_LOAD segment size overflows: %#x + %#x", start, end)
@@ -210,7 +211,7 @@ func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) {
}
// Then copy it into a VDSO mapping.
- size, ok := usermem.Addr(len(vdsodata.Binary)).RoundUp()
+ size, ok := hostarch.Addr(len(vdsodata.Binary)).RoundUp()
if !ok {
return nil, fmt.Errorf("VDSO size overflows? %#x", len(vdsodata.Binary))
}
@@ -221,7 +222,7 @@ func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) {
return nil, fmt.Errorf("unable to allocate VDSO memory: %v", err)
}
- ims, err := mf.MapInternal(vdso, usermem.ReadWrite)
+ ims, err := mf.MapInternal(vdso, hostarch.ReadWrite)
if err != nil {
mf.DecRef(vdso)
return nil, fmt.Errorf("unable to map VDSO memory: %v", err)
@@ -234,7 +235,7 @@ func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) {
}
// Finally, allocate a param page for this VDSO.
- paramPage, err := mf.Allocate(usermem.PageSize, usage.System)
+ paramPage, err := mf.Allocate(hostarch.PageSize, usage.System)
if err != nil {
mf.DecRef(vdso)
return nil, fmt.Errorf("unable to allocate VDSO param page: %v", err)
@@ -266,7 +267,7 @@ func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) {
// compatibility with such binaries, we load the VDSO much like Linux.
//
// loadVDSO takes a reference on the VDSO and parameter page FrameRegions.
-func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF) (usermem.Addr, error) {
+func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF) (hostarch.Addr, error) {
if v.os != bin.os {
ctx.Warningf("Binary ELF OS %v and VDSO ELF OS %v differ", bin.os, v.os)
return 0, syserror.ENOEXEC
@@ -297,8 +298,8 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF)
Fixed: true,
Unmap: true,
Private: true,
- Perms: usermem.Read,
- MaxPerms: usermem.Read,
+ Perms: hostarch.Read,
+ MaxPerms: hostarch.Read,
})
if err != nil {
ctx.Infof("Unable to map VDSO param page: %v", err)
@@ -318,8 +319,8 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF)
Fixed: true,
Unmap: true,
Private: true,
- Perms: usermem.Read,
- MaxPerms: usermem.AnyAccess,
+ Perms: hostarch.Read,
+ MaxPerms: hostarch.AnyAccess,
})
if err != nil {
ctx.Infof("Unable to map VDSO: %v", err)
@@ -349,7 +350,7 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF)
return 0, syserror.ENOEXEC
}
segPage := segAddr.RoundDown()
- segSize := usermem.Addr(phdr.Memsz)
+ segSize := hostarch.Addr(phdr.Memsz)
segSize, ok = segSize.AddLength(segAddr.PageOffset())
if !ok {
ctx.Warningf("PT_LOAD segment memsize %#x + offset %#x overflows", phdr.Memsz, segAddr.PageOffset())
@@ -371,7 +372,7 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF)
}
perms := progFlagsAsPerms(phdr.Flags)
- if perms != usermem.Read {
+ if perms != hostarch.Read {
if err := m.MProtect(segPage, uint64(segSize), perms, false); err != nil {
ctx.Warningf("Unable to set PT_LOAD segment protections %+v at [%#x, %#x): %v", perms, segAddr, segEnd, err)
return 0, syserror.ENOEXEC
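
vdso.go follows the same substitution pattern; the last hunk is the page-granular MProtect path. Below is a sketch of computing those bounds with the hostarch helpers used above, under the same module assumption; the final RoundUp is this sketch's assumption about rounding the length to whole pages, and the addresses are illustrative.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	segAddr := hostarch.Addr(0x7ffff123) // segment start inside the VDSO mapping
	memsz := uint64(0x2000)              // phdr.Memsz

	// Protections apply to whole pages: round the start down...
	segPage := segAddr.RoundDown()

	// ...and widen the length by the start's offset into its page,
	// then round the result up to a page boundary.
	segSize, ok := hostarch.Addr(memsz).AddLength(segAddr.PageOffset())
	if !ok {
		panic("segment size overflows")
	}
	if segSize, ok = segSize.RoundUp(); !ok {
		panic("segment size overflows")
	}
	fmt.Printf("mprotect [%#x, %#x)\n", segPage, segPage+segSize)
}
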