author    Zach Koopmans <zkoopmans@google.com>    2021-03-29 13:28:32 -0700
committer gVisor bot <gvisor-bot@google.com>      2021-03-29 13:30:21 -0700
commit    8a2f7e716dcc62f04d2808e8ade34941c94fc956 (patch)
tree      b2195d5728dcbc4f4e59c23ad95d7486ef744371 /pkg/sentry/fs
parent    b125afba416ebeba906ea595a44a55afe4729d64 (diff)
[syserror] Split usermem package
Split usermem package to help remove syserror dependency in go_marshal. New hostarch package contains code not dependent on syserror.

PiperOrigin-RevId: 365651233
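As an illustration only (not part of the commit), the minimal Go sketch below shows the call-site pattern this diff applies throughout pkg/sentry/fs: page-size, address, access-type, and byte-order helpers now come from the new hostarch package, while the user-memory I/O types stay in usermem. The example program is hypothetical; the hostarch identifiers it uses (Addr, RoundUp, PageSize, ByteOrder) are the same ones that appear in the hunks below.

// Hypothetical, minimal call site showing the usermem -> hostarch migration.
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch" // previously: "gvisor.dev/gvisor/pkg/usermem"
)

// offsetPageEnd mirrors fs.OffsetPageEnd after this change: round a file
// offset up to the next page boundary using hostarch.Addr.
func offsetPageEnd(offset int64) uint64 {
	end, ok := hostarch.Addr(offset).RoundUp() // was: usermem.Addr(offset).RoundUp()
	if !ok {
		panic("impossible overflow")
	}
	return uint64(end)
}

func main() {
	fmt.Println(offsetPageEnd(1)) // one byte rounds up to hostarch.PageSize

	var buf [4]byte
	hostarch.ByteOrder.PutUint32(buf[:], 42) // was: usermem.ByteOrder.PutUint32
	fmt.Println(buf)
}
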
Diffstat (limited to 'pkg/sentry/fs')
-rw-r--r--  pkg/sentry/fs/BUILD | 1
-rw-r--r--  pkg/sentry/fs/anon/BUILD | 2
-rw-r--r--  pkg/sentry/fs/anon/anon.go | 4
-rw-r--r--  pkg/sentry/fs/copy_up.go | 3
-rw-r--r--  pkg/sentry/fs/dev/BUILD | 1
-rw-r--r--  pkg/sentry/fs/dev/dev.go | 11
-rw-r--r--  pkg/sentry/fs/dev/net_tun.go | 5
-rw-r--r--  pkg/sentry/fs/fdpipe/BUILD | 1
-rw-r--r--  pkg/sentry/fs/fdpipe/pipe_test.go | 4
-rw-r--r--  pkg/sentry/fs/fsutil/BUILD | 2
-rw-r--r--  pkg/sentry/fs/fsutil/dirty_set.go | 4
-rw-r--r--  pkg/sentry/fs/fsutil/dirty_set_test.go | 12
-rw-r--r--  pkg/sentry/fs/fsutil/file_range_set.go | 8
-rw-r--r--  pkg/sentry/fs/fsutil/host_file_mapper.go | 6
-rw-r--r--  pkg/sentry/fs/fsutil/host_mappable.go | 13
-rw-r--r--  pkg/sentry/fs/fsutil/inode_cached.go | 21
-rw-r--r--  pkg/sentry/fs/fsutil/inode_cached_test.go | 51
-rw-r--r--  pkg/sentry/fs/gofer/BUILD | 1
-rw-r--r--  pkg/sentry/fs/gofer/attr.go | 4
-rw-r--r--  pkg/sentry/fs/inotify.go | 3
-rw-r--r--  pkg/sentry/fs/inotify_event.go | 9
-rw-r--r--  pkg/sentry/fs/offset.go | 4
-rw-r--r--  pkg/sentry/fs/overlay.go | 10
-rw-r--r--  pkg/sentry/fs/proc/BUILD | 1
-rw-r--r--  pkg/sentry/fs/proc/exec_args.go | 7
-rw-r--r--  pkg/sentry/fs/proc/inode.go | 4
-rw-r--r--  pkg/sentry/fs/proc/meminfo.go | 4
-rw-r--r--  pkg/sentry/fs/proc/net.go | 20
-rw-r--r--  pkg/sentry/fs/proc/seqfile/BUILD | 1
-rw-r--r--  pkg/sentry/fs/proc/seqfile/seqfile.go | 3
-rw-r--r--  pkg/sentry/fs/proc/sys_net.go | 21
-rw-r--r--  pkg/sentry/fs/proc/task.go | 13
-rw-r--r--  pkg/sentry/fs/proc/uid_gid_map.go | 3
-rw-r--r--  pkg/sentry/fs/ramfs/BUILD | 2
-rw-r--r--  pkg/sentry/fs/ramfs/tree.go | 4
-rw-r--r--  pkg/sentry/fs/sys/BUILD | 2
-rw-r--r--  pkg/sentry/fs/sys/sys.go | 6
-rw-r--r--  pkg/sentry/fs/timerfd/BUILD | 1
-rw-r--r--  pkg/sentry/fs/timerfd/timerfd.go | 3
-rw-r--r--  pkg/sentry/fs/tmpfs/BUILD | 2
-rw-r--r--  pkg/sentry/fs/tmpfs/file_test.go | 3
-rw-r--r--  pkg/sentry/fs/tmpfs/inode_file.go | 27
-rw-r--r--  pkg/sentry/fs/tmpfs/tmpfs.go | 16
-rw-r--r--  pkg/sentry/fs/tty/BUILD | 1
-rw-r--r--  pkg/sentry/fs/tty/dir.go | 3
45 files changed, 179 insertions, 148 deletions
diff --git a/pkg/sentry/fs/BUILD b/pkg/sentry/fs/BUILD
index 420fbae34..0dc100f9b 100644
--- a/pkg/sentry/fs/BUILD
+++ b/pkg/sentry/fs/BUILD
@@ -48,6 +48,7 @@ go_library(
"//pkg/abi/linux",
"//pkg/amutex",
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/log",
"//pkg/p9",
"//pkg/refs",
diff --git a/pkg/sentry/fs/anon/BUILD b/pkg/sentry/fs/anon/BUILD
index aedcecfa1..1ce56d79f 100644
--- a/pkg/sentry/fs/anon/BUILD
+++ b/pkg/sentry/fs/anon/BUILD
@@ -12,9 +12,9 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/sentry/device",
"//pkg/sentry/fs",
"//pkg/sentry/fs/fsutil",
- "//pkg/usermem",
],
)
diff --git a/pkg/sentry/fs/anon/anon.go b/pkg/sentry/fs/anon/anon.go
index 5c421f5fb..8bda22a8e 100644
--- a/pkg/sentry/fs/anon/anon.go
+++ b/pkg/sentry/fs/anon/anon.go
@@ -19,9 +19,9 @@ package anon
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
- "gvisor.dev/gvisor/pkg/usermem"
)
// NewInode constructs an anonymous Inode that is not associated
@@ -37,6 +37,6 @@ func NewInode(ctx context.Context) *fs.Inode {
Type: fs.Anonymous,
DeviceID: PseudoDevice.DeviceID(),
InodeID: PseudoDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
})
}
diff --git a/pkg/sentry/fs/copy_up.go b/pkg/sentry/fs/copy_up.go
index 58deb25fc..5aa668873 100644
--- a/pkg/sentry/fs/copy_up.go
+++ b/pkg/sentry/fs/copy_up.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
@@ -339,7 +340,7 @@ func cleanupUpper(ctx context.Context, parent *Inode, name string, copyUpErr err
// size is the same used by io.Copy.
var copyUpBuffers = sync.Pool{
New: func() interface{} {
- b := make([]byte, 8*usermem.PageSize)
+ b := make([]byte, 8*hostarch.PageSize)
return &b
},
}
diff --git a/pkg/sentry/fs/dev/BUILD b/pkg/sentry/fs/dev/BUILD
index 9379a4d7b..23a3a9a2d 100644
--- a/pkg/sentry/fs/dev/BUILD
+++ b/pkg/sentry/fs/dev/BUILD
@@ -18,6 +18,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/rand",
"//pkg/safemem",
"//pkg/sentry/arch",
diff --git a/pkg/sentry/fs/dev/dev.go b/pkg/sentry/fs/dev/dev.go
index acbd401a0..e84ba7a5d 100644
--- a/pkg/sentry/fs/dev/dev.go
+++ b/pkg/sentry/fs/dev/dev.go
@@ -19,6 +19,7 @@ import (
"math"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
"gvisor.dev/gvisor/pkg/sentry/fs/tmpfs"
@@ -49,7 +50,7 @@ func newCharacterDevice(ctx context.Context, iops fs.InodeOperations, msrc *fs.M
return fs.NewInode(ctx, iops, msrc, fs.StableAttr{
DeviceID: devDevice.DeviceID(),
InodeID: devDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.CharacterDevice,
DeviceFileMajor: major,
DeviceFileMinor: minor,
@@ -60,7 +61,7 @@ func newMemDevice(ctx context.Context, iops fs.InodeOperations, msrc *fs.MountSo
return fs.NewInode(ctx, iops, msrc, fs.StableAttr{
DeviceID: devDevice.DeviceID(),
InodeID: devDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.CharacterDevice,
DeviceFileMajor: memDevMajor,
DeviceFileMinor: minor,
@@ -72,7 +73,7 @@ func newDirectory(ctx context.Context, contents map[string]*fs.Inode, msrc *fs.M
return fs.NewInode(ctx, iops, msrc, fs.StableAttr{
DeviceID: devDevice.DeviceID(),
InodeID: devDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Directory,
})
}
@@ -82,7 +83,7 @@ func newSymlink(ctx context.Context, target string, msrc *fs.MountSource) *fs.In
return fs.NewInode(ctx, iops, msrc, fs.StableAttr{
DeviceID: devDevice.DeviceID(),
InodeID: devDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Symlink,
})
}
@@ -137,7 +138,7 @@ func New(ctx context.Context, msrc *fs.MountSource) *fs.Inode {
return fs.NewInode(ctx, iops, msrc, fs.StableAttr{
DeviceID: devDevice.DeviceID(),
InodeID: devDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Directory,
})
}
diff --git a/pkg/sentry/fs/dev/net_tun.go b/pkg/sentry/fs/dev/net_tun.go
index 11a2984d8..77e8d222a 100644
--- a/pkg/sentry/fs/dev/net_tun.go
+++ b/pkg/sentry/fs/dev/net_tun.go
@@ -17,6 +17,7 @@ package dev
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
@@ -110,7 +111,7 @@ func (n *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io user
}
// Validate flags.
- flags, err := netstack.LinuxToTUNFlags(usermem.ByteOrder.Uint16(req.Data[:]))
+ flags, err := netstack.LinuxToTUNFlags(hostarch.ByteOrder.Uint16(req.Data[:]))
if err != nil {
return 0, err
}
@@ -119,7 +120,7 @@ func (n *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io user
case linux.TUNGETIFF:
var req linux.IFReq
copy(req.IFName[:], n.device.Name())
- usermem.ByteOrder.PutUint16(req.Data[:], netstack.TUNFlagsToLinux(n.device.Flags()))
+ hostarch.ByteOrder.PutUint16(req.Data[:], netstack.TUNFlagsToLinux(n.device.Flags()))
_, err := req.CopyOut(t, data)
return 0, err
diff --git a/pkg/sentry/fs/fdpipe/BUILD b/pkg/sentry/fs/fdpipe/BUILD
index c83baf464..2120f2bad 100644
--- a/pkg/sentry/fs/fdpipe/BUILD
+++ b/pkg/sentry/fs/fdpipe/BUILD
@@ -40,6 +40,7 @@ go_test(
"//pkg/context",
"//pkg/fd",
"//pkg/fdnotifier",
+ "//pkg/hostarch",
"//pkg/sentry/contexttest",
"//pkg/sentry/fs",
"//pkg/syserror",
diff --git a/pkg/sentry/fs/fdpipe/pipe_test.go b/pkg/sentry/fs/fdpipe/pipe_test.go
index faeb3908c..ab0e9dac7 100644
--- a/pkg/sentry/fs/fdpipe/pipe_test.go
+++ b/pkg/sentry/fs/fdpipe/pipe_test.go
@@ -27,6 +27,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
+
+ "gvisor.dev/gvisor/pkg/hostarch"
)
func singlePipeFD() (int, error) {
@@ -52,7 +54,7 @@ func mockPipeDirent(t *testing.T) *fs.Dirent {
}
inode := fs.NewInode(ctx, node, fs.NewMockMountSource(nil), fs.StableAttr{
Type: fs.Pipe,
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
})
return fs.NewDirent(ctx, inode, "")
}
diff --git a/pkg/sentry/fs/fsutil/BUILD b/pkg/sentry/fs/fsutil/BUILD
index d388f0e92..6469cc3a9 100644
--- a/pkg/sentry/fs/fsutil/BUILD
+++ b/pkg/sentry/fs/fsutil/BUILD
@@ -76,6 +76,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/log",
"//pkg/safemem",
"//pkg/sentry/arch",
@@ -105,6 +106,7 @@ go_test(
library = ":fsutil",
deps = [
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/safemem",
"//pkg/sentry/contexttest",
"//pkg/sentry/fs",
diff --git a/pkg/sentry/fs/fsutil/dirty_set.go b/pkg/sentry/fs/fsutil/dirty_set.go
index 2c9446c1d..38383e730 100644
--- a/pkg/sentry/fs/fsutil/dirty_set.go
+++ b/pkg/sentry/fs/fsutil/dirty_set.go
@@ -18,9 +18,9 @@ import (
"math"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/usermem"
)
// DirtySet maps offsets into a memmap.Mappable to DirtyInfo. It is used to
@@ -215,7 +215,7 @@ func syncDirtyRange(ctx context.Context, mr memmap.MappableRange, cache *FileRan
if max < wbr.Start {
break
}
- ims, err := mem.MapInternal(cseg.FileRangeOf(wbr), usermem.Read)
+ ims, err := mem.MapInternal(cseg.FileRangeOf(wbr), hostarch.Read)
if err != nil {
return err
}
diff --git a/pkg/sentry/fs/fsutil/dirty_set_test.go b/pkg/sentry/fs/fsutil/dirty_set_test.go
index e3579c23c..48448c97c 100644
--- a/pkg/sentry/fs/fsutil/dirty_set_test.go
+++ b/pkg/sentry/fs/fsutil/dirty_set_test.go
@@ -18,18 +18,18 @@ import (
"reflect"
"testing"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/usermem"
)
func TestDirtySet(t *testing.T) {
var set DirtySet
- set.MarkDirty(memmap.MappableRange{0, 2 * usermem.PageSize})
- set.KeepDirty(memmap.MappableRange{usermem.PageSize, 2 * usermem.PageSize})
- set.MarkClean(memmap.MappableRange{0, 2 * usermem.PageSize})
+ set.MarkDirty(memmap.MappableRange{0, 2 * hostarch.PageSize})
+ set.KeepDirty(memmap.MappableRange{hostarch.PageSize, 2 * hostarch.PageSize})
+ set.MarkClean(memmap.MappableRange{0, 2 * hostarch.PageSize})
want := &DirtySegmentDataSlices{
- Start: []uint64{usermem.PageSize},
- End: []uint64{2 * usermem.PageSize},
+ Start: []uint64{hostarch.PageSize},
+ End: []uint64{2 * hostarch.PageSize},
Values: []DirtyInfo{{Keep: true}},
}
if got := set.ExportSortedSlices(); !reflect.DeepEqual(got, want) {
diff --git a/pkg/sentry/fs/fsutil/file_range_set.go b/pkg/sentry/fs/fsutil/file_range_set.go
index 1dc409d38..fdaceb1db 100644
--- a/pkg/sentry/fs/fsutil/file_range_set.go
+++ b/pkg/sentry/fs/fsutil/file_range_set.go
@@ -20,11 +20,11 @@ import (
"math"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/usermem"
)
// FileRangeSet maps offsets into a memmap.Mappable to offsets into a
@@ -130,7 +130,7 @@ func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.Map
// MemoryFile.AllocateAndFill truncates down to a page
// boundary, but FileRangeSet.Fill is supposed to
// zero-fill to the end of the page in this case.
- donepgaddr, ok := usermem.Addr(done).RoundUp()
+ donepgaddr, ok := hostarch.Addr(done).RoundUp()
if donepg := uint64(donepgaddr); ok && donepg != done {
dsts.DropFirst64(donepg - done)
done = donepg
@@ -184,7 +184,7 @@ func (frs *FileRangeSet) DropAll(mf *pgalloc.MemoryFile) {
// bytes after the new EOF on the same page are zeroed, and pages after the new
// EOF are freed.
func (frs *FileRangeSet) Truncate(end uint64, mf *pgalloc.MemoryFile) {
- pgendaddr, ok := usermem.Addr(end).RoundUp()
+ pgendaddr, ok := hostarch.Addr(end).RoundUp()
if ok {
pgend := uint64(pgendaddr)
@@ -208,7 +208,7 @@ func (frs *FileRangeSet) Truncate(end uint64, mf *pgalloc.MemoryFile) {
if seg.Ok() {
fr := seg.FileRange()
fr.Start += end - seg.Start()
- ims, err := mf.MapInternal(fr, usermem.Write)
+ ims, err := mf.MapInternal(fr, hostarch.Write)
if err != nil {
// There's no good recourse from here. This means
// that we can't keep cached memory consistent with
diff --git a/pkg/sentry/fs/fsutil/host_file_mapper.go b/pkg/sentry/fs/fsutil/host_file_mapper.go
index 54f7b7cdc..23528bf25 100644
--- a/pkg/sentry/fs/fsutil/host_file_mapper.go
+++ b/pkg/sentry/fs/fsutil/host_file_mapper.go
@@ -18,11 +18,11 @@ import (
"fmt"
"golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
)
// HostFileMapper caches mappings of an arbitrary host file descriptor. It is
@@ -50,13 +50,13 @@ type HostFileMapper struct {
}
const (
- chunkShift = usermem.HugePageShift
+ chunkShift = hostarch.HugePageShift
chunkSize = 1 << chunkShift
chunkMask = chunkSize - 1
)
func pagesInChunk(mr memmap.MappableRange, chunkStart uint64) int32 {
- return int32(mr.Intersect(memmap.MappableRange{chunkStart, chunkStart + chunkSize}).Length() / usermem.PageSize)
+ return int32(mr.Intersect(memmap.MappableRange{chunkStart, chunkStart + chunkSize}).Length() / hostarch.PageSize)
}
type mapping struct {
diff --git a/pkg/sentry/fs/fsutil/host_mappable.go b/pkg/sentry/fs/fsutil/host_mappable.go
index c15d8a946..e1e38b498 100644
--- a/pkg/sentry/fs/fsutil/host_mappable.go
+++ b/pkg/sentry/fs/fsutil/host_mappable.go
@@ -18,6 +18,7 @@ import (
"math"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/memmap"
@@ -59,7 +60,7 @@ func NewHostMappable(backingFile CachedFileObject) *HostMappable {
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (h *HostMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {
+func (h *HostMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
// Hot path. Avoid defers.
h.mu.Lock()
mapped := h.mappings.AddMapping(ms, ar, offset, writable)
@@ -71,7 +72,7 @@ func (h *HostMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, a
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (h *HostMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {
+func (h *HostMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
// Hot path. Avoid defers.
h.mu.Lock()
unmapped := h.mappings.RemoveMapping(ms, ar, offset, writable)
@@ -82,18 +83,18 @@ func (h *HostMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (h *HostMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {
+func (h *HostMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
return h.AddMapping(ctx, ms, dstAR, offset, writable)
}
// Translate implements memmap.Mappable.Translate.
-func (h *HostMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (h *HostMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
return []memmap.Translation{
{
Source: optional,
File: h,
Offset: optional.Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
},
}, nil
}
@@ -124,7 +125,7 @@ func (h *HostMappable) NotifyChangeFD() error {
}
// MapInternal implements memmap.File.MapInternal.
-func (h *HostMappable) MapInternal(fr memmap.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) {
+func (h *HostMappable) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) {
return h.hostFileMapper.MapInternal(fr, h.backingFile.FD(), at.Write)
}
diff --git a/pkg/sentry/fs/fsutil/inode_cached.go b/pkg/sentry/fs/fsutil/inode_cached.go
index 0ed7aafa5..7856b354b 100644
--- a/pkg/sentry/fs/fsutil/inode_cached.go
+++ b/pkg/sentry/fs/fsutil/inode_cached.go
@@ -19,6 +19,7 @@ import (
"io"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -622,7 +623,7 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
switch {
case seg.Ok():
// Get internal mappings from the cache.
- ims, err := mem.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read)
+ ims, err := mem.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Read)
if err != nil {
unlock()
return done, err
@@ -647,7 +648,7 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
// Read into the cache, then re-enter the loop to read from the
// cache.
reqMR := memmap.MappableRange{
- Start: uint64(usermem.Addr(gapMR.Start).RoundDown()),
+ Start: uint64(hostarch.Addr(gapMR.Start).RoundDown()),
End: fs.OffsetPageEnd(int64(gapMR.End)),
}
optMR := gap.Range()
@@ -729,7 +730,7 @@ func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error
case seg.Ok() && seg.Start() < mr.End:
// Get internal mappings from the cache.
segMR := seg.Range().Intersect(mr)
- ims, err := mf.MapInternal(seg.FileRangeOf(segMR), usermem.Write)
+ ims, err := mf.MapInternal(seg.FileRangeOf(segMR), hostarch.Write)
if err != nil {
rw.maybeGrowFile()
rw.c.dataMu.Unlock()
@@ -786,7 +787,7 @@ func (c *CachingInodeOperations) useHostPageCache() bool {
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {
+func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
// Hot path. Avoid defers.
c.mapsMu.Lock()
mapped := c.mappings.AddMapping(ms, ar, offset, writable)
@@ -808,7 +809,7 @@ func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.Mappi
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {
+func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
// Hot path. Avoid defers.
c.mapsMu.Lock()
unmapped := c.mappings.RemoveMapping(ms, ar, offset, writable)
@@ -836,12 +837,12 @@ func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Ma
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (c *CachingInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {
+func (c *CachingInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
return c.AddMapping(ctx, ms, dstAR, offset, writable)
}
// Translate implements memmap.Mappable.Translate.
-func (c *CachingInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (c *CachingInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
// Hot path. Avoid defer.
if c.useHostPageCache() {
mr := optional
@@ -853,7 +854,7 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option
Source: mr,
File: c,
Offset: mr.Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
},
}, nil
}
@@ -885,7 +886,7 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option
segMR := seg.Range().Intersect(optional)
// TODO(jamieliu): Make Translations writable even if writability is
// not required if already kept-dirty by another writable translation.
- perms := usermem.AccessType{
+ perms := hostarch.AccessType{
Read: true,
Execute: true,
}
@@ -1050,7 +1051,7 @@ func (c *CachingInodeOperations) DecRef(fr memmap.FileRange) {
// MapInternal implements memmap.File.MapInternal. This is used when we
// directly map an underlying host fd and CachingInodeOperations is used as the
// memmap.File during translation.
-func (c *CachingInodeOperations) MapInternal(fr memmap.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) {
+func (c *CachingInodeOperations) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) {
return c.hostFileMapper.MapInternal(fr, c.backingFile.FD(), at.Write)
}
diff --git a/pkg/sentry/fs/fsutil/inode_cached_test.go b/pkg/sentry/fs/fsutil/inode_cached_test.go
index 1547584c5..e107c3096 100644
--- a/pkg/sentry/fs/fsutil/inode_cached_test.go
+++ b/pkg/sentry/fs/fsutil/inode_cached_test.go
@@ -20,6 +20,7 @@ import (
"testing"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -249,7 +250,7 @@ func (f *sliceBackingFile) Allocate(ctx context.Context, offset int64, length in
type noopMappingSpace struct{}
// Invalidate implements memmap.MappingSpace.Invalidate.
-func (noopMappingSpace) Invalidate(ar usermem.AddrRange, opts memmap.InvalidateOpts) {
+func (noopMappingSpace) Invalidate(ar hostarch.AddrRange, opts memmap.InvalidateOpts) {
}
func anonInode(ctx context.Context) *fs.Inode {
@@ -259,14 +260,14 @@ func anonInode(ctx context.Context) *fs.Inode {
}, 0),
}, fs.NewPseudoMountSource(ctx), fs.StableAttr{
Type: fs.Anonymous,
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
})
}
func pagesOf(bs ...byte) []byte {
- buf := make([]byte, 0, len(bs)*usermem.PageSize)
+ buf := make([]byte, 0, len(bs)*hostarch.PageSize)
for _, b := range bs {
- buf = append(buf, bytes.Repeat([]byte{b}, usermem.PageSize)...)
+ buf = append(buf, bytes.Repeat([]byte{b}, hostarch.PageSize)...)
}
return buf
}
@@ -292,28 +293,28 @@ func TestRead(t *testing.T) {
// expects to only cache mapped pages), then call Translate to force it to
// be cached.
var ms noopMappingSpace
- ar := usermem.AddrRange{usermem.PageSize, 2 * usermem.PageSize}
- if err := iops.AddMapping(ctx, ms, ar, usermem.PageSize, true); err != nil {
+ ar := hostarch.AddrRange{hostarch.PageSize, 2 * hostarch.PageSize}
+ if err := iops.AddMapping(ctx, ms, ar, hostarch.PageSize, true); err != nil {
t.Fatalf("AddMapping got %v, want nil", err)
}
- mr := memmap.MappableRange{usermem.PageSize, 2 * usermem.PageSize}
- if _, err := iops.Translate(ctx, mr, mr, usermem.Read); err != nil {
+ mr := memmap.MappableRange{hostarch.PageSize, 2 * hostarch.PageSize}
+ if _, err := iops.Translate(ctx, mr, mr, hostarch.Read); err != nil {
t.Fatalf("Translate got %v, want nil", err)
}
- if cached := iops.cache.Span(); cached != usermem.PageSize {
- t.Errorf("SpanRange got %d, want %d", cached, usermem.PageSize)
+ if cached := iops.cache.Span(); cached != hostarch.PageSize {
+ t.Errorf("SpanRange got %d, want %d", cached, hostarch.PageSize)
}
// Try to read 4 pages. The first and third pages should be read directly
// from the "file", the second page should be read from the cache, and only
// 3 pages (the size of the file) should be readable.
- rbuf := make([]byte, 4*usermem.PageSize)
+ rbuf := make([]byte, 4*hostarch.PageSize)
dst := usermem.BytesIOSequence(rbuf)
n, err := iops.Read(ctx, file, dst, 0)
- if n != 3*usermem.PageSize || (err != nil && err != io.EOF) {
- t.Fatalf("Read got (%d, %v), want (%d, nil or EOF)", n, err, 3*usermem.PageSize)
+ if n != 3*hostarch.PageSize || (err != nil && err != io.EOF) {
+ t.Fatalf("Read got (%d, %v), want (%d, nil or EOF)", n, err, 3*hostarch.PageSize)
}
- rbuf = rbuf[:3*usermem.PageSize]
+ rbuf = rbuf[:3*hostarch.PageSize]
// Did we get the bytes we expect?
if !bytes.Equal(rbuf, buf) {
@@ -323,7 +324,7 @@ func TestRead(t *testing.T) {
// Delete the memory mapping before iops.Release(). The cached page will
// either be evicted by ctx's pgalloc.MemoryFile, or dropped by
// iops.Release().
- iops.RemoveMapping(ctx, ms, ar, usermem.PageSize, true)
+ iops.RemoveMapping(ctx, ms, ar, hostarch.PageSize, true)
}
func TestWrite(t *testing.T) {
@@ -348,25 +349,25 @@ func TestWrite(t *testing.T) {
// CachingInodeOperations expects to only cache mapped pages), then call
// Translate to force them to be cached.
var ms noopMappingSpace
- ar := usermem.AddrRange{usermem.PageSize, 3 * usermem.PageSize}
- if err := iops.AddMapping(ctx, ms, ar, usermem.PageSize, true); err != nil {
+ ar := hostarch.AddrRange{hostarch.PageSize, 3 * hostarch.PageSize}
+ if err := iops.AddMapping(ctx, ms, ar, hostarch.PageSize, true); err != nil {
t.Fatalf("AddMapping got %v, want nil", err)
}
- defer iops.RemoveMapping(ctx, ms, ar, usermem.PageSize, true)
- mr := memmap.MappableRange{usermem.PageSize, 3 * usermem.PageSize}
- if _, err := iops.Translate(ctx, mr, mr, usermem.Read); err != nil {
+ defer iops.RemoveMapping(ctx, ms, ar, hostarch.PageSize, true)
+ mr := memmap.MappableRange{hostarch.PageSize, 3 * hostarch.PageSize}
+ if _, err := iops.Translate(ctx, mr, mr, hostarch.Read); err != nil {
t.Fatalf("Translate got %v, want nil", err)
}
- if cached := iops.cache.Span(); cached != 2*usermem.PageSize {
- t.Errorf("SpanRange got %d, want %d", cached, 2*usermem.PageSize)
+ if cached := iops.cache.Span(); cached != 2*hostarch.PageSize {
+ t.Errorf("SpanRange got %d, want %d", cached, 2*hostarch.PageSize)
}
// Write to the first 2 pages.
wbuf := pagesOf('e', 'f')
src := usermem.BytesIOSequence(wbuf)
n, err := iops.Write(ctx, src, 0)
- if n != 2*usermem.PageSize || err != nil {
- t.Fatalf("Write got (%d, %v), want (%d, nil)", n, err, 2*usermem.PageSize)
+ if n != 2*hostarch.PageSize || err != nil {
+ t.Fatalf("Write got (%d, %v), want (%d, nil)", n, err, 2*hostarch.PageSize)
}
// The first page should have been written directly, since it was not cached.
@@ -382,7 +383,7 @@ func TestWrite(t *testing.T) {
}
// Now the second page should have been written as well.
- copy(want[usermem.PageSize:], pagesOf('f'))
+ copy(want[hostarch.PageSize:], pagesOf('f'))
if !bytes.Equal(buf, want) {
t.Errorf("File contents are %v, want %v", buf, want)
}
diff --git a/pkg/sentry/fs/gofer/BUILD b/pkg/sentry/fs/gofer/BUILD
index b210e0e7e..c4a069832 100644
--- a/pkg/sentry/fs/gofer/BUILD
+++ b/pkg/sentry/fs/gofer/BUILD
@@ -27,6 +27,7 @@ go_library(
"//pkg/abi/linux",
"//pkg/context",
"//pkg/fd",
+ "//pkg/hostarch",
"//pkg/log",
"//pkg/p9",
"//pkg/refs",
diff --git a/pkg/sentry/fs/gofer/attr.go b/pkg/sentry/fs/gofer/attr.go
index cffc756cc..d6bff3f40 100644
--- a/pkg/sentry/fs/gofer/attr.go
+++ b/pkg/sentry/fs/gofer/attr.go
@@ -17,11 +17,11 @@ package gofer
import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/usermem"
)
// getattr returns the 9p attributes of the p9.File. On success, Mode, Size, and RDev
@@ -98,7 +98,7 @@ func bsize(pattr p9.Attr) int64 {
// Some files, particularly those that are not on a local file system,
// may have no clue of their block size. Better not to report something
// misleading or buggy and have a safe default.
- return usermem.PageSize
+ return hostarch.PageSize
}
// ntype returns an fs.InodeType from 9p attributes.
diff --git a/pkg/sentry/fs/inotify.go b/pkg/sentry/fs/inotify.go
index fb81d903d..1b83643db 100644
--- a/pkg/sentry/fs/inotify.go
+++ b/pkg/sentry/fs/inotify.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/uniqueid"
@@ -216,7 +217,7 @@ func (i *Inotify) Ioctl(ctx context.Context, _ *File, io usermem.IO, args arch.S
n += uint32(e.sizeOf())
}
var buf [4]byte
- usermem.ByteOrder.PutUint32(buf[:], n)
+ hostarch.ByteOrder.PutUint32(buf[:], n)
_, err := io.CopyOut(ctx, args[2].Pointer(), buf[:], usermem.IOOpts{})
return 0, err
diff --git a/pkg/sentry/fs/inotify_event.go b/pkg/sentry/fs/inotify_event.go
index 686e1b1cd..399aff1ed 100644
--- a/pkg/sentry/fs/inotify_event.go
+++ b/pkg/sentry/fs/inotify_event.go
@@ -19,6 +19,7 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -100,10 +101,10 @@ func (e *Event) sizeOf() int {
// construct the output. We use a buffer allocated ahead of time for
// performance. buf must be at least inotifyEventBaseSize bytes.
func (e *Event) CopyTo(ctx context.Context, buf []byte, dst usermem.IOSequence) (int64, error) {
- usermem.ByteOrder.PutUint32(buf[0:], uint32(e.wd))
- usermem.ByteOrder.PutUint32(buf[4:], e.mask)
- usermem.ByteOrder.PutUint32(buf[8:], e.cookie)
- usermem.ByteOrder.PutUint32(buf[12:], e.len)
+ hostarch.ByteOrder.PutUint32(buf[0:], uint32(e.wd))
+ hostarch.ByteOrder.PutUint32(buf[4:], e.mask)
+ hostarch.ByteOrder.PutUint32(buf[8:], e.cookie)
+ hostarch.ByteOrder.PutUint32(buf[12:], e.len)
writeLen := 0
diff --git a/pkg/sentry/fs/offset.go b/pkg/sentry/fs/offset.go
index 53b5df175..3a8c97d8f 100644
--- a/pkg/sentry/fs/offset.go
+++ b/pkg/sentry/fs/offset.go
@@ -17,14 +17,14 @@ package fs
import (
"math"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// OffsetPageEnd returns the file offset rounded up to the nearest
// page boundary. OffsetPageEnd panics if rounding up causes overflow,
// which shouldn't be possible given that offset is an int64.
func OffsetPageEnd(offset int64) uint64 {
- end, ok := usermem.Addr(offset).RoundUp()
+ end, ok := hostarch.Addr(offset).RoundUp()
if !ok {
panic("impossible overflow")
}
diff --git a/pkg/sentry/fs/overlay.go b/pkg/sentry/fs/overlay.go
index 01a1235b8..f96f5a3e5 100644
--- a/pkg/sentry/fs/overlay.go
+++ b/pkg/sentry/fs/overlay.go
@@ -19,11 +19,11 @@ import (
"strings"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// The virtual filesystem implements an overlay configuration. For a high-level
@@ -274,7 +274,7 @@ func (o *overlayEntry) markDirectoryDirty() {
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (o *overlayEntry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {
+func (o *overlayEntry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
o.mapsMu.Lock()
defer o.mapsMu.Unlock()
if err := o.inodeLocked().Mappable().AddMapping(ctx, ms, ar, offset, writable); err != nil {
@@ -285,7 +285,7 @@ func (o *overlayEntry) AddMapping(ctx context.Context, ms memmap.MappingSpace, a
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (o *overlayEntry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {
+func (o *overlayEntry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
o.mapsMu.Lock()
defer o.mapsMu.Unlock()
o.inodeLocked().Mappable().RemoveMapping(ctx, ms, ar, offset, writable)
@@ -293,7 +293,7 @@ func (o *overlayEntry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (o *overlayEntry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {
+func (o *overlayEntry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
o.mapsMu.Lock()
defer o.mapsMu.Unlock()
if err := o.inodeLocked().Mappable().CopyMapping(ctx, ms, srcAR, dstAR, offset, writable); err != nil {
@@ -304,7 +304,7 @@ func (o *overlayEntry) CopyMapping(ctx context.Context, ms memmap.MappingSpace,
}
// Translate implements memmap.Mappable.Translate.
-func (o *overlayEntry) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (o *overlayEntry) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
o.dataMu.RLock()
defer o.dataMu.RUnlock()
return o.inodeLocked().Mappable().Translate(ctx, required, optional, at)
diff --git a/pkg/sentry/fs/proc/BUILD b/pkg/sentry/fs/proc/BUILD
index b8b2281a8..7af7e0b45 100644
--- a/pkg/sentry/fs/proc/BUILD
+++ b/pkg/sentry/fs/proc/BUILD
@@ -30,6 +30,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/log",
"//pkg/sentry/fs",
"//pkg/sentry/fs/fsutil",
diff --git a/pkg/sentry/fs/proc/exec_args.go b/pkg/sentry/fs/proc/exec_args.go
index e6171dd1d..24426b225 100644
--- a/pkg/sentry/fs/proc/exec_args.go
+++ b/pkg/sentry/fs/proc/exec_args.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/kernel"
@@ -113,7 +114,7 @@ func (f *execArgFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequen
defer m.DecUsers(ctx)
// Figure out the bounds of the exec arg we are trying to read.
- var execArgStart, execArgEnd usermem.Addr
+ var execArgStart, execArgEnd hostarch.Addr
switch f.arg {
case cmdlineExecArg:
execArgStart, execArgEnd = m.ArgvStart(), m.ArgvEnd()
@@ -172,8 +173,8 @@ func (f *execArgFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequen
// https://elixir.bootlin.com/linux/v4.20/source/fs/proc/base.c#L208
// we'll return one page total between argv and envp because of the
// above page restrictions.
- if lengthEnvv > usermem.PageSize-len(buf) {
- lengthEnvv = usermem.PageSize - len(buf)
+ if lengthEnvv > hostarch.PageSize-len(buf) {
+ lengthEnvv = hostarch.PageSize - len(buf)
}
// Make a new buffer to fit the whole thing
tmp := make([]byte, length+lengthEnvv)
diff --git a/pkg/sentry/fs/proc/inode.go b/pkg/sentry/fs/proc/inode.go
index d2859a4c2..78132f7a5 100644
--- a/pkg/sentry/fs/proc/inode.go
+++ b/pkg/sentry/fs/proc/inode.go
@@ -17,13 +17,13 @@ package proc
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/mm"
- "gvisor.dev/gvisor/pkg/usermem"
)
// LINT.IfChange
@@ -125,7 +125,7 @@ func newProcInode(ctx context.Context, iops fs.InodeOperations, msrc *fs.MountSo
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: typ,
}
if t != nil {
diff --git a/pkg/sentry/fs/proc/meminfo.go b/pkg/sentry/fs/proc/meminfo.go
index 91617267d..7d975d333 100644
--- a/pkg/sentry/fs/proc/meminfo.go
+++ b/pkg/sentry/fs/proc/meminfo.go
@@ -19,10 +19,10 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/usermem"
)
// LINT.IfChange
@@ -53,7 +53,7 @@ func (d *meminfoData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle)
anon := snapshot.Anonymous + snapshot.Tmpfs
file := snapshot.PageCache + snapshot.Mapped
// We don't actually have active/inactive LRUs, so just make up numbers.
- activeFile := (file / 2) &^ (usermem.PageSize - 1)
+ activeFile := (file / 2) &^ (hostarch.PageSize - 1)
inactiveFile := file - activeFile
var buf bytes.Buffer
diff --git a/pkg/sentry/fs/proc/net.go b/pkg/sentry/fs/proc/net.go
index 203cfa061..91c35eea9 100644
--- a/pkg/sentry/fs/proc/net.go
+++ b/pkg/sentry/fs/proc/net.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile"
@@ -35,7 +36,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip/header"
- "gvisor.dev/gvisor/pkg/usermem"
)
// LINT.IfChange
@@ -367,10 +367,10 @@ func (n *netRoute) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]
)
if len(rt.GatewayAddr) == header.IPv4AddressSize {
flags |= linux.RTF_GATEWAY
- gw = usermem.ByteOrder.Uint32(rt.GatewayAddr)
+ gw = hostarch.ByteOrder.Uint32(rt.GatewayAddr)
}
if len(rt.DstAddr) == header.IPv4AddressSize {
- prefix = usermem.ByteOrder.Uint32(rt.DstAddr)
+ prefix = hostarch.ByteOrder.Uint32(rt.DstAddr)
}
l := fmt.Sprintf(
"%s\t%08X\t%08X\t%04X\t%d\t%d\t%d\t%08X\t%d\t%d\t%d",
@@ -520,7 +520,7 @@ func networkToHost16(n uint16) uint16 {
// binary.BigEndian.Uint16() require a read of binary.BigEndian and an
// interface method call, defeating inlining.
buf := [2]byte{byte(n >> 8 & 0xff), byte(n & 0xff)}
- return usermem.ByteOrder.Uint16(buf[:])
+ return hostarch.ByteOrder.Uint16(buf[:])
}
func writeInetAddr(w io.Writer, family int, i linux.SockAddr) {
@@ -542,14 +542,14 @@ func writeInetAddr(w io.Writer, family int, i linux.SockAddr) {
// __be32 which is a typedef for an unsigned int, and is printed with
// %X. This means that for a little-endian machine, Linux prints the
// least-significant byte of the address first. To emulate this, we first
- // invert the byte order for the address using usermem.ByteOrder.Uint32,
+ // invert the byte order for the address using hostarch.ByteOrder.Uint32,
// which makes it have the equivalent encoding to a __be32 on a little
// endian machine. Note that this operation is a no-op on a big endian
// machine. Then similar to Linux, we format it with %X, which will print
// the most-significant byte of the __be32 address first, which is now
// actually the least-significant byte of the original address in
// linux.SockAddrInet.Addr on little endian machines, due to the conversion.
- addr := usermem.ByteOrder.Uint32(a.Addr[:])
+ addr := hostarch.ByteOrder.Uint32(a.Addr[:])
fmt.Fprintf(w, "%08X:%04X ", addr, port)
case linux.AF_INET6:
@@ -559,10 +559,10 @@ func writeInetAddr(w io.Writer, family int, i linux.SockAddr) {
}
port := networkToHost16(a.Port)
- addr0 := usermem.ByteOrder.Uint32(a.Addr[0:4])
- addr1 := usermem.ByteOrder.Uint32(a.Addr[4:8])
- addr2 := usermem.ByteOrder.Uint32(a.Addr[8:12])
- addr3 := usermem.ByteOrder.Uint32(a.Addr[12:16])
+ addr0 := hostarch.ByteOrder.Uint32(a.Addr[0:4])
+ addr1 := hostarch.ByteOrder.Uint32(a.Addr[4:8])
+ addr2 := hostarch.ByteOrder.Uint32(a.Addr[8:12])
+ addr3 := hostarch.ByteOrder.Uint32(a.Addr[12:16])
fmt.Fprintf(w, "%08X%08X%08X%08X:%04X ", addr0, addr1, addr2, addr3, port)
}
}
diff --git a/pkg/sentry/fs/proc/seqfile/BUILD b/pkg/sentry/fs/proc/seqfile/BUILD
index 21338d912..713b81e08 100644
--- a/pkg/sentry/fs/proc/seqfile/BUILD
+++ b/pkg/sentry/fs/proc/seqfile/BUILD
@@ -9,6 +9,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/sentry/fs",
"//pkg/sentry/fs/fsutil",
"//pkg/sentry/fs/proc/device",
diff --git a/pkg/sentry/fs/proc/seqfile/seqfile.go b/pkg/sentry/fs/proc/seqfile/seqfile.go
index 6121f0e95..b01688b1d 100644
--- a/pkg/sentry/fs/proc/seqfile/seqfile.go
+++ b/pkg/sentry/fs/proc/seqfile/seqfile.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
@@ -131,7 +132,7 @@ func NewSeqFileInode(ctx context.Context, source SeqSource, msrc *fs.MountSource
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, iops, msrc, sattr)
diff --git a/pkg/sentry/fs/proc/sys_net.go b/pkg/sentry/fs/proc/sys_net.go
index bbe282c03..1d09afdd7 100644
--- a/pkg/sentry/fs/proc/sys_net.go
+++ b/pkg/sentry/fs/proc/sys_net.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
@@ -76,7 +77,7 @@ func newTCPMemInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack, dir
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, tm, msrc, sattr)
@@ -136,7 +137,7 @@ func (f *tcpMemFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSequen
f.tcpMemInode.mu.Lock()
defer f.tcpMemInode.mu.Unlock()
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
size, err := readSize(f.tcpMemInode.dir, f.tcpMemInode.s)
if err != nil {
return 0, err
@@ -192,7 +193,7 @@ func newTCPSackInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *f
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, ts, msrc, sattr)
@@ -264,7 +265,7 @@ func (f *tcpSackFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSeque
// Only consider size of one memory page for input for performance reasons.
// We are only reading if it's zero or not anyway.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
var v int32
n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
@@ -294,7 +295,7 @@ func newTCPRecoveryInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, ts, msrc, sattr)
@@ -354,7 +355,7 @@ func (f *tcpRecoveryFile) Write(ctx context.Context, _ *fs.File, src usermem.IOS
if src.NumBytes() == 0 {
return 0, nil
}
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
var v int32
n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
@@ -413,7 +414,7 @@ func newIPForwardingInode(ctx context.Context, msrc *fs.MountSource, s inet.Stac
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, ipf, msrc, sattr)
@@ -486,7 +487,7 @@ func (f *ipForwardingFile) Write(ctx context.Context, _ *fs.File, src usermem.IO
// Only consider size of one memory page for input for performance reasons.
// We are only reading if it's zero or not anyway.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
var v int32
n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
@@ -524,7 +525,7 @@ func newPortRangeInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack)
sattr := fs.StableAttr{
DeviceID: device.ProcDevice.DeviceID(),
InodeID: device.ProcDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, ipf, msrc, sattr)
@@ -589,7 +590,7 @@ func (pf *portRangeFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSe
// Only consider size of one memory page for input for performance
// reasons.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
ports := make([]int32, 2)
n, err := usermem.CopyInt32StringsInVec(ctx, src.IO, src.Addrs, ports, src.Opts)
diff --git a/pkg/sentry/fs/proc/task.go b/pkg/sentry/fs/proc/task.go
index f43d6c221..ae5ed25f9 100644
--- a/pkg/sentry/fs/proc/task.go
+++ b/pkg/sentry/fs/proc/task.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
@@ -469,7 +470,7 @@ func (m *memDataFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequen
defer mm.DecUsers(ctx)
// Buffer the read data because of MM locks
buf := make([]byte, dst.NumBytes())
- n, readErr := mm.CopyIn(ctx, usermem.Addr(offset), buf, usermem.IOOpts{IgnorePermissions: true})
+ n, readErr := mm.CopyIn(ctx, hostarch.Addr(offset), buf, usermem.IOOpts{IgnorePermissions: true})
if n > 0 {
if _, err := dst.CopyOut(ctx, buf[:n]); err != nil {
return 0, syserror.EFAULT
@@ -632,7 +633,7 @@ func (s *taskStatData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle)
rss = mm.ResidentSetSize()
}
})
- fmt.Fprintf(&buf, "%d %d ", vss, rss/usermem.PageSize)
+ fmt.Fprintf(&buf, "%d %d ", vss, rss/hostarch.PageSize)
// rsslim.
fmt.Fprintf(&buf, "%d ", s.t.ThreadGroup().Limits().Get(limits.Rss).Cur)
@@ -684,7 +685,7 @@ func (s *statmData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([
})
var buf bytes.Buffer
- fmt.Fprintf(&buf, "%d %d 0 0 0 0 0\n", vss/usermem.PageSize, rss/usermem.PageSize)
+ fmt.Fprintf(&buf, "%d %d 0 0 0 0 0\n", vss/hostarch.PageSize, rss/hostarch.PageSize)
return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*statmData)(nil)}}, 0
}
@@ -939,8 +940,8 @@ func (f *auxvecFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequenc
buf := make([]byte, size)
for i, e := range auxv {
- usermem.ByteOrder.PutUint64(buf[16*i:], e.Key)
- usermem.ByteOrder.PutUint64(buf[16*i+8:], uint64(e.Value))
+ hostarch.ByteOrder.PutUint64(buf[16*i:], e.Key)
+ hostarch.ByteOrder.PutUint64(buf[16*i+8:], uint64(e.Value))
}
n, err := dst.CopyOut(ctx, buf[offset:])
@@ -1020,7 +1021,7 @@ func (f *oomScoreAdjFile) Write(ctx context.Context, _ *fs.File, src usermem.IOS
}
// Limit input size so as not to impact performance if input size is large.
- src = src.TakeFirst(usermem.PageSize - 1)
+ src = src.TakeFirst(hostarch.PageSize - 1)
var v int32
n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
diff --git a/pkg/sentry/fs/proc/uid_gid_map.go b/pkg/sentry/fs/proc/uid_gid_map.go
index 2bc9485d8..30d5ad4cf 100644
--- a/pkg/sentry/fs/proc/uid_gid_map.go
+++ b/pkg/sentry/fs/proc/uid_gid_map.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/kernel"
@@ -132,7 +133,7 @@ func (imfo *idMapFileOperations) Write(ctx context.Context, file *fs.File, src u
// the system page size, and the write must be performed at the start of
// the file ..." - user_namespaces(7)
srclen := src.NumBytes()
- if srclen >= usermem.PageSize || offset != 0 {
+ if srclen >= hostarch.PageSize || offset != 0 {
return 0, syserror.EINVAL
}
b := make([]byte, srclen)
diff --git a/pkg/sentry/fs/ramfs/BUILD b/pkg/sentry/fs/ramfs/BUILD
index a51d00d86..4a3d9636b 100644
--- a/pkg/sentry/fs/ramfs/BUILD
+++ b/pkg/sentry/fs/ramfs/BUILD
@@ -14,13 +14,13 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/sentry/fs",
"//pkg/sentry/fs/anon",
"//pkg/sentry/fs/fsutil",
"//pkg/sentry/socket/unix/transport",
"//pkg/sync",
"//pkg/syserror",
- "//pkg/usermem",
"//pkg/waiter",
"@org_golang_x_sys//unix:go_default_library",
],
diff --git a/pkg/sentry/fs/ramfs/tree.go b/pkg/sentry/fs/ramfs/tree.go
index dfc9d3453..0ace636c9 100644
--- a/pkg/sentry/fs/ramfs/tree.go
+++ b/pkg/sentry/fs/ramfs/tree.go
@@ -20,9 +20,9 @@ import (
"strings"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/anon"
- "gvisor.dev/gvisor/pkg/usermem"
)
// MakeDirectoryTree constructs a ramfs tree of all directories containing
@@ -71,7 +71,7 @@ func emptyDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode {
return fs.NewInode(ctx, dir, msrc, fs.StableAttr{
DeviceID: anon.PseudoDevice.DeviceID(),
InodeID: anon.PseudoDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Directory,
})
}
diff --git a/pkg/sentry/fs/sys/BUILD b/pkg/sentry/fs/sys/BUILD
index f2e8b9932..fdbc5f912 100644
--- a/pkg/sentry/fs/sys/BUILD
+++ b/pkg/sentry/fs/sys/BUILD
@@ -14,11 +14,11 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/sentry/device",
"//pkg/sentry/fs",
"//pkg/sentry/fs/fsutil",
"//pkg/sentry/fs/ramfs",
"//pkg/sentry/kernel",
- "//pkg/usermem",
],
)
diff --git a/pkg/sentry/fs/sys/sys.go b/pkg/sentry/fs/sys/sys.go
index 0891645e4..101779a7a 100644
--- a/pkg/sentry/fs/sys/sys.go
+++ b/pkg/sentry/fs/sys/sys.go
@@ -17,16 +17,16 @@ package sys
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
- "gvisor.dev/gvisor/pkg/usermem"
)
func newFile(ctx context.Context, node fs.InodeOperations, msrc *fs.MountSource) *fs.Inode {
sattr := fs.StableAttr{
DeviceID: sysfsDevice.DeviceID(),
InodeID: sysfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialFile,
}
return fs.NewInode(ctx, node, msrc, sattr)
@@ -37,7 +37,7 @@ func newDir(ctx context.Context, msrc *fs.MountSource, contents map[string]*fs.I
return fs.NewInode(ctx, d, msrc, fs.StableAttr{
DeviceID: sysfsDevice.DeviceID(),
InodeID: sysfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.SpecialDirectory,
})
}
diff --git a/pkg/sentry/fs/timerfd/BUILD b/pkg/sentry/fs/timerfd/BUILD
index d16cdb4df..c7977a217 100644
--- a/pkg/sentry/fs/timerfd/BUILD
+++ b/pkg/sentry/fs/timerfd/BUILD
@@ -8,6 +8,7 @@ go_library(
visibility = ["//pkg/sentry:internal"],
deps = [
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/sentry/fs",
"//pkg/sentry/fs/anon",
"//pkg/sentry/fs/fsutil",
diff --git a/pkg/sentry/fs/timerfd/timerfd.go b/pkg/sentry/fs/timerfd/timerfd.go
index 46511a6ac..c8ebe256c 100644
--- a/pkg/sentry/fs/timerfd/timerfd.go
+++ b/pkg/sentry/fs/timerfd/timerfd.go
@@ -20,6 +20,7 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/anon"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
@@ -124,7 +125,7 @@ func (t *TimerOperations) Read(ctx context.Context, file *fs.File, dst usermem.I
}
if val := atomic.SwapUint64(&t.val, 0); val != 0 {
var buf [sizeofUint64]byte
- usermem.ByteOrder.PutUint64(buf[:], val)
+ hostarch.ByteOrder.PutUint64(buf[:], val)
if _, err := dst.CopyOut(ctx, buf[:]); err != nil {
// Linux does not undo consuming the number of expirations even if
// writing to userspace fails.
diff --git a/pkg/sentry/fs/tmpfs/BUILD b/pkg/sentry/fs/tmpfs/BUILD
index b521a86a2..90398376a 100644
--- a/pkg/sentry/fs/tmpfs/BUILD
+++ b/pkg/sentry/fs/tmpfs/BUILD
@@ -15,6 +15,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/safemem",
"//pkg/sentry/device",
"//pkg/sentry/fs",
@@ -42,6 +43,7 @@ go_test(
library = ":tmpfs",
deps = [
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/sentry/fs",
"//pkg/sentry/kernel/contexttest",
"//pkg/sentry/usage",
diff --git a/pkg/sentry/fs/tmpfs/file_test.go b/pkg/sentry/fs/tmpfs/file_test.go
index d4d613ea9..1718f9372 100644
--- a/pkg/sentry/fs/tmpfs/file_test.go
+++ b/pkg/sentry/fs/tmpfs/file_test.go
@@ -19,6 +19,7 @@ import (
"testing"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel/contexttest"
"gvisor.dev/gvisor/pkg/sentry/usage"
@@ -31,7 +32,7 @@ func newFileInode(ctx context.Context) *fs.Inode {
return fs.NewInode(ctx, iops, m, fs.StableAttr{
DeviceID: tmpfsDevice.DeviceID(),
InodeID: tmpfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.RegularFile,
})
}
diff --git a/pkg/sentry/fs/tmpfs/inode_file.go b/pkg/sentry/fs/tmpfs/inode_file.go
index ad4aea282..f4de8c968 100644
--- a/pkg/sentry/fs/tmpfs/inode_file.go
+++ b/pkg/sentry/fs/tmpfs/inode_file.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
@@ -125,7 +126,7 @@ func NewMemfdInode(ctx context.Context, allowSeals bool) *fs.Inode {
Type: fs.RegularFile,
DeviceID: tmpfsDevice.DeviceID(),
InodeID: tmpfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
})
}
@@ -392,7 +393,7 @@ func (rw *fileReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
switch {
case seg.Ok():
// Get internal mappings.
- ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read)
+ ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Read)
if err != nil {
return done, err
}
@@ -463,7 +464,7 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
//
// See Linux, mm/filemap.c:generic_perform_write() and
// mm/shmem.c:shmem_write_begin().
- if pgstart := int64(usermem.Addr(rw.f.attr.Size).RoundDown()); end > pgstart {
+ if pgstart := int64(hostarch.Addr(rw.f.attr.Size).RoundDown()); end > pgstart {
end = pgstart
}
if end <= rw.offset {
@@ -483,8 +484,8 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
mf := rw.f.kernel.MemoryFile()
// Page-aligned mr for when we need to allocate memory. RoundUp can't
// overflow since end is an int64.
- pgstartaddr := usermem.Addr(rw.offset).RoundDown()
- pgendaddr, _ := usermem.Addr(end).RoundUp()
+ pgstartaddr := hostarch.Addr(rw.offset).RoundDown()
+ pgendaddr, _ := hostarch.Addr(end).RoundUp()
pgMR := memmap.MappableRange{uint64(pgstartaddr), uint64(pgendaddr)}
var done uint64
@@ -494,7 +495,7 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
switch {
case seg.Ok():
// Get internal mappings.
- ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Write)
+ ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Write)
if err != nil {
return done, err
}
@@ -527,7 +528,7 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
}
// AddMapping implements memmap.Mappable.AddMapping.
-func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {
+func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
f.mapsMu.Lock()
defer f.mapsMu.Unlock()
@@ -544,7 +545,7 @@ func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingS
pagesBefore := f.writableMappingPages
// ar is guaranteed to be page aligned per memmap.Mappable.
- f.writableMappingPages += uint64(ar.Length() / usermem.PageSize)
+ f.writableMappingPages += uint64(ar.Length() / hostarch.PageSize)
if f.writableMappingPages < pagesBefore {
panic(fmt.Sprintf("Overflow while mapping potentially writable pages pointing to a tmpfs file. Before %v, after %v", pagesBefore, f.writableMappingPages))
@@ -555,7 +556,7 @@ func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingS
}
// RemoveMapping implements memmap.Mappable.RemoveMapping.
-func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {
+func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
f.mapsMu.Lock()
defer f.mapsMu.Unlock()
@@ -565,7 +566,7 @@ func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Mappi
pagesBefore := f.writableMappingPages
// ar is guaranteed to be page aligned per memmap.Mappable.
- f.writableMappingPages -= uint64(ar.Length() / usermem.PageSize)
+ f.writableMappingPages -= uint64(ar.Length() / hostarch.PageSize)
if f.writableMappingPages > pagesBefore {
panic(fmt.Sprintf("Underflow while unmapping potentially writable pages pointing to a tmpfs file. Before %v, after %v", pagesBefore, f.writableMappingPages))
@@ -574,12 +575,12 @@ func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Mappi
}
// CopyMapping implements memmap.Mappable.CopyMapping.
-func (f *fileInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {
+func (f *fileInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
return f.AddMapping(ctx, ms, dstAR, offset, writable)
}
// Translate implements memmap.Mappable.Translate.
-func (f *fileInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+func (f *fileInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
f.dataMu.Lock()
defer f.dataMu.Unlock()
@@ -612,7 +613,7 @@ func (f *fileInodeOperations) Translate(ctx context.Context, required, optional
Source: segMR,
File: mf,
Offset: seg.FileRangeOf(segMR).Start,
- Perms: usermem.AnyAccess,
+ Perms: hostarch.AnyAccess,
})
translatedEnd = segMR.End
}
diff --git a/pkg/sentry/fs/tmpfs/tmpfs.go b/pkg/sentry/fs/tmpfs/tmpfs.go
index cf4ed5de0..577052888 100644
--- a/pkg/sentry/fs/tmpfs/tmpfs.go
+++ b/pkg/sentry/fs/tmpfs/tmpfs.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
var fsInfo = fs.Info{
@@ -41,8 +41,8 @@ var fsInfo = fs.Info{
// chosen to ensure that BlockSize * Blocks does not overflow int64 (which
// applications may also handle incorrectly).
// TODO(b/29637826): allow configuring a tmpfs size and enforce it.
- TotalBlocks: math.MaxInt64 / usermem.PageSize,
- FreeBlocks: math.MaxInt64 / usermem.PageSize,
+ TotalBlocks: math.MaxInt64 / hostarch.PageSize,
+ FreeBlocks: math.MaxInt64 / hostarch.PageSize,
}
// rename implements fs.InodeOperations.Rename for tmpfs nodes.
@@ -99,7 +99,7 @@ func NewDir(ctx context.Context, contents map[string]*fs.Inode, owner fs.FileOwn
return fs.NewInode(ctx, d, msrc, fs.StableAttr{
DeviceID: tmpfsDevice.DeviceID(),
InodeID: tmpfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Directory,
})
}
@@ -232,7 +232,7 @@ func (d *Dir) newCreateOps() *ramfs.CreateOps {
return fs.NewInode(ctx, iops, dir.MountSource, fs.StableAttr{
DeviceID: tmpfsDevice.DeviceID(),
InodeID: tmpfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.RegularFile,
}), nil
},
@@ -281,7 +281,7 @@ func NewSymlink(ctx context.Context, target string, owner fs.FileOwner, msrc *fs
return fs.NewInode(ctx, s, msrc, fs.StableAttr{
DeviceID: tmpfsDevice.DeviceID(),
InodeID: tmpfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Symlink,
})
}
@@ -311,7 +311,7 @@ func NewSocket(ctx context.Context, socket transport.BoundEndpoint, owner fs.Fil
return fs.NewInode(ctx, s, msrc, fs.StableAttr{
DeviceID: tmpfsDevice.DeviceID(),
InodeID: tmpfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Socket,
})
}
@@ -348,7 +348,7 @@ func NewFifo(ctx context.Context, owner fs.FileOwner, perms fs.FilePermissions,
return fs.NewInode(ctx, fifoIops, msrc, fs.StableAttr{
DeviceID: tmpfsDevice.DeviceID(),
InodeID: tmpfsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Pipe,
})
}
diff --git a/pkg/sentry/fs/tty/BUILD b/pkg/sentry/fs/tty/BUILD
index e6d0eb359..86ada820e 100644
--- a/pkg/sentry/fs/tty/BUILD
+++ b/pkg/sentry/fs/tty/BUILD
@@ -17,6 +17,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/hostarch",
"//pkg/marshal/primitive",
"//pkg/refs",
"//pkg/safemem",
diff --git a/pkg/sentry/fs/tty/dir.go b/pkg/sentry/fs/tty/dir.go
index c2da80bc2..13c9dbe7d 100644
--- a/pkg/sentry/fs/tty/dir.go
+++ b/pkg/sentry/fs/tty/dir.go
@@ -22,6 +22,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -122,7 +123,7 @@ func newDir(ctx context.Context, m *fs.MountSource) *fs.Inode {
// TODO(b/75267214): Since ptsDevice must be shared between
// different mounts, we must not assign fixed numbers.
InodeID: ptsDevice.NextIno(),
- BlockSize: usermem.PageSize,
+ BlockSize: hostarch.PageSize,
Type: fs.Directory,
})
}