Diffstat (limited to 'pkg/usermem')
-rw-r--r--  pkg/usermem/BUILD                    |  55
-rw-r--r--  pkg/usermem/README.md                |  31
-rw-r--r--  pkg/usermem/access_type.go           | 128
-rw-r--r--  pkg/usermem/addr.go                  | 108
-rw-r--r--  pkg/usermem/addr_range_seq_test.go   | 197
-rw-r--r--  pkg/usermem/addr_range_seq_unsafe.go | 277
-rw-r--r--  pkg/usermem/bytes_io.go              | 141
-rw-r--r--  pkg/usermem/bytes_io_unsafe.go       |  47
-rw-r--r--  pkg/usermem/usermem.go               | 595
-rw-r--r--  pkg/usermem/usermem_arm64.go         |  53
-rw-r--r--  pkg/usermem/usermem_test.go          | 424
-rw-r--r--  pkg/usermem/usermem_x86.go           |  38
12 files changed, 2094 insertions(+), 0 deletions(-)
diff --git a/pkg/usermem/BUILD b/pkg/usermem/BUILD
new file mode 100644
index 000000000..6c9ada9c7
--- /dev/null
+++ b/pkg/usermem/BUILD
@@ -0,0 +1,55 @@
+load("//tools:defs.bzl", "go_library", "go_test")
+load("//tools/go_generics:defs.bzl", "go_template_instance")
+
+package(licenses = ["notice"])
+
+go_template_instance(
+ name = "addr_range",
+ out = "addr_range.go",
+ package = "usermem",
+ prefix = "Addr",
+ template = "//pkg/segment:generic_range",
+ types = {
+ "T": "Addr",
+ },
+)
+
+go_library(
+ name = "usermem",
+ srcs = [
+ "access_type.go",
+ "addr.go",
+ "addr_range.go",
+ "addr_range_seq_unsafe.go",
+ "bytes_io.go",
+ "bytes_io_unsafe.go",
+ "usermem.go",
+ "usermem_arm64.go",
+ "usermem_x86.go",
+ ],
+ visibility = ["//:sandbox"],
+ deps = [
+ "//pkg/atomicbitops",
+ "//pkg/binary",
+ "//pkg/context",
+ "//pkg/gohacks",
+ "//pkg/log",
+ "//pkg/safemem",
+ "//pkg/syserror",
+ ],
+)
+
+go_test(
+ name = "usermem_test",
+ size = "small",
+ srcs = [
+ "addr_range_seq_test.go",
+ "usermem_test.go",
+ ],
+ library = ":usermem",
+ deps = [
+ "//pkg/context",
+ "//pkg/safemem",
+ "//pkg/syserror",
+ ],
+)
diff --git a/pkg/usermem/README.md b/pkg/usermem/README.md
new file mode 100644
index 000000000..f6d2137eb
--- /dev/null
+++ b/pkg/usermem/README.md
@@ -0,0 +1,31 @@
+This package defines primitives for sentry access to application memory.
+
+Major types:
+
+- The `IO` interface represents a virtual address space and provides I/O
+ methods on that address space. `IO` is the lowest-level primitive. The
+ primary implementation of the `IO` interface is `mm.MemoryManager`.
+
+- `IOSequence` represents a collection of individually-contiguous address
+  ranges in an `IO` that is operated on sequentially, analogous to Linux's
+ `struct iov_iter`.
+
+Major usage patterns:
+
+- Access to a task's virtual memory, subject to the application's memory
+ protections and while running on that task's goroutine, from a context that
+ is at or above the level of the `kernel` package (e.g. most syscall
+ implementations in `syscalls/linux`); use the `kernel.Task.Copy*` wrappers
+ defined in `kernel/task_usermem.go`.
+
+- Access to a task's virtual memory, from a context that is at or above the
+ level of the `kernel` package, but where any of the above constraints does
+ not hold (e.g. `PTRACE_POKEDATA`, which ignores application memory
+ protections); obtain the task's `mm.MemoryManager` by calling
+ `kernel.Task.MemoryManager`, and call its `IO` methods directly.
+
+- Access to a task's virtual memory, from a context that is below the level of
+ the `kernel` package (e.g. filesystem I/O); clients must pass I/O arguments
+ from higher layers, usually in the form of an `IOSequence`. The
+ `kernel.Task.SingleIOSequence` and `kernel.Task.IovecsIOSequence` functions
+ in `kernel/task_usermem.go` are convenience functions for doing so.
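
To make the third pattern concrete, here is a minimal sketch (hypothetical code, not part of this change) of a filesystem-level read that receives its I/O arguments as an `IOSequence` instead of touching task memory directly. `BytesIOSequence` from this package stands in for the `IOSequence` that a caller at the `kernel` layer would normally construct:

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/usermem"
)

// readAt is a hypothetical read implementation below the kernel layer: it
// only operates on the IOSequence it was handed.
func readAt(ctx context.Context, data []byte, dst usermem.IOSequence) (int64, error) {
	n, err := dst.CopyOut(ctx, data)
	return int64(n), err
}

func main() {
	dst := usermem.BytesIOSequence(make([]byte, 8))
	n, err := readAt(context.Background(), []byte("hello"), dst)
	fmt.Println(n, err) // 5 <nil>
}
```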
diff --git a/pkg/usermem/access_type.go b/pkg/usermem/access_type.go
new file mode 100644
index 000000000..9c1742a59
--- /dev/null
+++ b/pkg/usermem/access_type.go
@@ -0,0 +1,128 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package usermem
+
+import (
+ "syscall"
+)
+
+// AccessType specifies memory access types. This is used for
+// setting mapping permissions, as well as communicating faults.
+//
+// +stateify savable
+type AccessType struct {
+ // Read is read access.
+ Read bool
+
+ // Write is write access.
+ Write bool
+
+ // Execute is executable access.
+ Execute bool
+}
+
+// String returns a pretty representation of access. This looks like the
+// familiar r-x, rw-, etc. and can be relied on as such.
+func (a AccessType) String() string {
+ bits := [3]byte{'-', '-', '-'}
+ if a.Read {
+ bits[0] = 'r'
+ }
+ if a.Write {
+ bits[1] = 'w'
+ }
+ if a.Execute {
+ bits[2] = 'x'
+ }
+ return string(bits[:])
+}
+
+// Any returns true iff at least one of Read, Write or Execute is true.
+func (a AccessType) Any() bool {
+ return a.Read || a.Write || a.Execute
+}
+
+// Prot returns the system prot (syscall.PROT_READ, etc.) for this access.
+func (a AccessType) Prot() int {
+ var prot int
+ if a.Read {
+ prot |= syscall.PROT_READ
+ }
+ if a.Write {
+ prot |= syscall.PROT_WRITE
+ }
+ if a.Execute {
+ prot |= syscall.PROT_EXEC
+ }
+ return prot
+}
+
+// SupersetOf returns true iff the access types in a are a superset of the
+// access types in other.
+func (a AccessType) SupersetOf(other AccessType) bool {
+ if !a.Read && other.Read {
+ return false
+ }
+ if !a.Write && other.Write {
+ return false
+ }
+ if !a.Execute && other.Execute {
+ return false
+ }
+ return true
+}
+
+// Intersect returns the access types set in both a and other.
+func (a AccessType) Intersect(other AccessType) AccessType {
+ return AccessType{
+ Read: a.Read && other.Read,
+ Write: a.Write && other.Write,
+ Execute: a.Execute && other.Execute,
+ }
+}
+
+// Union returns the access types set in either a or other.
+func (a AccessType) Union(other AccessType) AccessType {
+ return AccessType{
+ Read: a.Read || other.Read,
+ Write: a.Write || other.Write,
+ Execute: a.Execute || other.Execute,
+ }
+}
+
+// Effective returns the set of effective access types allowed by a, even if
+// some types are not explicitly allowed.
+func (a AccessType) Effective() AccessType {
+ // In Linux, Write and Execute access generally imply Read access. See
+ // mm/mmap.c:protection_map.
+ //
+ // The notable exception is get_user_pages, which only checks against
+ // the original vma flags. That said, most user memory accesses do not
+ // use GUP.
+ if a.Write || a.Execute {
+ a.Read = true
+ }
+ return a
+}
+
+// Convenient access types.
+var (
+ NoAccess = AccessType{}
+ Read = AccessType{Read: true}
+ Write = AccessType{Write: true}
+ Execute = AccessType{Execute: true}
+ ReadWrite = AccessType{Read: true, Write: true}
+ AnyAccess = AccessType{Read: true, Write: true, Execute: true}
+)
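
As a quick illustration of how these helpers compose (a sketch using only the API defined above):

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/usermem"
)

func main() {
	w := usermem.AccessType{Write: true}
	fmt.Println(w)                                   // -w-
	fmt.Println(w.Effective())                       // rw- (Write implies Read)
	fmt.Println(usermem.ReadWrite.SupersetOf(w))     // true
	fmt.Println(usermem.Read.Union(usermem.Execute)) // r-x
}
```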
diff --git a/pkg/usermem/addr.go b/pkg/usermem/addr.go
new file mode 100644
index 000000000..e79210804
--- /dev/null
+++ b/pkg/usermem/addr.go
@@ -0,0 +1,108 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package usermem
+
+import (
+ "fmt"
+)
+
+// Addr represents a generic virtual address.
+//
+// +stateify savable
+type Addr uintptr
+
+// AddLength adds the given length to start and returns the result. ok is true
+// iff adding the length did not overflow the range of Addr.
+//
+// Note: This function is usually used to get the end of an address range
+// defined by its start address and length. Since the resulting end is
+// exclusive, end == 0 is technically valid, and corresponds to a range that
+// extends to the end of the address space, but ok will be false. This isn't
+// expected to ever come up in practice.
+func (v Addr) AddLength(length uint64) (end Addr, ok bool) {
+ end = v + Addr(length)
+ // The second half of the following check is needed in case uintptr is
+ // smaller than 64 bits.
+ ok = end >= v && length <= uint64(^Addr(0))
+ return
+}
+
+// RoundDown returns the address rounded down to the nearest page boundary.
+func (v Addr) RoundDown() Addr {
+ return v & ^Addr(PageSize-1)
+}
+
+// RoundUp returns the address rounded up to the nearest page boundary. ok is
+// true iff rounding up did not wrap around.
+func (v Addr) RoundUp() (addr Addr, ok bool) {
+ addr = Addr(v + PageSize - 1).RoundDown()
+ ok = addr >= v
+ return
+}
+
+// MustRoundUp is equivalent to RoundUp, but panics if rounding up wraps
+// around.
+func (v Addr) MustRoundUp() Addr {
+ addr, ok := v.RoundUp()
+ if !ok {
+ panic(fmt.Sprintf("usermem.Addr(%d).RoundUp() wraps", v))
+ }
+ return addr
+}
+
+// HugeRoundDown returns the address rounded down to the nearest huge page
+// boundary.
+func (v Addr) HugeRoundDown() Addr {
+ return v & ^Addr(HugePageSize-1)
+}
+
+// HugeRoundUp returns the address rounded up to the nearest huge page boundary.
+// ok is true iff rounding up did not wrap around.
+func (v Addr) HugeRoundUp() (addr Addr, ok bool) {
+ addr = Addr(v + HugePageSize - 1).HugeRoundDown()
+ ok = addr >= v
+ return
+}
+
+// PageOffset returns the offset of v into the current page.
+func (v Addr) PageOffset() uint64 {
+ return uint64(v & Addr(PageSize-1))
+}
+
+// IsPageAligned returns true if v.PageOffset() == 0.
+func (v Addr) IsPageAligned() bool {
+ return v.PageOffset() == 0
+}
+
+// AddrRange is a range of Addrs.
+//
+// type AddrRange <generated by go_generics>
+
+// ToRange returns [v, v+length).
+func (v Addr) ToRange(length uint64) (AddrRange, bool) {
+ end, ok := v.AddLength(length)
+ return AddrRange{v, end}, ok
+}
+
+// IsPageAligned returns true if ar.Start.IsPageAligned() and
+// ar.End.IsPageAligned().
+func (ar AddrRange) IsPageAligned() bool {
+ return ar.Start.IsPageAligned() && ar.End.IsPageAligned()
+}
+
+// String implements fmt.Stringer.String.
+func (ar AddrRange) String() string {
+ return fmt.Sprintf("[%#x, %#x)", ar.Start, ar.End)
+}
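
The rounding and overflow-checking helpers compose as follows (a sketch, assuming the 4K `PageSize` defined in the arch-specific files):

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/usermem"
)

func main() {
	addr := usermem.Addr(0x1234)
	fmt.Printf("%#x\n", addr.RoundDown()) // 0x1000
	up, ok := addr.RoundUp()
	fmt.Printf("%#x %v\n", up, ok) // 0x2000 true

	// Overflow is reported via ok rather than wrapping silently.
	if _, ok := usermem.Addr(^uintptr(0)).AddLength(2); !ok {
		fmt.Println("AddLength overflowed")
	}

	// ToRange builds an AddrRange with the same overflow check.
	ar, _ := usermem.Addr(0x1000).ToRange(0x2000)
	fmt.Println(ar) // [0x1000, 0x3000)
}
```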
diff --git a/pkg/usermem/addr_range_seq_test.go b/pkg/usermem/addr_range_seq_test.go
new file mode 100644
index 000000000..82f735026
--- /dev/null
+++ b/pkg/usermem/addr_range_seq_test.go
@@ -0,0 +1,197 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package usermem
+
+import (
+ "testing"
+)
+
+var addrRangeSeqTests = []struct {
+ desc string
+ ranges []AddrRange
+}{
+ {
+ desc: "Empty sequence",
+ },
+ {
+ desc: "Single empty AddrRange",
+ ranges: []AddrRange{
+ {0x10, 0x10},
+ },
+ },
+ {
+ desc: "Single non-empty AddrRange of length 1",
+ ranges: []AddrRange{
+ {0x10, 0x11},
+ },
+ },
+ {
+ desc: "Single non-empty AddrRange of length 2",
+ ranges: []AddrRange{
+ {0x10, 0x12},
+ },
+ },
+ {
+ desc: "Multiple non-empty AddrRanges",
+ ranges: []AddrRange{
+ {0x10, 0x11},
+ {0x20, 0x22},
+ },
+ },
+ {
+ desc: "Multiple AddrRanges including empty AddrRanges",
+ ranges: []AddrRange{
+ {0x10, 0x10},
+ {0x20, 0x20},
+ {0x30, 0x33},
+ {0x40, 0x44},
+ {0x50, 0x50},
+ {0x60, 0x60},
+ {0x70, 0x77},
+ {0x80, 0x88},
+ {0x90, 0x90},
+ {0xa0, 0xa0},
+ },
+ },
+}
+
+func testAddrRangeSeqEqualityWithTailIteration(t *testing.T, ars AddrRangeSeq, wantRanges []AddrRange) {
+ var wantLen int64
+ for _, ar := range wantRanges {
+ wantLen += int64(ar.Length())
+ }
+
+ var i int
+ for !ars.IsEmpty() {
+ if gotLen := ars.NumBytes(); gotLen != wantLen {
+ t.Errorf("Iteration %d: %v.NumBytes(): got %d, wanted %d", i, ars, gotLen, wantLen)
+ }
+ if gotN, wantN := ars.NumRanges(), len(wantRanges)-i; gotN != wantN {
+ t.Errorf("Iteration %d: %v.NumRanges(): got %d, wanted %d", i, ars, gotN, wantN)
+ }
+ got := ars.Head()
+ if i >= len(wantRanges) {
+ t.Errorf("Iteration %d: %v.Head(): got %s, wanted <end of sequence>", i, ars, got)
+ } else if want := wantRanges[i]; got != want {
+ t.Errorf("Iteration %d: %v.Head(): got %s, wanted %s", i, ars, got, want)
+ }
+ ars = ars.Tail()
+ wantLen -= int64(got.Length())
+ i++
+ }
+ if gotLen := ars.NumBytes(); gotLen != 0 || wantLen != 0 {
+ t.Errorf("Iteration %d: %v.NumBytes(): got %d, wanted %d (which should be 0)", i, ars, gotLen, wantLen)
+ }
+ if gotN := ars.NumRanges(); gotN != 0 {
+ t.Errorf("Iteration %d: %v.NumRanges(): got %d, wanted 0", i, ars, gotN)
+ }
+}
+
+func TestAddrRangeSeqTailIteration(t *testing.T) {
+ for _, test := range addrRangeSeqTests {
+ t.Run(test.desc, func(t *testing.T) {
+ testAddrRangeSeqEqualityWithTailIteration(t, AddrRangeSeqFromSlice(test.ranges), test.ranges)
+ })
+ }
+}
+
+func TestAddrRangeSeqDropFirstEmpty(t *testing.T) {
+ var ars AddrRangeSeq
+ if got, want := ars.DropFirst(1), ars; got != want {
+ t.Errorf("%v.DropFirst(1): got %v, wanted %v", ars, got, want)
+ }
+}
+
+func TestAddrRangeSeqDropSingleByteIteration(t *testing.T) {
+ // Tests AddrRangeSeq iteration using Head/DropFirst, simulating
+ // I/O-per-AddrRange.
+ for _, test := range addrRangeSeqTests {
+ t.Run(test.desc, func(t *testing.T) {
+ // Figure out what AddrRanges we expect to see.
+ var wantLen int64
+ var wantRanges []AddrRange
+ for _, ar := range test.ranges {
+ wantLen += int64(ar.Length())
+ wantRanges = append(wantRanges, ar)
+ if ar.Length() == 0 {
+ // We "do" 0 bytes of I/O and then call DropFirst(0),
+ // advancing to the next AddrRange.
+ continue
+ }
+ // Otherwise we "do" 1 byte of I/O and then call DropFirst(1),
+ // advancing the AddrRange by 1 byte, or to the next AddrRange
+ // if this one is exhausted.
+ for ar.Start++; ar.Length() != 0; ar.Start++ {
+ wantRanges = append(wantRanges, ar)
+ }
+ }
+ t.Logf("Expected AddrRanges: %s (%d bytes)", wantRanges, wantLen)
+
+ ars := AddrRangeSeqFromSlice(test.ranges)
+ var i int
+ for !ars.IsEmpty() {
+ if gotLen := ars.NumBytes(); gotLen != wantLen {
+ t.Errorf("Iteration %d: %v.NumBytes(): got %d, wanted %d", i, ars, gotLen, wantLen)
+ }
+ got := ars.Head()
+ if i >= len(wantRanges) {
+ t.Errorf("Iteration %d: %v.Head(): got %s, wanted <end of sequence>", i, ars, got)
+ } else if want := wantRanges[i]; got != want {
+ t.Errorf("Iteration %d: %v.Head(): got %s, wanted %s", i, ars, got, want)
+ }
+ if got.Length() == 0 {
+ ars = ars.DropFirst(0)
+ } else {
+ ars = ars.DropFirst(1)
+ wantLen--
+ }
+ i++
+ }
+ if gotLen := ars.NumBytes(); gotLen != 0 || wantLen != 0 {
+ t.Errorf("Iteration %d: %v.NumBytes(): got %d, wanted %d (which should be 0)", i, ars, gotLen, wantLen)
+ }
+ })
+ }
+}
+
+func TestAddrRangeSeqTakeFirstEmpty(t *testing.T) {
+ var ars AddrRangeSeq
+ if got, want := ars.TakeFirst(1), ars; got != want {
+ t.Errorf("%v.TakeFirst(1): got %v, wanted %v", ars, got, want)
+ }
+}
+
+func TestAddrRangeSeqTakeFirst(t *testing.T) {
+ ranges := []AddrRange{
+ {0x10, 0x11},
+ {0x20, 0x22},
+ {0x30, 0x30},
+ {0x40, 0x44},
+ {0x50, 0x55},
+ {0x60, 0x60},
+ {0x70, 0x77},
+ }
+ ars := AddrRangeSeqFromSlice(ranges).TakeFirst(5)
+ want := []AddrRange{
+ {0x10, 0x11}, // +1 byte (total 1 byte), not truncated
+ {0x20, 0x22}, // +2 bytes (total 3 bytes), not truncated
+ {0x30, 0x30}, // +0 bytes (total 3 bytes), no change
+ {0x40, 0x42}, // +2 bytes (total 5 bytes), partially truncated
+ {0x50, 0x50}, // +0 bytes (total 5 bytes), fully truncated
+ {0x60, 0x60}, // +0 bytes (total 5 bytes), "fully truncated" (no change)
+ {0x70, 0x70}, // +0 bytes (total 5 bytes), fully truncated
+ }
+ testAddrRangeSeqEqualityWithTailIteration(t, ars, want)
+}
diff --git a/pkg/usermem/addr_range_seq_unsafe.go b/pkg/usermem/addr_range_seq_unsafe.go
new file mode 100644
index 000000000..c09337c15
--- /dev/null
+++ b/pkg/usermem/addr_range_seq_unsafe.go
@@ -0,0 +1,277 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package usermem
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+// An AddrRangeSeq represents a sequence of AddrRanges.
+//
+// AddrRangeSeqs are immutable and may be copied by value. The zero value of
+// AddrRangeSeq represents an empty sequence.
+//
+// An AddrRangeSeq may contain AddrRanges with a length of 0. This is necessary
+// since zero-length AddrRanges are significant to MM bounds checks.
+type AddrRangeSeq struct {
+ // If length is 0, then the AddrRangeSeq represents no AddrRanges.
+ // Invariants: data == 0; offset == 0; limit == 0.
+ //
+ // If length is 1, then the AddrRangeSeq represents the single
+ // AddrRange{offset, offset+limit}. Invariants: data == 0.
+ //
+ // Otherwise, length >= 2, and the AddrRangeSeq represents the `length`
+ // AddrRanges in the array of AddrRanges starting at address `data`,
+ // starting at `offset` bytes into the first AddrRange and limited to the
+ // following `limit` bytes. (AddrRanges after `limit` are still iterated,
+ // but are truncated to a length of 0.) Invariants: data != 0; offset <=
+ // data[0].Length(); limit > 0; offset+limit <= the combined length of all
+ // AddrRanges in the array.
+ data unsafe.Pointer
+ length int
+ offset Addr
+ limit Addr
+}
+
+// AddrRangeSeqOf returns an AddrRangeSeq representing the single AddrRange ar.
+func AddrRangeSeqOf(ar AddrRange) AddrRangeSeq {
+ return AddrRangeSeq{
+ length: 1,
+ offset: ar.Start,
+ limit: ar.Length(),
+ }
+}
+
+// AddrRangeSeqFromSlice returns an AddrRangeSeq representing all AddrRanges in
+// slice.
+//
+// Whether the returned AddrRangeSeq shares memory with slice is unspecified;
+// clients should avoid mutating slices passed to AddrRangeSeqFromSlice.
+//
+// Preconditions: The combined length of all AddrRanges in slice <=
+// math.MaxInt64.
+func AddrRangeSeqFromSlice(slice []AddrRange) AddrRangeSeq {
+ var limit int64
+ for _, ar := range slice {
+ len64 := int64(ar.Length())
+ if len64 < 0 {
+ panic(fmt.Sprintf("Length of AddrRange %v overflows int64", ar))
+ }
+ sum := limit + len64
+ if sum < limit {
+ panic(fmt.Sprintf("Total length of AddrRanges %v overflows int64", slice))
+ }
+ limit = sum
+ }
+ return addrRangeSeqFromSliceLimited(slice, limit)
+}
+
+// Preconditions: The combined length of all AddrRanges in slice <= limit.
+// limit >= 0. If len(slice) != 0, then limit > 0.
+func addrRangeSeqFromSliceLimited(slice []AddrRange, limit int64) AddrRangeSeq {
+ switch len(slice) {
+ case 0:
+ return AddrRangeSeq{}
+ case 1:
+ return AddrRangeSeq{
+ length: 1,
+ offset: slice[0].Start,
+ limit: Addr(limit),
+ }
+ default:
+ return AddrRangeSeq{
+ data: unsafe.Pointer(&slice[0]),
+ length: len(slice),
+ limit: Addr(limit),
+ }
+ }
+}
+
+// IsEmpty returns true if ars.NumRanges() == 0.
+//
+// Note that since AddrRangeSeq may contain AddrRanges with a length of zero,
+// an AddrRangeSeq representing 0 bytes (AddrRangeSeq.NumBytes() == 0) is not
+// necessarily empty.
+func (ars AddrRangeSeq) IsEmpty() bool {
+ return ars.length == 0
+}
+
+// NumRanges returns the number of AddrRanges in ars.
+func (ars AddrRangeSeq) NumRanges() int {
+ return ars.length
+}
+
+// NumBytes returns the number of bytes represented by ars.
+func (ars AddrRangeSeq) NumBytes() int64 {
+ return int64(ars.limit)
+}
+
+// Head returns the first AddrRange in ars.
+//
+// Preconditions: !ars.IsEmpty().
+func (ars AddrRangeSeq) Head() AddrRange {
+ if ars.length == 0 {
+ panic("empty AddrRangeSeq")
+ }
+ if ars.length == 1 {
+ return AddrRange{ars.offset, ars.offset + ars.limit}
+ }
+ ar := *(*AddrRange)(ars.data)
+ ar.Start += ars.offset
+ if ar.Length() > ars.limit {
+ ar.End = ar.Start + ars.limit
+ }
+ return ar
+}
+
+// Tail returns an AddrRangeSeq consisting of all AddrRanges in ars after the
+// first.
+//
+// Preconditions: !ars.IsEmpty().
+func (ars AddrRangeSeq) Tail() AddrRangeSeq {
+ if ars.length == 0 {
+ panic("empty AddrRangeSeq")
+ }
+ if ars.length == 1 {
+ return AddrRangeSeq{}
+ }
+ return ars.externalTail()
+}
+
+// Preconditions: ars.length >= 2.
+func (ars AddrRangeSeq) externalTail() AddrRangeSeq {
+ headLen := (*AddrRange)(ars.data).Length() - ars.offset
+ var tailLimit int64
+ if ars.limit > headLen {
+ tailLimit = int64(ars.limit - headLen)
+ }
+ var extSlice []AddrRange
+ extSliceHdr := (*reflect.SliceHeader)(unsafe.Pointer(&extSlice))
+ extSliceHdr.Data = uintptr(ars.data)
+ extSliceHdr.Len = ars.length
+ extSliceHdr.Cap = ars.length
+ return addrRangeSeqFromSliceLimited(extSlice[1:], tailLimit)
+}
+
+// DropFirst returns an AddrRangeSeq equivalent to ars, but with the first n
+// bytes omitted. If n > ars.NumBytes(), DropFirst returns an empty
+// AddrRangeSeq.
+//
+// If !ars.IsEmpty() and ars.Head().Length() == 0, DropFirst will always omit
+// at least ars.Head(), even if n == 0. This guarantees that the basic pattern
+// of:
+//
+// for !ars.IsEmpty() {
+// n, err = doIOWith(ars.Head())
+// if err != nil {
+// return err
+// }
+// ars = ars.DropFirst(n)
+// }
+//
+// works even in the presence of zero-length AddrRanges.
+//
+// Preconditions: n >= 0.
+func (ars AddrRangeSeq) DropFirst(n int) AddrRangeSeq {
+ if n < 0 {
+ panic(fmt.Sprintf("invalid n: %d", n))
+ }
+ return ars.DropFirst64(int64(n))
+}
+
+// DropFirst64 is equivalent to DropFirst but takes an int64.
+func (ars AddrRangeSeq) DropFirst64(n int64) AddrRangeSeq {
+ if n < 0 {
+ panic(fmt.Sprintf("invalid n: %d", n))
+ }
+ if Addr(n) > ars.limit {
+ return AddrRangeSeq{}
+ }
+ // Handle initial empty AddrRange.
+ switch ars.length {
+ case 0:
+ return AddrRangeSeq{}
+ case 1:
+ if ars.limit == 0 {
+ return AddrRangeSeq{}
+ }
+ default:
+ if rawHeadLen := (*AddrRange)(ars.data).Length(); ars.offset == rawHeadLen {
+ ars = ars.externalTail()
+ }
+ }
+ for n != 0 {
+ // Calling ars.Head() here is surprisingly expensive, so inline getting
+ // the head's length.
+ var headLen Addr
+ if ars.length == 1 {
+ headLen = ars.limit
+ } else {
+ headLen = (*AddrRange)(ars.data).Length() - ars.offset
+ }
+ if Addr(n) < headLen {
+ // Dropping ends partway through the head AddrRange.
+ ars.offset += Addr(n)
+ ars.limit -= Addr(n)
+ return ars
+ }
+ n -= int64(headLen)
+ ars = ars.Tail()
+ }
+ return ars
+}
+
+// TakeFirst returns an AddrRangeSeq equivalent to ars, but iterating at most n
+// bytes. TakeFirst never removes AddrRanges from ars; AddrRanges beyond the
+// first n bytes are reduced to a length of zero, but will still be iterated.
+//
+// Preconditions: n >= 0.
+func (ars AddrRangeSeq) TakeFirst(n int) AddrRangeSeq {
+ if n < 0 {
+ panic(fmt.Sprintf("invalid n: %d", n))
+ }
+ return ars.TakeFirst64(int64(n))
+}
+
+// TakeFirst64 is equivalent to TakeFirst but takes an int64.
+func (ars AddrRangeSeq) TakeFirst64(n int64) AddrRangeSeq {
+ if n < 0 {
+ panic(fmt.Sprintf("invalid n: %d", n))
+ }
+ if ars.limit > Addr(n) {
+ ars.limit = Addr(n)
+ }
+ return ars
+}
+
+// String implements fmt.Stringer.String.
+func (ars AddrRangeSeq) String() string {
+ // This is deliberately chosen to be the same as fmt's automatic stringer
+ // for []AddrRange.
+ var buf bytes.Buffer
+ buf.WriteByte('[')
+ var sep string
+ for !ars.IsEmpty() {
+ buf.WriteString(sep)
+ sep = " "
+ buf.WriteString(ars.Head().String())
+ ars = ars.Tail()
+ }
+ buf.WriteByte(']')
+ return buf.String()
+}
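
A runnable sketch of the Head/DropFirst iteration pattern documented on DropFirst, showing that zero-length AddrRanges are visited rather than skipped:

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/usermem"
)

func main() {
	ars := usermem.AddrRangeSeqFromSlice([]usermem.AddrRange{
		{Start: 0x10, End: 0x13},
		{Start: 0x20, End: 0x20}, // zero-length, but still iterated
		{Start: 0x30, End: 0x32},
	})
	for !ars.IsEmpty() {
		ar := ars.Head()
		fmt.Printf("%s, %d bytes left in sequence\n", ar, ars.NumBytes())
		// "Do" I/O covering the whole head range, then drop it; for the
		// zero-length range this is DropFirst64(0), which still advances.
		ars = ars.DropFirst64(int64(ar.Length()))
	}
}
```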
diff --git a/pkg/usermem/bytes_io.go b/pkg/usermem/bytes_io.go
new file mode 100644
index 000000000..e177d30eb
--- /dev/null
+++ b/pkg/usermem/bytes_io.go
@@ -0,0 +1,141 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package usermem
+
+import (
+ "gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/safemem"
+ "gvisor.dev/gvisor/pkg/syserror"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// BytesIO implements IO using a byte slice. Addresses are interpreted as
+// offsets into the slice. Reads and writes beyond the end of the slice return
+// EFAULT.
+type BytesIO struct {
+ Bytes []byte
+}
+
+// CopyOut implements IO.CopyOut.
+func (b *BytesIO) CopyOut(ctx context.Context, addr Addr, src []byte, opts IOOpts) (int, error) {
+ rngN, rngErr := b.rangeCheck(addr, len(src))
+ if rngN == 0 {
+ return 0, rngErr
+ }
+ return copy(b.Bytes[int(addr):], src[:rngN]), rngErr
+}
+
+// CopyIn implements IO.CopyIn.
+func (b *BytesIO) CopyIn(ctx context.Context, addr Addr, dst []byte, opts IOOpts) (int, error) {
+ rngN, rngErr := b.rangeCheck(addr, len(dst))
+ if rngN == 0 {
+ return 0, rngErr
+ }
+ return copy(dst[:rngN], b.Bytes[int(addr):]), rngErr
+}
+
+// ZeroOut implements IO.ZeroOut.
+func (b *BytesIO) ZeroOut(ctx context.Context, addr Addr, toZero int64, opts IOOpts) (int64, error) {
+ if toZero > int64(maxInt) {
+ return 0, syserror.EINVAL
+ }
+ rngN, rngErr := b.rangeCheck(addr, int(toZero))
+ if rngN == 0 {
+ return 0, rngErr
+ }
+ zeroSlice := b.Bytes[int(addr) : int(addr)+rngN]
+ for i := range zeroSlice {
+ zeroSlice[i] = 0
+ }
+ return int64(rngN), rngErr
+}
+
+// CopyOutFrom implements IO.CopyOutFrom.
+func (b *BytesIO) CopyOutFrom(ctx context.Context, ars AddrRangeSeq, src safemem.Reader, opts IOOpts) (int64, error) {
+ dsts, rngErr := b.blocksFromAddrRanges(ars)
+ n, err := src.ReadToBlocks(dsts)
+ if err != nil {
+ return int64(n), err
+ }
+ return int64(n), rngErr
+}
+
+// CopyInTo implements IO.CopyInTo.
+func (b *BytesIO) CopyInTo(ctx context.Context, ars AddrRangeSeq, dst safemem.Writer, opts IOOpts) (int64, error) {
+ srcs, rngErr := b.blocksFromAddrRanges(ars)
+ n, err := dst.WriteFromBlocks(srcs)
+ if err != nil {
+ return int64(n), err
+ }
+ return int64(n), rngErr
+}
+
+func (b *BytesIO) rangeCheck(addr Addr, length int) (int, error) {
+ if length == 0 {
+ return 0, nil
+ }
+ if length < 0 {
+ return 0, syserror.EINVAL
+ }
+ max := Addr(len(b.Bytes))
+ if addr >= max {
+ return 0, syserror.EFAULT
+ }
+ end, ok := addr.AddLength(uint64(length))
+ if !ok || end > max {
+ return int(max - addr), syserror.EFAULT
+ }
+ return length, nil
+}
+
+func (b *BytesIO) blocksFromAddrRanges(ars AddrRangeSeq) (safemem.BlockSeq, error) {
+ switch ars.NumRanges() {
+ case 0:
+ return safemem.BlockSeq{}, nil
+ case 1:
+ block, err := b.blockFromAddrRange(ars.Head())
+ return safemem.BlockSeqOf(block), err
+ default:
+ blocks := make([]safemem.Block, 0, ars.NumRanges())
+ for !ars.IsEmpty() {
+ block, err := b.blockFromAddrRange(ars.Head())
+ if block.Len() != 0 {
+ blocks = append(blocks, block)
+ }
+ if err != nil {
+ return safemem.BlockSeqFromSlice(blocks), err
+ }
+ ars = ars.Tail()
+ }
+ return safemem.BlockSeqFromSlice(blocks), nil
+ }
+}
+
+func (b *BytesIO) blockFromAddrRange(ar AddrRange) (safemem.Block, error) {
+ n, err := b.rangeCheck(ar.Start, int(ar.Length()))
+ if n == 0 {
+ return safemem.Block{}, err
+ }
+ return safemem.BlockFromSafeSlice(b.Bytes[int(ar.Start) : int(ar.Start)+n]), err
+}
+
+// BytesIOSequence returns an IOSequence representing the given byte slice.
+func BytesIOSequence(buf []byte) IOSequence {
+ return IOSequence{
+ IO: &BytesIO{buf},
+ Addrs: AddrRangeSeqOf(AddrRange{0, Addr(len(buf))}),
+ }
+}
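
The truncation behavior of rangeCheck is visible through the public API. A sketch:

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/usermem"
)

func main() {
	ctx := context.Background()
	b := &usermem.BytesIO{Bytes: []byte("ABCDE")}

	// A fully in-bounds write succeeds.
	n, err := b.CopyOut(ctx, 1, []byte("xy"), usermem.IOOpts{})
	fmt.Println(n, err, string(b.Bytes)) // n=2, err=<nil>, Bytes="AxyDE"

	// A write that runs past the end is truncated and reports EFAULT.
	n, err = b.CopyOut(ctx, 4, []byte("zz"), usermem.IOOpts{})
	fmt.Println(n, err, string(b.Bytes)) // n=1, err=EFAULT, Bytes="AxyDz"
}
```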
diff --git a/pkg/usermem/bytes_io_unsafe.go b/pkg/usermem/bytes_io_unsafe.go
new file mode 100644
index 000000000..20de5037d
--- /dev/null
+++ b/pkg/usermem/bytes_io_unsafe.go
@@ -0,0 +1,47 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package usermem
+
+import (
+ "sync/atomic"
+ "unsafe"
+
+ "gvisor.dev/gvisor/pkg/atomicbitops"
+ "gvisor.dev/gvisor/pkg/context"
+)
+
+// SwapUint32 implements IO.SwapUint32.
+func (b *BytesIO) SwapUint32(ctx context.Context, addr Addr, new uint32, opts IOOpts) (uint32, error) {
+ if _, rngErr := b.rangeCheck(addr, 4); rngErr != nil {
+ return 0, rngErr
+ }
+ return atomic.SwapUint32((*uint32)(unsafe.Pointer(&b.Bytes[int(addr)])), new), nil
+}
+
+// CompareAndSwapUint32 implements IO.CompareAndSwapUint32.
+func (b *BytesIO) CompareAndSwapUint32(ctx context.Context, addr Addr, old, new uint32, opts IOOpts) (uint32, error) {
+ if _, rngErr := b.rangeCheck(addr, 4); rngErr != nil {
+ return 0, rngErr
+ }
+ return atomicbitops.CompareAndSwapUint32((*uint32)(unsafe.Pointer(&b.Bytes[int(addr)])), old, new), nil
+}
+
+// LoadUint32 implements IO.LoadUint32.
+func (b *BytesIO) LoadUint32(ctx context.Context, addr Addr, opts IOOpts) (uint32, error) {
+ if _, err := b.rangeCheck(addr, 4); err != nil {
+ return 0, err
+ }
+ return atomic.LoadUint32((*uint32)(unsafe.Pointer(&b.Bytes[int(addr)]))), nil
+}
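
A sketch of the atomic operations against a BytesIO backing store (offset 0 is 4-byte aligned, as the IO contract requires):

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/usermem"
)

func main() {
	ctx := context.Background()
	b := &usermem.BytesIO{Bytes: make([]byte, 8)}

	// Publish 1 at offset 0, then compare-and-swap it to 2.
	if _, err := b.SwapUint32(ctx, 0, 1, usermem.IOOpts{}); err != nil {
		panic(err)
	}
	prev, err := b.CompareAndSwapUint32(ctx, 0, 1, 2, usermem.IOOpts{})
	fmt.Println(prev, err) // 1 <nil>: the CAS succeeded

	cur, err := b.LoadUint32(ctx, 0, usermem.IOOpts{})
	fmt.Println(cur, err) // 2 <nil>
}
```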
diff --git a/pkg/usermem/usermem.go b/pkg/usermem/usermem.go
new file mode 100644
index 000000000..cd6a0ea6b
--- /dev/null
+++ b/pkg/usermem/usermem.go
@@ -0,0 +1,595 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package usermem governs access to user memory.
+package usermem
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strconv"
+
+ "gvisor.dev/gvisor/pkg/binary"
+ "gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/safemem"
+ "gvisor.dev/gvisor/pkg/syserror"
+)
+
+// IO provides access to the contents of a virtual memory space.
+type IO interface {
+ // CopyOut copies len(src) bytes from src to the memory mapped at addr. It
+ // returns the number of bytes copied. If the number of bytes copied is <
+ // len(src), it returns a non-nil error explaining why.
+ //
+ // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
+ // any following locks in the lock order.
+ //
+ // Postconditions: CopyOut does not retain src.
+ CopyOut(ctx context.Context, addr Addr, src []byte, opts IOOpts) (int, error)
+
+ // CopyIn copies len(dst) bytes from the memory mapped at addr to dst.
+ // It returns the number of bytes copied. If the number of bytes copied is
+ // < len(dst), it returns a non-nil error explaining why.
+ //
+ // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
+ // any following locks in the lock order.
+ //
+ // Postconditions: CopyIn does not retain dst.
+ CopyIn(ctx context.Context, addr Addr, dst []byte, opts IOOpts) (int, error)
+
+ // ZeroOut sets toZero bytes to 0, starting at addr. It returns the number
+ // of bytes zeroed. If the number of bytes zeroed is < toZero, it returns a
+ // non-nil error explaining why.
+ //
+ // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
+ // any following locks in the lock order. toZero >= 0.
+ ZeroOut(ctx context.Context, addr Addr, toZero int64, opts IOOpts) (int64, error)
+
+ // CopyOutFrom copies ars.NumBytes() bytes from src to the memory mapped at
+ // ars. It returns the number of bytes copied, which may be less than the
+ // number of bytes read from src if copying fails. CopyOutFrom may return a
+ // partial copy without an error iff src.ReadToBlocks returns a partial
+ // read without an error.
+ //
+ // CopyOutFrom calls src.ReadToBlocks at most once.
+ //
+ // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
+ // any following locks in the lock order. src.ReadToBlocks must not block
+ // on mm.MemoryManager.activeMu or any preceding locks in the lock order.
+ CopyOutFrom(ctx context.Context, ars AddrRangeSeq, src safemem.Reader, opts IOOpts) (int64, error)
+
+ // CopyInTo copies ars.NumBytes() bytes from the memory mapped at ars to
+ // dst. It returns the number of bytes copied. CopyInTo may return a
+ // partial copy without an error iff dst.WriteFromBlocks returns a partial
+ // write without an error.
+ //
+ // CopyInTo calls dst.WriteFromBlocks at most once.
+ //
+ // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
+ // any following locks in the lock order. dst.WriteFromBlocks must not
+ // block on mm.MemoryManager.activeMu or any preceding locks in the lock
+ // order.
+ CopyInTo(ctx context.Context, ars AddrRangeSeq, dst safemem.Writer, opts IOOpts) (int64, error)
+
+ // TODO(jamieliu): The requirement that CopyOutFrom/CopyInTo call src/dst
+ // at most once, which is unnecessary in most cases, forces implementations
+ // to gather safemem.Blocks into a single slice to pass to src/dst. Add
+ // CopyOutFromIter/CopyInToIter, which relaxes this restriction, to avoid
+ // this allocation.
+
+ // SwapUint32 atomically sets the uint32 value at addr to new and
+ // returns the previous value.
+ //
+ // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
+ // any following locks in the lock order. addr must be aligned to a 4-byte
+ // boundary.
+ SwapUint32(ctx context.Context, addr Addr, new uint32, opts IOOpts) (uint32, error)
+
+ // CompareAndSwapUint32 atomically compares the uint32 value at addr to
+ // old; if they are equal, the value in memory is replaced by new. In
+ // either case, the previous value stored in memory is returned.
+ //
+ // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
+ // any following locks in the lock order. addr must be aligned to a 4-byte
+ // boundary.
+ CompareAndSwapUint32(ctx context.Context, addr Addr, old, new uint32, opts IOOpts) (uint32, error)
+
+ // LoadUint32 atomically loads the uint32 value at addr and returns it.
+ //
+ // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
+ // any following locks in the lock order. addr must be aligned to a 4-byte
+ // boundary.
+ LoadUint32(ctx context.Context, addr Addr, opts IOOpts) (uint32, error)
+}
+
+// IOOpts contains options applicable to all IO methods.
+type IOOpts struct {
+ // If IgnorePermissions is true, application-defined memory protections set
+ // by mmap(2) or mprotect(2) will be ignored. (Memory protections required
+ // by the target of the mapping are never ignored.)
+ IgnorePermissions bool
+
+ // If AddressSpaceActive is true, the IO implementation may assume that it
+ // has an active AddressSpace and can therefore use AddressSpace copying
+ // without performing activation. See mm/io.go for details.
+ AddressSpaceActive bool
+}
+
+// IOReadWriter is an io.ReadWriter that reads from / writes to addresses
+// starting at addr in IO. The preconditions that apply to IO.CopyIn and
+// IO.CopyOut also apply to IOReadWriter.Read and IOReadWriter.Write
+// respectively.
+type IOReadWriter struct {
+ Ctx context.Context
+ IO IO
+ Addr Addr
+ Opts IOOpts
+}
+
+// Read implements io.Reader.Read.
+//
+// Note that an address space does not have an "end of file", so Read can only
+// return io.EOF if IO.CopyIn returns io.EOF. Attempts to read unmapped or
+// unreadable memory, or beyond the end of the address space, should return
+// EFAULT.
+func (rw *IOReadWriter) Read(dst []byte) (int, error) {
+ n, err := rw.IO.CopyIn(rw.Ctx, rw.Addr, dst, rw.Opts)
+ end, ok := rw.Addr.AddLength(uint64(n))
+ if ok {
+ rw.Addr = end
+ } else {
+ // Disallow wraparound.
+ rw.Addr = ^Addr(0)
+ if err != nil {
+ err = syserror.EFAULT
+ }
+ }
+ return n, err
+}
+
+// Write implements io.Writer.Write.
+func (rw *IOReadWriter) Write(src []byte) (int, error) {
+ n, err := rw.IO.CopyOut(rw.Ctx, rw.Addr, src, rw.Opts)
+ end, ok := rw.Addr.AddLength(uint64(n))
+ if ok {
+ rw.Addr = end
+ } else {
+ // Disallow wraparound.
+ rw.Addr = ^Addr(0)
+ if err != nil {
+ err = syserror.EFAULT
+ }
+ }
+ return n, err
+}
+
+// CopyObjectOut copies a fixed-size value or slice of fixed-size values from
+// src to the memory mapped at addr in uio. It returns the number of bytes
+// copied.
+//
+// CopyObjectOut must use reflection to encode src; performance-sensitive
+// clients should do encoding manually and use uio.CopyOut directly.
+//
+// Preconditions: As for IO.CopyOut.
+func CopyObjectOut(ctx context.Context, uio IO, addr Addr, src interface{}, opts IOOpts) (int, error) {
+ w := &IOReadWriter{
+ Ctx: ctx,
+ IO: uio,
+ Addr: addr,
+ Opts: opts,
+ }
+ // Allocate a byte slice the size of the object being marshaled. This
+ // adds an extra reflection call, but avoids needing to grow the slice
+ // during encoding, which can result in many heap-allocated slices.
+ b := make([]byte, 0, binary.Size(src))
+ return w.Write(binary.Marshal(b, ByteOrder, src))
+}
+
+// CopyObjectIn copies a fixed-size value or slice of fixed-size values from
+// the memory mapped at addr in uio to dst. It returns the number of bytes
+// copied.
+//
+// CopyObjectIn must use reflection to decode dst; performance-sensitive
+// clients should use uio.CopyIn directly and do decoding manually.
+//
+// Preconditions: As for IO.CopyIn.
+func CopyObjectIn(ctx context.Context, uio IO, addr Addr, dst interface{}, opts IOOpts) (int, error) {
+ r := &IOReadWriter{
+ Ctx: ctx,
+ IO: uio,
+ Addr: addr,
+ Opts: opts,
+ }
+ buf := make([]byte, binary.Size(dst))
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return 0, err
+ }
+ binary.Unmarshal(buf, ByteOrder, dst)
+ return int(r.Addr - addr), nil
+}
+
+// CopyStringIn tuning parameters, defined outside that function for tests.
+const (
+ copyStringIncrement = 64
+ copyStringMaxInitBufLen = 256
+)
+
+// CopyStringIn copies a NUL-terminated string of unknown length from the
+// memory mapped at addr in uio and returns it as a string (not including the
+// trailing NUL). If the length of the string, including the terminating NUL,
+// would exceed maxlen, CopyStringIn returns the string truncated to maxlen and
+// ENAMETOOLONG.
+//
+// Preconditions: As for IO.CopyIn. maxlen >= 0.
+func CopyStringIn(ctx context.Context, uio IO, addr Addr, maxlen int, opts IOOpts) (string, error) {
+ initLen := maxlen
+ if initLen > copyStringMaxInitBufLen {
+ initLen = copyStringMaxInitBufLen
+ }
+ buf := make([]byte, initLen)
+ var done int
+ for done < maxlen {
+ // Read up to copyStringIncrement bytes at a time.
+ readlen := copyStringIncrement
+ if readlen > maxlen-done {
+ readlen = maxlen - done
+ }
+ end, ok := addr.AddLength(uint64(readlen))
+ if !ok {
+ return gohacks.StringFromImmutableBytes(buf[:done]), syserror.EFAULT
+ }
+ // Shorten the read to avoid crossing page boundaries, since faulting
+ // in a page unnecessarily is expensive. This also ensures that partial
+ // copies up to the end of application-mappable memory succeed.
+ if addr.RoundDown() != end.RoundDown() {
+ end = end.RoundDown()
+ readlen = int(end - addr)
+ }
+ // Ensure that our buffer is large enough to accommodate the read.
+ if done+readlen > len(buf) {
+ newBufLen := len(buf) * 2
+ if newBufLen > maxlen {
+ newBufLen = maxlen
+ }
+ buf = append(buf, make([]byte, newBufLen-len(buf))...)
+ }
+ n, err := uio.CopyIn(ctx, addr, buf[done:done+readlen], opts)
+ // Look for the terminating zero byte, which may have occurred before
+ // hitting err.
+ if i := bytes.IndexByte(buf[done:done+n], byte(0)); i >= 0 {
+ return gohacks.StringFromImmutableBytes(buf[:done+i]), nil
+ }
+
+ done += n
+ if err != nil {
+ return gohacks.StringFromImmutableBytes(buf[:done]), err
+ }
+ addr = end
+ }
+ return gohacks.StringFromImmutableBytes(buf), syserror.ENAMETOOLONG
+}
+
+// CopyOutVec copies bytes from src to the memory mapped at ars in uio. The
+// maximum number of bytes copied is ars.NumBytes() or len(src), whichever is
+// less. CopyOutVec returns the number of bytes copied; if this is less than
+// the maximum, it returns a non-nil error explaining why.
+//
+// Preconditions: As for IO.CopyOut.
+func CopyOutVec(ctx context.Context, uio IO, ars AddrRangeSeq, src []byte, opts IOOpts) (int, error) {
+ var done int
+ for !ars.IsEmpty() && done < len(src) {
+ ar := ars.Head()
+ cplen := len(src) - done
+ if Addr(cplen) >= ar.Length() {
+ cplen = int(ar.Length())
+ }
+ n, err := uio.CopyOut(ctx, ar.Start, src[done:done+cplen], opts)
+ done += n
+ if err != nil {
+ return done, err
+ }
+ ars = ars.DropFirst(n)
+ }
+ return done, nil
+}
+
+// CopyInVec copies bytes from the memory mapped at ars in uio to dst. The
+// maximum number of bytes copied is ars.NumBytes() or len(dst), whichever is
+// less. CopyInVec returns the number of bytes copied; if this is less than the
+// maximum, it returns a non-nil error explaining why.
+//
+// Preconditions: As for IO.CopyIn.
+func CopyInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dst []byte, opts IOOpts) (int, error) {
+ var done int
+ for !ars.IsEmpty() && done < len(dst) {
+ ar := ars.Head()
+ cplen := len(dst) - done
+ if Addr(cplen) >= ar.Length() {
+ cplen = int(ar.Length())
+ }
+ n, err := uio.CopyIn(ctx, ar.Start, dst[done:done+cplen], opts)
+ done += n
+ if err != nil {
+ return done, err
+ }
+ ars = ars.DropFirst(n)
+ }
+ return done, nil
+}
+
+// ZeroOutVec writes zeroes to the memory mapped at ars in uio. The maximum
+// number of bytes written is ars.NumBytes() or toZero, whichever is less.
+// ZeroOutVec returns the number of bytes written; if this is less than the
+// maximum, it returns a non-nil error explaining why.
+//
+// Preconditions: As for IO.ZeroOut.
+func ZeroOutVec(ctx context.Context, uio IO, ars AddrRangeSeq, toZero int64, opts IOOpts) (int64, error) {
+ var done int64
+ for !ars.IsEmpty() && done < toZero {
+ ar := ars.Head()
+ cplen := toZero - done
+ if Addr(cplen) >= ar.Length() {
+ cplen = int64(ar.Length())
+ }
+ n, err := uio.ZeroOut(ctx, ar.Start, cplen, opts)
+ done += n
+ if err != nil {
+ return done, err
+ }
+ ars = ars.DropFirst64(n)
+ }
+ return done, nil
+}
+
+func isASCIIWhitespace(b byte) bool {
+ // Compare Linux include/linux/ctype.h, lib/ctype.c.
+ // 9 => horizontal tab '\t'
+ // 10 => line feed '\n'
+ // 11 => vertical tab '\v'
+	// 12 => form feed '\f'
+ // 13 => carriage return '\r'
+ return b == ' ' || (b >= 9 && b <= 13)
+}
+
+// CopyInt32StringsInVec copies up to len(dsts) whitespace-separated decimal
+// strings from the memory mapped at ars in uio and converts them to int32
+// values in dsts. It returns the number of bytes read.
+//
+// CopyInt32StringsInVec shares the following properties with Linux's
+// kernel/sysctl.c:proc_dointvec(write=1):
+//
+// - If any read value overflows the range of int32, or any invalid characters
+// are encountered during the read, CopyInt32StringsInVec returns EINVAL.
+//
+// - If, upon reaching the end of ars, fewer than len(dsts) values have been
+// read, CopyInt32StringsInVec returns no error if at least 1 value was read
+// and EINVAL otherwise.
+//
+// - Trailing whitespace after the last successfully read value is counted in
+// the number of bytes read.
+//
+// Unlike proc_dointvec():
+//
+// - CopyInt32StringsInVec does not implicitly limit ars.NumBytes() to
+// PageSize-1; callers that require this must do so explicitly.
+//
+// - CopyInt32StringsInVec returns EINVAL if ars.NumBytes() == 0.
+//
+// Preconditions: As for CopyInVec.
+func CopyInt32StringsInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dsts []int32, opts IOOpts) (int64, error) {
+ if len(dsts) == 0 {
+ return 0, nil
+ }
+
+ buf := make([]byte, ars.NumBytes())
+ n, cperr := CopyInVec(ctx, uio, ars, buf, opts)
+ buf = buf[:n]
+
+ var i, j int
+ for ; j < len(dsts); j++ {
+ // Skip leading whitespace.
+ for i < len(buf) && isASCIIWhitespace(buf[i]) {
+ i++
+ }
+ if i == len(buf) {
+ break
+ }
+
+ // Find the end of the value to be parsed (next whitespace or end of string).
+ nextI := i + 1
+ for nextI < len(buf) && !isASCIIWhitespace(buf[nextI]) {
+ nextI++
+ }
+
+ // Parse a single value.
+ val, err := strconv.ParseInt(string(buf[i:nextI]), 10, 32)
+ if err != nil {
+ return int64(i), syserror.EINVAL
+ }
+ dsts[j] = int32(val)
+
+ i = nextI
+ }
+
+ // Skip trailing whitespace.
+ for i < len(buf) && isASCIIWhitespace(buf[i]) {
+ i++
+ }
+
+ if cperr != nil {
+ return int64(i), cperr
+ }
+ if j == 0 {
+ return int64(i), syserror.EINVAL
+ }
+ return int64(i), nil
+}
+
+// CopyInt32StringInVec is equivalent to CopyInt32StringsInVec, but copies at
+// most one int32.
+func CopyInt32StringInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dst *int32, opts IOOpts) (int64, error) {
+ dsts := [1]int32{*dst}
+ n, err := CopyInt32StringsInVec(ctx, uio, ars, dsts[:], opts)
+ *dst = dsts[0]
+ return n, err
+}
+
+// IOSequence holds arguments to IO methods.
+type IOSequence struct {
+ IO IO
+ Addrs AddrRangeSeq
+ Opts IOOpts
+}
+
+// NumBytes returns s.Addrs.NumBytes().
+//
+// Note that NumBytes() may return 0 even if !s.Addrs.IsEmpty(), since
+// s.Addrs may contain a non-zero number of zero-length AddrRanges.
+// Many clients of IOSequence currently do something like:
+//
+// if ioseq.NumBytes() == 0 {
+// return 0, nil
+// }
+// if f.availableBytes == 0 {
+// return 0, syserror.ErrWouldBlock
+// }
+// return ioseq.CopyOutFrom(..., reader)
+//
+// In such cases, using s.Addrs.IsEmpty() will cause them to have the wrong
+// behavior for zero-length I/O. However, using s.NumBytes() == 0 instead means
+// that we will return success for zero-length I/O in cases where Linux would
+// return EFAULT due to a failed access_ok() check, so in the long term we
+// should move checks for ErrWouldBlock etc. into the body of
+// reader.ReadToBlocks and use s.Addrs.IsEmpty() instead.
+func (s IOSequence) NumBytes() int64 {
+ return s.Addrs.NumBytes()
+}
+
+// DropFirst returns a copy of s with s.Addrs.DropFirst(n).
+//
+// Preconditions: As for AddrRangeSeq.DropFirst.
+func (s IOSequence) DropFirst(n int) IOSequence {
+ return IOSequence{s.IO, s.Addrs.DropFirst(n), s.Opts}
+}
+
+// DropFirst64 returns a copy of s with s.Addrs.DropFirst64(n).
+//
+// Preconditions: As for AddrRangeSeq.DropFirst64.
+func (s IOSequence) DropFirst64(n int64) IOSequence {
+ return IOSequence{s.IO, s.Addrs.DropFirst64(n), s.Opts}
+}
+
+// TakeFirst returns a copy of s with s.Addrs.TakeFirst(n).
+//
+// Preconditions: As for AddrRangeSeq.TakeFirst.
+func (s IOSequence) TakeFirst(n int) IOSequence {
+ return IOSequence{s.IO, s.Addrs.TakeFirst(n), s.Opts}
+}
+
+// TakeFirst64 returns a copy of s with s.Addrs.TakeFirst64(n).
+//
+// Preconditions: As for AddrRangeSeq.TakeFirst64.
+func (s IOSequence) TakeFirst64(n int64) IOSequence {
+ return IOSequence{s.IO, s.Addrs.TakeFirst64(n), s.Opts}
+}
+
+// CopyOut invokes CopyOutVec over s.Addrs.
+//
+// As with CopyOutVec, if s.NumBytes() < len(src), the copy will be truncated
+// to s.NumBytes(), and a nil error will be returned.
+//
+// Preconditions: As for CopyOutVec.
+func (s IOSequence) CopyOut(ctx context.Context, src []byte) (int, error) {
+ return CopyOutVec(ctx, s.IO, s.Addrs, src, s.Opts)
+}
+
+// CopyIn invokes CopyInVec over s.Addrs.
+//
+// As with CopyInVec, if s.NumBytes() < len(dst), the copy will be truncated to
+// s.NumBytes(), and a nil error will be returned.
+//
+// Preconditions: As for CopyInVec.
+func (s IOSequence) CopyIn(ctx context.Context, dst []byte) (int, error) {
+ return CopyInVec(ctx, s.IO, s.Addrs, dst, s.Opts)
+}
+
+// ZeroOut invokes ZeroOutVec over s.Addrs.
+//
+// As with ZeroOutVec, if s.NumBytes() < toZero, the write will be truncated
+// to s.NumBytes(), and a nil error will be returned.
+//
+// Preconditions: As for ZeroOutVec.
+func (s IOSequence) ZeroOut(ctx context.Context, toZero int64) (int64, error) {
+ return ZeroOutVec(ctx, s.IO, s.Addrs, toZero, s.Opts)
+}
+
+// CopyOutFrom invokes s.IO.CopyOutFrom over s.Addrs.
+//
+// Preconditions: As for IO.CopyOutFrom.
+func (s IOSequence) CopyOutFrom(ctx context.Context, src safemem.Reader) (int64, error) {
+ return s.IO.CopyOutFrom(ctx, s.Addrs, src, s.Opts)
+}
+
+// CopyInTo invokes s.IO.CopyInTo over s.Addrs.
+//
+// Preconditions: As for IO.CopyInTo.
+func (s IOSequence) CopyInTo(ctx context.Context, dst safemem.Writer) (int64, error) {
+ return s.IO.CopyInTo(ctx, s.Addrs, dst, s.Opts)
+}
+
+// Reader returns an io.Reader that reads from s. Reads beyond the end of s
+// return io.EOF. The preconditions that apply to s.CopyIn also apply to the
+// returned io.Reader.Read.
+func (s IOSequence) Reader(ctx context.Context) io.Reader {
+ return &ioSequenceReadWriter{ctx, s}
+}
+
+// Writer returns an io.Writer that writes to s. Writes beyond the end of s
+// return ErrEndOfIOSequence. The preconditions that apply to s.CopyOut also
+// apply to the returned io.Writer.Write.
+func (s IOSequence) Writer(ctx context.Context) io.Writer {
+ return &ioSequenceReadWriter{ctx, s}
+}
+
+// ErrEndOfIOSequence is returned by IOSequence.Writer().Write() when
+// attempting to write beyond the end of the IOSequence.
+var ErrEndOfIOSequence = errors.New("write beyond end of IOSequence")
+
+type ioSequenceReadWriter struct {
+ ctx context.Context
+ s IOSequence
+}
+
+// Read implements io.Reader.Read.
+func (rw *ioSequenceReadWriter) Read(dst []byte) (int, error) {
+ n, err := rw.s.CopyIn(rw.ctx, dst)
+ rw.s = rw.s.DropFirst(n)
+ if err == nil && rw.s.NumBytes() == 0 {
+ err = io.EOF
+ }
+ return n, err
+}
+
+// Write implements io.Writer.Write.
+func (rw *ioSequenceReadWriter) Write(src []byte) (int, error) {
+ n, err := rw.s.CopyOut(rw.ctx, src)
+ rw.s = rw.s.DropFirst(n)
+ if err == nil && n < len(src) {
+ err = ErrEndOfIOSequence
+ }
+ return n, err
+}
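
To tie the pieces together, a sketch of CopyStringIn against a BytesIO address space, showing both the NUL-terminated and the truncated (ENAMETOOLONG) cases:

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/usermem"
)

func main() {
	ctx := context.Background()
	mem := &usermem.BytesIO{Bytes: []byte("hello\x00world")}

	// The copy stops at the terminating NUL.
	s, err := usermem.CopyStringIn(ctx, mem, 0, 64, usermem.IOOpts{})
	fmt.Printf("%q %v\n", s, err) // "hello" <nil>

	// With no NUL within maxlen bytes, the truncated string is returned
	// along with ENAMETOOLONG.
	s, err = usermem.CopyStringIn(ctx, mem, 6, 3, usermem.IOOpts{})
	fmt.Printf("%q %v\n", s, err) // "wor" ENAMETOOLONG
}
```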
diff --git a/pkg/usermem/usermem_arm64.go b/pkg/usermem/usermem_arm64.go
new file mode 100644
index 000000000..fdfc30a66
--- /dev/null
+++ b/pkg/usermem/usermem_arm64.go
@@ -0,0 +1,53 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build arm64
+
+package usermem
+
+import (
+ "encoding/binary"
+ "syscall"
+)
+
+const (
+ // PageSize is the system page size.
+	// arm64 supports 4K/16K/64K page sizes, which can be queried via
+	// syscall.Getpagesize(). Currently, only the 4K page size is supported.
+ PageSize = 1 << PageShift
+
+ // HugePageSize is the system huge page size.
+ HugePageSize = 1 << HugePageShift
+
+ // PageShift is the binary log of the system page size.
+ PageShift = 12
+
+ // HugePageShift is the binary log of the system huge page size.
+	// It should be calculated as PageShift + (PageShift - 3) once
+	// multiple page size support is ready.
+ HugePageShift = 21
+)
+
+var (
+ // ByteOrder is the native byte order (little endian).
+ ByteOrder = binary.LittleEndian
+)
+
+func init() {
+ // Make sure the page size is 4K on arm64 platform.
+ if size := syscall.Getpagesize(); size != PageSize {
+ panic("Only 4K page size is supported on arm64!")
+ }
+}
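
The relationship noted in the HugePageShift comment can be sanity-checked directly (a sketch; 4K pages give 2MB huge pages):

```go
package main

import "fmt"

func main() {
	const pageShift = 12 // 4K pages
	hugePageShift := pageShift + (pageShift - 3)
	fmt.Println(hugePageShift)                  // 21
	fmt.Println(1<<pageShift, 1<<hugePageShift) // 4096 2097152 (4K, 2MB)
}
```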
diff --git a/pkg/usermem/usermem_test.go b/pkg/usermem/usermem_test.go
new file mode 100644
index 000000000..bf3c5df2b
--- /dev/null
+++ b/pkg/usermem/usermem_test.go
@@ -0,0 +1,424 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package usermem
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/safemem"
+ "gvisor.dev/gvisor/pkg/syserror"
+)
+
+// newContext returns a context.Context that we can use in these tests (we
+// can't use contexttest because it depends on usermem).
+func newContext() context.Context {
+ return context.Background()
+}
+
+func newBytesIOString(s string) *BytesIO {
+ return &BytesIO{[]byte(s)}
+}
+
+func TestBytesIOCopyOutSuccess(t *testing.T) {
+ b := newBytesIOString("ABCDE")
+ n, err := b.CopyOut(newContext(), 1, []byte("foo"), IOOpts{})
+ if wantN := 3; n != wantN || err != nil {
+ t.Errorf("CopyOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ if got, want := b.Bytes, []byte("AfooE"); !bytes.Equal(got, want) {
+ t.Errorf("Bytes: got %q, wanted %q", got, want)
+ }
+}
+
+func TestBytesIOCopyOutFailure(t *testing.T) {
+ b := newBytesIOString("ABC")
+ n, err := b.CopyOut(newContext(), 1, []byte("foo"), IOOpts{})
+ if wantN, wantErr := 2, syserror.EFAULT; n != wantN || err != wantErr {
+ t.Errorf("CopyOut: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
+ }
+ if got, want := b.Bytes, []byte("Afo"); !bytes.Equal(got, want) {
+ t.Errorf("Bytes: got %q, wanted %q", got, want)
+ }
+}
+
+func TestBytesIOCopyInSuccess(t *testing.T) {
+ b := newBytesIOString("AfooE")
+ var dst [3]byte
+ n, err := b.CopyIn(newContext(), 1, dst[:], IOOpts{})
+ if wantN := 3; n != wantN || err != nil {
+ t.Errorf("CopyIn: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ if got, want := dst[:], []byte("foo"); !bytes.Equal(got, want) {
+ t.Errorf("dst: got %q, wanted %q", got, want)
+ }
+}
+
+func TestBytesIOCopyInFailure(t *testing.T) {
+ b := newBytesIOString("Afo")
+ var dst [3]byte
+ n, err := b.CopyIn(newContext(), 1, dst[:], IOOpts{})
+ if wantN, wantErr := 2, syserror.EFAULT; n != wantN || err != wantErr {
+ t.Errorf("CopyIn: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
+ }
+ if got, want := dst[:], []byte("fo\x00"); !bytes.Equal(got, want) {
+ t.Errorf("dst: got %q, wanted %q", got, want)
+ }
+}
+
+func TestBytesIOZeroOutSuccess(t *testing.T) {
+ b := newBytesIOString("ABCD")
+ n, err := b.ZeroOut(newContext(), 1, 2, IOOpts{})
+ if wantN := int64(2); n != wantN || err != nil {
+ t.Errorf("ZeroOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ if got, want := b.Bytes, []byte("A\x00\x00D"); !bytes.Equal(got, want) {
+ t.Errorf("Bytes: got %q, wanted %q", got, want)
+ }
+}
+
+func TestBytesIOZeroOutFailure(t *testing.T) {
+ b := newBytesIOString("ABC")
+ n, err := b.ZeroOut(newContext(), 1, 3, IOOpts{})
+ if wantN, wantErr := int64(2), syserror.EFAULT; n != wantN || err != wantErr {
+ t.Errorf("ZeroOut: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
+ }
+ if got, want := b.Bytes, []byte("A\x00\x00"); !bytes.Equal(got, want) {
+ t.Errorf("Bytes: got %q, wanted %q", got, want)
+ }
+}
+
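+// The address ranges below are deliberately out of address order: CopyOutFrom
+// fills them in sequence order, so "bar" lands at [4, 7) and "foo" at [1, 4).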
+func TestBytesIOCopyOutFromSuccess(t *testing.T) {
+ b := newBytesIOString("ABCDEFGH")
+ n, err := b.CopyOutFrom(newContext(), AddrRangeSeqFromSlice([]AddrRange{
+ {Start: 4, End: 7},
+ {Start: 1, End: 4},
+ }), safemem.FromIOReader{bytes.NewBufferString("barfoo")}, IOOpts{})
+ if wantN := int64(6); n != wantN || err != nil {
+ t.Errorf("CopyOutFrom: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ if got, want := b.Bytes, []byte("AfoobarH"); !bytes.Equal(got, want) {
+ t.Errorf("Bytes: got %q, wanted %q", got, want)
+ }
+}
+
+func TestBytesIOCopyOutFromFailure(t *testing.T) {
+ b := newBytesIOString("ABCDE")
+ n, err := b.CopyOutFrom(newContext(), AddrRangeSeqFromSlice([]AddrRange{
+ {Start: 1, End: 4},
+ {Start: 4, End: 7},
+ }), safemem.FromIOReader{bytes.NewBufferString("foobar")}, IOOpts{})
+ if wantN, wantErr := int64(4), syserror.EFAULT; n != wantN || err != wantErr {
+ t.Errorf("CopyOutFrom: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
+ }
+ if got, want := b.Bytes, []byte("Afoob"); !bytes.Equal(got, want) {
+ t.Errorf("Bytes: got %q, wanted %q", got, want)
+ }
+}
+
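+// CopyInTo gathers ranges in sequence order, so reading [4, 7) then [1, 4)
+// of "AfoobarH" is expected to yield "barfoo".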
+func TestBytesIOCopyInToSuccess(t *testing.T) {
+ b := newBytesIOString("AfoobarH")
+ var dst bytes.Buffer
+ n, err := b.CopyInTo(newContext(), AddrRangeSeqFromSlice([]AddrRange{
+ {Start: 4, End: 7},
+ {Start: 1, End: 4},
+ }), safemem.FromIOWriter{&dst}, IOOpts{})
+ if wantN := int64(6); n != wantN || err != nil {
+ t.Errorf("CopyInTo: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ if got, want := dst.Bytes(), []byte("barfoo"); !bytes.Equal(got, want) {
+ t.Errorf("dst.Bytes(): got %q, wanted %q", got, want)
+ }
+}
+
+func TestBytesIOCopyInToFailure(t *testing.T) {
+ b := newBytesIOString("Afoob")
+ var dst bytes.Buffer
+ n, err := b.CopyInTo(newContext(), AddrRangeSeqFromSlice([]AddrRange{
+ {Start: 1, End: 4},
+ {Start: 4, End: 7},
+ }), safemem.FromIOWriter{&dst}, IOOpts{})
+ if wantN, wantErr := int64(4), syserror.EFAULT; n != wantN || err != wantErr {
+ t.Errorf("CopyOutFrom: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
+ }
+ if got, want := dst.Bytes(), []byte("foob"); !bytes.Equal(got, want) {
+ t.Errorf("dst.Bytes(): got %q, wanted %q", got, want)
+ }
+}
+
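+// testStruct covers every fixed-size integer type handled by the binary
+// package.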
+type testStruct struct {
+ Int8 int8
+ Uint8 uint8
+ Int16 int16
+ Uint16 uint16
+ Int32 int32
+ Uint32 uint32
+ Int64 int64
+ Uint64 uint64
+}
+
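+// TestCopyObject round-trips a testStruct through CopyObjectOut and
+// CopyObjectIn and expects the result to be identical.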
+func TestCopyObject(t *testing.T) {
+ wantObj := testStruct{1, 2, 3, 4, 5, 6, 7, 8}
+ wantN := binary.Size(wantObj)
+ b := &BytesIO{make([]byte, wantN)}
+ ctx := newContext()
+ if n, err := CopyObjectOut(ctx, b, 0, &wantObj, IOOpts{}); n != wantN || err != nil {
+ t.Fatalf("CopyObjectOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ var gotObj testStruct
+ if n, err := CopyObjectIn(ctx, b, 0, &gotObj, IOOpts{}); n != wantN || err != nil {
+ t.Errorf("CopyObjectIn: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ if gotObj != wantObj {
+ t.Errorf("CopyObject round trip: got %+v, wanted %+v", gotObj, wantObj)
+ }
+}
+
+func TestCopyStringInShort(t *testing.T) {
+ // Tests for string length <= copyStringIncrement.
+ want := strings.Repeat("A", copyStringIncrement-2)
+ mem := want + "\x00"
+ if got, err := CopyStringIn(newContext(), newBytesIOString(mem), 0, 2*copyStringIncrement, IOOpts{}); got != want || err != nil {
+ t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, nil)", got, err, want)
+ }
+}
+
+func TestCopyStringInLong(t *testing.T) {
+ // Tests for copyStringIncrement < string length <= copyStringMaxInitBufLen
+ // (requiring multiple calls to IO.CopyIn()).
+ want := strings.Repeat("A", copyStringIncrement*3/4) + strings.Repeat("B", copyStringIncrement*3/4)
+ mem := want + "\x00"
+ if got, err := CopyStringIn(newContext(), newBytesIOString(mem), 0, 2*copyStringIncrement, IOOpts{}); got != want || err != nil {
+ t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, nil)", got, err, want)
+ }
+}
+
+func TestCopyStringInVeryLong(t *testing.T) {
+ // Tests for string length > copyStringMaxInitBufLen (requiring buffer
+ // reallocation).
+ want := strings.Repeat("A", copyStringMaxInitBufLen*3/4) + strings.Repeat("B", copyStringMaxInitBufLen*3/4)
+ mem := want + "\x00"
+ if got, err := CopyStringIn(newContext(), newBytesIOString(mem), 0, 2*copyStringMaxInitBufLen, IOOpts{}); got != want || err != nil {
+ t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, nil)", got, err, want)
+ }
+}
+
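+// A string with no NUL terminator before the end of accessible memory is
+// expected to fail with EFAULT, returning the bytes read so far.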
+func TestCopyStringInNoTerminatingZeroByte(t *testing.T) {
+ want := strings.Repeat("A", copyStringIncrement-1)
+ got, err := CopyStringIn(newContext(), newBytesIOString(want), 0, 2*copyStringIncrement, IOOpts{})
+ if wantErr := syserror.EFAULT; got != want || err != wantErr {
+ t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, %v)", got, err, want, wantErr)
+ }
+}
+
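+// Hitting maxlen before a NUL terminator is expected to yield ENAMETOOLONG
+// along with the truncated string.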
+func TestCopyStringInTruncatedByMaxlen(t *testing.T) {
+ got, err := CopyStringIn(newContext(), newBytesIOString(strings.Repeat("A", 10)), 0, 5, IOOpts{})
+ if want, wantErr := strings.Repeat("A", 5), syserror.ENAMETOOLONG; got != want || err != wantErr {
+ t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, %v)", got, err, want, wantErr)
+ }
+}
+
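+// TestCopyInt32StringsInVec exercises /proc-style parsing of
+// whitespace-separated int32 values into dsts.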
+func TestCopyInt32StringsInVec(t *testing.T) {
+ for _, test := range []struct {
+ str string
+ n int
+ initial []int32
+ final []int32
+ }{
+ {
+ str: "100 200",
+ n: len("100 200"),
+ initial: []int32{1, 2},
+ final: []int32{100, 200},
+ },
+ {
+ // Fewer values than dsts is OK; the rest of dsts is unchanged.
+ str: "100",
+ n: len("100"),
+ initial: []int32{1, 2},
+ final: []int32{100, 2},
+ },
+ {
+ // Extra values are OK; parsing stops once dsts is full.
+ str: "100 200 300",
+ n: len("100 200 "),
+ initial: []int32{1, 2},
+ final: []int32{100, 200},
+ },
+ {
+ // Leading and trailing whitespace is OK.
+ str: " 100\t200\n",
+ n: len(" 100\t200\n"),
+ initial: []int32{1, 2},
+ final: []int32{100, 200},
+ },
+ } {
+ t.Run(fmt.Sprintf("%q", test.str), func(t *testing.T) {
+ src := BytesIOSequence([]byte(test.str))
+ dsts := append([]int32(nil), test.initial...)
+ if n, err := CopyInt32StringsInVec(newContext(), src.IO, src.Addrs, dsts, src.Opts); n != int64(test.n) || err != nil {
+ t.Errorf("CopyInt32StringsInVec: got (%d, %v), wanted (%d, nil)", n, err, test.n)
+ }
+ if !reflect.DeepEqual(dsts, test.final) {
+ t.Errorf("dsts: got %v, wanted %v", dsts, test.final)
+ }
+ })
+ }
+}
+
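+// Input with no parseable leading integer is expected to fail with EINVAL
+// and leave dsts untouched.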
+func TestCopyInt32StringsInVecRequiresOneValidValue(t *testing.T) {
+ for _, s := range []string{"", "\n", "a123"} {
+ t.Run(fmt.Sprintf("%q", s), func(t *testing.T) {
+ src := BytesIOSequence([]byte(s))
+ initial := []int32{1, 2}
+ dsts := append([]int32(nil), initial...)
+ if n, err := CopyInt32StringsInVec(newContext(), src.IO, src.Addrs, dsts, src.Opts); err != syserror.EINVAL {
+ t.Errorf("CopyInt32StringsInVec: got (%d, %v), wanted (_, %v)", n, err, syserror.EINVAL)
+ }
+ if !reflect.DeepEqual(dsts, initial) {
+ t.Errorf("dsts: got %v, wanted %v", dsts, initial)
+ }
+ })
+ }
+}
+
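+// The IOSequence tests below follow the usual consumption pattern: after each
+// copy, DropFirst consumes the copied bytes so NumBytes tracks what remains.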
+func TestIOSequenceCopyOut(t *testing.T) {
+ buf := []byte("ABCD")
+ s := BytesIOSequence(buf)
+
+ // CopyOut limited by len(src).
+ n, err := s.CopyOut(newContext(), []byte("fo"))
+ if wantN := 2; n != wantN || err != nil {
+ t.Errorf("CopyOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ if want := []byte("foCD"); !bytes.Equal(buf, want) {
+ t.Errorf("buf: got %q, wanted %q", buf, want)
+ }
+ s = s.DropFirst(2)
+ if got, want := s.NumBytes(), int64(2); got != want {
+ t.Errorf("NumBytes: got %v, wanted %v", got, want)
+ }
+
+ // CopyOut limited by s.NumBytes().
+ n, err = s.CopyOut(newContext(), []byte("obar"))
+ if wantN := 2; n != wantN || err != nil {
+ t.Errorf("CopyOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ if want := []byte("foob"); !bytes.Equal(buf, want) {
+ t.Errorf("buf: got %q, wanted %q", buf, want)
+ }
+ s = s.DropFirst(2)
+ if got, want := s.NumBytes(), int64(0); got != want {
+ t.Errorf("NumBytes: got %v, wanted %v", got, want)
+ }
+}
+
+func TestIOSequenceCopyIn(t *testing.T) {
+ s := BytesIOSequence([]byte("foob"))
+ dst := []byte("ABCDEF")
+
+ // CopyIn limited by len(dst).
+ n, err := s.CopyIn(newContext(), dst[:2])
+ if wantN := 2; n != wantN || err != nil {
+ t.Errorf("CopyIn: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ if want := []byte("foCDEF"); !bytes.Equal(dst, want) {
+ t.Errorf("dst: got %q, wanted %q", dst, want)
+ }
+ s = s.DropFirst(2)
+ if got, want := s.NumBytes(), int64(2); got != want {
+ t.Errorf("NumBytes: got %v, wanted %v", got, want)
+ }
+
+ // CopyIn limited by s.NumBytes().
+ n, err = s.CopyIn(newContext(), dst[2:])
+ if wantN := 2; n != wantN || err != nil {
+ t.Errorf("CopyIn: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ if want := []byte("foobEF"); !bytes.Equal(dst, want) {
+ t.Errorf("dst: got %q, wanted %q", dst, want)
+ }
+ s = s.DropFirst(2)
+ if got, want := s.NumBytes(), int64(0); got != want {
+ t.Errorf("NumBytes: got %v, wanted %v", got, want)
+ }
+}
+
+func TestIOSequenceZeroOut(t *testing.T) {
+ buf := []byte("ABCD")
+ s := BytesIOSequence(buf)
+
+ // ZeroOut limited by toZero.
+ n, err := s.ZeroOut(newContext(), 2)
+ if wantN := int64(2); n != wantN || err != nil {
+ t.Errorf("ZeroOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ if want := []byte("\x00\x00CD"); !bytes.Equal(buf, want) {
+ t.Errorf("buf: got %q, wanted %q", buf, want)
+ }
+ s = s.DropFirst(2)
+ if got, want := s.NumBytes(), int64(2); got != want {
+ t.Errorf("NumBytes: got %v, wanted %v", got, want)
+ }
+
+ // ZeroOut limited by s.NumBytes().
+ n, err = s.ZeroOut(newContext(), 4)
+ if wantN := int64(2); n != wantN || err != nil {
+ t.Errorf("CopyOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ if want := []byte("\x00\x00\x00\x00"); !bytes.Equal(buf, want) {
+ t.Errorf("buf: got %q, wanted %q", buf, want)
+ }
+ s = s.DropFirst(2)
+ if got, want := s.NumBytes(), int64(0); got != want {
+ t.Errorf("NumBytes: got %v, wanted %v", got, want)
+ }
+}
+
+func TestIOSequenceTakeFirst(t *testing.T) {
+ s := BytesIOSequence([]byte("foobar"))
+ if got, want := s.NumBytes(), int64(6); got != want {
+ t.Errorf("NumBytes: got %v, wanted %v", got, want)
+ }
+
+ s = s.TakeFirst(3)
+ if got, want := s.NumBytes(), int64(3); got != want {
+ t.Errorf("NumBytes: got %v, wanted %v", got, want)
+ }
+
+ // TakeFirst(n) where n > s.NumBytes() is a no-op.
+ s = s.TakeFirst(9)
+ if got, want := s.NumBytes(), int64(3); got != want {
+ t.Errorf("NumBytes: got %v, wanted %v", got, want)
+ }
+
+ var dst [3]byte
+ n, err := s.CopyIn(newContext(), dst[:])
+ if wantN := 3; n != wantN || err != nil {
+ t.Errorf("CopyIn: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+ }
+ if got, want := dst[:], []byte("foo"); !bytes.Equal(got, want) {
+ t.Errorf("dst: got %q, wanted %q", got, want)
+ }
+ s = s.DropFirst(3)
+ if got, want := s.NumBytes(), int64(0); got != want {
+ t.Errorf("NumBytes: got %v, wanted %v", got, want)
+ }
+}
diff --git a/pkg/usermem/usermem_x86.go b/pkg/usermem/usermem_x86.go
new file mode 100644
index 000000000..d96f829fb
--- /dev/null
+++ b/pkg/usermem/usermem_x86.go
@@ -0,0 +1,38 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build amd64 386
+
+package usermem
+
+import "encoding/binary"
+
+const (
+ // PageSize is the system page size (4 KB).
+ PageSize = 1 << PageShift
+
+ // HugePageSize is the system huge page size (2 MB).
+ HugePageSize = 1 << HugePageShift
+
+ // PageShift is the binary log of the system page size.
+ PageShift = 12
+
+ // HugePageShift is the binary log of the system huge page size.
+ HugePageShift = 21
+)
+
+var (
+ // ByteOrder is the native byte order (little endian).
+ ByteOrder = binary.LittleEndian
+)