path: root/tools/go_marshal
author:    Zach Koopmans <zkoopmans@google.com>  2021-03-29 13:28:32 -0700
committer: gVisor bot <gvisor-bot@google.com>  2021-03-29 13:30:21 -0700
commit:    8a2f7e716dcc62f04d2808e8ade34941c94fc956 (patch)
tree:      b2195d5728dcbc4f4e59c23ad95d7486ef744371 /tools/go_marshal
parent:    b125afba416ebeba906ea595a44a55afe4729d64 (diff)
[syserror] Split usermem package
Split the usermem package to help remove the syserror dependency from go_marshal. The new hostarch package contains the code that does not depend on syserror.

PiperOrigin-RevId: 365651233
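The change is mechanical: every usermem.ByteOrder and usermem.Addr reference in the generator, its generated code, and its tests becomes hostarch.ByteOrder and hostarch.Addr, with no behavioral change. A minimal before/after sketch (the encodeLen helper is illustrative, not part of this commit):

package example

import (
	"gvisor.dev/gvisor/pkg/hostarch" // was: "gvisor.dev/gvisor/pkg/usermem"
)

// encodeLen writes a 32-bit length prefix in host byte order. Only the
// import path and package qualifier change; the identifiers are the same.
func encodeLen(n uint32) []byte {
	buf := make([]byte, 4)
	hostarch.ByteOrder.PutUint32(buf, n) // was: usermem.ByteOrder.PutUint32
	return buf
}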
Diffstat (limited to 'tools/go_marshal')
-rw-r--r--  tools/go_marshal/defs.bzl                                            |  2
-rw-r--r--  tools/go_marshal/gomarshal/generator.go                              |  2
-rw-r--r--  tools/go_marshal/gomarshal/generator_interfaces.go                   | 24
-rw-r--r--  tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go     |  8
-rw-r--r--  tools/go_marshal/gomarshal/generator_interfaces_dynamic.go           | 12
-rw-r--r--  tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go | 38
-rw-r--r--  tools/go_marshal/gomarshal/generator_interfaces_struct.go            | 18
-rw-r--r--  tools/go_marshal/gomarshal/generator_tests.go                        |  6
-rw-r--r--  tools/go_marshal/test/BUILD                                          |  3
-rw-r--r--  tools/go_marshal/test/benchmark_test.go                              | 80
-rw-r--r--  tools/go_marshal/test/escape/BUILD                                   |  2
-rw-r--r--  tools/go_marshal/test/escape/escape.go                               | 22
-rw-r--r--  tools/go_marshal/test/marshal_test.go                                | 57
13 files changed, 138 insertions(+), 136 deletions(-)
diff --git a/tools/go_marshal/defs.bzl b/tools/go_marshal/defs.bzl
index f44f83eab..e23901815 100644
--- a/tools/go_marshal/defs.bzl
+++ b/tools/go_marshal/defs.bzl
@@ -58,7 +58,7 @@ go_marshal = rule(
marshal_deps = [
"//pkg/gohacks",
"//pkg/safecopy",
- "//pkg/usermem",
+ "//pkg/hostarch",
"//pkg/marshal",
]
diff --git a/tools/go_marshal/gomarshal/generator.go b/tools/go_marshal/gomarshal/generator.go
index 39394d2a7..0e2d752cb 100644
--- a/tools/go_marshal/gomarshal/generator.go
+++ b/tools/go_marshal/gomarshal/generator.go
@@ -113,7 +113,7 @@ func NewGenerator(srcs []string, out, outTest, outTestUnconditional, pkg string,
g.imports.add("unsafe")
g.imports.add("gvisor.dev/gvisor/pkg/gohacks")
g.imports.add("gvisor.dev/gvisor/pkg/safecopy")
- g.imports.add("gvisor.dev/gvisor/pkg/usermem")
+ g.imports.add("gvisor.dev/gvisor/pkg/hostarch")
g.imports.add("gvisor.dev/gvisor/pkg/marshal")
return &g, nil
diff --git a/tools/go_marshal/gomarshal/generator_interfaces.go b/tools/go_marshal/gomarshal/generator_interfaces.go
index 65f5ea34d..3e643e77f 100644
--- a/tools/go_marshal/gomarshal/generator_interfaces.go
+++ b/tools/go_marshal/gomarshal/generator_interfaces.go
@@ -120,16 +120,16 @@ func (g *interfaceGenerator) marshalScalar(accessor, typ, bufVar string) {
g.emit("%s[0] = byte(%s)\n", bufVar, accessor)
g.shift(bufVar, 1)
case "int16", "uint16":
- g.recordUsedImport("usermem")
- g.emit("usermem.ByteOrder.PutUint16(%s[:2], uint16(%s))\n", bufVar, accessor)
+ g.recordUsedImport("hostarch")
+ g.emit("hostarch.ByteOrder.PutUint16(%s[:2], uint16(%s))\n", bufVar, accessor)
g.shift(bufVar, 2)
case "int32", "uint32":
- g.recordUsedImport("usermem")
- g.emit("usermem.ByteOrder.PutUint32(%s[:4], uint32(%s))\n", bufVar, accessor)
+ g.recordUsedImport("hostarch")
+ g.emit("hostarch.ByteOrder.PutUint32(%s[:4], uint32(%s))\n", bufVar, accessor)
g.shift(bufVar, 4)
case "int64", "uint64":
- g.recordUsedImport("usermem")
- g.emit("usermem.ByteOrder.PutUint64(%s[:8], uint64(%s))\n", bufVar, accessor)
+ g.recordUsedImport("hostarch")
+ g.emit("hostarch.ByteOrder.PutUint64(%s[:8], uint64(%s))\n", bufVar, accessor)
g.shift(bufVar, 8)
default:
g.emit("%s.MarshalBytes(%s[:%s.SizeBytes()])\n", accessor, bufVar, accessor)
@@ -147,16 +147,16 @@ func (g *interfaceGenerator) unmarshalScalar(accessor, typ, bufVar string) {
g.emit("%s = %s(%s[0])\n", accessor, typ, bufVar)
g.shift(bufVar, 1)
case "int16", "uint16":
- g.recordUsedImport("usermem")
- g.emit("%s = %s(usermem.ByteOrder.Uint16(%s[:2]))\n", accessor, typ, bufVar)
+ g.recordUsedImport("hostarch")
+ g.emit("%s = %s(hostarch.ByteOrder.Uint16(%s[:2]))\n", accessor, typ, bufVar)
g.shift(bufVar, 2)
case "int32", "uint32":
- g.recordUsedImport("usermem")
- g.emit("%s = %s(usermem.ByteOrder.Uint32(%s[:4]))\n", accessor, typ, bufVar)
+ g.recordUsedImport("hostarch")
+ g.emit("%s = %s(hostarch.ByteOrder.Uint32(%s[:4]))\n", accessor, typ, bufVar)
g.shift(bufVar, 4)
case "int64", "uint64":
- g.recordUsedImport("usermem")
- g.emit("%s = %s(usermem.ByteOrder.Uint64(%s[:8]))\n", accessor, typ, bufVar)
+ g.recordUsedImport("hostarch")
+ g.emit("%s = %s(hostarch.ByteOrder.Uint64(%s[:8]))\n", accessor, typ, bufVar)
g.shift(bufVar, 8)
default:
g.emit("%s.UnmarshalBytes(%s[:%s.SizeBytes()])\n", accessor, bufVar, accessor)
diff --git a/tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go b/tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go
index 7525b52da..32afece2e 100644
--- a/tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go
+++ b/tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go
@@ -39,7 +39,7 @@ func (g *interfaceGenerator) emitMarshallableForArrayNewtype(n *ast.Ident, a *as
g.recordUsedImport("runtime")
g.recordUsedImport("safecopy")
g.recordUsedImport("unsafe")
- g.recordUsedImport("usermem")
+ g.recordUsedImport("hostarch")
lenExpr := g.arrayLenExpr(a)
@@ -102,7 +102,7 @@ func (g *interfaceGenerator) emitMarshallableForArrayNewtype(n *ast.Ident, a *as
g.emit("// CopyOutN implements marshal.Marshallable.CopyOutN.\n")
g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {\n", g.r, g.typeName())
+ g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\n", g.r, g.typeName())
g.inIndent(func() {
g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
@@ -114,7 +114,7 @@ func (g *interfaceGenerator) emitMarshallableForArrayNewtype(n *ast.Ident, a *as
g.emit("// CopyOut implements marshal.Marshallable.CopyOut.\n")
g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {\n", g.r, g.typeName())
+ g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
g.inIndent(func() {
g.emit("return %s.CopyOutN(cc, addr, %s.SizeBytes())\n", g.r, g.r)
})
@@ -122,7 +122,7 @@ func (g *interfaceGenerator) emitMarshallableForArrayNewtype(n *ast.Ident, a *as
g.emit("// CopyIn implements marshal.Marshallable.CopyIn.\n")
g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {\n", g.r, g.typeName())
+ g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
g.inIndent(func() {
g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
diff --git a/tools/go_marshal/gomarshal/generator_interfaces_dynamic.go b/tools/go_marshal/gomarshal/generator_interfaces_dynamic.go
index b1a8622cd..345020ddc 100644
--- a/tools/go_marshal/gomarshal/generator_interfaces_dynamic.go
+++ b/tools/go_marshal/gomarshal/generator_interfaces_dynamic.go
@@ -46,8 +46,8 @@ func (g *interfaceGenerator) emitMarshallableForDynamicType() {
g.emit("// CopyOutN implements marshal.Marshallable.CopyOutN.\n")
g.emit("//go:nosplit\n")
g.recordUsedImport("marshal")
- g.recordUsedImport("usermem")
- g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {\n", g.r, g.typeName())
+ g.recordUsedImport("hostarch")
+ g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\n", g.r, g.typeName())
g.inIndent(func() {
g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
g.emit("buf := cc.CopyScratchBuffer(%s.SizeBytes()) // escapes: okay.\n", g.r)
@@ -59,8 +59,8 @@ func (g *interfaceGenerator) emitMarshallableForDynamicType() {
g.emit("// CopyOut implements marshal.Marshallable.CopyOut.\n")
g.emit("//go:nosplit\n")
g.recordUsedImport("marshal")
- g.recordUsedImport("usermem")
- g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {\n", g.r, g.typeName())
+ g.recordUsedImport("hostarch")
+ g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
g.inIndent(func() {
g.emit("return %s.CopyOutN(cc, addr, %s.SizeBytes())\n", g.r, g.r)
})
@@ -69,8 +69,8 @@ func (g *interfaceGenerator) emitMarshallableForDynamicType() {
g.emit("// CopyIn implements marshal.Marshallable.CopyIn.\n")
g.emit("//go:nosplit\n")
g.recordUsedImport("marshal")
- g.recordUsedImport("usermem")
- g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {\n", g.r, g.typeName())
+ g.recordUsedImport("hostarch")
+ g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
g.inIndent(func() {
g.emit("// Type %s doesn't have a packed layout in memory, fall back to UnmarshalBytes.\n", g.typeName())
g.emit("buf := cc.CopyScratchBuffer(%s.SizeBytes()) // escapes: okay.\n", g.r)
diff --git a/tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go b/tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go
index 7edaf666c..05f0e0db4 100644
--- a/tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go
+++ b/tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go
@@ -29,14 +29,14 @@ func (g *interfaceGenerator) marshalPrimitiveScalar(accessor, typ, bufVar string
case "int8", "uint8", "byte":
g.emit("%s[0] = byte(*%s)\n", bufVar, accessor)
case "int16", "uint16":
- g.recordUsedImport("usermem")
- g.emit("usermem.ByteOrder.PutUint16(%s[:2], uint16(*%s))\n", bufVar, accessor)
+ g.recordUsedImport("hostarch")
+ g.emit("hostarch.ByteOrder.PutUint16(%s[:2], uint16(*%s))\n", bufVar, accessor)
case "int32", "uint32":
- g.recordUsedImport("usermem")
- g.emit("usermem.ByteOrder.PutUint32(%s[:4], uint32(*%s))\n", bufVar, accessor)
+ g.recordUsedImport("hostarch")
+ g.emit("hostarch.ByteOrder.PutUint32(%s[:4], uint32(*%s))\n", bufVar, accessor)
case "int64", "uint64":
- g.recordUsedImport("usermem")
- g.emit("usermem.ByteOrder.PutUint64(%s[:8], uint64(*%s))\n", bufVar, accessor)
+ g.recordUsedImport("hostarch")
+ g.emit("hostarch.ByteOrder.PutUint64(%s[:8], uint64(*%s))\n", bufVar, accessor)
default:
g.emit("// Explicilty cast to the underlying type before dispatching to\n")
g.emit("// MarshalBytes, so we don't recursively call %s.MarshalBytes\n", accessor)
@@ -53,14 +53,14 @@ func (g *interfaceGenerator) unmarshalPrimitiveScalar(accessor, typ, bufVar, typ
case "int8", "uint8":
g.emit("*%s = %s(%s(%s[0]))\n", accessor, typeCast, typ, bufVar)
case "int16", "uint16":
- g.recordUsedImport("usermem")
- g.emit("*%s = %s(%s(usermem.ByteOrder.Uint16(%s[:2])))\n", accessor, typeCast, typ, bufVar)
+ g.recordUsedImport("hostarch")
+ g.emit("*%s = %s(%s(hostarch.ByteOrder.Uint16(%s[:2])))\n", accessor, typeCast, typ, bufVar)
case "int32", "uint32":
- g.recordUsedImport("usermem")
- g.emit("*%s = %s(%s(usermem.ByteOrder.Uint32(%s[:4])))\n", accessor, typeCast, typ, bufVar)
+ g.recordUsedImport("hostarch")
+ g.emit("*%s = %s(%s(hostarch.ByteOrder.Uint32(%s[:4])))\n", accessor, typeCast, typ, bufVar)
case "int64", "uint64":
- g.recordUsedImport("usermem")
- g.emit("*%s = %s(%s(usermem.ByteOrder.Uint64(%s[:8])))\n", accessor, typeCast, typ, bufVar)
+ g.recordUsedImport("hostarch")
+ g.emit("*%s = %s(%s(hostarch.ByteOrder.Uint64(%s[:8])))\n", accessor, typeCast, typ, bufVar)
default:
g.emit("// Explicilty cast to the underlying type before dispatching to\n")
g.emit("// UnmarshalBytes, so we don't recursively call %s.UnmarshalBytes\n", accessor)
@@ -101,7 +101,7 @@ func (g *interfaceGenerator) emitMarshallableForPrimitiveNewtype(nt *ast.Ident)
g.recordUsedImport("runtime")
g.recordUsedImport("safecopy")
g.recordUsedImport("unsafe")
- g.recordUsedImport("usermem")
+ g.recordUsedImport("hostarch")
g.emit("// SizeBytes implements marshal.Marshallable.SizeBytes.\n")
g.emit("//go:nosplit\n")
@@ -154,7 +154,7 @@ func (g *interfaceGenerator) emitMarshallableForPrimitiveNewtype(nt *ast.Ident)
g.emit("// CopyOutN implements marshal.Marshallable.CopyOutN.\n")
g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {\n", g.r, g.typeName())
+ g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\n", g.r, g.typeName())
g.inIndent(func() {
g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
@@ -166,7 +166,7 @@ func (g *interfaceGenerator) emitMarshallableForPrimitiveNewtype(nt *ast.Ident)
g.emit("// CopyOut implements marshal.Marshallable.CopyOut.\n")
g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {\n", g.r, g.typeName())
+ g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
g.inIndent(func() {
g.emit("return %s.CopyOutN(cc, addr, %s.SizeBytes())\n", g.r, g.r)
})
@@ -174,7 +174,7 @@ func (g *interfaceGenerator) emitMarshallableForPrimitiveNewtype(nt *ast.Ident)
g.emit("// CopyIn implements marshal.Marshallable.CopyIn.\n")
g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {\n", g.r, g.typeName())
+ g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
g.inIndent(func() {
g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
@@ -199,7 +199,7 @@ func (g *interfaceGenerator) emitMarshallableForPrimitiveNewtype(nt *ast.Ident)
func (g *interfaceGenerator) emitMarshallableSliceForPrimitiveNewtype(nt *ast.Ident, slice *sliceAPI) {
g.recordUsedImport("marshal")
- g.recordUsedImport("usermem")
+ g.recordUsedImport("hostarch")
g.recordUsedImport("reflect")
g.recordUsedImport("runtime")
g.recordUsedImport("unsafe")
@@ -211,7 +211,7 @@ func (g *interfaceGenerator) emitMarshallableSliceForPrimitiveNewtype(nt *ast.Id
g.emit("// Copy%sIn copies in a slice of %s objects from the task's memory.\n", slice.ident, eltType)
g.emit("//go:nosplit\n")
- g.emit("func Copy%sIn(cc marshal.CopyContext, addr usermem.Addr, dst []%s) (int, error) {\n", slice.ident, eltType)
+ g.emit("func Copy%sIn(cc marshal.CopyContext, addr hostarch.Addr, dst []%s) (int, error) {\n", slice.ident, eltType)
g.inIndent(func() {
g.emit("count := len(dst)\n")
g.emit("if count == 0 {\n")
@@ -231,7 +231,7 @@ func (g *interfaceGenerator) emitMarshallableSliceForPrimitiveNewtype(nt *ast.Id
g.emit("// Copy%sOut copies a slice of %s objects to the task's memory.\n", slice.ident, eltType)
g.emit("//go:nosplit\n")
- g.emit("func Copy%sOut(cc marshal.CopyContext, addr usermem.Addr, src []%s) (int, error) {\n", slice.ident, eltType)
+ g.emit("func Copy%sOut(cc marshal.CopyContext, addr hostarch.Addr, src []%s) (int, error) {\n", slice.ident, eltType)
g.inIndent(func() {
g.emit("count := len(src)\n")
g.emit("if count == 0 {\n")
diff --git a/tools/go_marshal/gomarshal/generator_interfaces_struct.go b/tools/go_marshal/gomarshal/generator_interfaces_struct.go
index 5f6306b8f..72df1ab64 100644
--- a/tools/go_marshal/gomarshal/generator_interfaces_struct.go
+++ b/tools/go_marshal/gomarshal/generator_interfaces_struct.go
@@ -319,8 +319,8 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {
g.emit("// CopyOutN implements marshal.Marshallable.CopyOutN.\n")
g.emit("//go:nosplit\n")
g.recordUsedImport("marshal")
- g.recordUsedImport("usermem")
- g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {\n", g.r, g.typeName())
+ g.recordUsedImport("hostarch")
+ g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\n", g.r, g.typeName())
g.inIndent(func() {
fallback := func() {
g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
@@ -352,8 +352,8 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {
g.emit("// CopyOut implements marshal.Marshallable.CopyOut.\n")
g.emit("//go:nosplit\n")
g.recordUsedImport("marshal")
- g.recordUsedImport("usermem")
- g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {\n", g.r, g.typeName())
+ g.recordUsedImport("hostarch")
+ g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
g.inIndent(func() {
g.emit("return %s.CopyOutN(cc, addr, %s.SizeBytes())\n", g.r, g.r)
})
@@ -362,8 +362,8 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {
g.emit("// CopyIn implements marshal.Marshallable.CopyIn.\n")
g.emit("//go:nosplit\n")
g.recordUsedImport("marshal")
- g.recordUsedImport("usermem")
- g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {\n", g.r, g.typeName())
+ g.recordUsedImport("hostarch")
+ g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
g.inIndent(func() {
fallback := func() {
g.emit("// Type %s doesn't have a packed layout in memory, fall back to UnmarshalBytes.\n", g.typeName())
@@ -436,10 +436,10 @@ func (g *interfaceGenerator) emitMarshallableSliceForStruct(st *ast.StructType,
}
g.recordUsedImport("marshal")
- g.recordUsedImport("usermem")
+ g.recordUsedImport("hostarch")
g.emit("// Copy%sIn copies in a slice of %s objects from the task's memory.\n", slice.ident, g.typeName())
- g.emit("func Copy%sIn(cc marshal.CopyContext, addr usermem.Addr, dst []%s) (int, error) {\n", slice.ident, g.typeName())
+ g.emit("func Copy%sIn(cc marshal.CopyContext, addr hostarch.Addr, dst []%s) (int, error) {\n", slice.ident, g.typeName())
g.inIndent(func() {
g.emit("count := len(dst)\n")
g.emit("if count == 0 {\n")
@@ -496,7 +496,7 @@ func (g *interfaceGenerator) emitMarshallableSliceForStruct(st *ast.StructType,
g.emit("}\n\n")
g.emit("// Copy%sOut copies a slice of %s objects to the task's memory.\n", slice.ident, g.typeName())
- g.emit("func Copy%sOut(cc marshal.CopyContext, addr usermem.Addr, src []%s) (int, error) {\n", slice.ident, g.typeName())
+ g.emit("func Copy%sOut(cc marshal.CopyContext, addr hostarch.Addr, src []%s) (int, error) {\n", slice.ident, g.typeName())
g.inIndent(func() {
g.emit("count := len(src)\n")
g.emit("if count == 0 {\n")
diff --git a/tools/go_marshal/gomarshal/generator_tests.go b/tools/go_marshal/gomarshal/generator_tests.go
index 6cf00843f..8f93a1de5 100644
--- a/tools/go_marshal/gomarshal/generator_tests.go
+++ b/tools/go_marshal/gomarshal/generator_tests.go
@@ -32,7 +32,7 @@ var standardImports = []string{
var sliceAPIImports = []string{
"encoding/binary",
- "gvisor.dev/gvisor/pkg/usermem",
+ "gvisor.dev/gvisor/pkg/hostarch",
}
type testGenerator struct {
@@ -143,7 +143,7 @@ func (g *testGenerator) emitTestMarshalUnmarshalPreservesData() {
}
func (g *testGenerator) emitTestMarshalUnmarshalSlicePreservesData(slice *sliceAPI) {
- for _, name := range []string{"binary", "usermem"} {
+ for _, name := range []string{"binary", "hostarch"} {
if !g.imports.markUsed(name) {
panic(fmt.Sprintf("Generated test for '%s' referenced a non-existent import with local name '%s'", g.typeName(), name))
}
@@ -155,7 +155,7 @@ func (g *testGenerator) emitTestMarshalUnmarshalSlicePreservesData(slice *sliceA
g.emit("size := (*%s)(nil).SizeBytes() * len(x)\n", g.typeName())
g.emit("buf := bytes.NewBuffer(make([]byte, size))\n")
g.emit("buf.Reset()\n")
- g.emit("if err := binary.Write(buf, usermem.ByteOrder, x[:]); err != nil {\n")
+ g.emit("if err := binary.Write(buf, hostarch.ByteOrder, x[:]); err != nil {\n")
g.inIndent(func() {
g.emit("t.Fatal(fmt.Sprintf(\"binary.Write failed: %v\", err))\n")
})
diff --git a/tools/go_marshal/test/BUILD b/tools/go_marshal/test/BUILD
index 5bceacd32..e872560a9 100644
--- a/tools/go_marshal/test/BUILD
+++ b/tools/go_marshal/test/BUILD
@@ -15,7 +15,7 @@ go_test(
deps = [
":test",
"//pkg/binary",
- "//pkg/usermem",
+ "//pkg/hostarch",
"//tools/go_marshal/analysis",
],
)
@@ -41,6 +41,7 @@ go_test(
srcs = ["marshal_test.go"],
deps = [
":test",
+ "//pkg/hostarch",
"//pkg/marshal",
"//pkg/marshal/primitive",
"//pkg/syserror",
diff --git a/tools/go_marshal/test/benchmark_test.go b/tools/go_marshal/test/benchmark_test.go
index 224d308c7..16f478ff7 100644
--- a/tools/go_marshal/test/benchmark_test.go
+++ b/tools/go_marshal/test/benchmark_test.go
@@ -22,7 +22,7 @@ import (
"testing"
"gvisor.dev/gvisor/pkg/binary"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/tools/go_marshal/analysis"
"gvisor.dev/gvisor/tools/go_marshal/test"
)
@@ -39,10 +39,10 @@ func BenchmarkEncodingBinary(b *testing.B) {
for n := 0; n < b.N; n++ {
buf := bytes.NewBuffer(make([]byte, size))
buf.Reset()
- if err := encbin.Write(buf, usermem.ByteOrder, &s1); err != nil {
+ if err := encbin.Write(buf, hostarch.ByteOrder, &s1); err != nil {
b.Error("Write:", err)
}
- if err := encbin.Read(buf, usermem.ByteOrder, &s2); err != nil {
+ if err := encbin.Read(buf, hostarch.ByteOrder, &s2); err != nil {
b.Error("Read:", err)
}
}
@@ -66,8 +66,8 @@ func BenchmarkBinary(b *testing.B) {
for n := 0; n < b.N; n++ {
buf := make([]byte, 0, size)
- buf = binary.Marshal(buf, usermem.ByteOrder, &s1)
- binary.Unmarshal(buf, usermem.ByteOrder, &s2)
+ buf = binary.Marshal(buf, hostarch.ByteOrder, &s1)
+ binary.Unmarshal(buf, hostarch.ByteOrder, &s2)
}
b.StopTimer()
@@ -89,42 +89,42 @@ func BenchmarkMarshalManual(b *testing.B) {
buf := make([]byte, 0, s1.SizeBytes())
// Marshal
- buf = binary.AppendUint64(buf, usermem.ByteOrder, s1.Dev)
- buf = binary.AppendUint64(buf, usermem.ByteOrder, s1.Ino)
- buf = binary.AppendUint64(buf, usermem.ByteOrder, s1.Nlink)
- buf = binary.AppendUint32(buf, usermem.ByteOrder, s1.Mode)
- buf = binary.AppendUint32(buf, usermem.ByteOrder, s1.UID)
- buf = binary.AppendUint32(buf, usermem.ByteOrder, s1.GID)
- buf = binary.AppendUint32(buf, usermem.ByteOrder, 0)
- buf = binary.AppendUint64(buf, usermem.ByteOrder, s1.Rdev)
- buf = binary.AppendUint64(buf, usermem.ByteOrder, uint64(s1.Size))
- buf = binary.AppendUint64(buf, usermem.ByteOrder, uint64(s1.Blksize))
- buf = binary.AppendUint64(buf, usermem.ByteOrder, uint64(s1.Blocks))
- buf = binary.AppendUint64(buf, usermem.ByteOrder, uint64(s1.ATime.Sec))
- buf = binary.AppendUint64(buf, usermem.ByteOrder, uint64(s1.ATime.Nsec))
- buf = binary.AppendUint64(buf, usermem.ByteOrder, uint64(s1.MTime.Sec))
- buf = binary.AppendUint64(buf, usermem.ByteOrder, uint64(s1.MTime.Nsec))
- buf = binary.AppendUint64(buf, usermem.ByteOrder, uint64(s1.CTime.Sec))
- buf = binary.AppendUint64(buf, usermem.ByteOrder, uint64(s1.CTime.Nsec))
+ buf = binary.AppendUint64(buf, hostarch.ByteOrder, s1.Dev)
+ buf = binary.AppendUint64(buf, hostarch.ByteOrder, s1.Ino)
+ buf = binary.AppendUint64(buf, hostarch.ByteOrder, s1.Nlink)
+ buf = binary.AppendUint32(buf, hostarch.ByteOrder, s1.Mode)
+ buf = binary.AppendUint32(buf, hostarch.ByteOrder, s1.UID)
+ buf = binary.AppendUint32(buf, hostarch.ByteOrder, s1.GID)
+ buf = binary.AppendUint32(buf, hostarch.ByteOrder, 0)
+ buf = binary.AppendUint64(buf, hostarch.ByteOrder, s1.Rdev)
+ buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.Size))
+ buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.Blksize))
+ buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.Blocks))
+ buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.ATime.Sec))
+ buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.ATime.Nsec))
+ buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.MTime.Sec))
+ buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.MTime.Nsec))
+ buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.CTime.Sec))
+ buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.CTime.Nsec))
// Unmarshal
- s2.Dev = usermem.ByteOrder.Uint64(buf[0:8])
- s2.Ino = usermem.ByteOrder.Uint64(buf[8:16])
- s2.Nlink = usermem.ByteOrder.Uint64(buf[16:24])
- s2.Mode = usermem.ByteOrder.Uint32(buf[24:28])
- s2.UID = usermem.ByteOrder.Uint32(buf[28:32])
- s2.GID = usermem.ByteOrder.Uint32(buf[32:36])
+ s2.Dev = hostarch.ByteOrder.Uint64(buf[0:8])
+ s2.Ino = hostarch.ByteOrder.Uint64(buf[8:16])
+ s2.Nlink = hostarch.ByteOrder.Uint64(buf[16:24])
+ s2.Mode = hostarch.ByteOrder.Uint32(buf[24:28])
+ s2.UID = hostarch.ByteOrder.Uint32(buf[28:32])
+ s2.GID = hostarch.ByteOrder.Uint32(buf[32:36])
// Padding: buf[36:40]
- s2.Rdev = usermem.ByteOrder.Uint64(buf[40:48])
- s2.Size = int64(usermem.ByteOrder.Uint64(buf[48:56]))
- s2.Blksize = int64(usermem.ByteOrder.Uint64(buf[56:64]))
- s2.Blocks = int64(usermem.ByteOrder.Uint64(buf[64:72]))
- s2.ATime.Sec = int64(usermem.ByteOrder.Uint64(buf[72:80]))
- s2.ATime.Nsec = int64(usermem.ByteOrder.Uint64(buf[80:88]))
- s2.MTime.Sec = int64(usermem.ByteOrder.Uint64(buf[88:96]))
- s2.MTime.Nsec = int64(usermem.ByteOrder.Uint64(buf[96:104]))
- s2.CTime.Sec = int64(usermem.ByteOrder.Uint64(buf[104:112]))
- s2.CTime.Nsec = int64(usermem.ByteOrder.Uint64(buf[112:120]))
+ s2.Rdev = hostarch.ByteOrder.Uint64(buf[40:48])
+ s2.Size = int64(hostarch.ByteOrder.Uint64(buf[48:56]))
+ s2.Blksize = int64(hostarch.ByteOrder.Uint64(buf[56:64]))
+ s2.Blocks = int64(hostarch.ByteOrder.Uint64(buf[64:72]))
+ s2.ATime.Sec = int64(hostarch.ByteOrder.Uint64(buf[72:80]))
+ s2.ATime.Nsec = int64(hostarch.ByteOrder.Uint64(buf[80:88]))
+ s2.MTime.Sec = int64(hostarch.ByteOrder.Uint64(buf[88:96]))
+ s2.MTime.Nsec = int64(hostarch.ByteOrder.Uint64(buf[96:104]))
+ s2.CTime.Sec = int64(hostarch.ByteOrder.Uint64(buf[104:112]))
+ s2.CTime.Nsec = int64(hostarch.ByteOrder.Uint64(buf[112:120]))
}
b.StopTimer()
@@ -187,8 +187,8 @@ func BenchmarkBinarySlice(b *testing.B) {
for n := 0; n < b.N; n++ {
buf := make([]byte, 0, size)
- buf = binary.Marshal(buf, usermem.ByteOrder, &s1)
- binary.Unmarshal(buf, usermem.ByteOrder, &s2)
+ buf = binary.Marshal(buf, hostarch.ByteOrder, &s1)
+ binary.Unmarshal(buf, hostarch.ByteOrder, &s2)
}
b.StopTimer()
diff --git a/tools/go_marshal/test/escape/BUILD b/tools/go_marshal/test/escape/BUILD
index 2981ef196..62e0b4665 100644
--- a/tools/go_marshal/test/escape/BUILD
+++ b/tools/go_marshal/test/escape/BUILD
@@ -7,8 +7,8 @@ go_library(
testonly = 1,
srcs = ["escape.go"],
deps = [
+ "//pkg/hostarch",
"//pkg/marshal",
- "//pkg/usermem",
"//tools/go_marshal/test",
],
)
diff --git a/tools/go_marshal/test/escape/escape.go b/tools/go_marshal/test/escape/escape.go
index df14ae98e..1ac606862 100644
--- a/tools/go_marshal/test/escape/escape.go
+++ b/tools/go_marshal/test/escape/escape.go
@@ -16,8 +16,8 @@
package escape
import (
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/tools/go_marshal/test"
)
@@ -29,21 +29,21 @@ func (*dummyCopyContext) CopyScratchBuffer(size int) []byte {
return make([]byte, size)
}
-func (*dummyCopyContext) CopyOutBytes(addr usermem.Addr, b []byte) (int, error) {
+func (*dummyCopyContext) CopyOutBytes(addr hostarch.Addr, b []byte) (int, error) {
return len(b), nil
}
-func (*dummyCopyContext) CopyInBytes(addr usermem.Addr, b []byte) (int, error) {
+func (*dummyCopyContext) CopyInBytes(addr hostarch.Addr, b []byte) (int, error) {
return len(b), nil
}
-func (t *dummyCopyContext) MarshalBytes(addr usermem.Addr, marshallable marshal.Marshallable) {
+func (t *dummyCopyContext) MarshalBytes(addr hostarch.Addr, marshallable marshal.Marshallable) {
buf := t.CopyScratchBuffer(marshallable.SizeBytes())
marshallable.MarshalBytes(buf)
t.CopyOutBytes(addr, buf)
}
-func (t *dummyCopyContext) MarshalUnsafe(addr usermem.Addr, marshallable marshal.Marshallable) {
+func (t *dummyCopyContext) MarshalUnsafe(addr hostarch.Addr, marshallable marshal.Marshallable) {
buf := t.CopyScratchBuffer(marshallable.SizeBytes())
marshallable.MarshalUnsafe(buf)
t.CopyOutBytes(addr, buf)
@@ -53,14 +53,14 @@ func (t *dummyCopyContext) MarshalUnsafe(addr usermem.Addr, marshallable marshal
//go:nosplit
func doCopyIn(t *dummyCopyContext) {
var stat test.Stat
- stat.CopyIn(t, usermem.Addr(0xf000ba12))
+ stat.CopyIn(t, hostarch.Addr(0xf000ba12))
}
// +checkescape:all
//go:nosplit
func doCopyOut(t *dummyCopyContext) {
var stat test.Stat
- stat.CopyOut(t, usermem.Addr(0xf000ba12))
+ stat.CopyOut(t, hostarch.Addr(0xf000ba12))
}
// +mustescape:builtin
@@ -70,7 +70,7 @@ func doMarshalBytesDirect(t *dummyCopyContext) {
var stat test.Stat
buf := t.CopyScratchBuffer(stat.SizeBytes())
stat.MarshalBytes(buf)
- t.CopyOutBytes(usermem.Addr(0xf000ba12), buf)
+ t.CopyOutBytes(hostarch.Addr(0xf000ba12), buf)
}
// +mustescape:builtin
@@ -80,7 +80,7 @@ func doMarshalUnsafeDirect(t *dummyCopyContext) {
var stat test.Stat
buf := t.CopyScratchBuffer(stat.SizeBytes())
stat.MarshalUnsafe(buf)
- t.CopyOutBytes(usermem.Addr(0xf000ba12), buf)
+ t.CopyOutBytes(hostarch.Addr(0xf000ba12), buf)
}
// +mustescape:local,heap
@@ -88,7 +88,7 @@ func doMarshalUnsafeDirect(t *dummyCopyContext) {
//go:nosplit
func doMarshalBytesViaMarshallable(t *dummyCopyContext) {
var stat test.Stat
- t.MarshalBytes(usermem.Addr(0xf000ba12), &stat)
+ t.MarshalBytes(hostarch.Addr(0xf000ba12), &stat)
}
// +mustescape:local,heap
@@ -96,5 +96,5 @@ func doMarshalBytesViaMarshallable(t *dummyCopyContext) {
//go:nosplit
func doMarshalUnsafeViaMarshallable(t *dummyCopyContext) {
var stat test.Stat
- t.MarshalUnsafe(usermem.Addr(0xf000ba12), &stat)
+ t.MarshalUnsafe(hostarch.Addr(0xf000ba12), &stat)
}
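These escape tests keep passing after the rename because hostarch.Addr, like usermem.Addr before it, is a plain integer type: passing it by value into a //go:nosplit function allocates nothing. A minimal sketch, assuming the same annotation conventions and that hostarch exports PageSize:

package example

import "gvisor.dev/gvisor/pkg/hostarch"

// +checkescape:all
//go:nosplit
func pageStart(addr hostarch.Addr) hostarch.Addr {
	// Addr arithmetic is integer arithmetic; it cannot heap-allocate,
	// so the usermem -> hostarch swap is escape-analysis neutral.
	return addr &^ (hostarch.PageSize - 1)
}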
diff --git a/tools/go_marshal/test/marshal_test.go b/tools/go_marshal/test/marshal_test.go
index 733689c79..43bafbf96 100644
--- a/tools/go_marshal/test/marshal_test.go
+++ b/tools/go_marshal/test/marshal_test.go
@@ -27,6 +27,7 @@ import (
"unsafe"
"github.com/google/go-cmp/cmp"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/syserror"
@@ -47,7 +48,7 @@ func (t *mockCopyContext) populate(val interface{}) {
var buf bytes.Buffer
// Use binary.Write so we aren't testing go-marshal against its own
// potentially buggy implementation.
- if err := binary.Write(&buf, usermem.ByteOrder, val); err != nil {
+ if err := binary.Write(&buf, hostarch.ByteOrder, val); err != nil {
panic(err)
}
t.taskMem.Bytes = buf.Bytes()
@@ -71,14 +72,14 @@ func (t *mockCopyContext) CopyScratchBuffer(size int) []byte {
// CopyOutBytes implements marshal.CopyContext.CopyOutBytes. The implementation
// completely ignores the target address and stores a copy of b in its
// internal buffer, overwriting any previous contents.
-func (t *mockCopyContext) CopyOutBytes(_ usermem.Addr, b []byte) (int, error) {
+func (t *mockCopyContext) CopyOutBytes(_ hostarch.Addr, b []byte) (int, error) {
return t.taskMem.CopyOut(nil, 0, b, usermem.IOOpts{})
}
// CopyInBytes implements marshal.CopyContext.CopyInBytes. The implementation
// completely ignores the source address and always fills b from the beginning of
// its internal buffer.
-func (t *mockCopyContext) CopyInBytes(_ usermem.Addr, b []byte) (int, error) {
+func (t *mockCopyContext) CopyInBytes(_ hostarch.Addr, b []byte) (int, error) {
return t.taskMem.CopyIn(nil, 0, b, usermem.IOOpts{})
}
@@ -91,7 +92,7 @@ func unsafeMemory(m marshal.Marshallable) []byte {
// since the layout isn't packed. Allocate a temporary buffer
// and marshal instead.
var buf bytes.Buffer
- if err := binary.Write(&buf, usermem.ByteOrder, m); err != nil {
+ if err := binary.Write(&buf, hostarch.ByteOrder, m); err != nil {
panic(err)
}
return buf.Bytes()
@@ -130,7 +131,7 @@ func unsafeMemorySlice(m interface{}, elt marshal.Marshallable) []byte {
// since the layout isn't packed. Allocate a temporary buffer
// and marshal instead.
var buf bytes.Buffer
- if err := binary.Write(&buf, usermem.ByteOrder, m); err != nil {
+ if err := binary.Write(&buf, hostarch.ByteOrder, m); err != nil {
panic(err)
}
return buf.Bytes()
@@ -176,7 +177,7 @@ func limitedCopyIn(t *testing.T, src, dst marshal.Marshallable, limit int) {
cc.populate(src)
cc.setLimit(limit)
- n, err := dst.CopyIn(&cc, usermem.Addr(0))
+ n, err := dst.CopyIn(&cc, hostarch.Addr(0))
if n != limit {
t.Errorf("CopyIn copied unexpected number of bytes, expected %d, got %d", limit, n)
}
@@ -206,7 +207,7 @@ func limitedCopyOut(t *testing.T, src marshal.Marshallable, limit int) {
var cc mockCopyContext
cc.setLimit(limit)
- n, err := src.CopyOut(&cc, usermem.Addr(0))
+ n, err := src.CopyOut(&cc, hostarch.Addr(0))
if n != limit {
t.Errorf("CopyOut copied unexpected number of bytes, expected %d, got %d", limit, n)
}
@@ -227,7 +228,7 @@ func copyOutN(t *testing.T, src marshal.Marshallable, limit int) {
var cc mockCopyContext
cc.setLimit(limit)
- n, err := src.CopyOutN(&cc, usermem.Addr(0), limit)
+ n, err := src.CopyOutN(&cc, hostarch.Addr(0), limit)
if err != nil {
t.Errorf("CopyOut returned unexpected error: %v", err)
}
@@ -304,18 +305,18 @@ func TestLimitedMarshalling(t *testing.T) {
func TestLimitedSliceMarshalling(t *testing.T) {
types := []struct {
arrayPtrType reflect.Type
- copySliceIn func(cc marshal.CopyContext, addr usermem.Addr, dstSlice interface{}) (int, error)
- copySliceOut func(cc marshal.CopyContext, addr usermem.Addr, srcSlice interface{}) (int, error)
+ copySliceIn func(cc marshal.CopyContext, addr hostarch.Addr, dstSlice interface{}) (int, error)
+ copySliceOut func(cc marshal.CopyContext, addr hostarch.Addr, srcSlice interface{}) (int, error)
unsafeMemory func(arrPtr interface{}) []byte
}{
// Packed types.
{
reflect.TypeOf((*[20]test.Stat)(nil)),
- func(cc marshal.CopyContext, addr usermem.Addr, dst interface{}) (int, error) {
+ func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
slice := dst.(*[20]test.Stat)[:]
return test.CopyStatSliceIn(cc, addr, slice)
},
- func(cc marshal.CopyContext, addr usermem.Addr, src interface{}) (int, error) {
+ func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
slice := src.(*[20]test.Stat)[:]
return test.CopyStatSliceOut(cc, addr, slice)
},
@@ -326,11 +327,11 @@ func TestLimitedSliceMarshalling(t *testing.T) {
},
{
reflect.TypeOf((*[1]test.Stat)(nil)),
- func(cc marshal.CopyContext, addr usermem.Addr, dst interface{}) (int, error) {
+ func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
slice := dst.(*[1]test.Stat)[:]
return test.CopyStatSliceIn(cc, addr, slice)
},
- func(cc marshal.CopyContext, addr usermem.Addr, src interface{}) (int, error) {
+ func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
slice := src.(*[1]test.Stat)[:]
return test.CopyStatSliceOut(cc, addr, slice)
},
@@ -341,11 +342,11 @@ func TestLimitedSliceMarshalling(t *testing.T) {
},
{
reflect.TypeOf((*[5]test.SignalSetAlias)(nil)),
- func(cc marshal.CopyContext, addr usermem.Addr, dst interface{}) (int, error) {
+ func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
slice := dst.(*[5]test.SignalSetAlias)[:]
return test.CopySignalSetAliasSliceIn(cc, addr, slice)
},
- func(cc marshal.CopyContext, addr usermem.Addr, src interface{}) (int, error) {
+ func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
slice := src.(*[5]test.SignalSetAlias)[:]
return test.CopySignalSetAliasSliceOut(cc, addr, slice)
},
@@ -357,11 +358,11 @@ func TestLimitedSliceMarshalling(t *testing.T) {
// Non-packed types.
{
reflect.TypeOf((*[20]test.Type1)(nil)),
- func(cc marshal.CopyContext, addr usermem.Addr, dst interface{}) (int, error) {
+ func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
slice := dst.(*[20]test.Type1)[:]
return test.CopyType1SliceIn(cc, addr, slice)
},
- func(cc marshal.CopyContext, addr usermem.Addr, src interface{}) (int, error) {
+ func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
slice := src.(*[20]test.Type1)[:]
return test.CopyType1SliceOut(cc, addr, slice)
},
@@ -372,11 +373,11 @@ func TestLimitedSliceMarshalling(t *testing.T) {
},
{
reflect.TypeOf((*[1]test.Type1)(nil)),
- func(cc marshal.CopyContext, addr usermem.Addr, dst interface{}) (int, error) {
+ func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
slice := dst.(*[1]test.Type1)[:]
return test.CopyType1SliceIn(cc, addr, slice)
},
- func(cc marshal.CopyContext, addr usermem.Addr, src interface{}) (int, error) {
+ func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
slice := src.(*[1]test.Type1)[:]
return test.CopyType1SliceOut(cc, addr, slice)
},
@@ -387,11 +388,11 @@ func TestLimitedSliceMarshalling(t *testing.T) {
},
{
reflect.TypeOf((*[7]test.Type8)(nil)),
- func(cc marshal.CopyContext, addr usermem.Addr, dst interface{}) (int, error) {
+ func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
slice := dst.(*[7]test.Type8)[:]
return test.CopyType8SliceIn(cc, addr, slice)
},
- func(cc marshal.CopyContext, addr usermem.Addr, src interface{}) (int, error) {
+ func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
slice := src.(*[7]test.Type8)[:]
return test.CopyType8SliceOut(cc, addr, slice)
},
@@ -444,7 +445,7 @@ func TestLimitedSliceMarshalling(t *testing.T) {
cc.populate(expected)
cc.setLimit(limit)
- n, err := tt.copySliceIn(&cc, usermem.Addr(0), actual)
+ n, err := tt.copySliceIn(&cc, hostarch.Addr(0), actual)
if n != limit {
t.Errorf("CopyIn copied unexpected number of bytes, expected %d, got %d", limit, n)
}
@@ -498,7 +499,7 @@ func TestLimitedSliceMarshalling(t *testing.T) {
cc.populate(expected)
cc.setLimit(limit)
- n, err := tt.copySliceOut(&cc, usermem.Addr(0), expected)
+ n, err := tt.copySliceOut(&cc, hostarch.Addr(0), expected)
if n != limit {
t.Errorf("CopyIn copied unexpected number of bytes, expected %d, got %d", limit, n)
}
@@ -523,14 +524,14 @@ func TestDynamicTypeStruct(t *testing.T) {
var cc mockCopyContext
cc.setLimit(t12.SizeBytes())
- if _, err := t12.CopyOut(&cc, usermem.Addr(0)); err != nil {
+ if _, err := t12.CopyOut(&cc, hostarch.Addr(0)); err != nil {
t.Fatalf("cc.CopyOut faile: %v", err)
}
res := test.Type12Dynamic{
Y: make([]primitive.Int64, len(t12.Y)),
}
- res.CopyIn(&cc, usermem.Addr(0))
+ res.CopyIn(&cc, hostarch.Addr(0))
if !reflect.DeepEqual(t12, res) {
t.Errorf("dynamic type is not same after marshalling and unmarshalling: before = %+v, after = %+v", t12, res)
}
@@ -541,12 +542,12 @@ func TestDynamicTypeIdentifier(t *testing.T) {
var cc mockCopyContext
cc.setLimit(s.SizeBytes())
- if _, err := s.CopyOut(&cc, usermem.Addr(0)); err != nil {
+ if _, err := s.CopyOut(&cc, hostarch.Addr(0)); err != nil {
t.Fatalf("cc.CopyOut faile: %v", err)
}
res := test.Type13Dynamic(make([]byte, len(s)))
- res.CopyIn(&cc, usermem.Addr(0))
+ res.CopyIn(&cc, hostarch.Addr(0))
if res != s {
t.Errorf("dynamic type is not same after marshalling and unmarshalling: before = %s, after = %s", s, res)
}