summaryrefslogtreecommitdiffhomepage
path: root/pkg/lisafs
diff options
context:
space:
mode:
Diffstat (limited to 'pkg/lisafs')
-rw-r--r--pkg/lisafs/BUILD117
-rw-r--r--pkg/lisafs/README.md366
-rw-r--r--pkg/lisafs/connection_test.go194
-rw-r--r--pkg/lisafs/control_fd_list.go221
-rw-r--r--pkg/lisafs/control_fd_refs.go140
-rw-r--r--pkg/lisafs/lisafs_abi_autogen_unsafe.go4214
-rw-r--r--pkg/lisafs/lisafs_state_autogen.go176
-rw-r--r--pkg/lisafs/open_fd_list.go221
-rw-r--r--pkg/lisafs/open_fd_refs.go140
-rw-r--r--pkg/lisafs/sock_test.go217
-rw-r--r--pkg/lisafs/testsuite/BUILD20
-rw-r--r--pkg/lisafs/testsuite/testsuite.go637
12 files changed, 5112 insertions, 1551 deletions
diff --git a/pkg/lisafs/BUILD b/pkg/lisafs/BUILD
deleted file mode 100644
index 313c1756d..000000000
--- a/pkg/lisafs/BUILD
+++ /dev/null
@@ -1,117 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"],
-)
-
-go_template_instance(
- name = "control_fd_refs",
- out = "control_fd_refs.go",
- package = "lisafs",
- prefix = "controlFD",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "ControlFD",
- },
-)
-
-go_template_instance(
- name = "open_fd_refs",
- out = "open_fd_refs.go",
- package = "lisafs",
- prefix = "openFD",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "OpenFD",
- },
-)
-
-go_template_instance(
- name = "control_fd_list",
- out = "control_fd_list.go",
- package = "lisafs",
- prefix = "controlFD",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*ControlFD",
- "Linker": "*ControlFD",
- },
-)
-
-go_template_instance(
- name = "open_fd_list",
- out = "open_fd_list.go",
- package = "lisafs",
- prefix = "openFD",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*OpenFD",
- "Linker": "*OpenFD",
- },
-)
-
-go_library(
- name = "lisafs",
- srcs = [
- "channel.go",
- "client.go",
- "client_file.go",
- "communicator.go",
- "connection.go",
- "control_fd_list.go",
- "control_fd_refs.go",
- "fd.go",
- "handlers.go",
- "lisafs.go",
- "message.go",
- "open_fd_list.go",
- "open_fd_refs.go",
- "sample_message.go",
- "server.go",
- "sock.go",
- ],
- marshal = True,
- deps = [
- "//pkg/abi/linux",
- "//pkg/cleanup",
- "//pkg/context",
- "//pkg/fdchannel",
- "//pkg/flipcall",
- "//pkg/fspath",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/marshal/primitive",
- "//pkg/p9",
- "//pkg/refsvfs2",
- "//pkg/sync",
- "//pkg/unet",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "sock_test",
- size = "small",
- srcs = ["sock_test.go"],
- library = ":lisafs",
- deps = [
- "//pkg/marshal",
- "//pkg/sync",
- "//pkg/unet",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "connection_test",
- size = "small",
- srcs = ["connection_test.go"],
- deps = [
- ":lisafs",
- "//pkg/sync",
- "//pkg/unet",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/lisafs/README.md b/pkg/lisafs/README.md
deleted file mode 100644
index 6b857321a..000000000
--- a/pkg/lisafs/README.md
+++ /dev/null
@@ -1,366 +0,0 @@
-# Replacing 9P
-
-NOTE: LISAFS is **NOT** production ready. There are still some security concerns
-that must be resolved first.
-
-## Background
-
-The Linux filesystem model consists of the following key aspects (modulo mounts,
-which are outside the scope of this discussion):
-
-- A `struct inode` represents a "filesystem object", such as a directory or a
- regular file. "Filesystem object" is most precisely defined by the practical
- properties of an inode, such as an immutable type (regular file, directory,
- symbolic link, etc.) and its independence from the path originally used to
- obtain it.
-
-- A `struct dentry` represents a node in a filesystem tree. Semantically, each
- dentry is immutably associated with an inode representing the filesystem
- object at that position. (Linux implements optimizations involving reuse of
- unreferenced dentries, which allows their associated inodes to change, but
- this is outside the scope of this discussion.)
-
-- A `struct file` represents an open file description (hereafter FD) and is
- needed to perform I/O. Each FD is immutably associated with the dentry
- through which it was opened.
-
-The current gVisor virtual filesystem implementation (hereafter VFS1) closely
-imitates the Linux design:
-
-- `struct inode` => `fs.Inode`
-
-- `struct dentry` => `fs.Dirent`
-
-- `struct file` => `fs.File`
-
-gVisor accesses most external filesystems through a variant of the 9P2000.L
-protocol, including extensions for performance (`walkgetattr`) and for features
-not supported by vanilla 9P2000.L (`flushf`, `lconnect`). The 9P protocol family
-is inode-based; 9P fids represent a file (equivalently "file system object"),
-and the protocol is structured around alternatively obtaining fids to represent
-files (with `walk` and, in gVisor, `walkgetattr`) and performing operations on
-those fids.
-
-In the sections below, a **shared** filesystem is a filesystem that is *mutably*
-accessible by multiple concurrent clients, such that a **non-shared** filesystem
-is a filesystem that is either read-only or accessible by only a single client.
-
-## Problems
-
-### Serialization of Path Component RPCs
-
-Broadly speaking, VFS1 traverses each path component in a pathname, alternating
-between verifying that each traversed dentry represents an inode that represents
-a searchable directory and moving to the next dentry in the path.
-
-In the context of a remote filesystem, the structure of this traversal means
-that - modulo caching - a path involving N components requires at least N-1
-*sequential* RPCs to obtain metadata for intermediate directories, incurring
-significant latency. (In vanilla 9P2000.L, 2(N-1) RPCs are required: N-1 `walk`
-and N-1 `getattr`. We added the `walkgetattr` RPC to reduce this overhead.) On
-non-shared filesystems, this overhead is primarily significant during
-application startup; caching mitigates much of this overhead at steady state. On
-shared filesystems, where correct caching requires revalidation (requiring RPCs
-for each revalidated directory anyway), this overhead is consistently ruinous.
-
-### Inefficient RPCs
-
-9P is not exceptionally economical with RPCs in general. In addition to the
-issue described above:
-
-- Opening an existing file in 9P involves at least 2 RPCs: `walk` to produce
- an unopened fid representing the file, and `lopen` to open the fid.
-
-- Creating a file also involves at least 2 RPCs: `walk` to produce an unopened
- fid representing the parent directory, and `lcreate` to create the file and
- convert the fid to an open fid representing the created file. In practice,
- both the Linux and gVisor 9P clients expect to have an unopened fid for the
- created file (necessitating an additional `walk`), as well as attributes for
- the created file (necessitating an additional `getattr`), for a total of 4
- RPCs. (In a shared filesystem, where whether a file already exists can
- change between RPCs, a correct implementation of `open(O_CREAT)` would have
- to alternate between these two paths (plus `clunk`ing the temporary fid
- between alternations, since the nature of the `fid` differs between the two
- paths). Neither Linux nor gVisor implement the required alternation, so
- `open(O_CREAT)` without `O_EXCL` can spuriously fail with `EEXIST` on both.)
-
-- Closing (`clunk`ing) a fid requires an RPC. VFS1 issues this RPC
- asynchronously in an attempt to reduce critical path latency, but scheduling
- overhead makes this not clearly advantageous in practice.
-
-- `read` and `readdir` can return partial reads without a way to indicate EOF,
- necessitating an additional final read to detect EOF.
-
-- Operations that affect filesystem state do not consistently return updated
- filesystem state. In gVisor, the client implementation attempts to handle
- this by tracking what it thinks updated state "should" be; this is complex,
- and especially brittle for timestamps (which are often not arbitrarily
-   settable). In Linux, the client implementation invalidates cached metadata
- whenever it performs such an operation, and reloads it when a dentry
- corresponding to an inode with no valid cached metadata is revalidated; this
- is simple, but necessitates an additional `getattr`.
-
-### Dentry/Inode Ambiguity
-
-As noted above, 9P's documentation tends to imply that unopened fids represent
-an inode. In practice, most filesystem APIs present very limited interfaces for
-working with inodes at best, such that the interpretation of unopened fids
-varies:
-
-- Linux's 9P client associates unopened fids with (dentry, uid) pairs. When
- caching is enabled, it also associates each inode with the first fid opened
- writably that references that inode, in order to support page cache
- writeback.
-
-- gVisor's 9P client associates unopened fids with inodes, and also caches
- opened fids in inodes in a manner similar to Linux.
-
-- The runsc fsgofer associates unopened fids with both "dentries" (host
- filesystem paths) and "inodes" (host file descriptors); which is used
- depends on the operation invoked on the fid.
-
-For non-shared filesystems, this confusion has resulted in correctness issues
-that are (in gVisor) currently handled by a number of coarse-grained locks that
-serialize renames with all other filesystem operations. For shared filesystems,
-this means inconsistent behavior in the presence of concurrent mutation.
-
-## Design
-
-Almost all Linux filesystem syscalls describe filesystem resources in one of two
-ways:
-
-- Path-based: A filesystem position is described by a combination of a
- starting position and a sequence of path components relative to that
- position, where the starting position is one of:
-
- - The VFS root (defined by mount namespace and chroot), for absolute paths
-
- - The VFS position of an existing FD, for relative paths passed to `*at`
- syscalls (e.g. `statat`)
-
- - The current working directory, for relative paths passed to non-`*at`
- syscalls and `*at` syscalls with `AT_FDCWD`
-
-- File-description-based: A filesystem object is described by an existing FD,
- passed to a `f*` syscall (e.g. `fstat`).
-
-Many of our issues with 9P arise from its (and VFS') interposition of a model
-based on inodes between the filesystem syscall API and filesystem
-implementations. We propose to replace 9P with a protocol that does not feature
-inodes at all, and instead closely follows the filesystem syscall API by
-featuring only path-based and FD-based operations, with minimal deviations as
-necessary to ameliorate deficiencies in the syscall interface (see below). This
-approach addresses the issues described above:
-
-- Even on shared filesystems, most application filesystem syscalls are
- translated to a single RPC (possibly excepting special cases described
- below), which is a logical lower bound.
-
-- The behavior of application syscalls on shared filesystems is
- straightforwardly predictable: path-based syscalls are translated to
- path-based RPCs, which will re-lookup the file at that path, and FD-based
- syscalls are translated to FD-based RPCs, which use an existing open file
- without performing another lookup. (This is at least true on gofers that
- proxy the host local filesystem; other filesystems that lack support for
- e.g. certain operations on FDs may have different behavior, but this
- divergence is at least still predictable and inherent to the underlying
- filesystem implementation.)
-
-Note that this approach is only feasible in gVisor's next-generation virtual
-filesystem (VFS2), which does not assume the existence of inodes and allows the
-remote filesystem client to translate whole path-based syscalls into RPCs. Thus
-one of the unavoidable tradeoffs associated with such a protocol vs. 9P is the
-inability to construct a Linux client that is performance-competitive with
-gVisor.
-
-### File Permissions
-
-Many filesystem operations are side-effectual, such that file permissions must
-be checked before such operations take effect. The simplest approach to file
-permission checking is for the sentry to obtain permissions from the remote
-filesystem, then apply permission checks in the sentry before performing the
-application-requested operation. However, this requires an additional RPC per
-application syscall (which can't be mitigated by caching on shared filesystems).
-Alternatively, we may delegate file permission checking to gofers. In general,
-file permission checks depend on the following properties of the accessor:
-
-- Filesystem UID/GID
-
-- Supplementary GIDs
-
-- Effective capabilities in the accessor's user namespace (i.e. the accessor's
- effective capability set)
-
-- All UIDs and GIDs mapped in the accessor's user namespace (which determine
- if the accessor's capabilities apply to accessed files)
-
-We may choose to delay implementation of file permission checking delegation,
-although this is potentially costly since it doubles the number of required RPCs
-for most operations on shared filesystems. We may also consider compromise
-options, such as only delegating file permission checks for accessors in the
-root user namespace.
-
-### Symbolic Links
-
-gVisor usually interprets symbolic link targets in its VFS rather than on the
-filesystem containing the symbolic link; thus e.g. a symlink to
-"/proc/self/maps" on a remote filesystem resolves to said file in the sentry's
-procfs rather than the host's. This implies that:
-
-- Remote filesystem servers that proxy filesystems supporting symlinks must
- check if each path component is a symlink during path traversal.
-
-- Absolute symlinks require that the sentry restart the operation at its
- contextual VFS root (which is task-specific and may not be on a remote
- filesystem at all), so if a remote filesystem server encounters an absolute
- symlink during path traversal on behalf of a path-based operation, it must
- terminate path traversal and return the symlink target.
-
-- Relative symlinks begin target resolution in the parent directory of the
- symlink, so in theory most relative symlinks can be handled automatically
- during the path traversal that encounters the symlink, provided that said
- traversal is supplied with the number of remaining symlinks before `ELOOP`.
- However, the new path traversed by the symlink target may cross VFS mount
- boundaries, such that it's only safe for remote filesystem servers to
- speculatively follow relative symlinks for side-effect-free operations such
- as `stat` (where the sentry can simply ignore results that are inapplicable
- due to crossing mount boundaries). We may choose to delay implementation of
- this feature, at the cost of an additional RPC per relative symlink (note
- that even if the symlink target crosses a mount boundary, the sentry will
- need to `stat` the path to the mount boundary to confirm that each traversed
- component is an accessible directory); until it is implemented, relative
- symlinks may be handled like absolute symlinks, by terminating path
- traversal and returning the symlink target.
-
-The possibility of symlinks (and the possibility of a compromised sentry) means
-that the sentry may issue RPCs with paths that, in the absence of symlinks,
-would traverse beyond the root of the remote filesystem. For example, the sentry
-may issue an RPC with a path like "/foo/../..", on the premise that if "/foo" is
-a symlink then the resulting path may be elsewhere on the remote filesystem. To
-handle this, path traversal must also track its current depth below the remote
-filesystem root, and terminate path traversal if it would ascend beyond this
-point.
-
-### Path Traversal
-
-Since path-based VFS operations will translate to path-based RPCs, filesystem
-servers will need to handle path traversal. From the perspective of a given
-filesystem implementation in the server, there are two basic approaches to path
-traversal:
-
-- Inode-walk: For each path component, obtain a handle to the underlying
- filesystem object (e.g. with `open(O_PATH)`), check if that object is a
- symlink (as described above) and that that object is accessible by the
- caller (e.g. with `fstat()`), then continue to the next path component (e.g.
- with `openat()`). This ensures that the checked filesystem object is the one
- used to obtain the next object in the traversal, which is intuitively
- appealing. However, while this approach works for host local filesystems, it
- requires features that are not widely supported by other filesystems.
-
-- Path-walk: For each path component, use a path-based operation to determine
- if the filesystem object currently referred to by that path component is a
- symlink / is accessible. This is highly portable, but suffers from quadratic
- behavior (at the level of the underlying filesystem implementation, the
- first path component will be traversed a number of times equal to the number
- of path components in the path).
-
-The implementation should support either option by delegating path traversal to
-filesystem implementations within the server (like VFS and the remote filesystem
-protocol itself), as inode-walking is still safe, efficient, amenable to FD
-caching, and implementable on non-shared host local filesystems (a sufficiently
-common case as to be worth considering in the design).
-
-Both approaches are susceptible to race conditions that may permit sandboxed
-filesystem escapes:
-
-- Under inode-walk, a malicious application may cause a directory to be moved
- (with `rename`) during path traversal, such that the filesystem
- implementation incorrectly determines whether subsequent inodes are located
- in paths that should be visible to sandboxed applications.
-
-- Under path-walk, a malicious application may cause a non-symlink file to be
- replaced with a symlink during path traversal, such that following path
- operations will incorrectly follow the symlink.
-
-Both race conditions can, to some extent, be mitigated in filesystem server
-implementations by synchronizing path traversal with the hazardous operations in
-question. However, shared filesystems are frequently used to share data between
-sandboxed and unsandboxed applications in a controlled way, and in some cases a
-malicious sandboxed application may be able to take advantage of a hazardous
-filesystem operation performed by an unsandboxed application. In some cases,
-filesystem features may be available to ensure safety even in such cases (e.g.
-[the new openat2() syscall](https://man7.org/linux/man-pages/man2/openat2.2.html)),
-but it is not clear how to solve this problem in general. (Note that this issue
-is not specific to our design; rather, it is a fundamental limitation of
-filesystem sandboxing.)
-
-### Filesystem Multiplexing
-
-A given sentry may need to access multiple distinct remote filesystems (e.g.
-different volumes for a given container). In many cases, there is no advantage
-to serving these filesystems from distinct filesystem servers, or accessing them
-through distinct connections (factors such as maximum RPC concurrency should be
-based on available host resources). Therefore, the protocol should support
-multiplexing of distinct filesystem trees within a single session. 9P supports
-this by allowing multiple calls to the `attach` RPC to produce fids representing
-distinct filesystem trees, but this is somewhat clunky; we propose a much
-simpler mechanism wherein each message that conveys a path also conveys a
-numeric filesystem ID that identifies a filesystem tree.
-
-## Alternatives Considered
-
-### Additional Extensions to 9P
-
-There are at least three conceptual aspects to 9P:
-
-- Wire format: messages with a 4-byte little-endian size prefix, strings with
- a 2-byte little-endian size prefix, etc. Whether the wire format is worth
- retaining is unclear; in particular, it's unclear that the 9P wire format
- has a significant advantage over protobufs, which are substantially easier
- to extend. Note that the official Go protobuf implementation is widely known
- to suffer from a significant number of performance deficiencies, so if we
- choose to switch to protobuf, we may need to use an alternative toolchain
- such as `gogo/protobuf` (which is also widely used in the Go ecosystem, e.g.
- by Kubernetes).
-
-- Filesystem model: fids, qids, etc. Discarding this is one of the motivations
- for this proposal.
-
-- RPCs: Twalk, Tlopen, etc. In addition to previously-described
- inefficiencies, most of these are dependent on the filesystem model and
- therefore must be discarded.
-
-### FUSE
-
-The FUSE (Filesystem in Userspace) protocol is frequently used to provide
-arbitrary userspace filesystem implementations to a host Linux kernel.
-Unfortunately, FUSE is also inode-based, and therefore doesn't address any of
-the problems we have with 9P.
-
-### virtio-fs
-
-virtio-fs is an ongoing project aimed at improving Linux VM filesystem
-performance when accessing Linux host filesystems (vs. virtio-9p). In brief, it
-is based on:
-
-- Using a FUSE client in the guest that communicates over virtio with a FUSE
- server in the host.
-
-- Using DAX to map the host page cache into the guest.
-
-- Using a file metadata table in shared memory to avoid VM exits for metadata
- updates.
-
-None of these improvements seem applicable to gVisor:
-
-- As explained above, FUSE is still inode-based, so it is still susceptible to
- most of the problems we have with 9P.
-
-- Our use of host file descriptors already allows us to leverage the host page
- cache for file contents.
-
-- Our need for shared filesystem coherence is usually based on a user
- requirement that an out-of-sandbox filesystem mutation is guaranteed to be
- visible by all subsequent observations from within the sandbox, or vice
- versa; it's not clear that this can be guaranteed without a synchronous
- signaling mechanism like an RPC.
diff --git a/pkg/lisafs/connection_test.go b/pkg/lisafs/connection_test.go
deleted file mode 100644
index 28ba47112..000000000
--- a/pkg/lisafs/connection_test.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package connection_test
-
-import (
- "reflect"
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/lisafs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/unet"
-)
-
-const (
- dynamicMsgID = lisafs.Channel + 1
- versionMsgID = dynamicMsgID + 1
-)
-
-var handlers = [...]lisafs.RPCHandler{
- lisafs.Error: lisafs.ErrorHandler,
- lisafs.Mount: lisafs.MountHandler,
- lisafs.Channel: lisafs.ChannelHandler,
- dynamicMsgID: dynamicMsgHandler,
- versionMsgID: versionHandler,
-}
-
-// testServer implements lisafs.ServerImpl.
-type testServer struct {
- lisafs.Server
-}
-
-var _ lisafs.ServerImpl = (*testServer)(nil)
-
-type testControlFD struct {
- lisafs.ControlFD
- lisafs.ControlFDImpl
-}
-
-func (fd *testControlFD) FD() *lisafs.ControlFD {
- return &fd.ControlFD
-}
-
-// Mount implements lisafs.Mount.
-func (s *testServer) Mount(c *lisafs.Connection, mountPath string) (lisafs.ControlFDImpl, lisafs.Inode, error) {
- return &testControlFD{}, lisafs.Inode{ControlFD: 1}, nil
-}
-
-// MaxMessageSize implements lisafs.MaxMessageSize.
-func (s *testServer) MaxMessageSize() uint32 {
- return lisafs.MaxMessageSize()
-}
-
-// SupportedMessages implements lisafs.ServerImpl.SupportedMessages.
-func (s *testServer) SupportedMessages() []lisafs.MID {
- return []lisafs.MID{
- lisafs.Mount,
- lisafs.Channel,
- dynamicMsgID,
- versionMsgID,
- }
-}
-
-func runServerClient(t testing.TB, clientFn func(c *lisafs.Client)) {
- serverSocket, clientSocket, err := unet.SocketPair(false)
- if err != nil {
- t.Fatalf("socketpair got err %v expected nil", err)
- }
-
- ts := &testServer{}
- ts.Server.InitTestOnly(ts, handlers[:])
- conn, err := ts.CreateConnection(serverSocket, false /* readonly */)
- if err != nil {
- t.Fatalf("starting connection failed: %v", err)
- return
- }
- ts.StartConnection(conn)
-
- c, _, err := lisafs.NewClient(clientSocket, "/")
- if err != nil {
- t.Fatalf("client creation failed: %v", err)
- }
-
- clientFn(c)
-
- c.Close() // This should trigger client and server shutdown.
- ts.Wait()
-}
-
-// TestStartUp tests that the server and client can be started up correctly.
-func TestStartUp(t *testing.T) {
- runServerClient(t, func(c *lisafs.Client) {
- if c.IsSupported(lisafs.Error) {
- t.Errorf("sending error messages should not be supported")
- }
- })
-}
-
-func TestUnsupportedMessage(t *testing.T) {
- unsupportedM := lisafs.MID(len(handlers))
- runServerClient(t, func(c *lisafs.Client) {
- if err := c.SndRcvMessage(unsupportedM, 0, lisafs.NoopMarshal, lisafs.NoopUnmarshal, nil); err != unix.EOPNOTSUPP {
- t.Errorf("expected EOPNOTSUPP but got err: %v", err)
- }
- })
-}
-
-func dynamicMsgHandler(c *lisafs.Connection, comm lisafs.Communicator, payloadLen uint32) (uint32, error) {
- var req lisafs.MsgDynamic
- req.UnmarshalBytes(comm.PayloadBuf(payloadLen))
-
- // Just echo back the message.
- respPayloadLen := uint32(req.SizeBytes())
- req.MarshalBytes(comm.PayloadBuf(respPayloadLen))
- return respPayloadLen, nil
-}
-
-// TestStress stress tests sending many messages from various goroutines.
-func TestStress(t *testing.T) {
- runServerClient(t, func(c *lisafs.Client) {
- concurrency := 8
- numMsgPerGoroutine := 5000
- var clientWg sync.WaitGroup
- for i := 0; i < concurrency; i++ {
- clientWg.Add(1)
- go func() {
- defer clientWg.Done()
-
- for j := 0; j < numMsgPerGoroutine; j++ {
- // Create a massive random message.
- var req lisafs.MsgDynamic
- req.Randomize(100)
-
- var resp lisafs.MsgDynamic
- if err := c.SndRcvMessage(dynamicMsgID, uint32(req.SizeBytes()), req.MarshalBytes, resp.UnmarshalBytes, nil); err != nil {
- t.Errorf("SndRcvMessage: received unexpected error %v", err)
- return
- }
- if !reflect.DeepEqual(&req, &resp) {
- t.Errorf("response should be the same as request: request = %+v, response = %+v", req, resp)
- }
- }
- }()
- }
-
- clientWg.Wait()
- })
-}
-
-func versionHandler(c *lisafs.Connection, comm lisafs.Communicator, payloadLen uint32) (uint32, error) {
- // To be fair, usually handlers will create their own objects and return a
- // pointer to those. Might be tempting to reuse above variables, but don't.
- var rv lisafs.P9Version
- rv.UnmarshalBytes(comm.PayloadBuf(payloadLen))
-
- // Create a new response.
- sv := lisafs.P9Version{
- MSize: rv.MSize,
- Version: "9P2000.L.Google.11",
- }
- respPayloadLen := uint32(sv.SizeBytes())
- sv.MarshalBytes(comm.PayloadBuf(respPayloadLen))
- return respPayloadLen, nil
-}
-
-// BenchmarkSendRecv exists to compete against p9's BenchmarkSendRecvChannel.
-func BenchmarkSendRecv(b *testing.B) {
- b.ReportAllocs()
- sendV := lisafs.P9Version{
- MSize: 1 << 20,
- Version: "9P2000.L.Google.12",
- }
-
- var recvV lisafs.P9Version
- runServerClient(b, func(c *lisafs.Client) {
- for i := 0; i < b.N; i++ {
- if err := c.SndRcvMessage(versionMsgID, uint32(sendV.SizeBytes()), sendV.MarshalBytes, recvV.UnmarshalBytes, nil); err != nil {
- b.Fatalf("unexpected error occurred: %v", err)
- }
- }
- })
-}
diff --git a/pkg/lisafs/control_fd_list.go b/pkg/lisafs/control_fd_list.go
new file mode 100644
index 000000000..684d9c265
--- /dev/null
+++ b/pkg/lisafs/control_fd_list.go
@@ -0,0 +1,221 @@
+package lisafs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type controlFDElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (controlFDElementMapper) linkerFor(elem *ControlFD) *ControlFD { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type controlFDList struct {
+ head *ControlFD
+ tail *ControlFD
+}
+
+// Reset resets list l to the empty state.
+func (l *controlFDList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *controlFDList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *controlFDList) Front() *ControlFD {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *controlFDList) Back() *ControlFD {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *controlFDList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (controlFDElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *controlFDList) PushFront(e *ControlFD) {
+ linker := controlFDElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ controlFDElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *controlFDList) PushBack(e *ControlFD) {
+ linker := controlFDElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ controlFDElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *controlFDList) PushBackList(m *controlFDList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ controlFDElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ controlFDElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *controlFDList) InsertAfter(b, e *ControlFD) {
+ bLinker := controlFDElementMapper{}.linkerFor(b)
+ eLinker := controlFDElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ controlFDElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *controlFDList) InsertBefore(a, e *ControlFD) {
+ aLinker := controlFDElementMapper{}.linkerFor(a)
+ eLinker := controlFDElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ controlFDElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *controlFDList) Remove(e *ControlFD) {
+ linker := controlFDElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ controlFDElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ controlFDElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type controlFDEntry struct {
+ next *ControlFD
+ prev *ControlFD
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *controlFDEntry) Next() *ControlFD {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *controlFDEntry) Prev() *ControlFD {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *controlFDEntry) SetNext(elem *ControlFD) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *controlFDEntry) SetPrev(elem *ControlFD) {
+ e.prev = elem
+}
diff --git a/pkg/lisafs/control_fd_refs.go b/pkg/lisafs/control_fd_refs.go
new file mode 100644
index 000000000..cc24833f2
--- /dev/null
+++ b/pkg/lisafs/control_fd_refs.go
@@ -0,0 +1,140 @@
+package lisafs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const controlFDenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var controlFDobj *ControlFD
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type controlFDRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *controlFDRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *controlFDRefs) RefType() string {
+ return fmt.Sprintf("%T", controlFDobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *controlFDRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *controlFDRefs) LogRefs() bool {
+ return controlFDenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *controlFDRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *controlFDRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if controlFDenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *controlFDRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if controlFDenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *controlFDRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if controlFDenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *controlFDRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
diff --git a/pkg/lisafs/lisafs_abi_autogen_unsafe.go b/pkg/lisafs/lisafs_abi_autogen_unsafe.go
new file mode 100644
index 000000000..828c75a4e
--- /dev/null
+++ b/pkg/lisafs/lisafs_abi_autogen_unsafe.go
@@ -0,0 +1,4214 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+package lisafs
+
+import (
+ "gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*ChannelResp)(nil)
+var _ marshal.Marshallable = (*ConnectReq)(nil)
+var _ marshal.Marshallable = (*ErrorResp)(nil)
+var _ marshal.Marshallable = (*FAllocateReq)(nil)
+var _ marshal.Marshallable = (*FDID)(nil)
+var _ marshal.Marshallable = (*FListXattrReq)(nil)
+var _ marshal.Marshallable = (*FStatFSReq)(nil)
+var _ marshal.Marshallable = (*FlushReq)(nil)
+var _ marshal.Marshallable = (*GID)(nil)
+var _ marshal.Marshallable = (*Getdents64Req)(nil)
+var _ marshal.Marshallable = (*Inode)(nil)
+var _ marshal.Marshallable = (*LinkAtResp)(nil)
+var _ marshal.Marshallable = (*MID)(nil)
+var _ marshal.Marshallable = (*MkdirAtResp)(nil)
+var _ marshal.Marshallable = (*MknodAtResp)(nil)
+var _ marshal.Marshallable = (*MsgDynamic)(nil)
+var _ marshal.Marshallable = (*MsgSimple)(nil)
+var _ marshal.Marshallable = (*OpenAtReq)(nil)
+var _ marshal.Marshallable = (*OpenAtResp)(nil)
+var _ marshal.Marshallable = (*OpenCreateAtResp)(nil)
+var _ marshal.Marshallable = (*P9Version)(nil)
+var _ marshal.Marshallable = (*PReadReq)(nil)
+var _ marshal.Marshallable = (*PWriteResp)(nil)
+var _ marshal.Marshallable = (*ReadLinkAtReq)(nil)
+var _ marshal.Marshallable = (*SetStatReq)(nil)
+var _ marshal.Marshallable = (*SetStatResp)(nil)
+var _ marshal.Marshallable = (*StatFS)(nil)
+var _ marshal.Marshallable = (*StatReq)(nil)
+var _ marshal.Marshallable = (*SymlinkAtResp)(nil)
+var _ marshal.Marshallable = (*UID)(nil)
+var _ marshal.Marshallable = (*channelHeader)(nil)
+var _ marshal.Marshallable = (*createCommon)(nil)
+var _ marshal.Marshallable = (*linux.FileMode)(nil)
+var _ marshal.Marshallable = (*linux.Statx)(nil)
+var _ marshal.Marshallable = (*linux.Timespec)(nil)
+var _ marshal.Marshallable = (*sockHeader)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (c *channelHeader) SizeBytes() int {
+ return 2 +
+ (*MID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *channelHeader) MarshalBytes(dst []byte) {
+ c.message.MarshalBytes(dst[:c.message.SizeBytes()])
+ dst = dst[c.message.SizeBytes():]
+ dst[0] = byte(c.numFDs)
+ dst = dst[1:]
+ // Padding: dst[:sizeof(uint8)] ~= uint8(0)
+ dst = dst[1:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *channelHeader) UnmarshalBytes(src []byte) {
+ c.message.UnmarshalBytes(src[:c.message.SizeBytes()])
+ src = src[c.message.SizeBytes():]
+ c.numFDs = uint8(src[0])
+ src = src[1:]
+ // Padding: var _ uint8 ~= src[:sizeof(uint8)]
+ src = src[1:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *channelHeader) Packed() bool {
+ return c.message.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (c *channelHeader) MarshalUnsafe(dst []byte) {
+ if c.message.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(c), uintptr(c.SizeBytes()))
+ } else {
+ // Type channelHeader doesn't have a packed layout in memory, fallback to MarshalBytes.
+ c.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *channelHeader) UnmarshalUnsafe(src []byte) {
+ if c.message.Packed() {
+ gohacks.Memmove(unsafe.Pointer(c), unsafe.Pointer(&src[0]), uintptr(c.SizeBytes()))
+ } else {
+ // Type channelHeader doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ c.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (c *channelHeader) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !c.message.Packed() {
+ // Type channelHeader doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(c.SizeBytes()) // escapes: okay.
+ c.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *channelHeader) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *channelHeader) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !c.message.Packed() {
+ // Type channelHeader doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(c.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ c.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *channelHeader) WriteTo(writer io.Writer) (int64, error) {
+ if !c.message.Packed() {
+ // Type channelHeader doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, c.SizeBytes())
+ c.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (f *FDID) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FDID) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*f))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FDID) UnmarshalBytes(src []byte) {
+ *f = FDID(uint32(hostarch.ByteOrder.Uint32(src[:4])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FDID) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FDID) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FDID) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FDID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FDID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FDID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FDID) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyFDIDSliceIn copies in a slice of FDID objects from the task's memory.
+//go:nosplit
+func CopyFDIDSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []FDID) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*FDID)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyFDIDSliceOut copies a slice of FDID objects to the task's memory.
+//go:nosplit
+func CopyFDIDSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []FDID) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*FDID)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeFDIDSlice is like FDID.MarshalUnsafe, but for a []FDID.
+func MarshalUnsafeFDIDSlice(src []FDID, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*FDID)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size*count, nil
+}
+
+// UnmarshalUnsafeFDIDSlice is like FDID.UnmarshalUnsafe, but for a []FDID.
+func UnmarshalUnsafeFDIDSlice(dst []FDID, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*FDID)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size*count, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (c *ChannelResp) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *ChannelResp) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(c.dataOffset))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(c.dataLength))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *ChannelResp) UnmarshalBytes(src []byte) {
+ c.dataOffset = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ c.dataLength = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *ChannelResp) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (c *ChannelResp) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(c), uintptr(c.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *ChannelResp) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(c), unsafe.Pointer(&src[0]), uintptr(c.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (c *ChannelResp) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *ChannelResp) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *ChannelResp) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *ChannelResp) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (c *ConnectReq) SizeBytes() int {
+ return 4 +
+ (*FDID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *ConnectReq) MarshalBytes(dst []byte) {
+ c.FD.MarshalBytes(dst[:c.FD.SizeBytes()])
+ dst = dst[c.FD.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.SockType))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *ConnectReq) UnmarshalBytes(src []byte) {
+ c.FD.UnmarshalBytes(src[:c.FD.SizeBytes()])
+ src = src[c.FD.SizeBytes():]
+ c.SockType = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *ConnectReq) Packed() bool {
+ return c.FD.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (c *ConnectReq) MarshalUnsafe(dst []byte) {
+ if c.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(c), uintptr(c.SizeBytes()))
+ } else {
+ // Type ConnectReq doesn't have a packed layout in memory, fallback to MarshalBytes.
+ c.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *ConnectReq) UnmarshalUnsafe(src []byte) {
+ if c.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(c), unsafe.Pointer(&src[0]), uintptr(c.SizeBytes()))
+ } else {
+ // Type ConnectReq doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ c.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (c *ConnectReq) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !c.FD.Packed() {
+ // Type ConnectReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(c.SizeBytes()) // escapes: okay.
+ c.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *ConnectReq) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *ConnectReq) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !c.FD.Packed() {
+ // Type ConnectReq doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(c.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ c.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *ConnectReq) WriteTo(writer io.Writer) (int64, error) {
+ if !c.FD.Packed() {
+ // Type ConnectReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, c.SizeBytes())
+ c.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (e *ErrorResp) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (e *ErrorResp) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.errno))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (e *ErrorResp) UnmarshalBytes(src []byte) {
+ e.errno = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (e *ErrorResp) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (e *ErrorResp) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(e), uintptr(e.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (e *ErrorResp) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(e), unsafe.Pointer(&src[0]), uintptr(e.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (e *ErrorResp) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (e *ErrorResp) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return e.CopyOutN(cc, addr, e.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (e *ErrorResp) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (e *ErrorResp) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
+ hdr.Len = e.SizeBytes()
+ hdr.Cap = e.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that e
+ // must live until the use above.
+ runtime.KeepAlive(e) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FAllocateReq) SizeBytes() int {
+ return 28 +
+ (*FDID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FAllocateReq) MarshalBytes(dst []byte) {
+ f.FD.MarshalBytes(dst[:f.FD.SizeBytes()])
+ dst = dst[f.FD.SizeBytes():]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Mode))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Length))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FAllocateReq) UnmarshalBytes(src []byte) {
+ f.FD.UnmarshalBytes(src[:f.FD.SizeBytes()])
+ src = src[f.FD.SizeBytes():]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ f.Mode = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Offset = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Length = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FAllocateReq) Packed() bool {
+ return f.FD.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FAllocateReq) MarshalUnsafe(dst []byte) {
+ if f.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+ } else {
+ // Type FAllocateReq doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FAllocateReq) UnmarshalUnsafe(src []byte) {
+ if f.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+ } else {
+ // Type FAllocateReq doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
+}
+
+// NOTE(review): this file is generated (lisafs_abi_autogen_unsafe.go); fix
+// issues in the go_marshal generator rather than hand-editing the output.
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FAllocateReq) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !f.FD.Packed() {
+ // Type FAllocateReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Fast path: alias f's own memory as a []byte (zero-copy).
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FAllocateReq) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FAllocateReq) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !f.FD.Packed() {
+ // Type FAllocateReq doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FAllocateReq) WriteTo(writer io.Writer) (int64, error) {
+ if !f.FD.Packed() {
+ // Type FAllocateReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FListXattrReq) SizeBytes() int {
+ return 12 +
+ (*FDID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FListXattrReq) MarshalBytes(dst []byte) {
+ f.FD.MarshalBytes(dst[:f.FD.SizeBytes()])
+ dst = dst[f.FD.SizeBytes():]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FListXattrReq) UnmarshalBytes(src []byte) {
+ f.FD.UnmarshalBytes(src[:f.FD.SizeBytes()])
+ src = src[f.FD.SizeBytes():]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ f.Size = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FListXattrReq) Packed() bool {
+ return f.FD.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FListXattrReq) MarshalUnsafe(dst []byte) {
+ if f.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+ } else {
+ // Type FListXattrReq doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FListXattrReq) UnmarshalUnsafe(src []byte) {
+ if f.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+ } else {
+ // Type FListXattrReq doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FListXattrReq) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !f.FD.Packed() {
+ // Type FListXattrReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Fast path: alias f's own memory as a []byte (zero-copy).
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FListXattrReq) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FListXattrReq) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !f.FD.Packed() {
+ // Type FListXattrReq doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FListXattrReq) WriteTo(writer io.Writer) (int64, error) {
+ if !f.FD.Packed() {
+ // Type FListXattrReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FStatFSReq) SizeBytes() int {
+ return 0 +
+ (*FDID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FStatFSReq) MarshalBytes(dst []byte) {
+ f.FD.MarshalBytes(dst[:f.FD.SizeBytes()])
+ dst = dst[f.FD.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FStatFSReq) UnmarshalBytes(src []byte) {
+ f.FD.UnmarshalBytes(src[:f.FD.SizeBytes()])
+ src = src[f.FD.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FStatFSReq) Packed() bool {
+ return f.FD.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FStatFSReq) MarshalUnsafe(dst []byte) {
+ if f.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+ } else {
+ // Type FStatFSReq doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FStatFSReq) UnmarshalUnsafe(src []byte) {
+ if f.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+ } else {
+ // Type FStatFSReq doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FStatFSReq) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !f.FD.Packed() {
+ // Type FStatFSReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Fast path: alias f's own memory as a []byte (zero-copy).
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FStatFSReq) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FStatFSReq) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !f.FD.Packed() {
+ // Type FStatFSReq doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FStatFSReq) WriteTo(writer io.Writer) (int64, error) {
+ if !f.FD.Packed() {
+ // Type FStatFSReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FlushReq) SizeBytes() int {
+ return 0 +
+ (*FDID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FlushReq) MarshalBytes(dst []byte) {
+ f.FD.MarshalBytes(dst[:f.FD.SizeBytes()])
+ dst = dst[f.FD.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FlushReq) UnmarshalBytes(src []byte) {
+ f.FD.UnmarshalBytes(src[:f.FD.SizeBytes()])
+ src = src[f.FD.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FlushReq) Packed() bool {
+ return f.FD.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FlushReq) MarshalUnsafe(dst []byte) {
+ if f.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(f), uintptr(f.SizeBytes()))
+ } else {
+ // Type FlushReq doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FlushReq) UnmarshalUnsafe(src []byte) {
+ if f.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(f), unsafe.Pointer(&src[0]), uintptr(f.SizeBytes()))
+ } else {
+ // Type FlushReq doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FlushReq) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !f.FD.Packed() {
+ // Type FlushReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Fast path: alias f's own memory as a []byte (zero-copy).
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FlushReq) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FlushReq) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !f.FD.Packed() {
+ // Type FlushReq doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FlushReq) WriteTo(writer io.Writer) (int64, error) {
+ if !f.FD.Packed() {
+ // Type FlushReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (gid *GID) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (gid *GID) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*gid))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (gid *GID) UnmarshalBytes(src []byte) {
+ *gid = GID(uint32(hostarch.ByteOrder.Uint32(src[:4])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (gid *GID) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (gid *GID) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(gid), uintptr(gid.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (gid *GID) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(gid), unsafe.Pointer(&src[0]), uintptr(gid.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (gid *GID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // GID is a packed scalar newtype, so there is no MarshalBytes fallback
+ // branch here: the unsafe zero-copy path is always taken.
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(gid)))
+ hdr.Len = gid.SizeBytes()
+ hdr.Cap = gid.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that gid
+ // must live until the use above.
+ runtime.KeepAlive(gid) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (gid *GID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return gid.CopyOutN(cc, addr, gid.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (gid *GID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(gid)))
+ hdr.Len = gid.SizeBytes()
+ hdr.Cap = gid.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that gid
+ // must live until the use above.
+ runtime.KeepAlive(gid) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (gid *GID) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(gid)))
+ hdr.Len = gid.SizeBytes()
+ hdr.Cap = gid.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that gid
+ // must live until the use above.
+ runtime.KeepAlive(gid) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (g *Getdents64Req) SizeBytes() int {
+ return 4 +
+ (*FDID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (g *Getdents64Req) MarshalBytes(dst []byte) {
+ g.DirFD.MarshalBytes(dst[:g.DirFD.SizeBytes()])
+ dst = dst[g.DirFD.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(g.Count))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (g *Getdents64Req) UnmarshalBytes(src []byte) {
+ g.DirFD.UnmarshalBytes(src[:g.DirFD.SizeBytes()])
+ src = src[g.DirFD.SizeBytes():]
+ g.Count = int32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (g *Getdents64Req) Packed() bool {
+ return g.DirFD.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (g *Getdents64Req) MarshalUnsafe(dst []byte) {
+ if g.DirFD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(g), uintptr(g.SizeBytes()))
+ } else {
+ // Type Getdents64Req doesn't have a packed layout in memory, fallback to MarshalBytes.
+ g.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (g *Getdents64Req) UnmarshalUnsafe(src []byte) {
+ if g.DirFD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(g), unsafe.Pointer(&src[0]), uintptr(g.SizeBytes()))
+ } else {
+ // Type Getdents64Req doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ g.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (g *Getdents64Req) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !g.DirFD.Packed() {
+ // Type Getdents64Req doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(g.SizeBytes()) // escapes: okay.
+ g.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Fast path: alias g's own memory as a []byte (zero-copy).
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(g)))
+ hdr.Len = g.SizeBytes()
+ hdr.Cap = g.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that g
+ // must live until the use above.
+ runtime.KeepAlive(g) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (g *Getdents64Req) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return g.CopyOutN(cc, addr, g.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (g *Getdents64Req) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !g.DirFD.Packed() {
+ // Type Getdents64Req doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(g.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ g.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(g)))
+ hdr.Len = g.SizeBytes()
+ hdr.Cap = g.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that g
+ // must live until the use above.
+ runtime.KeepAlive(g) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (g *Getdents64Req) WriteTo(writer io.Writer) (int64, error) {
+ if !g.DirFD.Packed() {
+ // Type Getdents64Req doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, g.SizeBytes())
+ g.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(g)))
+ hdr.Len = g.SizeBytes()
+ hdr.Cap = g.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that g
+ // must live until the use above.
+ runtime.KeepAlive(g) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *Inode) SizeBytes() int {
+ return 4 +
+ (*FDID)(nil).SizeBytes() +
+ (*linux.Statx)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *Inode) MarshalBytes(dst []byte) {
+ i.ControlFD.MarshalBytes(dst[:i.ControlFD.SizeBytes()])
+ dst = dst[i.ControlFD.SizeBytes():]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ i.Stat.MarshalBytes(dst[:i.Stat.SizeBytes()])
+ dst = dst[i.Stat.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *Inode) UnmarshalBytes(src []byte) {
+ i.ControlFD.UnmarshalBytes(src[:i.ControlFD.SizeBytes()])
+ src = src[i.ControlFD.SizeBytes():]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ i.Stat.UnmarshalBytes(src[:i.Stat.SizeBytes()])
+ src = src[i.Stat.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *Inode) Packed() bool {
+ // Inode is packed only if every marshallable field is itself packed.
+ return i.ControlFD.Packed() && i.Stat.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *Inode) MarshalUnsafe(dst []byte) {
+ if i.ControlFD.Packed() && i.Stat.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+ } else {
+ // Type Inode doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *Inode) UnmarshalUnsafe(src []byte) {
+ if i.ControlFD.Packed() && i.Stat.Packed() {
+ gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+ } else {
+ // Type Inode doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//
+// BUGFIX(review): the generated guard was `!i.ControlFD.Packed() && i.Stat.Packed()`,
+// which takes the unsafe zero-copy path whenever Stat is unpacked, copying out
+// raw (possibly padded/reordered) struct memory. The fallback must trigger
+// whenever the type is not fully packed, i.e. `!(A && B)` == `!A || !B`,
+// matching Packed() and MarshalUnsafe above. Fix the go_marshal generator too.
+//go:nosplit
+func (i *Inode) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !i.ControlFD.Packed() || !i.Stat.Packed() {
+ // Type Inode doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+// It delegates to CopyOutN with the full serialized size as the limit.
+//go:nosplit
+func (i *Inode) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//
+// BUGFIX(review): the generated guard was `!i.ControlFD.Packed() && i.Stat.Packed()`,
+// which takes the unsafe memmove path whenever Stat is unpacked. The fallback
+// must trigger whenever the type is not fully packed (`!A || !B`), matching
+// Packed() and UnmarshalUnsafe. Fix the go_marshal generator too.
+//go:nosplit
+func (i *Inode) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !i.ControlFD.Packed() || !i.Stat.Packed() {
+ // Type Inode doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+//
+// BUGFIX(review): the generated guard was `!i.ControlFD.Packed() && i.Stat.Packed()`,
+// which writes raw struct memory whenever Stat is unpacked. The MarshalBytes
+// fallback must trigger whenever the type is not fully packed (`!A || !B`),
+// matching Packed() and MarshalUnsafe. Fix the go_marshal generator too.
+func (i *Inode) WriteTo(writer io.Writer) (int64, error) {
+ if !i.ControlFD.Packed() || !i.Stat.Packed() {
+ // Type Inode doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyInodeSliceIn copies in a slice of Inode objects from the task's memory.
+// On the non-packed path it unmarshals as many full (and one trailing partial)
+// objects as the short read permits; on the packed path it copies directly
+// into dst's backing array.
+func CopyInodeSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []Inode) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Inode)(nil).SizeBytes()
+
+ if !dst[0].Packed() {
+ // Type Inode doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(size * count)
+ length, err := cc.CopyInBytes(addr, buf)
+
+ // Unmarshal as much as possible, even on error. First handle full objects.
+ // (gofmt: `length / size`, previously `length/size`.)
+ limit := length / size
+ for idx := 0; idx < limit; idx++ {
+ dst[idx].UnmarshalBytes(buf[size*idx:size*(idx+1)])
+ }
+
+ // Handle any final partial object. buf is guaranteed to be long enough for the
+ // final element, but may not contain valid data for the entire range. This may
+ // result in unmarshalling zero values for some parts of the object.
+ if length%size != 0 {
+ idx := limit
+ dst[idx].UnmarshalBytes(buf[size*idx:size*(idx+1)])
+ }
+
+ return length, err
+ }
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyInodeSliceOut copies a slice of Inode objects to the task's memory.
+// The non-packed path marshals each element into a scratch buffer; the packed
+// path aliases src's backing array directly (zero-copy).
+func CopyInodeSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []Inode) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Inode)(nil).SizeBytes()
+
+ if !src[0].Packed() {
+ // Type Inode doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(size * count)
+ for idx := 0; idx < count; idx++ {
+ src[idx].MarshalBytes(buf[size*idx:size*(idx+1)])
+ }
+ return cc.CopyOutBytes(addr, buf)
+ }
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeInodeSlice is like Inode.MarshalUnsafe, but for a []Inode.
+// Returns the number of bytes written (always size*count on success).
+func MarshalUnsafeInodeSlice(src []Inode, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Inode)(nil).SizeBytes()
+
+ if !src[0].Packed() {
+ // Type Inode doesn't have a packed layout in memory, fall back to MarshalBytes.
+ // (Cleanup: dropped stray parens around `size`, matching the sibling helpers.)
+ for idx := 0; idx < count; idx++ {
+ src[idx].MarshalBytes(dst[size*idx:size*(idx+1)])
+ }
+ return size * count, nil
+ }
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size * count, nil
+}
+
+// UnmarshalUnsafeInodeSlice is like Inode.UnmarshalUnsafe, but for a []Inode.
+// Returns the number of bytes read (always size*count on success).
+func UnmarshalUnsafeInodeSlice(dst []Inode, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*Inode)(nil).SizeBytes()
+
+ if !dst[0].Packed() {
+ // Type Inode doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ for idx := 0; idx < count; idx++ {
+ dst[idx].UnmarshalBytes(src[size*idx:size*(idx+1)])
+ }
+ return size * count, nil
+ }
+
+ // (Cleanup: dropped redundant parens and normalized the return expression
+ // to `size * count`, matching MarshalUnsafeInodeSlice.)
+ src = src[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size * count, nil
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (l *LinkAtResp) SizeBytes() int {
+ return 0 +
+ (*Inode)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (l *LinkAtResp) MarshalBytes(dst []byte) {
+ l.Link.MarshalBytes(dst[:l.Link.SizeBytes()])
+ dst = dst[l.Link.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (l *LinkAtResp) UnmarshalBytes(src []byte) {
+ l.Link.UnmarshalBytes(src[:l.Link.SizeBytes()])
+ src = src[l.Link.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (l *LinkAtResp) Packed() bool {
+ return l.Link.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (l *LinkAtResp) MarshalUnsafe(dst []byte) {
+ if l.Link.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(l), uintptr(l.SizeBytes()))
+ } else {
+ // Type LinkAtResp doesn't have a packed layout in memory, fallback to MarshalBytes.
+ l.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (l *LinkAtResp) UnmarshalUnsafe(src []byte) {
+ if l.Link.Packed() {
+ gohacks.Memmove(unsafe.Pointer(l), unsafe.Pointer(&src[0]), uintptr(l.SizeBytes()))
+ } else {
+ // Type LinkAtResp doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ l.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (l *LinkAtResp) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !l.Link.Packed() {
+ // Type LinkAtResp doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(l.SizeBytes()) // escapes: okay.
+ l.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Fast path: alias l's own memory as a []byte (zero-copy).
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
+ hdr.Len = l.SizeBytes()
+ hdr.Cap = l.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that l
+ // must live until the use above.
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (l *LinkAtResp) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return l.CopyOutN(cc, addr, l.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (l *LinkAtResp) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !l.Link.Packed() {
+ // Type LinkAtResp doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(l.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ l.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
+ hdr.Len = l.SizeBytes()
+ hdr.Cap = l.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that l
+ // must live until the use above.
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (l *LinkAtResp) WriteTo(writer io.Writer) (int64, error) {
+ if !l.Link.Packed() {
+ // Type LinkAtResp doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, l.SizeBytes())
+ l.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
+ hdr.Len = l.SizeBytes()
+ hdr.Cap = l.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that l
+ // must live until the use above.
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// MID marshalling: generated implementations for the scalar newtype MID.
+// NOTE(review): appears machine-generated; prefer regenerating over hand edits.
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (m *MID) SizeBytes() int {
+ // 2 bytes: MID serializes as a uint16 (see MarshalBytes below).
+ return 2
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (m *MID) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(*m))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (m *MID) UnmarshalBytes(src []byte) {
+ *m = MID(uint16(hostarch.ByteOrder.Uint16(src[:2])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (m *MID) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (m *MID) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(m), uintptr(m.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (m *MID) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(m), unsafe.Pointer(&src[0]), uintptr(m.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (m *MID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (m *MID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return m.CopyOutN(cc, addr, m.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (m *MID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (m *MID) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// MID slice helpers: bulk copy-in/out and unsafe (un)marshalling for []MID.
+// NOTE(review): appears machine-generated; prefer regenerating over hand edits.
+// CopyMIDSliceIn copies in a slice of MID objects from the task's memory.
+//go:nosplit
+func CopyMIDSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []MID) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*MID)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyMIDSliceOut copies a slice of MID objects to the task's memory.
+//go:nosplit
+func CopyMIDSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []MID) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*MID)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeMIDSlice is like MID.MarshalUnsafe, but for a []MID.
+func MarshalUnsafeMIDSlice(src []MID, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*MID)(nil).SizeBytes()
+
+ // MID is always packed (scalar newtype), so a single memmove suffices.
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size*count, nil
+}
+
+// UnmarshalUnsafeMIDSlice is like MID.UnmarshalUnsafe, but for a []MID.
+func UnmarshalUnsafeMIDSlice(dst []MID, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*MID)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return size*count, nil
+}
+
+// MkdirAtResp marshalling: generated implementations of marshal.Marshallable.
+// NOTE(review): appears machine-generated; prefer regenerating over hand edits.
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (m *MkdirAtResp) SizeBytes() int {
+ return 0 +
+ (*Inode)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (m *MkdirAtResp) MarshalBytes(dst []byte) {
+ m.ChildDir.MarshalBytes(dst[:m.ChildDir.SizeBytes()])
+ dst = dst[m.ChildDir.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (m *MkdirAtResp) UnmarshalBytes(src []byte) {
+ m.ChildDir.UnmarshalBytes(src[:m.ChildDir.SizeBytes()])
+ src = src[m.ChildDir.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (m *MkdirAtResp) Packed() bool {
+ return m.ChildDir.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (m *MkdirAtResp) MarshalUnsafe(dst []byte) {
+ if m.ChildDir.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(m), uintptr(m.SizeBytes()))
+ } else {
+ // Type MkdirAtResp doesn't have a packed layout in memory, fallback to MarshalBytes.
+ m.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (m *MkdirAtResp) UnmarshalUnsafe(src []byte) {
+ if m.ChildDir.Packed() {
+ gohacks.Memmove(unsafe.Pointer(m), unsafe.Pointer(&src[0]), uintptr(m.SizeBytes()))
+ } else {
+ // Type MkdirAtResp doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ m.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (m *MkdirAtResp) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !m.ChildDir.Packed() {
+ // Type MkdirAtResp doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ m.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (m *MkdirAtResp) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return m.CopyOutN(cc, addr, m.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (m *MkdirAtResp) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !m.ChildDir.Packed() {
+ // Type MkdirAtResp doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ m.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (m *MkdirAtResp) WriteTo(writer io.Writer) (int64, error) {
+ if !m.ChildDir.Packed() {
+ // Type MkdirAtResp doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, m.SizeBytes())
+ m.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// MknodAtResp marshalling: generated implementations of marshal.Marshallable.
+// NOTE(review): appears machine-generated; prefer regenerating over hand edits.
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (m *MknodAtResp) SizeBytes() int {
+ return 0 +
+ (*Inode)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (m *MknodAtResp) MarshalBytes(dst []byte) {
+ m.Child.MarshalBytes(dst[:m.Child.SizeBytes()])
+ dst = dst[m.Child.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (m *MknodAtResp) UnmarshalBytes(src []byte) {
+ m.Child.UnmarshalBytes(src[:m.Child.SizeBytes()])
+ src = src[m.Child.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (m *MknodAtResp) Packed() bool {
+ return m.Child.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (m *MknodAtResp) MarshalUnsafe(dst []byte) {
+ if m.Child.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(m), uintptr(m.SizeBytes()))
+ } else {
+ // Type MknodAtResp doesn't have a packed layout in memory, fallback to MarshalBytes.
+ m.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (m *MknodAtResp) UnmarshalUnsafe(src []byte) {
+ if m.Child.Packed() {
+ gohacks.Memmove(unsafe.Pointer(m), unsafe.Pointer(&src[0]), uintptr(m.SizeBytes()))
+ } else {
+ // Type MknodAtResp doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ m.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (m *MknodAtResp) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !m.Child.Packed() {
+ // Type MknodAtResp doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ m.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (m *MknodAtResp) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return m.CopyOutN(cc, addr, m.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (m *MknodAtResp) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !m.Child.Packed() {
+ // Type MknodAtResp doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ m.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (m *MknodAtResp) WriteTo(writer io.Writer) (int64, error) {
+ if !m.Child.Packed() {
+ // Type MknodAtResp doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, m.SizeBytes())
+ m.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// OpenAtReq marshalling: generated implementations of marshal.Marshallable.
+// NOTE(review): appears machine-generated; prefer regenerating over hand edits.
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (o *OpenAtReq) SizeBytes() int {
+ // 4 bytes for the Flags uint32 plus the embedded FDID.
+ return 4 +
+ (*FDID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (o *OpenAtReq) MarshalBytes(dst []byte) {
+ o.FD.MarshalBytes(dst[:o.FD.SizeBytes()])
+ dst = dst[o.FD.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(o.Flags))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (o *OpenAtReq) UnmarshalBytes(src []byte) {
+ o.FD.UnmarshalBytes(src[:o.FD.SizeBytes()])
+ src = src[o.FD.SizeBytes():]
+ o.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (o *OpenAtReq) Packed() bool {
+ return o.FD.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (o *OpenAtReq) MarshalUnsafe(dst []byte) {
+ if o.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(o), uintptr(o.SizeBytes()))
+ } else {
+ // Type OpenAtReq doesn't have a packed layout in memory, fallback to MarshalBytes.
+ o.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (o *OpenAtReq) UnmarshalUnsafe(src []byte) {
+ if o.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(o), unsafe.Pointer(&src[0]), uintptr(o.SizeBytes()))
+ } else {
+ // Type OpenAtReq doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ o.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (o *OpenAtReq) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !o.FD.Packed() {
+ // Type OpenAtReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
+ o.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(o)))
+ hdr.Len = o.SizeBytes()
+ hdr.Cap = o.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that o
+ // must live until the use above.
+ runtime.KeepAlive(o) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (o *OpenAtReq) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return o.CopyOutN(cc, addr, o.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (o *OpenAtReq) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !o.FD.Packed() {
+ // Type OpenAtReq doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ o.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(o)))
+ hdr.Len = o.SizeBytes()
+ hdr.Cap = o.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that o
+ // must live until the use above.
+ runtime.KeepAlive(o) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (o *OpenAtReq) WriteTo(writer io.Writer) (int64, error) {
+ if !o.FD.Packed() {
+ // Type OpenAtReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, o.SizeBytes())
+ o.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(o)))
+ hdr.Len = o.SizeBytes()
+ hdr.Cap = o.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that o
+ // must live until the use above.
+ runtime.KeepAlive(o) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// OpenAtResp marshalling: generated implementations of marshal.Marshallable.
+// NOTE(review): appears machine-generated; prefer regenerating over hand edits.
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (o *OpenAtResp) SizeBytes() int {
+ return 0 +
+ (*FDID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (o *OpenAtResp) MarshalBytes(dst []byte) {
+ o.NewFD.MarshalBytes(dst[:o.NewFD.SizeBytes()])
+ dst = dst[o.NewFD.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (o *OpenAtResp) UnmarshalBytes(src []byte) {
+ o.NewFD.UnmarshalBytes(src[:o.NewFD.SizeBytes()])
+ src = src[o.NewFD.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (o *OpenAtResp) Packed() bool {
+ return o.NewFD.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (o *OpenAtResp) MarshalUnsafe(dst []byte) {
+ if o.NewFD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(o), uintptr(o.SizeBytes()))
+ } else {
+ // Type OpenAtResp doesn't have a packed layout in memory, fallback to MarshalBytes.
+ o.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (o *OpenAtResp) UnmarshalUnsafe(src []byte) {
+ if o.NewFD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(o), unsafe.Pointer(&src[0]), uintptr(o.SizeBytes()))
+ } else {
+ // Type OpenAtResp doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ o.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (o *OpenAtResp) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !o.NewFD.Packed() {
+ // Type OpenAtResp doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
+ o.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(o)))
+ hdr.Len = o.SizeBytes()
+ hdr.Cap = o.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that o
+ // must live until the use above.
+ runtime.KeepAlive(o) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (o *OpenAtResp) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return o.CopyOutN(cc, addr, o.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (o *OpenAtResp) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !o.NewFD.Packed() {
+ // Type OpenAtResp doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ o.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(o)))
+ hdr.Len = o.SizeBytes()
+ hdr.Cap = o.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that o
+ // must live until the use above.
+ runtime.KeepAlive(o) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (o *OpenAtResp) WriteTo(writer io.Writer) (int64, error) {
+ if !o.NewFD.Packed() {
+ // Type OpenAtResp doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, o.SizeBytes())
+ o.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(o)))
+ hdr.Len = o.SizeBytes()
+ hdr.Cap = o.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that o
+ // must live until the use above.
+ runtime.KeepAlive(o) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// OpenCreateAtResp marshalling: generated implementations of marshal.Marshallable.
+// NOTE(review): appears machine-generated; prefer regenerating over hand edits.
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (o *OpenCreateAtResp) SizeBytes() int {
+ // 4 bytes of trailing padding (see MarshalBytes) plus the two fields.
+ return 4 +
+ (*Inode)(nil).SizeBytes() +
+ (*FDID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (o *OpenCreateAtResp) MarshalBytes(dst []byte) {
+ o.Child.MarshalBytes(dst[:o.Child.SizeBytes()])
+ dst = dst[o.Child.SizeBytes():]
+ o.NewFD.MarshalBytes(dst[:o.NewFD.SizeBytes()])
+ dst = dst[o.NewFD.SizeBytes():]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (o *OpenCreateAtResp) UnmarshalBytes(src []byte) {
+ o.Child.UnmarshalBytes(src[:o.Child.SizeBytes()])
+ src = src[o.Child.SizeBytes():]
+ o.NewFD.UnmarshalBytes(src[:o.NewFD.SizeBytes()])
+ src = src[o.NewFD.SizeBytes():]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (o *OpenCreateAtResp) Packed() bool {
+ return o.Child.Packed() && o.NewFD.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (o *OpenCreateAtResp) MarshalUnsafe(dst []byte) {
+ if o.Child.Packed() && o.NewFD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(o), uintptr(o.SizeBytes()))
+ } else {
+ // Type OpenCreateAtResp doesn't have a packed layout in memory, fallback to MarshalBytes.
+ o.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (o *OpenCreateAtResp) UnmarshalUnsafe(src []byte) {
+ if o.Child.Packed() && o.NewFD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(o), unsafe.Pointer(&src[0]), uintptr(o.SizeBytes()))
+ } else {
+ // Type OpenCreateAtResp doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ o.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (o *OpenCreateAtResp) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Fall back to MarshalBytes when any field is non-packed. (Fixed: the
+ // previous guard `!o.Child.Packed() && o.NewFD.Packed()` skipped this
+ // fallback whenever NewFD was non-packed, mismatching MarshalUnsafe's
+ // fast-path condition `o.Child.Packed() && o.NewFD.Packed()`.)
+ if !o.Child.Packed() || !o.NewFD.Packed() {
+ // Type OpenCreateAtResp doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
+ o.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(o)))
+ hdr.Len = o.SizeBytes()
+ hdr.Cap = o.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that o
+ // must live until the use above.
+ runtime.KeepAlive(o) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+// It copies the full serialized size; see CopyOutN for the limited variant.
+//go:nosplit
+func (o *OpenCreateAtResp) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return o.CopyOutN(cc, addr, o.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (o *OpenCreateAtResp) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Fall back to UnmarshalBytes when any field is non-packed. (Fixed: the
+ // previous guard `!o.Child.Packed() && o.NewFD.Packed()` skipped this
+ // fallback whenever NewFD was non-packed, mismatching UnmarshalUnsafe's
+ // fast-path condition `o.Child.Packed() && o.NewFD.Packed()`.)
+ if !o.Child.Packed() || !o.NewFD.Packed() {
+ // Type OpenCreateAtResp doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ o.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(o)))
+ hdr.Len = o.SizeBytes()
+ hdr.Cap = o.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that o
+ // must live until the use above.
+ runtime.KeepAlive(o) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (o *OpenCreateAtResp) WriteTo(writer io.Writer) (int64, error) {
+ // Fall back to MarshalBytes when any field is non-packed. (Fixed: the
+ // previous guard `!o.Child.Packed() && o.NewFD.Packed()` skipped this
+ // fallback whenever NewFD was non-packed, mismatching MarshalUnsafe's
+ // fast-path condition `o.Child.Packed() && o.NewFD.Packed()`.)
+ if !o.Child.Packed() || !o.NewFD.Packed() {
+ // Type OpenCreateAtResp doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, o.SizeBytes())
+ o.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(o)))
+ hdr.Len = o.SizeBytes()
+ hdr.Cap = o.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that o
+ // must live until the use above.
+ runtime.KeepAlive(o) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+// 12 covers the primitive fields (Offset: 8 bytes, Count: 4 bytes); the FD
+// field contributes its own size dynamically.
+func (p *PReadReq) SizeBytes() int {
+ return 12 +
+ (*FDID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+// Fields are serialized in layout order: Offset, FD, Count.
+func (p *PReadReq) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Offset))
+ dst = dst[8:]
+ p.FD.MarshalBytes(dst[:p.FD.SizeBytes()])
+ dst = dst[p.FD.SizeBytes():]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(p.Count))
+ // The final reslice is dead code kept for generated-code uniformity
+ // (file appears auto-generated; see *_autogen_unsafe.go naming).
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+// Inverse of MarshalBytes; reads fields in the same order.
+func (p *PReadReq) UnmarshalBytes(src []byte) {
+ p.Offset = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.FD.UnmarshalBytes(src[:p.FD.SizeBytes()])
+ src = src[p.FD.SizeBytes():]
+ p.Count = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ // Dead reslice kept for generated-code uniformity.
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+// PReadReq is packed iff its only non-primitive field, FD, is packed.
+//go:nosplit
+func (p *PReadReq) Packed() bool {
+ return p.FD.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+// When packed, the struct is serialized with a single memmove of its raw bytes.
+func (p *PReadReq) MarshalUnsafe(dst []byte) {
+ if p.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(p), uintptr(p.SizeBytes()))
+ } else {
+ // Type PReadReq doesn't have a packed layout in memory, fallback to MarshalBytes.
+ p.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+// When packed, the struct is filled with a single memmove of the raw bytes.
+func (p *PReadReq) UnmarshalUnsafe(src []byte) {
+ if p.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(p), unsafe.Pointer(&src[0]), uintptr(p.SizeBytes()))
+ } else {
+ // Type PReadReq doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ p.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+// Copies at most limit bytes of p's serialized form out to addr, aliasing
+// p's memory directly when the layout is packed.
+//go:nosplit
+func (p *PReadReq) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !p.FD.Packed() {
+ // Type PReadReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(p.SizeBytes()) // escapes: okay.
+ p.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+// Delegates to CopyOutN with the full struct size as the limit.
+//go:nosplit
+func (p *PReadReq) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return p.CopyOutN(cc, addr, p.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+// Copies SizeBytes() bytes from addr into p; on a short copy-in p is still
+// (partially) unmarshalled from whatever was read.
+//go:nosplit
+func (p *PReadReq) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !p.FD.Packed() {
+ // Type PReadReq doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(p.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ p.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+// Writes p's serialized form to writer, aliasing p's memory directly when
+// the layout is packed.
+func (p *PReadReq) WriteTo(writer io.Writer) (int64, error) {
+ if !p.FD.Packed() {
+ // Type PReadReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, p.SizeBytes())
+ p.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+// 8 bytes: the single Count field (uint64).
+func (p *PWriteResp) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (p *PWriteResp) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Count))
+ // Dead reslice kept for generated-code uniformity.
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (p *PWriteResp) UnmarshalBytes(src []byte) {
+ p.Count = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ // Dead reslice kept for generated-code uniformity.
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+// Always true: PWriteResp contains only primitive fields with no padding.
+//go:nosplit
+func (p *PWriteResp) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+// Single memmove of the raw struct bytes (layout is always packed).
+func (p *PWriteResp) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(p), uintptr(p.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+// Single memmove of the raw bytes into the struct.
+func (p *PWriteResp) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(p), unsafe.Pointer(&src[0]), uintptr(p.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+// Copies at most limit bytes out to addr, aliasing p's memory directly.
+//go:nosplit
+func (p *PWriteResp) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+// Delegates to CopyOutN with the full struct size as the limit.
+//go:nosplit
+func (p *PWriteResp) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return p.CopyOutN(cc, addr, p.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+// Copies SizeBytes() bytes from addr directly into p's memory.
+//go:nosplit
+func (p *PWriteResp) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+// Writes p's raw bytes to writer without an intermediate buffer.
+func (p *PWriteResp) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+// The struct consists solely of its FD field.
+func (r *ReadLinkAtReq) SizeBytes() int {
+ return 0 +
+ (*FDID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (r *ReadLinkAtReq) MarshalBytes(dst []byte) {
+ r.FD.MarshalBytes(dst[:r.FD.SizeBytes()])
+ // Dead reslice kept for generated-code uniformity.
+ dst = dst[r.FD.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (r *ReadLinkAtReq) UnmarshalBytes(src []byte) {
+ r.FD.UnmarshalBytes(src[:r.FD.SizeBytes()])
+ // Dead reslice kept for generated-code uniformity.
+ src = src[r.FD.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+// ReadLinkAtReq is packed iff its only field, FD, is packed.
+//go:nosplit
+func (r *ReadLinkAtReq) Packed() bool {
+ return r.FD.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+// When packed, the struct is serialized with a single memmove.
+func (r *ReadLinkAtReq) MarshalUnsafe(dst []byte) {
+ if r.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(r), uintptr(r.SizeBytes()))
+ } else {
+ // Type ReadLinkAtReq doesn't have a packed layout in memory, fallback to MarshalBytes.
+ r.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+// When packed, the struct is filled with a single memmove.
+func (r *ReadLinkAtReq) UnmarshalUnsafe(src []byte) {
+ if r.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(r), unsafe.Pointer(&src[0]), uintptr(r.SizeBytes()))
+ } else {
+ // Type ReadLinkAtReq doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ r.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+// Copies at most limit bytes of r's serialized form out to addr.
+//go:nosplit
+func (r *ReadLinkAtReq) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !r.FD.Packed() {
+ // Type ReadLinkAtReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(r.SizeBytes()) // escapes: okay.
+ r.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+// Delegates to CopyOutN with the full struct size as the limit.
+//go:nosplit
+func (r *ReadLinkAtReq) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return r.CopyOutN(cc, addr, r.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+// Copies SizeBytes() bytes from addr into r; on a short copy-in r is still
+// (partially) unmarshalled from whatever was read.
+//go:nosplit
+func (r *ReadLinkAtReq) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !r.FD.Packed() {
+ // Type ReadLinkAtReq doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(r.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ r.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+// Writes r's serialized form to writer, aliasing r's memory when packed.
+func (r *ReadLinkAtReq) WriteTo(writer io.Writer) (int64, error) {
+ if !r.FD.Packed() {
+ // Type ReadLinkAtReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, r.SizeBytes())
+ r.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+ hdr.Len = r.SizeBytes()
+ hdr.Cap = r.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that r
+ // must live until the use above.
+ runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+// 20 covers the primitive fields and padding (4 bytes padding after FD,
+// Mask: 4, Mode: 4, Size: 8); the remaining fields contribute their own sizes.
+func (s *SetStatReq) SizeBytes() int {
+ return 20 +
+ (*FDID)(nil).SizeBytes() +
+ (*UID)(nil).SizeBytes() +
+ (*GID)(nil).SizeBytes() +
+ (*linux.Timespec)(nil).SizeBytes() +
+ (*linux.Timespec)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+// Layout order: FD, 4 bytes padding, Mask, Mode, UID, GID, Size, Atime, Mtime.
+func (s *SetStatReq) MarshalBytes(dst []byte) {
+ s.FD.MarshalBytes(dst[:s.FD.SizeBytes()])
+ dst = dst[s.FD.SizeBytes():]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Mask))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Mode))
+ dst = dst[4:]
+ s.UID.MarshalBytes(dst[:s.UID.SizeBytes()])
+ dst = dst[s.UID.SizeBytes():]
+ s.GID.MarshalBytes(dst[:s.GID.SizeBytes()])
+ dst = dst[s.GID.SizeBytes():]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
+ dst = dst[8:]
+ s.Atime.MarshalBytes(dst[:s.Atime.SizeBytes()])
+ dst = dst[s.Atime.SizeBytes():]
+ s.Mtime.MarshalBytes(dst[:s.Mtime.SizeBytes()])
+ // Dead reslice kept for generated-code uniformity.
+ dst = dst[s.Mtime.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+// Inverse of MarshalBytes; the 4 padding bytes after FD are skipped.
+func (s *SetStatReq) UnmarshalBytes(src []byte) {
+ s.FD.UnmarshalBytes(src[:s.FD.SizeBytes()])
+ src = src[s.FD.SizeBytes():]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ s.Mask = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.UID.UnmarshalBytes(src[:s.UID.SizeBytes()])
+ src = src[s.UID.SizeBytes():]
+ s.GID.UnmarshalBytes(src[:s.GID.SizeBytes()])
+ src = src[s.GID.SizeBytes():]
+ s.Size = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Atime.UnmarshalBytes(src[:s.Atime.SizeBytes()])
+ src = src[s.Atime.SizeBytes():]
+ s.Mtime.UnmarshalBytes(src[:s.Mtime.SizeBytes()])
+ // Dead reslice kept for generated-code uniformity.
+ src = src[s.Mtime.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+// SetStatReq is packed iff every non-primitive field is packed.
+//go:nosplit
+func (s *SetStatReq) Packed() bool {
+ return s.Atime.Packed() && s.FD.Packed() && s.GID.Packed() && s.Mtime.Packed() && s.UID.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+// When all fields are packed, the struct is serialized with a single memmove.
+func (s *SetStatReq) MarshalUnsafe(dst []byte) {
+ if s.Atime.Packed() && s.FD.Packed() && s.GID.Packed() && s.Mtime.Packed() && s.UID.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type SetStatReq doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+// When all fields are packed, the struct is filled with a single memmove.
+func (s *SetStatReq) UnmarshalUnsafe(src []byte) {
+ if s.Atime.Packed() && s.FD.Packed() && s.GID.Packed() && s.Mtime.Packed() && s.UID.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type SetStatReq doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+// Copies at most limit bytes of s's serialized form out to addr, aliasing
+// s's memory directly when the layout is packed.
+//go:nosplit
+func (s *SetStatReq) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.Atime.Packed() || !s.FD.Packed() || !s.GID.Packed() || !s.Mtime.Packed() || !s.UID.Packed() {
+ // Type SetStatReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ // NOTE: this condition was previously `!s.Atime.Packed() && s.FD.Packed() && ...`,
+ // which negated only the first term of Packed()'s conjunction and so took
+ // the unsafe aliasing path below when a field other than Atime was
+ // unpacked. The fallback must run if ANY field is unpacked (i.e. !s.Packed()).
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+// Delegates to CopyOutN with the full struct size as the limit.
+//go:nosplit
+func (s *SetStatReq) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+// Copies SizeBytes() bytes from addr into s; on a short copy-in s is still
+// (partially) unmarshalled from whatever was read.
+//go:nosplit
+func (s *SetStatReq) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.Atime.Packed() || !s.FD.Packed() || !s.GID.Packed() || !s.Mtime.Packed() || !s.UID.Packed() {
+ // Type SetStatReq doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ // NOTE: this condition was previously `!s.Atime.Packed() && s.FD.Packed() && ...`,
+ // which only negated the first term and so wrote through the unsafe
+ // aliasing path below over a non-packed layout whenever a field other
+ // than Atime was unpacked. The fallback must run if ANY field is unpacked.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+// Writes s's serialized form to writer, aliasing s's memory directly when
+// the layout is packed.
+func (s *SetStatReq) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Atime.Packed() || !s.FD.Packed() || !s.GID.Packed() || !s.Mtime.Packed() || !s.UID.Packed() {
+ // Type SetStatReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ // NOTE: this condition was previously `!s.Atime.Packed() && s.FD.Packed() && ...`,
+ // which only negated the first term of Packed()'s conjunction; the
+ // fallback must run if ANY field is unpacked.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+// 8 bytes: FailureMask (uint32) + FailureErrNo (uint32).
+func (s *SetStatResp) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SetStatResp) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.FailureMask))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.FailureErrNo))
+ // Dead reslice kept for generated-code uniformity.
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SetStatResp) UnmarshalBytes(src []byte) {
+ s.FailureMask = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.FailureErrNo = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ // Dead reslice kept for generated-code uniformity.
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+// Always true: SetStatResp contains only primitive fields with no padding.
+//go:nosplit
+func (s *SetStatResp) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+// Single memmove of the raw struct bytes (layout is always packed).
+func (s *SetStatResp) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+// Single memmove of the raw bytes into the struct.
+func (s *SetStatResp) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+// Copies at most limit bytes out to addr, aliasing s's memory directly.
+//go:nosplit
+func (s *SetStatResp) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+// Delegates to CopyOutN with the full struct size as the limit.
+//go:nosplit
+func (s *SetStatResp) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+// Copies SizeBytes() bytes from addr directly into s's memory.
+//go:nosplit
+func (s *SetStatResp) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+// Writes s's raw bytes to writer without an intermediate buffer.
+func (s *SetStatResp) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+// 64 bytes: eight 8-byte fields.
+func (s *StatFS) SizeBytes() int {
+ return 64
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+// Fields are serialized in layout order; BlockSize is signed (int64) but is
+// written through a uint64 reinterpretation.
+func (s *StatFS) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Type))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.BlockSize))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.BlocksFree))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.BlocksAvailable))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Files))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FilesFree))
+ dst = dst[8:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.NameLength))
+ // Dead reslice kept for generated-code uniformity.
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+// Inverse of MarshalBytes; BlockSize is reinterpreted back to int64.
+func (s *StatFS) UnmarshalBytes(src []byte) {
+ s.Type = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.BlockSize = int64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Blocks = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.BlocksFree = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.BlocksAvailable = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Files = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.FilesFree = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.NameLength = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ // Dead reslice kept for generated-code uniformity.
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+// Always true: StatFS contains only 8-byte primitive fields, so there is
+// no padding.
+//go:nosplit
+func (s *StatFS) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+// Single memmove of the raw struct bytes (layout is always packed).
+func (s *StatFS) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+// Single memmove of the raw bytes into the struct.
+func (s *StatFS) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+// Copies at most limit bytes out to addr, aliasing s's memory directly.
+//go:nosplit
+func (s *StatFS) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+// Delegates to CopyOutN with the full struct size as the limit.
+//go:nosplit
+func (s *StatFS) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+// Copies SizeBytes() bytes from addr directly into s's memory.
+//go:nosplit
+func (s *StatFS) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+// Writes s's raw bytes to writer without an intermediate buffer.
+func (s *StatFS) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+// The struct consists solely of its FD field.
+func (s *StatReq) SizeBytes() int {
+ return 0 +
+ (*FDID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *StatReq) MarshalBytes(dst []byte) {
+ s.FD.MarshalBytes(dst[:s.FD.SizeBytes()])
+ // Dead reslice kept for generated-code uniformity.
+ dst = dst[s.FD.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *StatReq) UnmarshalBytes(src []byte) {
+ s.FD.UnmarshalBytes(src[:s.FD.SizeBytes()])
+ // Dead reslice kept for generated-code uniformity.
+ src = src[s.FD.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+// StatReq is packed iff its only field, FD, is packed.
+//go:nosplit
+func (s *StatReq) Packed() bool {
+ return s.FD.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+// When packed, the struct is serialized with a single memmove.
+func (s *StatReq) MarshalUnsafe(dst []byte) {
+ if s.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type StatReq doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+// When packed, the struct is filled with a single memmove.
+func (s *StatReq) UnmarshalUnsafe(src []byte) {
+ if s.FD.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type StatReq doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+// Copies at most limit bytes of s's serialized form out to addr.
+//go:nosplit
+func (s *StatReq) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.FD.Packed() {
+ // Type StatReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+// Delegates to CopyOutN with the full struct size as the limit.
+//go:nosplit
+func (s *StatReq) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+// Copies SizeBytes() bytes from addr into s; on a short copy-in s is still
+// (partially) unmarshalled from whatever was read.
+//go:nosplit
+func (s *StatReq) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.FD.Packed() {
+ // Type StatReq doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+// Writes s's serialized form to writer, aliasing s's memory when packed.
+func (s *StatReq) WriteTo(writer io.Writer) (int64, error) {
+ if !s.FD.Packed() {
+ // Type StatReq doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+// The struct consists solely of its Symlink field (an Inode).
+func (s *SymlinkAtResp) SizeBytes() int {
+ return 0 +
+ (*Inode)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SymlinkAtResp) MarshalBytes(dst []byte) {
+ s.Symlink.MarshalBytes(dst[:s.Symlink.SizeBytes()])
+ // Dead reslice kept for generated-code uniformity.
+ dst = dst[s.Symlink.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SymlinkAtResp) UnmarshalBytes(src []byte) {
+ s.Symlink.UnmarshalBytes(src[:s.Symlink.SizeBytes()])
+ // Dead reslice kept for generated-code uniformity.
+ src = src[s.Symlink.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+// SymlinkAtResp is packed iff its only field, Symlink, is packed.
+//go:nosplit
+func (s *SymlinkAtResp) Packed() bool {
+ return s.Symlink.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+// When packed, the struct is serialized with a single memmove.
+func (s *SymlinkAtResp) MarshalUnsafe(dst []byte) {
+ if s.Symlink.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type SymlinkAtResp doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+// When packed, the struct is filled with a single memmove.
+func (s *SymlinkAtResp) UnmarshalUnsafe(src []byte) {
+ if s.Symlink.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type SymlinkAtResp doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+// Copies at most limit bytes of s's serialized form out to addr.
+//go:nosplit
+func (s *SymlinkAtResp) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.Symlink.Packed() {
+ // Type SymlinkAtResp doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+// Delegates to CopyOutN with the full struct size as the limit.
+//go:nosplit
+func (s *SymlinkAtResp) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SymlinkAtResp) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.Symlink.Packed() {
+ // Type SymlinkAtResp doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SymlinkAtResp) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Symlink.Packed() {
+ // Type SymlinkAtResp doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (uid *UID) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (uid *UID) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(*uid))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (uid *UID) UnmarshalBytes(src []byte) {
+ *uid = UID(uint32(hostarch.ByteOrder.Uint32(src[:4])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (uid *UID) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (uid *UID) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(uid), uintptr(uid.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (uid *UID) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(uid), unsafe.Pointer(&src[0]), uintptr(uid.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (uid *UID) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
+ hdr.Len = uid.SizeBytes()
+ hdr.Cap = uid.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that uid
+ // must live until the use above.
+ runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (uid *UID) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return uid.CopyOutN(cc, addr, uid.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (uid *UID) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
+ hdr.Len = uid.SizeBytes()
+ hdr.Cap = uid.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that uid
+ // must live until the use above.
+ runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (uid *UID) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
+ hdr.Len = uid.SizeBytes()
+ hdr.Cap = uid.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that uid
+ // must live until the use above.
+ runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (c *createCommon) SizeBytes() int {
+ return 2 +
+ (*FDID)(nil).SizeBytes() +
+ (*linux.FileMode)(nil).SizeBytes() +
+ (*UID)(nil).SizeBytes() +
+ (*GID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *createCommon) MarshalBytes(dst []byte) {
+ c.DirFD.MarshalBytes(dst[:c.DirFD.SizeBytes()])
+ dst = dst[c.DirFD.SizeBytes():]
+ c.Mode.MarshalBytes(dst[:c.Mode.SizeBytes()])
+ dst = dst[c.Mode.SizeBytes():]
+ // Padding: dst[:sizeof(uint16)] ~= uint16(0)
+ dst = dst[2:]
+ c.UID.MarshalBytes(dst[:c.UID.SizeBytes()])
+ dst = dst[c.UID.SizeBytes():]
+ c.GID.MarshalBytes(dst[:c.GID.SizeBytes()])
+ dst = dst[c.GID.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *createCommon) UnmarshalBytes(src []byte) {
+ c.DirFD.UnmarshalBytes(src[:c.DirFD.SizeBytes()])
+ src = src[c.DirFD.SizeBytes():]
+ c.Mode.UnmarshalBytes(src[:c.Mode.SizeBytes()])
+ src = src[c.Mode.SizeBytes():]
+ // Padding: var _ uint16 ~= src[:sizeof(uint16)]
+ src = src[2:]
+ c.UID.UnmarshalBytes(src[:c.UID.SizeBytes()])
+ src = src[c.UID.SizeBytes():]
+ c.GID.UnmarshalBytes(src[:c.GID.SizeBytes()])
+ src = src[c.GID.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *createCommon) Packed() bool {
+ return c.DirFD.Packed() && c.GID.Packed() && c.Mode.Packed() && c.UID.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (c *createCommon) MarshalUnsafe(dst []byte) {
+ if c.DirFD.Packed() && c.GID.Packed() && c.Mode.Packed() && c.UID.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(c), uintptr(c.SizeBytes()))
+ } else {
+ // Type createCommon doesn't have a packed layout in memory, fallback to MarshalBytes.
+ c.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *createCommon) UnmarshalUnsafe(src []byte) {
+ if c.DirFD.Packed() && c.GID.Packed() && c.Mode.Packed() && c.UID.Packed() {
+ gohacks.Memmove(unsafe.Pointer(c), unsafe.Pointer(&src[0]), uintptr(c.SizeBytes()))
+ } else {
+ // Type createCommon doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ c.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (c *createCommon) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !c.DirFD.Packed() && c.GID.Packed() && c.Mode.Packed() && c.UID.Packed() {
+ // Type createCommon doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(c.SizeBytes()) // escapes: okay.
+ c.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *createCommon) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *createCommon) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !c.DirFD.Packed() && c.GID.Packed() && c.Mode.Packed() && c.UID.Packed() {
+ // Type createCommon doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(c.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ c.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *createCommon) WriteTo(writer io.Writer) (int64, error) {
+ if !c.DirFD.Packed() && c.GID.Packed() && c.Mode.Packed() && c.UID.Packed() {
+ // Type createCommon doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, c.SizeBytes())
+ c.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (m *MsgDynamic) Packed() bool {
+ // Type MsgDynamic is dynamic so it might have slice/string headers. Hence, it is not packed.
+ return false
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (m *MsgDynamic) MarshalUnsafe(dst []byte) {
+ // Type MsgDynamic doesn't have a packed layout in memory, fallback to MarshalBytes.
+ m.MarshalBytes(dst)
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (m *MsgDynamic) UnmarshalUnsafe(src []byte) {
+ // Type MsgDynamic doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ m.UnmarshalBytes(src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (m *MsgDynamic) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Type MsgDynamic doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ m.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (m *MsgDynamic) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return m.CopyOutN(cc, addr, m.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (m *MsgDynamic) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Type MsgDynamic doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ m.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (m *MsgDynamic) WriteTo(writer io.Writer) (int64, error) {
+ // Type MsgDynamic doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, m.SizeBytes())
+ m.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (m *MsgSimple) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (m *MsgSimple) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(m.A))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(m.B))
+ dst = dst[2:]
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(m.C))
+ dst = dst[4:]
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(m.D))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (m *MsgSimple) UnmarshalBytes(src []byte) {
+ m.A = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ m.B = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ m.C = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ m.D = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (m *MsgSimple) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (m *MsgSimple) MarshalUnsafe(dst []byte) {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(m), uintptr(m.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (m *MsgSimple) UnmarshalUnsafe(src []byte) {
+ gohacks.Memmove(unsafe.Pointer(m), unsafe.Pointer(&src[0]), uintptr(m.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (m *MsgSimple) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (m *MsgSimple) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return m.CopyOutN(cc, addr, m.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (m *MsgSimple) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (m *MsgSimple) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(m)))
+ hdr.Len = m.SizeBytes()
+ hdr.Cap = m.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that m
+ // must live until the use above.
+ runtime.KeepAlive(m) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// CopyMsg1SliceIn copies in a slice of MsgSimple objects from the task's memory.
+func CopyMsg1SliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []MsgSimple) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*MsgSimple)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&dst)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyInBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that dst
+ // must live until the use above.
+ runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyMsg1SliceOut copies a slice of MsgSimple objects to the task's memory.
+func CopyMsg1SliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []MsgSimple) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*MsgSimple)(nil).SizeBytes()
+
+ ptr := unsafe.Pointer(&src)
+ val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(val)
+ hdr.Len = size * count
+ hdr.Cap = size * count
+
+ length, err := cc.CopyOutBytes(addr, buf)
+ // Since we bypassed the compiler's escape analysis, indicate that src
+ // must live until the use above.
+ runtime.KeepAlive(src) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// MarshalUnsafeMsg1Slice is like MsgSimple.MarshalUnsafe, but for a []MsgSimple.
+func MarshalUnsafeMsg1Slice(src []MsgSimple, dst []byte) (int, error) {
+ count := len(src)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*MsgSimple)(nil).SizeBytes()
+
+ dst = dst[:size*count]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))
+ return size * count, nil
+}
+
+// UnmarshalUnsafeMsg1Slice is like MsgSimple.UnmarshalUnsafe, but for a []MsgSimple.
+func UnmarshalUnsafeMsg1Slice(dst []MsgSimple, src []byte) (int, error) {
+ count := len(dst)
+ if count == 0 {
+ return 0, nil
+ }
+ size := (*MsgSimple)(nil).SizeBytes()
+
+ src = src[:(size*count)]
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))
+ return count*size, nil
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (v *P9Version) Packed() bool {
+ // Type P9Version is dynamic so it might have slice/string headers. Hence, it is not packed.
+ return false
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (v *P9Version) MarshalUnsafe(dst []byte) {
+ // Type P9Version doesn't have a packed layout in memory, fallback to MarshalBytes.
+ v.MarshalBytes(dst)
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (v *P9Version) UnmarshalUnsafe(src []byte) {
+ // Type P9Version doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ v.UnmarshalBytes(src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (v *P9Version) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ // Type P9Version doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(v.SizeBytes()) // escapes: okay.
+ v.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (v *P9Version) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return v.CopyOutN(cc, addr, v.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (v *P9Version) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ // Type P9Version doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(v.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ v.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (v *P9Version) WriteTo(writer io.Writer) (int64, error) {
+ // Type P9Version doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, v.SizeBytes())
+ v.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *sockHeader) SizeBytes() int {
+ return 6 +
+ (*MID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *sockHeader) MarshalBytes(dst []byte) {
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.payloadLen))
+ dst = dst[4:]
+ s.message.MarshalBytes(dst[:s.message.SizeBytes()])
+ dst = dst[s.message.SizeBytes():]
+ // Padding: dst[:sizeof(uint16)] ~= uint16(0)
+ dst = dst[2:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *sockHeader) UnmarshalBytes(src []byte) {
+ s.payloadLen = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.message.UnmarshalBytes(src[:s.message.SizeBytes()])
+ src = src[s.message.SizeBytes():]
+ // Padding: var _ uint16 ~= src[:sizeof(uint16)]
+ src = src[2:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *sockHeader) Packed() bool {
+ return s.message.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *sockHeader) MarshalUnsafe(dst []byte) {
+ if s.message.Packed() {
+ gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(s.SizeBytes()))
+ } else {
+ // Type sockHeader doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *sockHeader) UnmarshalUnsafe(src []byte) {
+ if s.message.Packed() {
+ gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(s.SizeBytes()))
+ } else {
+ // Type sockHeader doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *sockHeader) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+ if !s.message.Packed() {
+ // Type sockHeader doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *sockHeader) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *sockHeader) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+ if !s.message.Packed() {
+ // Type sockHeader doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *sockHeader) WriteTo(writer io.Writer) (int64, error) {
+ if !s.message.Packed() {
+ // Type sockHeader doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
diff --git a/pkg/lisafs/lisafs_state_autogen.go b/pkg/lisafs/lisafs_state_autogen.go
new file mode 100644
index 000000000..fc032f947
--- /dev/null
+++ b/pkg/lisafs/lisafs_state_autogen.go
@@ -0,0 +1,176 @@
+// automatically generated by stateify.
+
+package lisafs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (l *controlFDList) StateTypeName() string {
+ return "pkg/lisafs.controlFDList"
+}
+
+func (l *controlFDList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *controlFDList) beforeSave() {}
+
+// +checklocksignore
+func (l *controlFDList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *controlFDList) afterLoad() {}
+
+// +checklocksignore
+func (l *controlFDList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *controlFDEntry) StateTypeName() string {
+ return "pkg/lisafs.controlFDEntry"
+}
+
+func (e *controlFDEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *controlFDEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *controlFDEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *controlFDEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *controlFDEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (r *controlFDRefs) StateTypeName() string {
+ return "pkg/lisafs.controlFDRefs"
+}
+
+func (r *controlFDRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *controlFDRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *controlFDRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *controlFDRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (l *openFDList) StateTypeName() string {
+ return "pkg/lisafs.openFDList"
+}
+
+func (l *openFDList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *openFDList) beforeSave() {}
+
+// +checklocksignore
+func (l *openFDList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *openFDList) afterLoad() {}
+
+// +checklocksignore
+func (l *openFDList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *openFDEntry) StateTypeName() string {
+ return "pkg/lisafs.openFDEntry"
+}
+
+func (e *openFDEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *openFDEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *openFDEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *openFDEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *openFDEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (r *openFDRefs) StateTypeName() string {
+ return "pkg/lisafs.openFDRefs"
+}
+
+func (r *openFDRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *openFDRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *openFDRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *openFDRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func init() {
+ state.Register((*controlFDList)(nil))
+ state.Register((*controlFDEntry)(nil))
+ state.Register((*controlFDRefs)(nil))
+ state.Register((*openFDList)(nil))
+ state.Register((*openFDEntry)(nil))
+ state.Register((*openFDRefs)(nil))
+}
diff --git a/pkg/lisafs/open_fd_list.go b/pkg/lisafs/open_fd_list.go
new file mode 100644
index 000000000..9a8b1e30b
--- /dev/null
+++ b/pkg/lisafs/open_fd_list.go
@@ -0,0 +1,221 @@
+package lisafs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type openFDElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (openFDElementMapper) linkerFor(elem *OpenFD) *OpenFD { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type openFDList struct {
+ head *OpenFD
+ tail *OpenFD
+}
+
+// Reset resets list l to the empty state.
+func (l *openFDList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *openFDList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *openFDList) Front() *OpenFD {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *openFDList) Back() *OpenFD {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *openFDList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (openFDElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *openFDList) PushFront(e *OpenFD) {
+ linker := openFDElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ openFDElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *openFDList) PushBack(e *OpenFD) {
+ linker := openFDElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ openFDElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *openFDList) PushBackList(m *openFDList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ openFDElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ openFDElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *openFDList) InsertAfter(b, e *OpenFD) {
+ bLinker := openFDElementMapper{}.linkerFor(b)
+ eLinker := openFDElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ openFDElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *openFDList) InsertBefore(a, e *OpenFD) {
+ aLinker := openFDElementMapper{}.linkerFor(a)
+ eLinker := openFDElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ openFDElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *openFDList) Remove(e *OpenFD) {
+ linker := openFDElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ openFDElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ openFDElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type openFDEntry struct {
+ next *OpenFD
+ prev *OpenFD
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *openFDEntry) Next() *OpenFD {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *openFDEntry) Prev() *OpenFD {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *openFDEntry) SetNext(elem *OpenFD) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *openFDEntry) SetPrev(elem *OpenFD) {
+ e.prev = elem
+}
diff --git a/pkg/lisafs/open_fd_refs.go b/pkg/lisafs/open_fd_refs.go
new file mode 100644
index 000000000..f1a99f335
--- /dev/null
+++ b/pkg/lisafs/open_fd_refs.go
@@ -0,0 +1,140 @@
+package lisafs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const openFDenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var openFDobj *OpenFD
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type openFDRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *openFDRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *openFDRefs) RefType() string {
+ return fmt.Sprintf("%T", openFDobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *openFDRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *openFDRefs) LogRefs() bool {
+ return openFDenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *openFDRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *openFDRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if openFDenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.TryRefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *openFDRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if openFDenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *openFDRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if openFDenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *openFDRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
diff --git a/pkg/lisafs/sock_test.go b/pkg/lisafs/sock_test.go
deleted file mode 100644
index 387f4b7a8..000000000
--- a/pkg/lisafs/sock_test.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lisafs
-
-import (
- "bytes"
- "math/rand"
- "reflect"
- "testing"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/unet"
-)
-
-func runSocketTest(t *testing.T, fun1 func(*sockCommunicator), fun2 func(*sockCommunicator)) {
- sock1, sock2, err := unet.SocketPair(false)
- if err != nil {
- t.Fatalf("socketpair got err %v expected nil", err)
- }
- defer sock1.Close()
- defer sock2.Close()
-
- var testWg sync.WaitGroup
- testWg.Add(2)
-
- go func() {
- fun1(newSockComm(sock1))
- testWg.Done()
- }()
-
- go func() {
- fun2(newSockComm(sock2))
- testWg.Done()
- }()
-
- testWg.Wait()
-}
-
-func TestReadWrite(t *testing.T) {
- // Create random data to send.
- n := 10000
- data := make([]byte, n)
- if _, err := rand.Read(data); err != nil {
- t.Fatalf("rand.Read(data) failed: %v", err)
- }
-
- runSocketTest(t, func(comm *sockCommunicator) {
- // Scatter that data into two parts using Iovecs while sending.
- mid := n / 2
- if err := writeTo(comm.sock, [][]byte{data[:mid], data[mid:]}, n, nil); err != nil {
- t.Errorf("writeTo socket failed: %v", err)
- }
- }, func(comm *sockCommunicator) {
- gotData := make([]byte, n)
- if _, err := readFrom(comm.sock, gotData, 0); err != nil {
- t.Fatalf("reading from socket failed: %v", err)
- }
-
- // Make sure we got the right data.
- if res := bytes.Compare(data, gotData); res != 0 {
- t.Errorf("data received differs from data sent, want = %v, got = %v", data, gotData)
- }
- })
-}
-
-func TestFDDonation(t *testing.T) {
- n := 10
- data := make([]byte, n)
- if _, err := rand.Read(data); err != nil {
- t.Fatalf("rand.Read(data) failed: %v", err)
- }
-
- // Try donating FDs to these files.
- path1 := "/dev/null"
- path2 := "/dev"
- path3 := "/dev/random"
-
- runSocketTest(t, func(comm *sockCommunicator) {
- devNullFD, err := unix.Open(path1, unix.O_RDONLY, 0)
- defer unix.Close(devNullFD)
- if err != nil {
- t.Fatalf("open(%s) failed: %v", path1, err)
- }
- devFD, err := unix.Open(path2, unix.O_RDONLY, 0)
- defer unix.Close(devFD)
- if err != nil {
- t.Fatalf("open(%s) failed: %v", path2, err)
- }
- devRandomFD, err := unix.Open(path3, unix.O_RDONLY, 0)
- defer unix.Close(devRandomFD)
- if err != nil {
- t.Fatalf("open(%s) failed: %v", path2, err)
- }
- if err := writeTo(comm.sock, [][]byte{data}, n, []int{devNullFD, devFD, devRandomFD}); err != nil {
- t.Errorf("writeTo socket failed: %v", err)
- }
- }, func(comm *sockCommunicator) {
- gotData := make([]byte, n)
- fds, err := readFrom(comm.sock, gotData, 3)
- if err != nil {
- t.Fatalf("reading from socket failed: %v", err)
- }
- defer closeFDs(fds[:])
-
- if res := bytes.Compare(data, gotData); res != 0 {
- t.Errorf("data received differs from data sent, want = %v, got = %v", data, gotData)
- }
-
- if len(fds) != 3 {
- t.Fatalf("wanted 3 FD, got %d", len(fds))
- }
-
- // Check that the FDs actually point to the correct file.
- compareFDWithFile(t, fds[0], path1)
- compareFDWithFile(t, fds[1], path2)
- compareFDWithFile(t, fds[2], path3)
- })
-}
-
-func compareFDWithFile(t *testing.T, fd int, path string) {
- var want unix.Stat_t
- if err := unix.Stat(path, &want); err != nil {
- t.Fatalf("stat(%s) failed: %v", path, err)
- }
-
- var got unix.Stat_t
- if err := unix.Fstat(fd, &got); err != nil {
- t.Fatalf("fstat on donated FD failed: %v", err)
- }
-
- if got.Ino != want.Ino || got.Dev != want.Dev {
- t.Errorf("FD does not point to %s, want = %+v, got = %+v", path, want, got)
- }
-}
-
-func testSndMsg(comm *sockCommunicator, m MID, msg marshal.Marshallable) error {
- var payloadLen uint32
- if msg != nil {
- payloadLen = uint32(msg.SizeBytes())
- msg.MarshalUnsafe(comm.PayloadBuf(payloadLen))
- }
- return comm.sndPrepopulatedMsg(m, payloadLen, nil)
-}
-
-func TestSndRcvMessage(t *testing.T) {
- req := &MsgSimple{}
- req.Randomize()
- reqM := MID(1)
-
- // Create a massive random response.
- var resp MsgDynamic
- resp.Randomize(100)
- respM := MID(2)
-
- runSocketTest(t, func(comm *sockCommunicator) {
- if err := testSndMsg(comm, reqM, req); err != nil {
- t.Errorf("writeMessageTo failed: %v", err)
- }
- checkMessageReceive(t, comm, respM, &resp)
- }, func(comm *sockCommunicator) {
- checkMessageReceive(t, comm, reqM, req)
- if err := testSndMsg(comm, respM, &resp); err != nil {
- t.Errorf("writeMessageTo failed: %v", err)
- }
- })
-}
-
-func TestSndRcvMessageNoPayload(t *testing.T) {
- reqM := MID(1)
- respM := MID(2)
- runSocketTest(t, func(comm *sockCommunicator) {
- if err := testSndMsg(comm, reqM, nil); err != nil {
- t.Errorf("writeMessageTo failed: %v", err)
- }
- checkMessageReceive(t, comm, respM, nil)
- }, func(comm *sockCommunicator) {
- checkMessageReceive(t, comm, reqM, nil)
- if err := testSndMsg(comm, respM, nil); err != nil {
- t.Errorf("writeMessageTo failed: %v", err)
- }
- })
-}
-
-func checkMessageReceive(t *testing.T, comm *sockCommunicator, wantM MID, wantMsg marshal.Marshallable) {
- gotM, payloadLen, err := comm.rcvMsg(0)
- if err != nil {
- t.Fatalf("readMessageFrom failed: %v", err)
- }
- if gotM != wantM {
- t.Errorf("got incorrect message ID: got = %d, want = %d", gotM, wantM)
- }
- if wantMsg == nil {
- if payloadLen != 0 {
- t.Errorf("no payload expect but got %d bytes", payloadLen)
- }
- } else {
- gotMsg := reflect.New(reflect.ValueOf(wantMsg).Elem().Type()).Interface().(marshal.Marshallable)
- gotMsg.UnmarshalUnsafe(comm.PayloadBuf(payloadLen))
- if !reflect.DeepEqual(wantMsg, gotMsg) {
- t.Errorf("msg differs: want = %+v, got = %+v", wantMsg, gotMsg)
- }
- }
-}
diff --git a/pkg/lisafs/testsuite/BUILD b/pkg/lisafs/testsuite/BUILD
deleted file mode 100644
index b4a542b3a..000000000
--- a/pkg/lisafs/testsuite/BUILD
+++ /dev/null
@@ -1,20 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(
- default_visibility = ["//visibility:public"],
- licenses = ["notice"],
-)
-
-go_library(
- name = "testsuite",
- testonly = True,
- srcs = ["testsuite.go"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/lisafs",
- "//pkg/unet",
- "@com_github_syndtr_gocapability//capability:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/lisafs/testsuite/testsuite.go b/pkg/lisafs/testsuite/testsuite.go
deleted file mode 100644
index 5fc7c364d..000000000
--- a/pkg/lisafs/testsuite/testsuite.go
+++ /dev/null
@@ -1,637 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package testsuite provides a integration testing suite for lisafs.
-// These tests are intended for servers serving the local filesystem.
-package testsuite
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "math/rand"
- "os"
- "testing"
- "time"
-
- "github.com/syndtr/gocapability/capability"
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/lisafs"
- "gvisor.dev/gvisor/pkg/unet"
-)
-
-// Tester is the client code using this test suite. This interface abstracts
-// away all the caller specific details.
-type Tester interface {
- // NewServer returns a new instance of the tester server.
- NewServer(t *testing.T) *lisafs.Server
-
- // LinkSupported returns true if the backing server supports LinkAt.
- LinkSupported() bool
-
- // SetUserGroupIDSupported returns true if the backing server supports
- // changing UID/GID for files.
- SetUserGroupIDSupported() bool
-}
-
-// RunAllLocalFSTests runs all local FS tests as subtests.
-func RunAllLocalFSTests(t *testing.T, tester Tester) {
- for name, testFn := range localFSTests {
- t.Run(name, func(t *testing.T) {
- runServerClient(t, tester, testFn)
- })
- }
-}
-
-type testFunc func(context.Context, *testing.T, Tester, lisafs.ClientFD)
-
-var localFSTests map[string]testFunc = map[string]testFunc{
- "Stat": testStat,
- "RegularFileIO": testRegularFileIO,
- "RegularFileOpen": testRegularFileOpen,
- "SetStat": testSetStat,
- "Allocate": testAllocate,
- "StatFS": testStatFS,
- "Unlink": testUnlink,
- "Symlink": testSymlink,
- "HardLink": testHardLink,
- "Walk": testWalk,
- "Rename": testRename,
- "Mknod": testMknod,
- "Getdents": testGetdents,
-}
-
-func runServerClient(t *testing.T, tester Tester, testFn testFunc) {
- mountPath, err := ioutil.TempDir(os.Getenv("TEST_TMPDIR"), "")
- if err != nil {
- t.Fatalf("creation of temporary mountpoint failed: %v", err)
- }
- defer os.RemoveAll(mountPath)
-
- // fsgofer should run with a umask of 0, because we want to preserve file
- // modes exactly for testing purposes.
- unix.Umask(0)
-
- serverSocket, clientSocket, err := unet.SocketPair(false)
- if err != nil {
- t.Fatalf("socketpair got err %v expected nil", err)
- }
-
- server := tester.NewServer(t)
- conn, err := server.CreateConnection(serverSocket, false /* readonly */)
- if err != nil {
- t.Fatalf("starting connection failed: %v", err)
- return
- }
- server.StartConnection(conn)
-
- c, root, err := lisafs.NewClient(clientSocket, mountPath)
- if err != nil {
- t.Fatalf("client creation failed: %v", err)
- }
-
- if !root.ControlFD.Ok() {
- t.Fatalf("root control FD is not valid")
- }
- rootFile := c.NewFD(root.ControlFD)
-
- ctx := context.Background()
- testFn(ctx, t, tester, rootFile)
- closeFD(ctx, t, rootFile)
-
- c.Close() // This should trigger client and server shutdown.
- server.Wait()
-}
-
-func closeFD(ctx context.Context, t testing.TB, fdLisa lisafs.ClientFD) {
- if err := fdLisa.Close(ctx); err != nil {
- t.Errorf("failed to close FD: %v", err)
- }
-}
-
-func statTo(ctx context.Context, t *testing.T, fdLisa lisafs.ClientFD, stat *linux.Statx) {
- if err := fdLisa.StatTo(ctx, stat); err != nil {
- t.Fatalf("stat failed: %v", err)
- }
-}
-
-func openCreateFile(ctx context.Context, t *testing.T, fdLisa lisafs.ClientFD, name string) (lisafs.ClientFD, linux.Statx, lisafs.ClientFD, int) {
- child, childFD, childHostFD, err := fdLisa.OpenCreateAt(ctx, name, unix.O_RDWR, 0777, lisafs.UID(unix.Getuid()), lisafs.GID(unix.Getgid()))
- if err != nil {
- t.Fatalf("OpenCreateAt failed: %v", err)
- }
- if childHostFD == -1 {
- t.Error("no host FD donated")
- }
- client := fdLisa.Client()
- return client.NewFD(child.ControlFD), child.Stat, fdLisa.Client().NewFD(childFD), childHostFD
-}
-
-func openFile(ctx context.Context, t *testing.T, fdLisa lisafs.ClientFD, flags uint32, isReg bool) (lisafs.ClientFD, int) {
- newFD, hostFD, err := fdLisa.OpenAt(ctx, flags)
- if err != nil {
- t.Fatalf("OpenAt failed: %v", err)
- }
- if hostFD == -1 && isReg {
- t.Error("no host FD donated")
- }
- return fdLisa.Client().NewFD(newFD), hostFD
-}
-
-func unlinkFile(ctx context.Context, t *testing.T, dir lisafs.ClientFD, name string, isDir bool) {
- var flags uint32
- if isDir {
- flags = unix.AT_REMOVEDIR
- }
- if err := dir.UnlinkAt(ctx, name, flags); err != nil {
- t.Errorf("unlinking file %s failed: %v", name, err)
- }
-}
-
-func symlink(ctx context.Context, t *testing.T, dir lisafs.ClientFD, name, target string) (lisafs.ClientFD, linux.Statx) {
- linkIno, err := dir.SymlinkAt(ctx, name, target, lisafs.UID(unix.Getuid()), lisafs.GID(unix.Getgid()))
- if err != nil {
- t.Fatalf("symlink failed: %v", err)
- }
- return dir.Client().NewFD(linkIno.ControlFD), linkIno.Stat
-}
-
-func link(ctx context.Context, t *testing.T, dir lisafs.ClientFD, name string, target lisafs.ClientFD) (lisafs.ClientFD, linux.Statx) {
- linkIno, err := dir.LinkAt(ctx, target.ID(), name)
- if err != nil {
- t.Fatalf("link failed: %v", err)
- }
- return dir.Client().NewFD(linkIno.ControlFD), linkIno.Stat
-}
-
-func mkdir(ctx context.Context, t *testing.T, dir lisafs.ClientFD, name string) (lisafs.ClientFD, linux.Statx) {
- childIno, err := dir.MkdirAt(ctx, name, 0777, lisafs.UID(unix.Getuid()), lisafs.GID(unix.Getgid()))
- if err != nil {
- t.Fatalf("mkdir failed: %v", err)
- }
- return dir.Client().NewFD(childIno.ControlFD), childIno.Stat
-}
-
-func mknod(ctx context.Context, t *testing.T, dir lisafs.ClientFD, name string) (lisafs.ClientFD, linux.Statx) {
- nodeIno, err := dir.MknodAt(ctx, name, unix.S_IFREG|0777, lisafs.UID(unix.Getuid()), lisafs.GID(unix.Getgid()), 0, 0)
- if err != nil {
- t.Fatalf("mknod failed: %v", err)
- }
- return dir.Client().NewFD(nodeIno.ControlFD), nodeIno.Stat
-}
-
-func walk(ctx context.Context, t *testing.T, dir lisafs.ClientFD, names []string) []lisafs.Inode {
- _, inodes, err := dir.WalkMultiple(ctx, names)
- if err != nil {
- t.Fatalf("walk failed while trying to walk components %+v: %v", names, err)
- }
- return inodes
-}
-
-func walkStat(ctx context.Context, t *testing.T, dir lisafs.ClientFD, names []string) []linux.Statx {
- stats, err := dir.WalkStat(ctx, names)
- if err != nil {
- t.Fatalf("walk failed while trying to walk components %+v: %v", names, err)
- }
- return stats
-}
-
-func writeFD(ctx context.Context, t *testing.T, fdLisa lisafs.ClientFD, off uint64, buf []byte) error {
- count, err := fdLisa.Write(ctx, buf, off)
- if err != nil {
- return err
- }
- if int(count) != len(buf) {
- t.Errorf("partial write: buf size = %d, written = %d", len(buf), count)
- }
- return nil
-}
-
-func readFDAndCmp(ctx context.Context, t *testing.T, fdLisa lisafs.ClientFD, off uint64, want []byte) {
- buf := make([]byte, len(want))
- n, err := fdLisa.Read(ctx, buf, off)
- if err != nil {
- t.Errorf("read failed: %v", err)
- return
- }
- if int(n) != len(want) {
- t.Errorf("partial read: buf size = %d, read = %d", len(want), n)
- return
- }
- if bytes.Compare(buf, want) != 0 {
- t.Errorf("bytes read differ from what was expected: want = %v, got = %v", want, buf)
- }
-}
-
-func allocateAndVerify(ctx context.Context, t *testing.T, fdLisa lisafs.ClientFD, off uint64, length uint64) {
- if err := fdLisa.Allocate(ctx, 0, off, length); err != nil {
- t.Fatalf("fallocate failed: %v", err)
- }
-
- var stat linux.Statx
- statTo(ctx, t, fdLisa, &stat)
- if want := off + length; stat.Size != want {
- t.Errorf("incorrect file size after allocate: expected %d, got %d", off+length, stat.Size)
- }
-}
-
-func cmpStatx(t *testing.T, want, got linux.Statx) {
- if got.Mask&unix.STATX_MODE != 0 && want.Mask&unix.STATX_MODE != 0 {
- if got.Mode != want.Mode {
- t.Errorf("mode differs: want %d, got %d", want.Mode, got.Mode)
- }
- }
- if got.Mask&unix.STATX_INO != 0 && want.Mask&unix.STATX_INO != 0 {
- if got.Ino != want.Ino {
- t.Errorf("inode number differs: want %d, got %d", want.Ino, got.Ino)
- }
- }
- if got.Mask&unix.STATX_NLINK != 0 && want.Mask&unix.STATX_NLINK != 0 {
- if got.Nlink != want.Nlink {
- t.Errorf("nlink differs: want %d, got %d", want.Nlink, got.Nlink)
- }
- }
- if got.Mask&unix.STATX_UID != 0 && want.Mask&unix.STATX_UID != 0 {
- if got.UID != want.UID {
- t.Errorf("UID differs: want %d, got %d", want.UID, got.UID)
- }
- }
- if got.Mask&unix.STATX_GID != 0 && want.Mask&unix.STATX_GID != 0 {
- if got.GID != want.GID {
- t.Errorf("GID differs: want %d, got %d", want.GID, got.GID)
- }
- }
- if got.Mask&unix.STATX_SIZE != 0 && want.Mask&unix.STATX_SIZE != 0 {
- if got.Size != want.Size {
- t.Errorf("size differs: want %d, got %d", want.Size, got.Size)
- }
- }
- if got.Mask&unix.STATX_BLOCKS != 0 && want.Mask&unix.STATX_BLOCKS != 0 {
- if got.Blocks != want.Blocks {
- t.Errorf("blocks differs: want %d, got %d", want.Blocks, got.Blocks)
- }
- }
- if got.Mask&unix.STATX_ATIME != 0 && want.Mask&unix.STATX_ATIME != 0 {
- if got.Atime != want.Atime {
- t.Errorf("atime differs: want %d, got %d", want.Atime, got.Atime)
- }
- }
- if got.Mask&unix.STATX_MTIME != 0 && want.Mask&unix.STATX_MTIME != 0 {
- if got.Mtime != want.Mtime {
- t.Errorf("mtime differs: want %d, got %d", want.Mtime, got.Mtime)
- }
- }
- if got.Mask&unix.STATX_CTIME != 0 && want.Mask&unix.STATX_CTIME != 0 {
- if got.Ctime != want.Ctime {
- t.Errorf("ctime differs: want %d, got %d", want.Ctime, got.Ctime)
- }
- }
-}
-
-func hasCapability(c capability.Cap) bool {
- caps, err := capability.NewPid2(os.Getpid())
- if err != nil {
- return false
- }
- if err := caps.Load(); err != nil {
- return false
- }
- return caps.Get(capability.EFFECTIVE, c)
-}
-
-func testStat(ctx context.Context, t *testing.T, tester Tester, root lisafs.ClientFD) {
- var rootStat linux.Statx
- if err := root.StatTo(ctx, &rootStat); err != nil {
- t.Errorf("stat on the root dir failed: %v", err)
- }
-
- if ftype := rootStat.Mode & unix.S_IFMT; ftype != unix.S_IFDIR {
- t.Errorf("root inode is not a directory, file type = %d", ftype)
- }
-}
-
-func testRegularFileIO(ctx context.Context, t *testing.T, tester Tester, root lisafs.ClientFD) {
- name := "tempFile"
- controlFile, _, fd, hostFD := openCreateFile(ctx, t, root, name)
- defer closeFD(ctx, t, controlFile)
- defer closeFD(ctx, t, fd)
- defer unix.Close(hostFD)
-
- // Test Read/Write RPCs with 2MB of data to test IO in chunks.
- data := make([]byte, 1<<21)
- rand.Read(data)
- if err := writeFD(ctx, t, fd, 0, data); err != nil {
- t.Fatalf("write failed: %v", err)
- }
- readFDAndCmp(ctx, t, fd, 0, data)
- readFDAndCmp(ctx, t, fd, 50, data[50:])
-
- // Make sure the host FD is configured properly.
- hostReadData := make([]byte, len(data))
- if n, err := unix.Pread(hostFD, hostReadData, 0); err != nil {
- t.Errorf("host read failed: %v", err)
- } else if n != len(hostReadData) {
- t.Errorf("partial read: buf size = %d, read = %d", len(hostReadData), n)
- } else if bytes.Compare(hostReadData, data) != 0 {
- t.Errorf("bytes read differ from what was expected: want = %v, got = %v", data, hostReadData)
- }
-
- // Test syncing the writable FD.
- if err := fd.Sync(ctx); err != nil {
- t.Errorf("syncing the FD failed: %v", err)
- }
-}
-
-func testRegularFileOpen(ctx context.Context, t *testing.T, tester Tester, root lisafs.ClientFD) {
- name := "tempFile"
- controlFile, _, fd, hostFD := openCreateFile(ctx, t, root, name)
- defer closeFD(ctx, t, controlFile)
- defer closeFD(ctx, t, fd)
- defer unix.Close(hostFD)
-
- // Open a readonly FD and try writing to it to get an EBADF.
- roFile, roHostFD := openFile(ctx, t, controlFile, unix.O_RDONLY, true /* isReg */)
- defer closeFD(ctx, t, roFile)
- defer unix.Close(roHostFD)
- if err := writeFD(ctx, t, roFile, 0, []byte{1, 2, 3}); err != unix.EBADF {
- t.Errorf("writing to read only FD should generate EBADF, but got %v", err)
- }
-}
-
-func testSetStat(ctx context.Context, t *testing.T, tester Tester, root lisafs.ClientFD) {
- name := "tempFile"
- controlFile, _, fd, hostFD := openCreateFile(ctx, t, root, name)
- defer closeFD(ctx, t, controlFile)
- defer closeFD(ctx, t, fd)
- defer unix.Close(hostFD)
-
- now := time.Now()
- wantStat := linux.Statx{
- Mask: unix.STATX_MODE | unix.STATX_ATIME | unix.STATX_MTIME | unix.STATX_SIZE,
- Mode: 0760,
- UID: uint32(unix.Getuid()),
- GID: uint32(unix.Getgid()),
- Size: 50,
- Atime: linux.NsecToStatxTimestamp(now.UnixNano()),
- Mtime: linux.NsecToStatxTimestamp(now.UnixNano()),
- }
- if tester.SetUserGroupIDSupported() {
- wantStat.Mask |= unix.STATX_UID | unix.STATX_GID
- }
- failureMask, failureErr, err := controlFile.SetStat(ctx, &wantStat)
- if err != nil {
- t.Fatalf("setstat failed: %v", err)
- }
- if failureMask != 0 {
- t.Fatalf("some setstat operations failed: failureMask = %#b, failureErr = %v", failureMask, failureErr)
- }
-
- // Verify that attributes were updated.
- var gotStat linux.Statx
- statTo(ctx, t, controlFile, &gotStat)
- if gotStat.Mode&07777 != wantStat.Mode ||
- gotStat.Size != wantStat.Size ||
- gotStat.Atime.ToNsec() != wantStat.Atime.ToNsec() ||
- gotStat.Mtime.ToNsec() != wantStat.Mtime.ToNsec() ||
- (tester.SetUserGroupIDSupported() && (uint32(gotStat.UID) != wantStat.UID || uint32(gotStat.GID) != wantStat.GID)) {
- t.Errorf("setStat did not update file correctly: setStat = %+v, stat = %+v", wantStat, gotStat)
- }
-}
-
-func testAllocate(ctx context.Context, t *testing.T, tester Tester, root lisafs.ClientFD) {
- name := "tempFile"
- controlFile, _, fd, hostFD := openCreateFile(ctx, t, root, name)
- defer closeFD(ctx, t, controlFile)
- defer closeFD(ctx, t, fd)
- defer unix.Close(hostFD)
-
- allocateAndVerify(ctx, t, fd, 0, 40)
- allocateAndVerify(ctx, t, fd, 20, 100)
-}
-
-func testStatFS(ctx context.Context, t *testing.T, tester Tester, root lisafs.ClientFD) {
- var statFS lisafs.StatFS
- if err := root.StatFSTo(ctx, &statFS); err != nil {
- t.Errorf("statfs failed: %v", err)
- }
-}
-
-func testUnlink(ctx context.Context, t *testing.T, tester Tester, root lisafs.ClientFD) {
- name := "tempFile"
- controlFile, _, fd, hostFD := openCreateFile(ctx, t, root, name)
- defer closeFD(ctx, t, controlFile)
- defer closeFD(ctx, t, fd)
- defer unix.Close(hostFD)
-
- unlinkFile(ctx, t, root, name, false /* isDir */)
- if inodes := walk(ctx, t, root, []string{name}); len(inodes) > 0 {
- t.Errorf("deleted file should not be generating inodes on walk: %+v", inodes)
- }
-}
-
-func testSymlink(ctx context.Context, t *testing.T, tester Tester, root lisafs.ClientFD) {
- target := "/tmp/some/path"
- name := "symlinkFile"
- link, linkStat := symlink(ctx, t, root, name, target)
- defer closeFD(ctx, t, link)
-
- if linkStat.Mode&unix.S_IFMT != unix.S_IFLNK {
- t.Errorf("stat return from symlink RPC indicates that the inode is not a symlink: mode = %d", linkStat.Mode)
- }
-
- if gotTarget, err := link.ReadLinkAt(ctx); err != nil {
- t.Fatalf("readlink failed: %v", err)
- } else if gotTarget != target {
- t.Errorf("readlink return incorrect target: expected %q, got %q", target, gotTarget)
- }
-}
-
-func testHardLink(ctx context.Context, t *testing.T, tester Tester, root lisafs.ClientFD) {
- if !tester.LinkSupported() {
- t.Skipf("server does not support LinkAt RPC")
- }
- if !hasCapability(capability.CAP_DAC_READ_SEARCH) {
- t.Skipf("TestHardLink requires CAP_DAC_READ_SEARCH, running as %d", unix.Getuid())
- }
- name := "tempFile"
- controlFile, fileIno, fd, hostFD := openCreateFile(ctx, t, root, name)
- defer closeFD(ctx, t, controlFile)
- defer closeFD(ctx, t, fd)
- defer unix.Close(hostFD)
-
- link, linkStat := link(ctx, t, root, name, controlFile)
- defer closeFD(ctx, t, link)
-
- if linkStat.Ino != fileIno.Ino {
- t.Errorf("hard linked files have different inode numbers: %d %d", linkStat.Ino, fileIno.Ino)
- }
- if linkStat.DevMinor != fileIno.DevMinor {
- t.Errorf("hard linked files have different minor device numbers: %d %d", linkStat.DevMinor, fileIno.DevMinor)
- }
- if linkStat.DevMajor != fileIno.DevMajor {
- t.Errorf("hard linked files have different major device numbers: %d %d", linkStat.DevMajor, fileIno.DevMajor)
- }
-}
-
// testWalk builds a chain of n nested directories, then cross-checks the Walk
// and WalkStat RPCs against per-directory stat results.
func testWalk(ctx context.Context, t *testing.T, tester Tester, root lisafs.ClientFD) {
	// Create 10 nested directories.
	n := 10
	curDir := root

	dirNames := make([]string, 0, n)
	for i := 0; i < n; i++ {
		name := fmt.Sprintf("tmpdir-%d", i)
		childDir, _ := mkdir(ctx, t, curDir, name)
		// Deferred cleanup runs at function exit; LIFO defer order tears the
		// chain down innermost-first, so each unlink sees an empty directory.
		defer closeFD(ctx, t, childDir)
		defer unlinkFile(ctx, t, curDir, name, true /* isDir */)

		curDir = childDir
		dirNames = append(dirNames, name)
	}

	// Walk all these directories. Add some junk at the end which should not be
	// walked on.
	dirNames = append(dirNames, []string{"a", "b", "c"}...)
	inodes := walk(ctx, t, root, dirNames)
	if len(inodes) != n {
		t.Errorf("walk returned the incorrect number of inodes: wanted %d, got %d", n, len(inodes))
	}

	// Close all control FDs and collect stat results for all dirs including
	// the root directory.
	dirStats := make([]linux.Statx, 0, n+1)
	var stat linux.Statx
	statTo(ctx, t, root, &stat)
	dirStats = append(dirStats, stat)
	for _, inode := range inodes {
		dirStats = append(dirStats, inode.Stat)
		closeFD(ctx, t, root.Client().NewFD(inode.ControlFD))
	}

	// Test WalkStat which additionally returns Statx for root because the first
	// path component is "".
	dirNames = append([]string{""}, dirNames...)
	gotStats := walkStat(ctx, t, root, dirNames)
	if len(gotStats) != len(dirStats) {
		t.Errorf("walkStat returned the incorrect number of statx: wanted %d, got %d", len(dirStats), len(gotStats))
	} else {
		for i := range gotStats {
			cmpStatx(t, dirStats[i], gotStats[i])
		}
	}
}
-
-func testRename(ctx context.Context, t *testing.T, tester Tester, root lisafs.ClientFD) {
- name := "tempFile"
- tempFile, _, fd, hostFD := openCreateFile(ctx, t, root, name)
- defer closeFD(ctx, t, tempFile)
- defer closeFD(ctx, t, fd)
- defer unix.Close(hostFD)
-
- tempDir, _ := mkdir(ctx, t, root, "tempDir")
- defer closeFD(ctx, t, tempDir)
-
- // Move tempFile into tempDir.
- if err := tempFile.RenameTo(ctx, tempDir.ID(), "movedFile"); err != nil {
- t.Fatalf("rename failed: %v", err)
- }
-
- inodes := walkStat(ctx, t, root, []string{"tempDir", "movedFile"})
- if len(inodes) != 2 {
- t.Errorf("expected 2 files on walk but only found %d", len(inodes))
- }
-}
-
-func testMknod(ctx context.Context, t *testing.T, tester Tester, root lisafs.ClientFD) {
- name := "namedPipe"
- pipeFile, pipeStat := mknod(ctx, t, root, name)
- defer closeFD(ctx, t, pipeFile)
-
- var stat linux.Statx
- statTo(ctx, t, pipeFile, &stat)
-
- if stat.Mode != pipeStat.Mode {
- t.Errorf("mknod mode is incorrect: want %d, got %d", pipeStat.Mode, stat.Mode)
- }
- if stat.UID != pipeStat.UID {
- t.Errorf("mknod UID is incorrect: want %d, got %d", pipeStat.UID, stat.UID)
- }
- if stat.GID != pipeStat.GID {
- t.Errorf("mknod GID is incorrect: want %d, got %d", pipeStat.GID, stat.GID)
- }
-}
-
-func testGetdents(ctx context.Context, t *testing.T, tester Tester, root lisafs.ClientFD) {
- tempDir, _ := mkdir(ctx, t, root, "tempDir")
- defer closeFD(ctx, t, tempDir)
- defer unlinkFile(ctx, t, root, "tempDir", true /* isDir */)
-
- // Create 10 files in tempDir.
- n := 10
- fileStats := make(map[string]linux.Statx)
- for i := 0; i < n; i++ {
- name := fmt.Sprintf("file-%d", i)
- newFile, fileStat := mknod(ctx, t, tempDir, name)
- defer closeFD(ctx, t, newFile)
- defer unlinkFile(ctx, t, tempDir, name, false /* isDir */)
-
- fileStats[name] = fileStat
- }
-
- // Use opened directory FD for getdents.
- openDirFile, _ := openFile(ctx, t, tempDir, unix.O_RDONLY, false /* isReg */)
- defer closeFD(ctx, t, openDirFile)
-
- dirents := make([]lisafs.Dirent64, 0, n)
- for i := 0; i < n+2; i++ {
- gotDirents, err := openDirFile.Getdents64(ctx, 40)
- if err != nil {
- t.Fatalf("getdents failed: %v", err)
- }
- if len(gotDirents) == 0 {
- break
- }
- for _, dirent := range gotDirents {
- if dirent.Name != "." && dirent.Name != ".." {
- dirents = append(dirents, dirent)
- }
- }
- }
-
- if len(dirents) != n {
- t.Errorf("got incorrect number of dirents: wanted %d, got %d", n, len(dirents))
- }
- for _, dirent := range dirents {
- stat, ok := fileStats[string(dirent.Name)]
- if !ok {
- t.Errorf("received a dirent that was not created: %+v", dirent)
- continue
- }
-
- if dirent.Type != unix.DT_REG {
- t.Errorf("dirent type of %s is incorrect: %d", dirent.Name, dirent.Type)
- }
- if uint64(dirent.Ino) != stat.Ino {
- t.Errorf("dirent ino of %s is incorrect: want %d, got %d", dirent.Name, stat.Ino, dirent.Ino)
- }
- if uint32(dirent.DevMinor) != stat.DevMinor {
- t.Errorf("dirent dev minor of %s is incorrect: want %d, got %d", dirent.Name, stat.DevMinor, dirent.DevMinor)
- }
- if uint32(dirent.DevMajor) != stat.DevMajor {
- t.Errorf("dirent dev major of %s is incorrect: want %d, got %d", dirent.Name, stat.DevMajor, dirent.DevMajor)
- }
- }
-}