Diffstat (limited to 'pkg')
-rw-r--r--  pkg/abi/linux/fuse.go                        |  55
-rw-r--r--  pkg/abi/linux/linux_abi_autogen_unsafe.go    | 406
-rw-r--r--  pkg/sentry/fsimpl/fuse/fusefs.go             |  96
-rw-r--r--  pkg/sentry/platform/ring0/defs_impl_arm64.go |   4
4 files changed, 549 insertions, 12 deletions
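
Note (not part of the change): FUSE_GETATTR is a fixed-size exchange in the host's native byte order (little-endian on the architectures gVisor supports). The request body (FUSEGetAttrIn, Linux's fuse_getattr_in) is 16 bytes; the reply body (FUSEGetAttrOut, fuse_attr_out) is a 16-byte cache-validity prefix followed by an 88-byte FUSEAttr. The standalone sketch below (helper names such as buildGetAttrIn are illustrative, not gVisor API) walks that layout using only the standard library; the generated MarshalBytes/UnmarshalBytes code in the diff implements the same layout field by field.

// getattr_wire_sketch.go: illustrative only, not part of gVisor. It mirrors
// the fixed-size layout of fuse_getattr_in and fuse_attr_out that the
// generated marshalling code in this change reads and writes.
package main

import (
	"encoding/binary"
	"fmt"
)

// buildGetAttrIn encodes the 16-byte fuse_getattr_in body: getattr flags,
// 4 bytes of padding, then the optional file handle (only meaningful when
// the FUSE_GETATTR_FH flag is set).
func buildGetAttrIn(flags uint32, fh uint64) []byte {
	buf := make([]byte, 16)
	binary.LittleEndian.PutUint32(buf[0:4], flags)
	// buf[4:8] is padding.
	binary.LittleEndian.PutUint64(buf[8:16], fh)
	return buf
}

// parseAttrOutPrefix decodes the 16-byte prefix of fuse_attr_out: the
// attribute-cache validity in whole seconds and nanoseconds. The 88-byte
// fuse_attr payload follows at offset 16.
func parseAttrOutPrefix(reply []byte) (validSec uint64, validNsec uint32) {
	validSec = binary.LittleEndian.Uint64(reply[0:8])
	validNsec = binary.LittleEndian.Uint32(reply[8:12])
	// reply[12:16] is padding; reply[16:104] is fuse_attr.
	return validSec, validNsec
}

func main() {
	req := buildGetAttrIn(0, 0) // no FUSE_GETATTR_FH, so the handle is unused.
	fmt.Printf("fuse_getattr_in: % x (%d bytes)\n", req, len(req))

	reply := make([]byte, 16+88)                 // fuse_attr_out prefix + fuse_attr.
	binary.LittleEndian.PutUint64(reply[0:8], 1) // attr_valid = 1 second.
	sec, nsec := parseAttrOutPrefix(reply)
	fmt.Printf("attributes cacheable for %ds %dns\n", sec, nsec)
}
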
diff --git a/pkg/abi/linux/fuse.go b/pkg/abi/linux/fuse.go
index 5c6ffe4a3..7e30483ee 100644
--- a/pkg/abi/linux/fuse.go
+++ b/pkg/abi/linux/fuse.go
@@ -246,3 +246,58 @@ type FUSEInitOut struct {
 	_ [8]uint32
 }
+
+// FUSEGetAttrIn is the request sent by the kernel to the daemon,
+// to get the attributes of an inode.
+//
+// +marshal
+type FUSEGetAttrIn struct {
+	// GetAttrFlags specifies whether the getattr request is sent with a
+	// nodeid or with a file handle.
+	GetAttrFlags uint32
+
+	_ uint32
+
+	// Fh is the file handle when GetAttrFlags has the FUSE_GETATTR_FH bit.
+	// If used, the operation is analogous to fstat(2).
+	Fh uint64
+}
+
+// FUSEAttr is the struct used in the response FUSEGetAttrOut.
+//
+// +marshal
+type FUSEAttr struct {
+	Ino       uint64
+	Size      uint64
+	Blocks    uint64
+	Atime     uint64
+	Mtime     uint64
+	Ctime     uint64
+	AtimeNsec uint32
+	MtimeNsec uint32
+	CtimeNsec uint32
+	Mode      uint32
+	Nlink     uint32
+	UID       uint32
+	GID       uint32
+	Rdev      uint32
+	BlkSize   uint32
+	_         uint32
+}
+
+// FUSEGetAttrOut is the reply sent by the daemon to the kernel
+// for FUSEGetAttrIn.
+//
+// +marshal
+type FUSEGetAttrOut struct {
+	// AttrValid and AttrValidNsec describe the attribute cache duration.
+	AttrValid uint64
+
+	// AttrValidNsec is the nanosecond part of the attribute cache duration.
+	AttrValidNsec uint32
+
+	_ uint32
+
+	// Attr contains the metadata returned from the FUSE server.
+	Attr FUSEAttr
+}
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 0ff71c56b..8fdb4f1b1 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -15,6 +15,9 @@ import (
 
 // Marshallable types used by this file.
 var _ marshal.Marshallable = (*ControlMessageCredentials)(nil)
+var _ marshal.Marshallable = (*FUSEAttr)(nil)
+var _ marshal.Marshallable = (*FUSEGetAttrIn)(nil)
+var _ marshal.Marshallable = (*FUSEGetAttrOut)(nil)
 var _ marshal.Marshallable = (*FUSEHeaderIn)(nil)
 var _ marshal.Marshallable = (*FUSEHeaderOut)(nil)
 var _ marshal.Marshallable = (*FUSEInitIn)(nil)
@@ -145,12 +148,12 @@ func (s *Statx) UnmarshalBytes(src []byte) {
 // Packed implements marshal.Marshallable.Packed.
 //go:nosplit
 func (s *Statx) Packed() bool {
-	return s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed()
+	return s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed()
 }
 
 // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
 func (s *Statx) MarshalUnsafe(dst []byte) {
-	if s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+	if s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
 		safecopy.CopyIn(dst, unsafe.Pointer(s))
 	} else {
 		// Type Statx doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -160,7 +163,7 @@ func (s *Statx) MarshalUnsafe(dst []byte) {
 
 // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
 func (s *Statx) UnmarshalUnsafe(src []byte) {
-	if s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+	if s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
 		safecopy.CopyOut(unsafe.Pointer(s), src)
 	} else {
 		// Type Statx doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -171,7 +174,7 @@ func (s *Statx) UnmarshalUnsafe(src []byte) {
 
 // CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit func (s *Statx) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) { - if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() { + if !s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() { // Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes. buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. s.MarshalBytes(buf) // escapes: fallback. @@ -201,7 +204,7 @@ func (s *Statx) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) { // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit func (s *Statx) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) { - if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() { + if !s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() { // Type Statx doesn't have a packed layout in memory, fall back to UnmarshalBytes. buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. length, err := task.CopyInBytes(addr, buf) // escapes: okay. @@ -227,7 +230,7 @@ func (s *Statx) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) { // WriteTo implements io.WriterTo.WriteTo. func (s *Statx) WriteTo(w io.Writer) (int64, error) { - if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() { + if !s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() { // Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes. buf := make([]byte, s.SizeBytes()) s.MarshalBytes(buf) @@ -625,7 +628,7 @@ func (f *FUSEHeaderIn) Packed() bool { // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) { - if f.Unique.Packed() && f.Opcode.Packed() { + if f.Opcode.Packed() && f.Unique.Packed() { safecopy.CopyIn(dst, unsafe.Pointer(f)) } else { // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes. @@ -1202,6 +1205,389 @@ func (f *FUSEInitOut) WriteTo(w io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. +func (f *FUSEGetAttrIn) SizeBytes() int { + return 16 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) + dst = dst[8:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) { + f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (f *FUSEGetAttrIn) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(f)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(f), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (f *FUSEGetAttrIn) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (f *FUSEGetAttrIn) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) { + return f.CopyOutN(task, addr, f.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (f *FUSEGetAttrIn) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := task.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (f *FUSEGetAttrIn) WriteTo(w io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := w.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (f *FUSEAttr) SizeBytes() int { + return 88 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (f *FUSEAttr) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
+func (f *FUSEAttr) UnmarshalBytes(src []byte) { + f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Size = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (f *FUSEAttr) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (f *FUSEAttr) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(f)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (f *FUSEAttr) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(f), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (f *FUSEAttr) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (f *FUSEAttr) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) { + return f.CopyOutN(task, addr, f.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (f *FUSEAttr) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := task.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (f *FUSEAttr) WriteTo(w io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := w.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. 
+ runtime.KeepAlive(f) + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (f *FUSEGetAttrOut) SizeBytes() int { + return 16 + + (*FUSEAttr)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()]) + dst = dst[f.Attr.SizeBytes():] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (f *FUSEGetAttrOut) UnmarshalBytes(src []byte) { + f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.AttrValidNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()]) + src = src[f.Attr.SizeBytes():] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (f *FUSEGetAttrOut) Packed() bool { + return f.Attr.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (f *FUSEGetAttrOut) MarshalUnsafe(dst []byte) { + if f.Attr.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(f)) + } else { + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to MarshalBytes. + f.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (f *FUSEGetAttrOut) UnmarshalUnsafe(src []byte) { + if f.Attr.Packed() { + safecopy.CopyOut(unsafe.Pointer(f), src) + } else { + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. + f.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (f *FUSEGetAttrOut) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) { + if !f.Attr.Packed() { + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := task.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + f.MarshalBytes(buf) // escapes: fallback. + return task.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (f *FUSEGetAttrOut) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) { + return f.CopyOutN(task, addr, f.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (f *FUSEGetAttrOut) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) { + if !f.Attr.Packed() { + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := task.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + length, err := task.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + f.UnmarshalBytes(buf) // escapes: fallback. 
+ return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := task.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (f *FUSEGetAttrOut) WriteTo(w io.Writer) (int64, error) { + if !f.Attr.Packed() { + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, f.SizeBytes()) + f.MarshalBytes(buf) + length, err := w.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := w.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (r *RobustListHead) SizeBytes() int { return 24 } @@ -1818,7 +2204,7 @@ func (i *IPTIP) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit func (i *IPTIP) Packed() bool { - return i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() + return i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. @@ -1844,7 +2230,7 @@ func (i *IPTIP) UnmarshalUnsafe(src []byte) { // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit func (i *IPTIP) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) { - if !i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() { + if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() { // Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes. buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. i.MarshalBytes(buf) // escapes: fallback. @@ -1900,7 +2286,7 @@ func (i *IPTIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) { // WriteTo implements io.WriterTo.WriteTo. func (i *IPTIP) WriteTo(w io.Writer) (int64, error) { - if !i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() { + if !i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() { // Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes. buf := make([]byte, i.SizeBytes()) i.MarshalBytes(buf) diff --git a/pkg/sentry/fsimpl/fuse/fusefs.go b/pkg/sentry/fsimpl/fuse/fusefs.go index a1405f7c3..83c24ec25 100644 --- a/pkg/sentry/fsimpl/fuse/fusefs.go +++ b/pkg/sentry/fsimpl/fuse/fusefs.go @@ -226,3 +226,99 @@ func (i *inode) Open(ctx context.Context, rp *vfs.ResolvingPath, vfsd *vfs.Dentr } return fd.VFSFileDescription(), nil } + +// statFromFUSEAttr makes attributes from linux.FUSEAttr to linux.Statx. The +// opts.Sync attribute is ignored since the synchronization is handled by the +// FUSE server. 
+func statFromFUSEAttr(attr linux.FUSEAttr, mask, devMinor uint32) linux.Statx {
+	var stat linux.Statx
+	stat.Blksize = attr.BlkSize
+	stat.DevMajor, stat.DevMinor = linux.UNNAMED_MAJOR, devMinor
+
+	rdevMajor, rdevMinor := linux.DecodeDeviceID(attr.Rdev)
+	stat.RdevMajor, stat.RdevMinor = uint32(rdevMajor), rdevMinor
+
+	if mask&linux.STATX_MODE != 0 {
+		stat.Mode = uint16(attr.Mode)
+	}
+	if mask&linux.STATX_NLINK != 0 {
+		stat.Nlink = attr.Nlink
+	}
+	if mask&linux.STATX_UID != 0 {
+		stat.UID = attr.UID
+	}
+	if mask&linux.STATX_GID != 0 {
+		stat.GID = attr.GID
+	}
+	if mask&linux.STATX_ATIME != 0 {
+		stat.Atime = linux.StatxTimestamp{
+			Sec:  int64(attr.Atime),
+			Nsec: attr.AtimeNsec,
+		}
+	}
+	if mask&linux.STATX_MTIME != 0 {
+		stat.Mtime = linux.StatxTimestamp{
+			Sec:  int64(attr.Mtime),
+			Nsec: attr.MtimeNsec,
+		}
+	}
+	if mask&linux.STATX_CTIME != 0 {
+		stat.Ctime = linux.StatxTimestamp{
+			Sec:  int64(attr.Ctime),
+			Nsec: attr.CtimeNsec,
+		}
+	}
+	if mask&linux.STATX_INO != 0 {
+		stat.Ino = attr.Ino
+	}
+	if mask&linux.STATX_SIZE != 0 {
+		stat.Size = attr.Size
+	}
+	if mask&linux.STATX_BLOCKS != 0 {
+		stat.Blocks = attr.Blocks
+	}
+	return stat
+}
+
+// Stat implements kernfs.Inode.Stat.
+func (i *inode) Stat(ctx context.Context, fs *vfs.Filesystem, opts vfs.StatOptions) (linux.Statx, error) {
+	fusefs := fs.Impl().(*filesystem)
+	conn := fusefs.conn
+	task, creds := kernel.TaskFromContext(ctx), auth.CredentialsFromContext(ctx)
+	if task == nil {
+		log.Warningf("couldn't get kernel task from context")
+		return linux.Statx{}, syserror.EINVAL
+	}
+
+	var in linux.FUSEGetAttrIn
+	// We don't set any attribute in the request, because in VFS2 fstat(2) is
+	// ultimately translated into vfs.FilesystemImpl.StatAt() (see
+	// pkg/sentry/syscalls/linux/vfs2/stat.go), resulting in the same flow
+	// as stat(2). Thus the GetAttrFlags and Fh fields are never used in VFS2.
+	req, err := conn.NewRequest(creds, uint32(task.ThreadID()), i.Ino(), linux.FUSE_GETATTR, &in)
+	if err != nil {
+		return linux.Statx{}, err
+	}
+
+	res, err := conn.Call(task, req)
+	if err != nil {
+		return linux.Statx{}, err
+	}
+	if err := res.Error(); err != nil {
+		return linux.Statx{}, err
+	}
+
+	var out linux.FUSEGetAttrOut
+	if err := res.UnmarshalPayload(&out); err != nil {
+		return linux.Statx{}, err
+	}
+
+	// Set all metadata into kernfs.InodeAttrs.
+	if err := i.SetStat(ctx, fs, creds, vfs.SetStatOptions{
+		Stat: statFromFUSEAttr(out.Attr, linux.STATX_ALL, fusefs.devMinor),
+	}); err != nil {
+		return linux.Statx{}, err
+	}
+
+	return statFromFUSEAttr(out.Attr, opts.Mask, fusefs.devMinor), nil
+}
diff --git a/pkg/sentry/platform/ring0/defs_impl_arm64.go b/pkg/sentry/platform/ring0/defs_impl_arm64.go
index 424b66f76..eba2eac30 100644
--- a/pkg/sentry/platform/ring0/defs_impl_arm64.go
+++ b/pkg/sentry/platform/ring0/defs_impl_arm64.go
@@ -3,11 +3,11 @@ package ring0
 import (
 	"gvisor.dev/gvisor/pkg/sentry/arch"
 	"gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables"
+	"io"
+	"reflect"
 
 	"fmt"
 	"gvisor.dev/gvisor/pkg/usermem"
-	"io"
-	"reflect"
 )
 
 // Useful bits.
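
Note (not part of the change): in the Stat() path above, statFromFUSEAttr runs twice, once with linux.STATX_ALL so the fresh attributes are written back into kernfs.InodeAttrs via SetStat, and once with the caller's opts.Mask to build the returned Statx. The standalone sketch below uses trimmed stand-in types rather than linux.FUSEAttr/linux.Statx (only the STATX_* bit values are taken from the Linux ABI) to illustrate the mask-driven copy.

// statx_mask_sketch.go: illustrative only. It shows the mask-driven copy that
// statFromFUSEAttr performs, with stand-in types and a reduced field set.
package main

import "fmt"

type fuseAttr struct {
	Ino, Size, Atime          uint64
	AtimeNsec, Mode, UID, GID uint32
}

type statx struct {
	Ino, Size uint64
	Mode      uint16
	UID, GID  uint32
	AtimeSec  int64
	AtimeNsec uint32
}

// These bit values match the Linux STATX_* constants.
const (
	statxMode  = 0x0002
	statxUID   = 0x0008
	statxGID   = 0x0010
	statxAtime = 0x0020
	statxIno   = 0x0100
	statxSize  = 0x0200
)

// statFromAttr copies only the fields requested in mask, leaving the rest
// zero, just as statFromFUSEAttr fills linux.Statx from the FUSE reply.
func statFromAttr(attr fuseAttr, mask uint32) statx {
	var s statx
	if mask&statxMode != 0 {
		s.Mode = uint16(attr.Mode)
	}
	if mask&statxUID != 0 {
		s.UID = attr.UID
	}
	if mask&statxGID != 0 {
		s.GID = attr.GID
	}
	if mask&statxAtime != 0 {
		s.AtimeSec, s.AtimeNsec = int64(attr.Atime), attr.AtimeNsec
	}
	if mask&statxIno != 0 {
		s.Ino = attr.Ino
	}
	if mask&statxSize != 0 {
		s.Size = attr.Size
	}
	return s
}

func main() {
	attr := fuseAttr{Ino: 42, Size: 4096, Mode: 0o644, UID: 1000, GID: 1000}
	// Request only the inode number and size, as a caller's opts.Mask might.
	fmt.Printf("%+v\n", statFromAttr(attr, statxIno|statxSize))
}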