Diffstat (limited to 'pkg/sentry')
-rw-r--r--  pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go       | 336
-rw-r--r--  pkg/sentry/socket/netstack/netstack.go                 |  15
-rw-r--r--  pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go  | 142
3 files changed, 244 insertions, 249 deletions
diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
index b8667cdb9..aac25375e 100644
--- a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
@@ -27,6 +27,174 @@ var _ marshal.Marshallable = (*aarch64Ctx)(nil)
var _ marshal.Marshallable = (*linux.SignalSet)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalContext64) SizeBytes() int {
+ return 32 +
+ 8*31 +
+ 1*8 +
+ (*FpsimdContext)(nil).SizeBytes() +
+ 1*3568
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalContext64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
+ dst = dst[8:]
+ for idx := 0; idx < 31; idx++ {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
+ dst = dst[8:]
+ }
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
+ dst = dst[8:]
+ for idx := 0; idx < 8; idx++ {
+ dst[0] = byte(s._pad[idx])
+ dst = dst[1:]
+ }
+ s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()])
+ dst = dst[s.Fpsimd64.SizeBytes():]
+ for idx := 0; idx < 3568; idx++ {
+ dst[0] = byte(s.Reserved[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalContext64) UnmarshalBytes(src []byte) {
+ s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 31; idx++ {
+ s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+ s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 8; idx++ {
+ s._pad[idx] = src[0]
+ src = src[1:]
+ }
+ s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()])
+ src = src[s.Fpsimd64.SizeBytes():]
+ for idx := 0; idx < 3568; idx++ {
+ s.Reserved[idx] = uint8(src[0])
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalContext64) Packed() bool {
+ return s.Fpsimd64.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalContext64) MarshalUnsafe(dst []byte) {
+ if s.Fpsimd64.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
+ if s.Fpsimd64.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (a *aarch64Ctx) SizeBytes() int {
return 8
}
@@ -422,171 +590,3 @@ func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
return int64(length), err
}
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SignalContext64) SizeBytes() int {
- return 32 +
- 8*31 +
- 1*8 +
- (*FpsimdContext)(nil).SizeBytes() +
- 1*3568
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SignalContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
- dst = dst[8:]
- for idx := 0; idx < 31; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
- dst = dst[8:]
- }
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
- dst = dst[8:]
- for idx := 0; idx < 8; idx++ {
- dst[0] = byte(s._pad[idx])
- dst = dst[1:]
- }
- s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()])
- dst = dst[s.Fpsimd64.SizeBytes():]
- for idx := 0; idx < 3568; idx++ {
- dst[0] = byte(s.Reserved[idx])
- dst = dst[1:]
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SignalContext64) UnmarshalBytes(src []byte) {
- s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- for idx := 0; idx < 31; idx++ {
- s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- }
- s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- for idx := 0; idx < 8; idx++ {
- s._pad[idx] = src[0]
- src = src[1:]
- }
- s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()])
- src = src[s.Fpsimd64.SizeBytes():]
- for idx := 0; idx < 3568; idx++ {
- s.Reserved[idx] = uint8(src[0])
- src = src[1:]
- }
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (s *SignalContext64) Packed() bool {
- return s.Fpsimd64.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SignalContext64) MarshalUnsafe(dst []byte) {
- if s.Fpsimd64.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
- } else {
- // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
- s.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
- if s.Fpsimd64.Packed() {
- safecopy.CopyOut(unsafe.Pointer(s), src)
- } else {
- // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- s.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !s.Fpsimd64.Packed() {
- // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- s.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return s.CopyOutN(cc, addr, s.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !s.Fpsimd64.Packed() {
- // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- s.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
- if !s.Fpsimd64.Packed() {
- // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, s.SizeBytes())
- s.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
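
Note: the hunks above only relocate the generated SignalContext64 marshalling methods within the file; the method bodies are unchanged. For readers unfamiliar with the generated output, the pattern these methods follow — a packed fast path that reinterprets the struct's own memory, with a field-by-field MarshalBytes fallback — is sketched below with a small standalone type. The point type and its fields are hypothetical and exist only for illustration; they are not part of this change.

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// point is a hypothetical fixed-layout type used only to illustrate the
// packed-fast-path / MarshalBytes-fallback pattern followed by the generated
// SignalContext64 methods above.
type point struct {
	X uint64
	Y uint64
}

// SizeBytes returns the size of the wire representation.
func (p *point) SizeBytes() int { return 16 }

// MarshalBytes serializes the fields one by one (the safe fallback path).
func (p *point) MarshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[:8], p.X)
	binary.LittleEndian.PutUint64(dst[8:16], p.Y)
}

// Packed reports whether the in-memory layout matches the wire layout.
func (p *point) Packed() bool { return true }

// MarshalUnsafe copies the struct's memory directly when it is packed and
// falls back to MarshalBytes otherwise, mirroring the generated code.
func (p *point) MarshalUnsafe(dst []byte) {
	if p.Packed() {
		copy(dst, unsafe.Slice((*byte)(unsafe.Pointer(p)), p.SizeBytes()))
		return
	}
	p.MarshalBytes(dst)
}

func main() {
	buf := make([]byte, 16)
	(&point{X: 1, Y: 2}).MarshalUnsafe(buf)
	fmt.Println(buf) // [1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0] on a little-endian host.
}
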
diff --git a/pkg/sentry/socket/netstack/netstack.go b/pkg/sentry/socket/netstack/netstack.go
index af75e2670..48e950a5e 100644
--- a/pkg/sentry/socket/netstack/netstack.go
+++ b/pkg/sentry/socket/netstack/netstack.go
@@ -965,7 +965,7 @@ func getSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, fam
}
// Get the last error and convert it.
- err := ep.LastError()
+ err := ep.SocketOptions().GetLastError()
if err == nil {
optP := primitive.Int32(0)
return &optP, nil
@@ -1127,13 +1127,8 @@ func getSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, fam
return nil, syserr.ErrInvalidArgument
}
- var v tcpip.OutOfBandInlineOption
- if err := ep.GetSockOpt(&v); err != nil {
- return nil, syserr.TranslateNetstackError(err)
- }
-
- vP := primitive.Int32(v)
- return &vP, nil
+ v := primitive.Int32(boolToInt32(ep.SocketOptions().GetOutOfBandInline()))
+ return &v, nil
case linux.SO_NO_CHECK:
if outLen < sizeOfInt32 {
@@ -1880,8 +1875,8 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
socket.SetSockOptEmitUnimplementedEvent(t, name)
}
- opt := tcpip.OutOfBandInlineOption(v)
- return syserr.TranslateNetstackError(ep.SetSockOpt(&opt))
+ ep.SocketOptions().SetOutOfBandInline(v != 0)
+ return nil
case linux.SO_NO_CHECK:
if len(optVal) < sizeOfInt32 {
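
Note: the netstack hunks above replace typed GetSockOpt/SetSockOpt round trips for SO_ERROR and SO_OOBINLINE with direct accessors on the endpoint's SocketOptions(). A minimal standalone sketch of that accessor pattern follows; the SocketOptions struct here is an illustrative stand-in (only the SetOutOfBandInline/GetOutOfBandInline method names and the boolToInt32 helper are taken from the diff, everything else is hypothetical).

package main

import "fmt"

// SocketOptions is an illustrative stand-in for the centralized option
// storage the hunks above switch to: options become plain typed fields with
// getters and setters instead of per-option GetSockOpt/SetSockOpt calls.
type SocketOptions struct {
	outOfBandInline bool
}

func (so *SocketOptions) SetOutOfBandInline(v bool) { so.outOfBandInline = v }
func (so *SocketOptions) GetOutOfBandInline() bool  { return so.outOfBandInline }

// boolToInt32 mirrors the helper used on the getsockopt path in the hunk.
func boolToInt32(v bool) int32 {
	if v {
		return 1
	}
	return 0
}

func main() {
	var so SocketOptions
	// setsockopt(SO_OOBINLINE, 1): any non-zero int32 enables the option.
	optVal := int32(1)
	so.SetOutOfBandInline(optVal != 0)
	// getsockopt(SO_OOBINLINE): the option is reported back as an int32.
	fmt.Println(boolToInt32(so.GetOutOfBandInline())) // prints 1
}
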
diff --git a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
index 862c286d9..956643160 100644
--- a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
@@ -23,156 +23,156 @@ var _ marshal.Marshallable = (*rlimit64)(nil)
var _ marshal.Marshallable = (*userSockFprog)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (d *direntHdr) SizeBytes() int {
- return 1 +
- (*oldDirentHdr)(nil).SizeBytes()
+func (o *oldDirentHdr) SizeBytes() int {
+ return 18
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (d *direntHdr) MarshalBytes(dst []byte) {
- d.OldHdr.MarshalBytes(dst[:d.OldHdr.SizeBytes()])
- dst = dst[d.OldHdr.SizeBytes():]
- dst[0] = byte(d.Typ)
- dst = dst[1:]
+func (o *oldDirentHdr) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(o.Ino))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(o.Off))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(o.Reclen))
+ dst = dst[2:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (d *direntHdr) UnmarshalBytes(src []byte) {
- d.OldHdr.UnmarshalBytes(src[:d.OldHdr.SizeBytes()])
- src = src[d.OldHdr.SizeBytes():]
- d.Typ = uint8(src[0])
- src = src[1:]
+func (o *oldDirentHdr) UnmarshalBytes(src []byte) {
+ o.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ o.Off = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ o.Reclen = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (d *direntHdr) Packed() bool {
+func (o *oldDirentHdr) Packed() bool {
return false
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (d *direntHdr) MarshalUnsafe(dst []byte) {
- // Type direntHdr doesn't have a packed layout in memory, fallback to MarshalBytes.
- d.MarshalBytes(dst)
+func (o *oldDirentHdr) MarshalUnsafe(dst []byte) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fallback to MarshalBytes.
+ o.MarshalBytes(dst)
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (d *direntHdr) UnmarshalUnsafe(src []byte) {
- // Type direntHdr doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- d.UnmarshalBytes(src)
+func (o *oldDirentHdr) UnmarshalUnsafe(src []byte) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ o.UnmarshalBytes(src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (d *direntHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
- d.MarshalBytes(buf) // escapes: fallback.
+func (o *oldDirentHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
+ o.MarshalBytes(buf) // escapes: fallback.
return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (d *direntHdr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return d.CopyOutN(cc, addr, d.SizeBytes())
+func (o *oldDirentHdr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return o.CopyOutN(cc, addr, o.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (d *direntHdr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Type direntHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
+func (o *oldDirentHdr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
// Unmarshal unconditionally. If we had a short copy-in, this results in a
// partially unmarshalled struct.
- d.UnmarshalBytes(buf) // escapes: fallback.
+ o.UnmarshalBytes(buf) // escapes: fallback.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (d *direntHdr) WriteTo(writer io.Writer) (int64, error) {
- // Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, d.SizeBytes())
- d.MarshalBytes(buf)
+func (o *oldDirentHdr) WriteTo(writer io.Writer) (int64, error) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, o.SizeBytes())
+ o.MarshalBytes(buf)
length, err := writer.Write(buf)
return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (o *oldDirentHdr) SizeBytes() int {
- return 18
+func (d *direntHdr) SizeBytes() int {
+ return 1 +
+ (*oldDirentHdr)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (o *oldDirentHdr) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(o.Ino))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(o.Off))
- dst = dst[8:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(o.Reclen))
- dst = dst[2:]
+func (d *direntHdr) MarshalBytes(dst []byte) {
+ d.OldHdr.MarshalBytes(dst[:d.OldHdr.SizeBytes()])
+ dst = dst[d.OldHdr.SizeBytes():]
+ dst[0] = byte(d.Typ)
+ dst = dst[1:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (o *oldDirentHdr) UnmarshalBytes(src []byte) {
- o.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- o.Off = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- o.Reclen = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
+func (d *direntHdr) UnmarshalBytes(src []byte) {
+ d.OldHdr.UnmarshalBytes(src[:d.OldHdr.SizeBytes()])
+ src = src[d.OldHdr.SizeBytes():]
+ d.Typ = uint8(src[0])
+ src = src[1:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (o *oldDirentHdr) Packed() bool {
+func (d *direntHdr) Packed() bool {
return false
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (o *oldDirentHdr) MarshalUnsafe(dst []byte) {
- // Type oldDirentHdr doesn't have a packed layout in memory, fallback to MarshalBytes.
- o.MarshalBytes(dst)
+func (d *direntHdr) MarshalUnsafe(dst []byte) {
+ // Type direntHdr doesn't have a packed layout in memory, fallback to MarshalBytes.
+ d.MarshalBytes(dst)
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (o *oldDirentHdr) UnmarshalUnsafe(src []byte) {
- // Type oldDirentHdr doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- o.UnmarshalBytes(src)
+func (d *direntHdr) UnmarshalUnsafe(src []byte) {
+ // Type direntHdr doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ d.UnmarshalBytes(src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (o *oldDirentHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
- o.MarshalBytes(buf) // escapes: fallback.
+func (d *direntHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
+ d.MarshalBytes(buf) // escapes: fallback.
return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (o *oldDirentHdr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return o.CopyOutN(cc, addr, o.SizeBytes())
+func (d *direntHdr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return d.CopyOutN(cc, addr, d.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (o *oldDirentHdr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Type oldDirentHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
+func (d *direntHdr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Type direntHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
// Unmarshal unconditionally. If we had a short copy-in, this results in a
// partially unmarshalled struct.
- o.UnmarshalBytes(buf) // escapes: fallback.
+ d.UnmarshalBytes(buf) // escapes: fallback.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (o *oldDirentHdr) WriteTo(writer io.Writer) (int64, error) {
- // Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, o.SizeBytes())
- o.MarshalBytes(buf)
+func (d *direntHdr) WriteTo(writer io.Writer) (int64, error) {
+ // Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, d.SizeBytes())
+ d.MarshalBytes(buf)
length, err := writer.Write(buf)
return int64(length), err
}