 31 files changed, 3633 insertions(+), 208 deletions(-)
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 346b3bdef..4a267d833 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -24,7 +24,10 @@ var _ marshal.Marshallable = (*CapUserData)(nil)
 var _ marshal.Marshallable = (*CapUserHeader)(nil)
 var _ marshal.Marshallable = (*ClockT)(nil)
 var _ marshal.Marshallable = (*ControlMessageCredentials)(nil)
+var _ marshal.Marshallable = (*ControlMessageHeader)(nil)
+var _ marshal.Marshallable = (*ControlMessageIPPacketInfo)(nil)
 var _ marshal.Marshallable = (*DigestMetadata)(nil)
+var _ marshal.Marshallable = (*ErrorName)(nil)
 var _ marshal.Marshallable = (*ExtensionName)(nil)
 var _ marshal.Marshallable = (*FOwnerEx)(nil)
 var _ marshal.Marshallable = (*FUSEAttr)(nil)
@@ -61,8 +64,15 @@ var _ marshal.Marshallable = (*IPTEntry)(nil)
 var _ marshal.Marshallable = (*IPTGetEntries)(nil)
 var _ marshal.Marshallable = (*IPTGetinfo)(nil)
 var _ marshal.Marshallable = (*IPTIP)(nil)
+var _ marshal.Marshallable = (*IPTOwnerInfo)(nil)
+var _ marshal.Marshallable = (*IPTReplace)(nil)
 var _ marshal.Marshallable = (*Inet6Addr)(nil)
+var _ marshal.Marshallable = (*Inet6MulticastRequest)(nil)
 var _ marshal.Marshallable = (*InetAddr)(nil)
+var _ marshal.Marshallable = (*InetMulticastRequest)(nil)
+var _ marshal.Marshallable = (*InetMulticastRequestWithNIC)(nil)
+var _ marshal.Marshallable = (*InterfaceAddrMessage)(nil)
+var _ marshal.Marshallable = (*InterfaceInfoMessage)(nil)
 var _ marshal.Marshallable = (*ItimerVal)(nil)
 var _ marshal.Marshallable = (*Itimerspec)(nil)
 var _ marshal.Marshallable = (*KernelIP6TEntry)(nil)
@@ -70,10 +80,17 @@ var _ marshal.Marshallable = (*KernelIP6TGetEntries)(nil)
 var _ marshal.Marshallable = (*KernelIPTEntry)(nil)
 var _ marshal.Marshallable = (*KernelIPTGetEntries)(nil)
 var _ marshal.Marshallable = (*Linger)(nil)
+var _ marshal.Marshallable = (*NFNATRange)(nil)
+var _ marshal.Marshallable = (*NetlinkAttrHeader)(nil)
+var _ marshal.Marshallable = (*NetlinkErrorMessage)(nil)
+var _ marshal.Marshallable = (*NetlinkMessageHeader)(nil)
+var _ marshal.Marshallable = (*NfNATIPV4MultiRangeCompat)(nil)
+var _ marshal.Marshallable = (*NfNATIPV4Range)(nil)
 var _ marshal.Marshallable = (*NumaPolicy)(nil)
 var _ marshal.Marshallable = (*PollFD)(nil)
 var _ marshal.Marshallable = (*RSeqCriticalSection)(nil)
 var _ marshal.Marshallable = (*RobustListHead)(nil)
+var _ marshal.Marshallable = (*RouteMessage)(nil)
 var _ marshal.Marshallable = (*Rusage)(nil)
 var _ marshal.Marshallable = (*SeccompData)(nil)
 var _ marshal.Marshallable = (*SemInfo)(nil)
@@ -109,7 +126,15 @@ var _ marshal.Marshallable = (*UtsName)(nil)
 var _ marshal.Marshallable = (*WindowSize)(nil)
 var _ marshal.Marshallable = (*Winsize)(nil)
 var _ marshal.Marshallable = (*XTCounters)(nil)
+var _ marshal.Marshallable = (*XTEntryMatch)(nil)
+var _ marshal.Marshallable = (*XTEntryTarget)(nil)
+var _ marshal.Marshallable = (*XTErrorTarget)(nil)
 var _ marshal.Marshallable = (*XTGetRevision)(nil)
+var _ marshal.Marshallable = (*XTRedirectTarget)(nil)
+var _ marshal.Marshallable = (*XTSNATTarget)(nil)
+var _ marshal.Marshallable = (*XTStandardTarget)(nil)
+var _ marshal.Marshallable = (*XTTCP)(nil)
+var _ marshal.Marshallable = (*XTUDP)(nil)
 
 // SizeBytes implements marshal.Marshallable.SizeBytes.
 func (i *IOCallback) SizeBytes() int {
@@ -4839,6 +4864,101 @@ func (ifr *IFReq) WriteTo(writer io.Writer) (int64, error) {
 
 // SizeBytes implements marshal.Marshallable.SizeBytes.
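The `var _ marshal.Marshallable = (*T)(nil)` block added above is Go's standard compile-time interface assertion: each typed-nil conversion forces the compiler to check that the pointer type implements the interface, at zero runtime cost. A minimal, self-contained sketch of the idiom (Marshallable here is a local stand-in for gVisor's marshal.Marshallable, trimmed to three methods; the Linger layout is modeled on Linux's struct linger):

```go
package main

// Marshallable is a local stand-in for gVisor's marshal.Marshallable,
// trimmed to three methods for illustration.
type Marshallable interface {
	SizeBytes() int
	MarshalBytes(dst []byte)
	UnmarshalBytes(src []byte)
}

// Linger is modeled on the struct asserted above (Linux's struct linger).
type Linger struct {
	OnOff  int32
	Linger int32
}

func (l *Linger) SizeBytes() int            { return 8 }
func (l *Linger) MarshalBytes(dst []byte)   { /* field-by-field encode */ }
func (l *Linger) UnmarshalBytes(src []byte) { /* field-by-field decode */ }

// The typed-nil conversion costs nothing at runtime; it only makes the
// build fail if *Linger ever stops satisfying the interface.
var _ Marshallable = (*Linger)(nil)

func main() {}
```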
//go:nosplit +func (en *ErrorName) SizeBytes() int { + return 1 * XT_FUNCTION_MAXNAMELEN +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (en *ErrorName) MarshalBytes(dst []byte) { + for idx := 0; idx < XT_FUNCTION_MAXNAMELEN; idx++ { + dst[0] = byte(en[idx]) + dst = dst[1:] + } +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (en *ErrorName) UnmarshalBytes(src []byte) { + for idx := 0; idx < XT_FUNCTION_MAXNAMELEN; idx++ { + en[idx] = src[0] + src = src[1:] + } +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (en *ErrorName) Packed() bool { + // Array newtypes are always packed. + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (en *ErrorName) MarshalUnsafe(dst []byte) { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&en[0]), uintptr(en.SizeBytes())) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (en *ErrorName) UnmarshalUnsafe(src []byte) { + gohacks.Memmove(unsafe.Pointer(en), unsafe.Pointer(&src[0]), uintptr(en.SizeBytes())) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (en *ErrorName) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en))) + hdr.Len = en.SizeBytes() + hdr.Cap = en.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that en + // must live until the use above. + runtime.KeepAlive(en) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (en *ErrorName) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return en.CopyOutN(cc, addr, en.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (en *ErrorName) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en))) + hdr.Len = en.SizeBytes() + hdr.Cap = en.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that en + // must live until the use above. + runtime.KeepAlive(en) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (en *ErrorName) WriteTo(w io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en))) + hdr.Len = en.SizeBytes() + hdr.Cap = en.SizeBytes() + + length, err := w.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that en + // must live until the use above. + runtime.KeepAlive(en) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +//go:nosplit func (en *ExtensionName) SizeBytes() int { return 1 * XT_EXTENSION_MAXNAMELEN } @@ -5555,6 +5675,269 @@ func (i *IPTIP) WriteTo(writer io.Writer) (int64, error) { return int64(length), err } +// SizeBytes implements marshal.Marshallable.SizeBytes. 
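ErrorName above is a fixed-size byte-array newtype, so its wire size equals its in-memory size and the generated Packed() returns true unconditionally, which is what lets MarshalUnsafe/UnmarshalUnsafe use a single memmove. A sketch of the same round trip, assuming XT_FUNCTION_MAXNAMELEN is 30 as in Linux (errorName and maxNameLen are local stand-ins):

```go
package main

import "fmt"

const maxNameLen = 30 // assumed value of XT_FUNCTION_MAXNAMELEN

// errorName mirrors the array-newtype pattern: a fixed-size byte array
// whose wire size equals its in-memory size, so Packed() is always true.
type errorName [maxNameLen]byte

func (en *errorName) SizeBytes() int { return maxNameLen }

func (en *errorName) MarshalBytes(dst []byte) {
	copy(dst[:maxNameLen], en[:]) // equivalent to the generated byte loop
}

func (en *errorName) UnmarshalBytes(src []byte) {
	copy(en[:], src[:maxNameLen])
}

func main() {
	var a, b errorName
	copy(a[:], "ERROR")
	buf := make([]byte, a.SizeBytes())
	a.MarshalBytes(buf)
	b.UnmarshalBytes(buf)
	fmt.Println(a == b) // true: lossless round trip
}
```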
+func (i *IPTOwnerInfo) SizeBytes() int { + return 18 + + 1*16 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (i *IPTOwnerInfo) MarshalBytes(dst []byte) { + hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.UID)) + dst = dst[4:] + hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.GID)) + dst = dst[4:] + hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.PID)) + dst = dst[4:] + hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.SID)) + dst = dst[4:] + for idx := 0; idx < 16; idx++ { + dst[0] = byte(i.Comm[idx]) + dst = dst[1:] + } + dst[0] = byte(i.Match) + dst = dst[1:] + dst[0] = byte(i.Invert) + dst = dst[1:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (i *IPTOwnerInfo) UnmarshalBytes(src []byte) { + i.UID = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.GID = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.PID = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.SID = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + for idx := 0; idx < 16; idx++ { + i.Comm[idx] = src[0] + src = src[1:] + } + i.Match = uint8(src[0]) + src = src[1:] + i.Invert = uint8(src[0]) + src = src[1:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (i *IPTOwnerInfo) Packed() bool { + return false +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (i *IPTOwnerInfo) MarshalUnsafe(dst []byte) { + // Type IPTOwnerInfo doesn't have a packed layout in memory, fallback to MarshalBytes. + i.MarshalBytes(dst) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (i *IPTOwnerInfo) UnmarshalUnsafe(src []byte) { + // Type IPTOwnerInfo doesn't have a packed layout in memory, fallback to UnmarshalBytes. + i.UnmarshalBytes(src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (i *IPTOwnerInfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + // Type IPTOwnerInfo doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + i.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (i *IPTOwnerInfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return i.CopyOutN(cc, addr, i.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (i *IPTOwnerInfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + // Type IPTOwnerInfo doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + i.UnmarshalBytes(buf) // escapes: fallback. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (i *IPTOwnerInfo) WriteTo(writer io.Writer) (int64, error) { + // Type IPTOwnerInfo doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, i.SizeBytes()) + i.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. 
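IPTOwnerInfo above is the first type in this hunk whose Packed() is hard-coded to false, so all of its Unsafe and Copy paths degrade to the byte-wise fallback. The usual reason a type is non-packed is that its in-memory size, including trailing alignment padding, differs from the wire size SizeBytes reports (18 + 16 = 34 here). A sketch of that mismatch, assuming the field layout implied by the marshalling code above:

```go
package main

import (
	"fmt"
	"unsafe"
)

// ownerInfo assumes the field layout implied by the generated code:
// four uint32s, a 16-byte comm, then two uint8 flags.
type ownerInfo struct {
	UID, GID, PID, SID uint32
	Comm               [16]byte
	Match, Invert      uint8
}

func main() {
	wire := 4*4 + 16 + 2 // 34 bytes, matching SizeBytes above
	mem := unsafe.Sizeof(ownerInfo{})
	// On common platforms mem is 36: the compiler pads the tail so the
	// struct size is a multiple of its alignment. Wire != memory, so a
	// single memmove cannot be used and Packed() must report false.
	fmt.Println(wire, mem)
}
```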
+func (i *IPTReplace) SizeBytes() int { + return 24 + + (*TableName)(nil).SizeBytes() + + 4*NF_INET_NUMHOOKS + + 4*NF_INET_NUMHOOKS +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (i *IPTReplace) MarshalBytes(dst []byte) { + i.Name.MarshalBytes(dst[:i.Name.SizeBytes()]) + dst = dst[i.Name.SizeBytes():] + hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks)) + dst = dst[4:] + hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries)) + dst = dst[4:] + hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Size)) + dst = dst[4:] + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx])) + dst = dst[4:] + } + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx])) + dst = dst[4:] + } + hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.NumCounters)) + dst = dst[4:] + hostarch.ByteOrder.PutUint64(dst[:8], uint64(i.Counters)) + dst = dst[8:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (i *IPTReplace) UnmarshalBytes(src []byte) { + i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()]) + src = src[i.Name.SizeBytes():] + i.ValidHooks = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.NumEntries = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.Size = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + i.HookEntry[idx] = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + } + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + i.Underflow[idx] = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + } + i.NumCounters = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.Counters = uint64(hostarch.ByteOrder.Uint64(src[:8])) + src = src[8:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (i *IPTReplace) Packed() bool { + return i.Name.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (i *IPTReplace) MarshalUnsafe(dst []byte) { + if i.Name.Packed() { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes())) + } else { + // Type IPTReplace doesn't have a packed layout in memory, fallback to MarshalBytes. + i.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (i *IPTReplace) UnmarshalUnsafe(src []byte) { + if i.Name.Packed() { + gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes())) + } else { + // Type IPTReplace doesn't have a packed layout in memory, fallback to UnmarshalBytes. + i.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (i *IPTReplace) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + if !i.Name.Packed() { + // Type IPTReplace doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + i.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. 
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (i *IPTReplace) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return i.CopyOutN(cc, addr, i.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (i *IPTReplace) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + if !i.Name.Packed() { + // Type IPTReplace doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + i.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (i *IPTReplace) WriteTo(writer io.Writer) (int64, error) { + if !i.Name.Packed() { + // Type IPTReplace doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, i.SizeBytes()) + i.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return int64(length), err +} + // Packed implements marshal.Marshallable.Packed. //go:nosplit func (ke *KernelIPTEntry) Packed() bool { @@ -5666,6 +6049,250 @@ func (ke *KernelIPTGetEntries) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. +func (n *NfNATIPV4MultiRangeCompat) SizeBytes() int { + return 4 + + (*NfNATIPV4Range)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (n *NfNATIPV4MultiRangeCompat) MarshalBytes(dst []byte) { + hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.RangeSize)) + dst = dst[4:] + n.RangeIPV4.MarshalBytes(dst[:n.RangeIPV4.SizeBytes()]) + dst = dst[n.RangeIPV4.SizeBytes():] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (n *NfNATIPV4MultiRangeCompat) UnmarshalBytes(src []byte) { + n.RangeSize = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + n.RangeIPV4.UnmarshalBytes(src[:n.RangeIPV4.SizeBytes()]) + src = src[n.RangeIPV4.SizeBytes():] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (n *NfNATIPV4MultiRangeCompat) Packed() bool { + return n.RangeIPV4.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. 
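The CopyOutN/CopyIn bodies above all share one trick: aliasing the receiver's own memory as a []byte through a hand-built reflect.SliceHeader, then pinning the receiver with runtime.KeepAlive, because the garbage collector cannot see through the uintptr stored in the slice header. A runnable distillation of that pattern (header is a hypothetical type; gohacks.Noescape is gVisor-specific and omitted here):

```go
package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

// header is a hypothetical packed type.
type header struct{ A, B uint32 }

// asBytes aliases h's memory as a []byte without copying, using the same
// reflect.SliceHeader construction as the generated CopyOutN/CopyIn.
func asBytes(h *header) []byte {
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(h))
	hdr.Len = int(unsafe.Sizeof(*h))
	hdr.Cap = int(unsafe.Sizeof(*h))
	return buf
}

func main() {
	h := &header{A: 1, B: 2}
	buf := asBytes(h)
	fmt.Println(buf) // h's raw bytes, no copy made
	// The GC cannot see through the uintptr in the slice header, so h
	// must stay live until the last use of buf; KeepAlive ensures that.
	runtime.KeepAlive(h)
}
```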
+func (n *NfNATIPV4MultiRangeCompat) MarshalUnsafe(dst []byte) { + if n.RangeIPV4.Packed() { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes())) + } else { + // Type NfNATIPV4MultiRangeCompat doesn't have a packed layout in memory, fallback to MarshalBytes. + n.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (n *NfNATIPV4MultiRangeCompat) UnmarshalUnsafe(src []byte) { + if n.RangeIPV4.Packed() { + gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes())) + } else { + // Type NfNATIPV4MultiRangeCompat doesn't have a packed layout in memory, fallback to UnmarshalBytes. + n.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (n *NfNATIPV4MultiRangeCompat) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + if !n.RangeIPV4.Packed() { + // Type NfNATIPV4MultiRangeCompat doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay. + n.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n))) + hdr.Len = n.SizeBytes() + hdr.Cap = n.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that n + // must live until the use above. + runtime.KeepAlive(n) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (n *NfNATIPV4MultiRangeCompat) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return n.CopyOutN(cc, addr, n.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (n *NfNATIPV4MultiRangeCompat) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + if !n.RangeIPV4.Packed() { + // Type NfNATIPV4MultiRangeCompat doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + n.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n))) + hdr.Len = n.SizeBytes() + hdr.Cap = n.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that n + // must live until the use above. + runtime.KeepAlive(n) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (n *NfNATIPV4MultiRangeCompat) WriteTo(writer io.Writer) (int64, error) { + if !n.RangeIPV4.Packed() { + // Type NfNATIPV4MultiRangeCompat doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, n.SizeBytes()) + n.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n))) + hdr.Len = n.SizeBytes() + hdr.Cap = n.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that n + // must live until the use above. + runtime.KeepAlive(n) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (n *NfNATIPV4Range) SizeBytes() int { + return 8 + + 1*4 + + 1*4 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (n *NfNATIPV4Range) MarshalBytes(dst []byte) { + hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.Flags)) + dst = dst[4:] + for idx := 0; idx < 4; idx++ { + dst[0] = byte(n.MinIP[idx]) + dst = dst[1:] + } + for idx := 0; idx < 4; idx++ { + dst[0] = byte(n.MaxIP[idx]) + dst = dst[1:] + } + hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.MinPort)) + dst = dst[2:] + hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.MaxPort)) + dst = dst[2:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (n *NfNATIPV4Range) UnmarshalBytes(src []byte) { + n.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + for idx := 0; idx < 4; idx++ { + n.MinIP[idx] = src[0] + src = src[1:] + } + for idx := 0; idx < 4; idx++ { + n.MaxIP[idx] = src[0] + src = src[1:] + } + n.MinPort = uint16(hostarch.ByteOrder.Uint16(src[:2])) + src = src[2:] + n.MaxPort = uint16(hostarch.ByteOrder.Uint16(src[:2])) + src = src[2:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (n *NfNATIPV4Range) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (n *NfNATIPV4Range) MarshalUnsafe(dst []byte) { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes())) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (n *NfNATIPV4Range) UnmarshalUnsafe(src []byte) { + gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes())) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (n *NfNATIPV4Range) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n))) + hdr.Len = n.SizeBytes() + hdr.Cap = n.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that n + // must live until the use above. + runtime.KeepAlive(n) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (n *NfNATIPV4Range) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return n.CopyOutN(cc, addr, n.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (n *NfNATIPV4Range) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n))) + hdr.Len = n.SizeBytes() + hdr.Cap = n.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. 
+ // Since we bypassed the compiler's escape analysis, indicate that n + // must live until the use above. + runtime.KeepAlive(n) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (n *NfNATIPV4Range) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n))) + hdr.Len = n.SizeBytes() + hdr.Cap = n.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that n + // must live until the use above. + runtime.KeepAlive(n) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit func (tn *TableName) SizeBytes() int { return 1 * XT_TABLE_MAXNAMELEN @@ -5854,6 +6481,407 @@ func (x *XTCounters) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. +func (x *XTEntryMatch) SizeBytes() int { + return 3 + + (*ExtensionName)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (x *XTEntryMatch) MarshalBytes(dst []byte) { + hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.MatchSize)) + dst = dst[2:] + x.Name.MarshalBytes(dst[:x.Name.SizeBytes()]) + dst = dst[x.Name.SizeBytes():] + dst[0] = byte(x.Revision) + dst = dst[1:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (x *XTEntryMatch) UnmarshalBytes(src []byte) { + x.MatchSize = uint16(hostarch.ByteOrder.Uint16(src[:2])) + src = src[2:] + x.Name.UnmarshalBytes(src[:x.Name.SizeBytes()]) + src = src[x.Name.SizeBytes():] + x.Revision = uint8(src[0]) + src = src[1:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (x *XTEntryMatch) Packed() bool { + return x.Name.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (x *XTEntryMatch) MarshalUnsafe(dst []byte) { + if x.Name.Packed() { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes())) + } else { + // Type XTEntryMatch doesn't have a packed layout in memory, fallback to MarshalBytes. + x.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (x *XTEntryMatch) UnmarshalUnsafe(src []byte) { + if x.Name.Packed() { + gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes())) + } else { + // Type XTEntryMatch doesn't have a packed layout in memory, fallback to UnmarshalBytes. + x.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (x *XTEntryMatch) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + if !x.Name.Packed() { + // Type XTEntryMatch doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay. + x.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. 
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (x *XTEntryMatch) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return x.CopyOutN(cc, addr, x.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (x *XTEntryMatch) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + if !x.Name.Packed() { + // Type XTEntryMatch doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + x.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (x *XTEntryMatch) WriteTo(writer io.Writer) (int64, error) { + if !x.Name.Packed() { + // Type XTEntryMatch doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, x.SizeBytes()) + x.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (x *XTEntryTarget) SizeBytes() int { + return 3 + + (*ExtensionName)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (x *XTEntryTarget) MarshalBytes(dst []byte) { + hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.TargetSize)) + dst = dst[2:] + x.Name.MarshalBytes(dst[:x.Name.SizeBytes()]) + dst = dst[x.Name.SizeBytes():] + dst[0] = byte(x.Revision) + dst = dst[1:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (x *XTEntryTarget) UnmarshalBytes(src []byte) { + x.TargetSize = uint16(hostarch.ByteOrder.Uint16(src[:2])) + src = src[2:] + x.Name.UnmarshalBytes(src[:x.Name.SizeBytes()]) + src = src[x.Name.SizeBytes():] + x.Revision = uint8(src[0]) + src = src[1:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (x *XTEntryTarget) Packed() bool { + return x.Name.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (x *XTEntryTarget) MarshalUnsafe(dst []byte) { + if x.Name.Packed() { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes())) + } else { + // Type XTEntryTarget doesn't have a packed layout in memory, fallback to MarshalBytes. 
+ x.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (x *XTEntryTarget) UnmarshalUnsafe(src []byte) { + if x.Name.Packed() { + gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes())) + } else { + // Type XTEntryTarget doesn't have a packed layout in memory, fallback to UnmarshalBytes. + x.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (x *XTEntryTarget) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + if !x.Name.Packed() { + // Type XTEntryTarget doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay. + x.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (x *XTEntryTarget) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return x.CopyOutN(cc, addr, x.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (x *XTEntryTarget) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + if !x.Name.Packed() { + // Type XTEntryTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + x.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (x *XTEntryTarget) WriteTo(writer io.Writer) (int64, error) { + if !x.Name.Packed() { + // Type XTEntryTarget doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, x.SizeBytes()) + x.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. 
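XTEntryMatch and XTEntryTarget above both compute their size as `3 + (*ExtensionName)(nil).SizeBytes()`. Calling SizeBytes on a typed nil is safe because the generated method never dereferences its receiver; it lets nested sizes compose without allocating anything. A sketch of the idiom, assuming XT_EXTENSION_MAXNAMELEN is 29 as in Linux:

```go
package main

import "fmt"

const maxExtNameLen = 29 // assumed value of XT_EXTENSION_MAXNAMELEN

type extensionName [maxExtNameLen]byte

// SizeBytes never touches the receiver, so calling it on a typed nil
// pointer is safe and allocation-free.
func (en *extensionName) SizeBytes() int { return maxExtNameLen }

type entryMatch struct {
	MatchSize uint16
	Name      extensionName
	Revision  uint8
}

// sizeBytes composes the fixed scalar size (2 + 1 = 3) with the nested
// field's size, mirroring "return 3 + (*ExtensionName)(nil).SizeBytes()".
func (*entryMatch) sizeBytes() int {
	return 3 + (*extensionName)(nil).SizeBytes()
}

func main() {
	fmt.Println((*entryMatch)(nil).sizeBytes()) // 32
}
```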
+func (x *XTErrorTarget) SizeBytes() int { + return 0 + + (*XTEntryTarget)(nil).SizeBytes() + + (*ErrorName)(nil).SizeBytes() + + 1*2 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (x *XTErrorTarget) MarshalBytes(dst []byte) { + x.Target.MarshalBytes(dst[:x.Target.SizeBytes()]) + dst = dst[x.Target.SizeBytes():] + x.Name.MarshalBytes(dst[:x.Name.SizeBytes()]) + dst = dst[x.Name.SizeBytes():] + // Padding: dst[:sizeof(byte)*2] ~= [2]byte{0} + dst = dst[1*(2):] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (x *XTErrorTarget) UnmarshalBytes(src []byte) { + x.Target.UnmarshalBytes(src[:x.Target.SizeBytes()]) + src = src[x.Target.SizeBytes():] + x.Name.UnmarshalBytes(src[:x.Name.SizeBytes()]) + src = src[x.Name.SizeBytes():] + // Padding: ~ copy([2]byte(x._), src[:sizeof(byte)*2]) + src = src[1*(2):] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (x *XTErrorTarget) Packed() bool { + return x.Name.Packed() && x.Target.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (x *XTErrorTarget) MarshalUnsafe(dst []byte) { + if x.Name.Packed() && x.Target.Packed() { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes())) + } else { + // Type XTErrorTarget doesn't have a packed layout in memory, fallback to MarshalBytes. + x.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (x *XTErrorTarget) UnmarshalUnsafe(src []byte) { + if x.Name.Packed() && x.Target.Packed() { + gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes())) + } else { + // Type XTErrorTarget doesn't have a packed layout in memory, fallback to UnmarshalBytes. + x.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (x *XTErrorTarget) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + if !x.Name.Packed() && x.Target.Packed() { + // Type XTErrorTarget doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay. + x.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (x *XTErrorTarget) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return x.CopyOutN(cc, addr, x.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (x *XTErrorTarget) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + if !x.Name.Packed() && x.Target.Packed() { + // Type XTErrorTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. 
+ x.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (x *XTErrorTarget) WriteTo(writer io.Writer) (int64, error) { + if !x.Name.Packed() && x.Target.Packed() { + // Type XTErrorTarget doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, x.SizeBytes()) + x.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (x *XTGetRevision) SizeBytes() int { return 1 + (*ExtensionName)(nil).SizeBytes() @@ -5983,6 +7011,636 @@ func (x *XTGetRevision) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. +func (x *XTRedirectTarget) SizeBytes() int { + return 0 + + (*XTEntryTarget)(nil).SizeBytes() + + (*NfNATIPV4MultiRangeCompat)(nil).SizeBytes() + + 1*4 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (x *XTRedirectTarget) MarshalBytes(dst []byte) { + x.Target.MarshalBytes(dst[:x.Target.SizeBytes()]) + dst = dst[x.Target.SizeBytes():] + x.NfRange.MarshalBytes(dst[:x.NfRange.SizeBytes()]) + dst = dst[x.NfRange.SizeBytes():] + // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0} + dst = dst[1*(4):] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (x *XTRedirectTarget) UnmarshalBytes(src []byte) { + x.Target.UnmarshalBytes(src[:x.Target.SizeBytes()]) + src = src[x.Target.SizeBytes():] + x.NfRange.UnmarshalBytes(src[:x.NfRange.SizeBytes()]) + src = src[x.NfRange.SizeBytes():] + // Padding: ~ copy([4]byte(x._), src[:sizeof(byte)*4]) + src = src[1*(4):] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (x *XTRedirectTarget) Packed() bool { + return x.NfRange.Packed() && x.Target.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (x *XTRedirectTarget) MarshalUnsafe(dst []byte) { + if x.NfRange.Packed() && x.Target.Packed() { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes())) + } else { + // Type XTRedirectTarget doesn't have a packed layout in memory, fallback to MarshalBytes. + x.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (x *XTRedirectTarget) UnmarshalUnsafe(src []byte) { + if x.NfRange.Packed() && x.Target.Packed() { + gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes())) + } else { + // Type XTRedirectTarget doesn't have a packed layout in memory, fallback to UnmarshalBytes. 
+ x.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (x *XTRedirectTarget) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + if !x.NfRange.Packed() && x.Target.Packed() { + // Type XTRedirectTarget doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay. + x.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (x *XTRedirectTarget) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return x.CopyOutN(cc, addr, x.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (x *XTRedirectTarget) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + if !x.NfRange.Packed() && x.Target.Packed() { + // Type XTRedirectTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + x.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (x *XTRedirectTarget) WriteTo(writer io.Writer) (int64, error) { + if !x.NfRange.Packed() && x.Target.Packed() { + // Type XTRedirectTarget doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, x.SizeBytes()) + x.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (x *XTSNATTarget) SizeBytes() int { + return 0 + + (*XTEntryTarget)(nil).SizeBytes() + + (*NfNATIPV4MultiRangeCompat)(nil).SizeBytes() + + 1*4 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. 
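For compound types like XTRedirectTarget above and XTSNATTarget below, MarshalUnsafe dispatches between a single bulk copy and the field-by-field MarshalBytes fallback, keyed on the nested fields' Packed() results. A distilled, runnable sketch of that dispatch with a hypothetical packed pair type (uses unsafe.Slice, Go 1.17+; the slow path writes explicitly little-endian, so the two paths agree on little-endian hosts):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"unsafe"
)

// pair is a hypothetical packed type: no padding, so wire layout equals
// memory layout.
type pair struct{ A, B uint32 }

func (p *pair) packed() bool   { return true }
func (p *pair) sizeBytes() int { return 8 }

// marshalBytes is the always-correct field-by-field slow path.
func (p *pair) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint32(dst[0:4], p.A)
	binary.LittleEndian.PutUint32(dst[4:8], p.B)
}

// marshalUnsafe mirrors the generated dispatch: one bulk copy when the
// layout is packed, the byte-wise fallback otherwise.
func (p *pair) marshalUnsafe(dst []byte) {
	if p.packed() {
		copy(dst, unsafe.Slice((*byte)(unsafe.Pointer(p)), p.sizeBytes()))
	} else {
		p.marshalBytes(dst)
	}
}

func main() {
	p := &pair{A: 1, B: 2}
	slow := make([]byte, p.sizeBytes())
	fast := make([]byte, p.sizeBytes())
	p.marshalBytes(slow)
	p.marshalUnsafe(fast)
	fmt.Println(bytes.Equal(slow, fast)) // true on little-endian hosts
}
```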
+func (x *XTSNATTarget) MarshalBytes(dst []byte) { + x.Target.MarshalBytes(dst[:x.Target.SizeBytes()]) + dst = dst[x.Target.SizeBytes():] + x.NfRange.MarshalBytes(dst[:x.NfRange.SizeBytes()]) + dst = dst[x.NfRange.SizeBytes():] + // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0} + dst = dst[1*(4):] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (x *XTSNATTarget) UnmarshalBytes(src []byte) { + x.Target.UnmarshalBytes(src[:x.Target.SizeBytes()]) + src = src[x.Target.SizeBytes():] + x.NfRange.UnmarshalBytes(src[:x.NfRange.SizeBytes()]) + src = src[x.NfRange.SizeBytes():] + // Padding: ~ copy([4]byte(x._), src[:sizeof(byte)*4]) + src = src[1*(4):] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (x *XTSNATTarget) Packed() bool { + return x.NfRange.Packed() && x.Target.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (x *XTSNATTarget) MarshalUnsafe(dst []byte) { + if x.NfRange.Packed() && x.Target.Packed() { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes())) + } else { + // Type XTSNATTarget doesn't have a packed layout in memory, fallback to MarshalBytes. + x.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (x *XTSNATTarget) UnmarshalUnsafe(src []byte) { + if x.NfRange.Packed() && x.Target.Packed() { + gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes())) + } else { + // Type XTSNATTarget doesn't have a packed layout in memory, fallback to UnmarshalBytes. + x.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (x *XTSNATTarget) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + if !x.NfRange.Packed() && x.Target.Packed() { + // Type XTSNATTarget doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay. + x.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (x *XTSNATTarget) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return x.CopyOutN(cc, addr, x.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (x *XTSNATTarget) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + if !x.NfRange.Packed() && x.Target.Packed() { + // Type XTSNATTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + x.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (x *XTSNATTarget) WriteTo(writer io.Writer) (int64, error) { + if !x.NfRange.Packed() && x.Target.Packed() { + // Type XTSNATTarget doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, x.SizeBytes()) + x.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (x *XTStandardTarget) SizeBytes() int { + return 4 + + (*XTEntryTarget)(nil).SizeBytes() + + 1*4 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (x *XTStandardTarget) MarshalBytes(dst []byte) { + x.Target.MarshalBytes(dst[:x.Target.SizeBytes()]) + dst = dst[x.Target.SizeBytes():] + hostarch.ByteOrder.PutUint32(dst[:4], uint32(x.Verdict)) + dst = dst[4:] + // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0} + dst = dst[1*(4):] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (x *XTStandardTarget) UnmarshalBytes(src []byte) { + x.Target.UnmarshalBytes(src[:x.Target.SizeBytes()]) + src = src[x.Target.SizeBytes():] + x.Verdict = int32(hostarch.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: ~ copy([4]byte(x._), src[:sizeof(byte)*4]) + src = src[1*(4):] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (x *XTStandardTarget) Packed() bool { + return x.Target.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (x *XTStandardTarget) MarshalUnsafe(dst []byte) { + if x.Target.Packed() { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes())) + } else { + // Type XTStandardTarget doesn't have a packed layout in memory, fallback to MarshalBytes. + x.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (x *XTStandardTarget) UnmarshalUnsafe(src []byte) { + if x.Target.Packed() { + gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes())) + } else { + // Type XTStandardTarget doesn't have a packed layout in memory, fallback to UnmarshalBytes. + x.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (x *XTStandardTarget) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + if !x.Target.Packed() { + // Type XTStandardTarget doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay. + x.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. 
+ } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (x *XTStandardTarget) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return x.CopyOutN(cc, addr, x.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (x *XTStandardTarget) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + if !x.Target.Packed() { + // Type XTStandardTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + x.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (x *XTStandardTarget) WriteTo(writer io.Writer) (int64, error) { + if !x.Target.Packed() { + // Type XTStandardTarget doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, x.SizeBytes()) + x.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (x *XTTCP) SizeBytes() int { + return 12 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (x *XTTCP) MarshalBytes(dst []byte) { + hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.SourcePortStart)) + dst = dst[2:] + hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.SourcePortEnd)) + dst = dst[2:] + hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.DestinationPortStart)) + dst = dst[2:] + hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.DestinationPortEnd)) + dst = dst[2:] + dst[0] = byte(x.Option) + dst = dst[1:] + dst[0] = byte(x.FlagMask) + dst = dst[1:] + dst[0] = byte(x.FlagCompare) + dst = dst[1:] + dst[0] = byte(x.InverseFlags) + dst = dst[1:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
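XTStandardTarget's Verdict above is an int32 marshalled through uint32. The conversion is a bit-for-bit reinterpretation, so the negative values iptables uses for built-in verdicts (encoded as -(value) - 1) round-trip losslessly. A small demonstration:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// iptables encodes built-in verdicts as -(value) - 1, so a standard
	// verdict such as -NF_ACCEPT-1 is negative.
	v := int32(-2)
	var b [4]byte
	binary.LittleEndian.PutUint32(b[:], uint32(v))
	got := int32(binary.LittleEndian.Uint32(b[:]))
	fmt.Println(got == v) // true: lossless round trip
}
```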
+func (x *XTTCP) UnmarshalBytes(src []byte) { + x.SourcePortStart = uint16(hostarch.ByteOrder.Uint16(src[:2])) + src = src[2:] + x.SourcePortEnd = uint16(hostarch.ByteOrder.Uint16(src[:2])) + src = src[2:] + x.DestinationPortStart = uint16(hostarch.ByteOrder.Uint16(src[:2])) + src = src[2:] + x.DestinationPortEnd = uint16(hostarch.ByteOrder.Uint16(src[:2])) + src = src[2:] + x.Option = uint8(src[0]) + src = src[1:] + x.FlagMask = uint8(src[0]) + src = src[1:] + x.FlagCompare = uint8(src[0]) + src = src[1:] + x.InverseFlags = uint8(src[0]) + src = src[1:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (x *XTTCP) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (x *XTTCP) MarshalUnsafe(dst []byte) { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes())) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (x *XTTCP) UnmarshalUnsafe(src []byte) { + gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes())) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (x *XTTCP) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (x *XTTCP) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return x.CopyOutN(cc, addr, x.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (x *XTTCP) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (x *XTTCP) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x))) + hdr.Len = x.SizeBytes() + hdr.Cap = x.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that x + // must live until the use above. + runtime.KeepAlive(x) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (x *XTUDP) SizeBytes() int { + return 10 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. 
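XTUDP's layout below ends with one explicit padding byte: MarshalBytes zeroes and skips it, UnmarshalBytes ignores it, and it keeps SizeBytes at 10. A sketch of the same pad-aware encoding, assuming the usual little-endian hostarch.ByteOrder (marshalUDPMatch is a local stand-in, not the generated method):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// marshalUDPMatch sketches XTUDP's wire layout: four port bounds, one
// inverse-flags byte, then one explicit padding byte that keeps the
// wire size at 10.
func marshalUDPMatch(spts, dpts [2]uint16, inv uint8) []byte {
	dst := make([]byte, 10)
	binary.LittleEndian.PutUint16(dst[0:2], spts[0])
	binary.LittleEndian.PutUint16(dst[2:4], spts[1])
	binary.LittleEndian.PutUint16(dst[4:6], dpts[0])
	binary.LittleEndian.PutUint16(dst[6:8], dpts[1])
	dst[8] = inv
	// dst[9] is padding: written as zero here, ignored on unmarshal.
	return dst
}

func main() {
	fmt.Println(marshalUDPMatch([2]uint16{0, 65535}, [2]uint16{53, 53}, 0))
}
```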
+func (x *XTUDP) MarshalBytes(dst []byte) {
+    hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.SourcePortStart))
+    dst = dst[2:]
+    hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.SourcePortEnd))
+    dst = dst[2:]
+    hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.DestinationPortStart))
+    dst = dst[2:]
+    hostarch.ByteOrder.PutUint16(dst[:2], uint16(x.DestinationPortEnd))
+    dst = dst[2:]
+    dst[0] = byte(x.InverseFlags)
+    dst = dst[1:]
+    // Padding: dst[:sizeof(uint8)] ~= uint8(0)
+    dst = dst[1:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (x *XTUDP) UnmarshalBytes(src []byte) {
+    x.SourcePortStart = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+    src = src[2:]
+    x.SourcePortEnd = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+    src = src[2:]
+    x.DestinationPortStart = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+    src = src[2:]
+    x.DestinationPortEnd = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+    src = src[2:]
+    x.InverseFlags = uint8(src[0])
+    src = src[1:]
+    // Padding: var _ uint8 ~= src[:sizeof(uint8)]
+    src = src[1:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (x *XTUDP) Packed() bool {
+    return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (x *XTUDP) MarshalUnsafe(dst []byte) {
+    gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(x), uintptr(x.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (x *XTUDP) UnmarshalUnsafe(src []byte) {
+    gohacks.Memmove(unsafe.Pointer(x), unsafe.Pointer(&src[0]), uintptr(x.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (x *XTUDP) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+    hdr.Len = x.SizeBytes()
+    hdr.Cap = x.SizeBytes()
+
+    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that x
+    // must live until the use above.
+    runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (x *XTUDP) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    return x.CopyOutN(cc, addr, x.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (x *XTUDP) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+    hdr.Len = x.SizeBytes()
+    hdr.Cap = x.SizeBytes()
+
+    length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that x
+    // must live until the use above.
+    runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (x *XTUDP) WriteTo(writer io.Writer) (int64, error) {
+    // Construct a slice backed by dst's underlying memory.
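+    // The reflect.SliceHeader below aliases x's own bytes as a []byte
+    // without allocating or copying; gohacks.Noescape hides the pointer
+    // from escape analysis so that x can remain on the stack.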
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+    hdr.Len = x.SizeBytes()
+    hdr.Cap = x.SizeBytes()
+
+    length, err := writer.Write(buf)
+    // Since we bypassed the compiler's escape analysis, indicate that x
+    // must live until the use above.
+    runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+    return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
 func (i *IP6TEntry) SizeBytes() int {
     return 12 +
         (*IP6TIP)(nil).SizeBytes() +
@@ -6604,6 +8262,475 @@ func (ke *KernelIP6TGetEntries) WriteTo(writer io.Writer) (int64, error) {
 }

 // SizeBytes implements marshal.Marshallable.SizeBytes.
+func (n *NFNATRange) SizeBytes() int {
+    return 8 +
+        (*Inet6Addr)(nil).SizeBytes() +
+        (*Inet6Addr)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (n *NFNATRange) MarshalBytes(dst []byte) {
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.Flags))
+    dst = dst[4:]
+    n.MinAddr.MarshalBytes(dst[:n.MinAddr.SizeBytes()])
+    dst = dst[n.MinAddr.SizeBytes():]
+    n.MaxAddr.MarshalBytes(dst[:n.MaxAddr.SizeBytes()])
+    dst = dst[n.MaxAddr.SizeBytes():]
+    hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.MinProto))
+    dst = dst[2:]
+    hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.MaxProto))
+    dst = dst[2:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (n *NFNATRange) UnmarshalBytes(src []byte) {
+    n.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+    n.MinAddr.UnmarshalBytes(src[:n.MinAddr.SizeBytes()])
+    src = src[n.MinAddr.SizeBytes():]
+    n.MaxAddr.UnmarshalBytes(src[:n.MaxAddr.SizeBytes()])
+    src = src[n.MaxAddr.SizeBytes():]
+    n.MinProto = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+    src = src[2:]
+    n.MaxProto = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+    src = src[2:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (n *NFNATRange) Packed() bool {
+    return n.MaxAddr.Packed() && n.MinAddr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (n *NFNATRange) MarshalUnsafe(dst []byte) {
+    if n.MaxAddr.Packed() && n.MinAddr.Packed() {
+        gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes()))
+    } else {
+        // Type NFNATRange doesn't have a packed layout in memory, fallback to MarshalBytes.
+        n.MarshalBytes(dst)
+    }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (n *NFNATRange) UnmarshalUnsafe(src []byte) {
+    if n.MaxAddr.Packed() && n.MinAddr.Packed() {
+        gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes()))
+    } else {
+        // Type NFNATRange doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+        n.UnmarshalBytes(src)
+    }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (n *NFNATRange) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+    if !n.MaxAddr.Packed() || !n.MinAddr.Packed() {
+        // Type NFNATRange doesn't have a packed layout in memory, fall back to MarshalBytes.
+        buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay.
+        n.MarshalBytes(buf) // escapes: fallback.
+        return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    }
+
+    // Construct a slice backed by dst's underlying memory.
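+    // This zero-copy path is only safe because the guard above verified
+    // that MinAddr and MaxAddr are packed, i.e. the struct's in-memory
+    // layout matches its wire layout byte for byte.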
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+    hdr.Len = n.SizeBytes()
+    hdr.Cap = n.SizeBytes()
+
+    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that n
+    // must live until the use above.
+    runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (n *NFNATRange) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    return n.CopyOutN(cc, addr, n.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (n *NFNATRange) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    if !n.MaxAddr.Packed() || !n.MinAddr.Packed() {
+        // Type NFNATRange doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+        buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay.
+        length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+        // Unmarshal unconditionally. If we had a short copy-in, this results in a
+        // partially unmarshalled struct.
+        n.UnmarshalBytes(buf) // escapes: fallback.
+        return length, err
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+    hdr.Len = n.SizeBytes()
+    hdr.Cap = n.SizeBytes()
+
+    length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that n
+    // must live until the use above.
+    runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (n *NFNATRange) WriteTo(writer io.Writer) (int64, error) {
+    if !n.MaxAddr.Packed() || !n.MinAddr.Packed() {
+        // Type NFNATRange doesn't have a packed layout in memory, fall back to MarshalBytes.
+        buf := make([]byte, n.SizeBytes())
+        n.MarshalBytes(buf)
+        length, err := writer.Write(buf)
+        return int64(length), err
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+    hdr.Len = n.SizeBytes()
+    hdr.Cap = n.SizeBytes()
+
+    length, err := writer.Write(buf)
+    // Since we bypassed the compiler's escape analysis, indicate that n
+    // must live until the use above.
+    runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+    return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (n *NetlinkAttrHeader) SizeBytes() int {
+    return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (n *NetlinkAttrHeader) MarshalBytes(dst []byte) {
+    hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.Length))
+    dst = dst[2:]
+    hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.Type))
+    dst = dst[2:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (n *NetlinkAttrHeader) UnmarshalBytes(src []byte) {
+    n.Length = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+    src = src[2:]
+    n.Type = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+    src = src[2:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (n *NetlinkAttrHeader) Packed() bool {
+    return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
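+// For a packed type such as NetlinkAttrHeader this is a single 4-byte
+// memmove, behaviorally equivalent to MarshalBytes but cheaper.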
+func (n *NetlinkAttrHeader) MarshalUnsafe(dst []byte) {
+    gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (n *NetlinkAttrHeader) UnmarshalUnsafe(src []byte) {
+    gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (n *NetlinkAttrHeader) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+    hdr.Len = n.SizeBytes()
+    hdr.Cap = n.SizeBytes()
+
+    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that n
+    // must live until the use above.
+    runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (n *NetlinkAttrHeader) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    return n.CopyOutN(cc, addr, n.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (n *NetlinkAttrHeader) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+    hdr.Len = n.SizeBytes()
+    hdr.Cap = n.SizeBytes()
+
+    length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that n
+    // must live until the use above.
+    runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (n *NetlinkAttrHeader) WriteTo(writer io.Writer) (int64, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+    hdr.Len = n.SizeBytes()
+    hdr.Cap = n.SizeBytes()
+
+    length, err := writer.Write(buf)
+    // Since we bypassed the compiler's escape analysis, indicate that n
+    // must live until the use above.
+    runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+    return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (n *NetlinkErrorMessage) SizeBytes() int {
+    return 4 +
+        (*NetlinkMessageHeader)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (n *NetlinkErrorMessage) MarshalBytes(dst []byte) {
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.Error))
+    dst = dst[4:]
+    n.Header.MarshalBytes(dst[:n.Header.SizeBytes()])
+    dst = dst[n.Header.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (n *NetlinkErrorMessage) UnmarshalBytes(src []byte) {
+    n.Error = int32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+    n.Header.UnmarshalBytes(src[:n.Header.SizeBytes()])
+    src = src[n.Header.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (n *NetlinkErrorMessage) Packed() bool {
+    return n.Header.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (n *NetlinkErrorMessage) MarshalUnsafe(dst []byte) {
+    if n.Header.Packed() {
+        gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes()))
+    } else {
+        // Type NetlinkErrorMessage doesn't have a packed layout in memory, fallback to MarshalBytes.
+        n.MarshalBytes(dst)
+    }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (n *NetlinkErrorMessage) UnmarshalUnsafe(src []byte) {
+    if n.Header.Packed() {
+        gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes()))
+    } else {
+        // Type NetlinkErrorMessage doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+        n.UnmarshalBytes(src)
+    }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (n *NetlinkErrorMessage) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+    if !n.Header.Packed() {
+        // Type NetlinkErrorMessage doesn't have a packed layout in memory, fall back to MarshalBytes.
+        buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay.
+        n.MarshalBytes(buf) // escapes: fallback.
+        return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+    hdr.Len = n.SizeBytes()
+    hdr.Cap = n.SizeBytes()
+
+    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that n
+    // must live until the use above.
+    runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (n *NetlinkErrorMessage) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    return n.CopyOutN(cc, addr, n.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (n *NetlinkErrorMessage) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    if !n.Header.Packed() {
+        // Type NetlinkErrorMessage doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+        buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay.
+        length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+        // Unmarshal unconditionally. If we had a short copy-in, this results in a
+        // partially unmarshalled struct.
+        n.UnmarshalBytes(buf) // escapes: fallback.
+        return length, err
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+    hdr.Len = n.SizeBytes()
+    hdr.Cap = n.SizeBytes()
+
+    length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that n
+    // must live until the use above.
+    runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (n *NetlinkErrorMessage) WriteTo(writer io.Writer) (int64, error) {
+    if !n.Header.Packed() {
+        // Type NetlinkErrorMessage doesn't have a packed layout in memory, fall back to MarshalBytes.
+        buf := make([]byte, n.SizeBytes())
+        n.MarshalBytes(buf)
+        length, err := writer.Write(buf)
+        return int64(length), err
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+    hdr.Len = n.SizeBytes()
+    hdr.Cap = n.SizeBytes()
+
+    length, err := writer.Write(buf)
+    // Since we bypassed the compiler's escape analysis, indicate that n
+    // must live until the use above.
+    runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+    return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (n *NetlinkMessageHeader) SizeBytes() int {
+    return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (n *NetlinkMessageHeader) MarshalBytes(dst []byte) {
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.Length))
+    dst = dst[4:]
+    hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.Type))
+    dst = dst[2:]
+    hostarch.ByteOrder.PutUint16(dst[:2], uint16(n.Flags))
+    dst = dst[2:]
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.Seq))
+    dst = dst[4:]
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(n.PortID))
+    dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (n *NetlinkMessageHeader) UnmarshalBytes(src []byte) {
+    n.Length = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+    n.Type = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+    src = src[2:]
+    n.Flags = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+    src = src[2:]
+    n.Seq = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+    n.PortID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (n *NetlinkMessageHeader) Packed() bool {
+    return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (n *NetlinkMessageHeader) MarshalUnsafe(dst []byte) {
+    gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (n *NetlinkMessageHeader) UnmarshalUnsafe(src []byte) {
+    gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (n *NetlinkMessageHeader) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+    hdr.Len = n.SizeBytes()
+    hdr.Cap = n.SizeBytes()
+
+    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that n
+    // must live until the use above.
+    runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (n *NetlinkMessageHeader) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    return n.CopyOutN(cc, addr, n.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (n *NetlinkMessageHeader) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+    hdr.Len = n.SizeBytes()
+    hdr.Cap = n.SizeBytes()
+
+    length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
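+    // On a short copy-in only the first length bytes of n are overwritten;
+    // err reports the fault.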
+    // Since we bypassed the compiler's escape analysis, indicate that n
+    // must live until the use above.
+    runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (n *NetlinkMessageHeader) WriteTo(writer io.Writer) (int64, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n)))
+    hdr.Len = n.SizeBytes()
+    hdr.Cap = n.SizeBytes()
+
+    length, err := writer.Write(buf)
+    // Since we bypassed the compiler's escape analysis, indicate that n
+    // must live until the use above.
+    runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+    return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
 func (s *SockAddrNetlink) SizeBytes() int {
     return 12
 }
@@ -6705,6 +8832,341 @@ func (s *SockAddrNetlink) WriteTo(writer io.Writer) (int64, error) {
 }

 // SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *InterfaceAddrMessage) SizeBytes() int {
+    return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *InterfaceAddrMessage) MarshalBytes(dst []byte) {
+    dst[0] = byte(i.Family)
+    dst = dst[1:]
+    dst[0] = byte(i.PrefixLen)
+    dst = dst[1:]
+    dst[0] = byte(i.Flags)
+    dst = dst[1:]
+    dst[0] = byte(i.Scope)
+    dst = dst[1:]
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Index))
+    dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *InterfaceAddrMessage) UnmarshalBytes(src []byte) {
+    i.Family = uint8(src[0])
+    src = src[1:]
+    i.PrefixLen = uint8(src[0])
+    src = src[1:]
+    i.Flags = uint8(src[0])
+    src = src[1:]
+    i.Scope = uint8(src[0])
+    src = src[1:]
+    i.Index = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *InterfaceAddrMessage) Packed() bool {
+    return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *InterfaceAddrMessage) MarshalUnsafe(dst []byte) {
+    gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *InterfaceAddrMessage) UnmarshalUnsafe(src []byte) {
+    gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *InterfaceAddrMessage) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *InterfaceAddrMessage) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *InterfaceAddrMessage) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *InterfaceAddrMessage) WriteTo(writer io.Writer) (int64, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := writer.Write(buf)
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *InterfaceInfoMessage) SizeBytes() int {
+    return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *InterfaceInfoMessage) MarshalBytes(dst []byte) {
+    dst[0] = byte(i.Family)
+    dst = dst[1:]
+    // Padding: dst[:sizeof(uint8)] ~= uint8(0)
+    dst = dst[1:]
+    hostarch.ByteOrder.PutUint16(dst[:2], uint16(i.Type))
+    dst = dst[2:]
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Index))
+    dst = dst[4:]
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Flags))
+    dst = dst[4:]
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.Change))
+    dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *InterfaceInfoMessage) UnmarshalBytes(src []byte) {
+    i.Family = uint8(src[0])
+    src = src[1:]
+    // Padding: var _ uint8 ~= src[:sizeof(uint8)]
+    src = src[1:]
+    i.Type = uint16(hostarch.ByteOrder.Uint16(src[:2]))
+    src = src[2:]
+    i.Index = int32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+    i.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+    i.Change = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *InterfaceInfoMessage) Packed() bool {
+    return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *InterfaceInfoMessage) MarshalUnsafe(dst []byte) {
+    gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *InterfaceInfoMessage) UnmarshalUnsafe(src []byte) {
+    gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *InterfaceInfoMessage) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
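+// CopyOut is shorthand for CopyOutN with limit == SizeBytes(), i.e. a full
+// 16-byte ifinfomsg copy-out.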
+//go:nosplit
+func (i *InterfaceInfoMessage) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *InterfaceInfoMessage) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *InterfaceInfoMessage) WriteTo(writer io.Writer) (int64, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := writer.Write(buf)
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (r *RouteMessage) SizeBytes() int {
+    return 12
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (r *RouteMessage) MarshalBytes(dst []byte) {
+    dst[0] = byte(r.Family)
+    dst = dst[1:]
+    dst[0] = byte(r.DstLen)
+    dst = dst[1:]
+    dst[0] = byte(r.SrcLen)
+    dst = dst[1:]
+    dst[0] = byte(r.TOS)
+    dst = dst[1:]
+    dst[0] = byte(r.Table)
+    dst = dst[1:]
+    dst[0] = byte(r.Protocol)
+    dst = dst[1:]
+    dst[0] = byte(r.Scope)
+    dst = dst[1:]
+    dst[0] = byte(r.Type)
+    dst = dst[1:]
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(r.Flags))
+    dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (r *RouteMessage) UnmarshalBytes(src []byte) {
+    r.Family = uint8(src[0])
+    src = src[1:]
+    r.DstLen = uint8(src[0])
+    src = src[1:]
+    r.SrcLen = uint8(src[0])
+    src = src[1:]
+    r.TOS = uint8(src[0])
+    src = src[1:]
+    r.Table = uint8(src[0])
+    src = src[1:]
+    r.Protocol = uint8(src[0])
+    src = src[1:]
+    r.Scope = uint8(src[0])
+    src = src[1:]
+    r.Type = uint8(src[0])
+    src = src[1:]
+    r.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (r *RouteMessage) Packed() bool {
+    return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (r *RouteMessage) MarshalUnsafe(dst []byte) {
+    gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(r), uintptr(r.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (r *RouteMessage) UnmarshalUnsafe(src []byte) {
+    gohacks.Memmove(unsafe.Pointer(r), unsafe.Pointer(&src[0]), uintptr(r.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (r *RouteMessage) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+    hdr.Len = r.SizeBytes()
+    hdr.Cap = r.SizeBytes()
+
+    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that r
+    // must live until the use above.
+    runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (r *RouteMessage) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    return r.CopyOutN(cc, addr, r.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (r *RouteMessage) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+    hdr.Len = r.SizeBytes()
+    hdr.Cap = r.SizeBytes()
+
+    length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that r
+    // must live until the use above.
+    runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (r *RouteMessage) WriteTo(writer io.Writer) (int64, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(r)))
+    hdr.Len = r.SizeBytes()
+    hdr.Cap = r.SizeBytes()
+
+    length, err := writer.Write(buf)
+    // Since we bypassed the compiler's escape analysis, indicate that r
+    // must live until the use above.
+    runtime.KeepAlive(r) // escapes: replaced by intrinsic.
+    return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
 func (p *PollFD) SizeBytes() int {
     return 8
 }
@@ -8392,6 +10854,237 @@ func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) {
 }

 // SizeBytes implements marshal.Marshallable.SizeBytes.
+func (c *ControlMessageHeader) SizeBytes() int {
+    return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *ControlMessageHeader) MarshalBytes(dst []byte) {
+    hostarch.ByteOrder.PutUint64(dst[:8], uint64(c.Length))
+    dst = dst[8:]
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Level))
+    dst = dst[4:]
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.Type))
+    dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *ControlMessageHeader) UnmarshalBytes(src []byte) {
+    c.Length = uint64(hostarch.ByteOrder.Uint64(src[:8]))
+    src = src[8:]
+    c.Level = int32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+    c.Type = int32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *ControlMessageHeader) Packed() bool {
+    return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (c *ControlMessageHeader) MarshalUnsafe(dst []byte) {
+    gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(c), uintptr(c.SizeBytes()))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *ControlMessageHeader) UnmarshalUnsafe(src []byte) {
+    gohacks.Memmove(unsafe.Pointer(c), unsafe.Pointer(&src[0]), uintptr(c.SizeBytes()))
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
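+// The limit argument permits a truncated copy-out when the destination
+// region is smaller than the full 16-byte cmsghdr. A minimal sketch,
+// assuming cc, addr and space (space <= 16) are in scope:
+//
+//	n, err := c.CopyOutN(cc, addr, space) // copies at most space bytes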
+//go:nosplit
+func (c *ControlMessageHeader) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+    hdr.Len = c.SizeBytes()
+    hdr.Cap = c.SizeBytes()
+
+    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that c
+    // must live until the use above.
+    runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *ControlMessageHeader) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *ControlMessageHeader) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+    hdr.Len = c.SizeBytes()
+    hdr.Cap = c.SizeBytes()
+
+    length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that c
+    // must live until the use above.
+    runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *ControlMessageHeader) WriteTo(writer io.Writer) (int64, error) {
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+    hdr.Len = c.SizeBytes()
+    hdr.Cap = c.SizeBytes()
+
+    length, err := writer.Write(buf)
+    // Since we bypassed the compiler's escape analysis, indicate that c
+    // must live until the use above.
+    runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+    return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (c *ControlMessageIPPacketInfo) SizeBytes() int {
+    return 4 +
+        (*InetAddr)(nil).SizeBytes() +
+        (*InetAddr)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *ControlMessageIPPacketInfo) MarshalBytes(dst []byte) {
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(c.NIC))
+    dst = dst[4:]
+    c.LocalAddr.MarshalBytes(dst[:c.LocalAddr.SizeBytes()])
+    dst = dst[c.LocalAddr.SizeBytes():]
+    c.DestinationAddr.MarshalBytes(dst[:c.DestinationAddr.SizeBytes()])
+    dst = dst[c.DestinationAddr.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *ControlMessageIPPacketInfo) UnmarshalBytes(src []byte) {
+    c.NIC = int32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+    c.LocalAddr.UnmarshalBytes(src[:c.LocalAddr.SizeBytes()])
+    src = src[c.LocalAddr.SizeBytes():]
+    c.DestinationAddr.UnmarshalBytes(src[:c.DestinationAddr.SizeBytes()])
+    src = src[c.DestinationAddr.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *ControlMessageIPPacketInfo) Packed() bool {
+    return c.DestinationAddr.Packed() && c.LocalAddr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
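+// Because ControlMessageIPPacketInfo contains two InetAddr fields, the
+// memmove fast path is gated on both being packed; otherwise the code
+// falls back to the field-by-field MarshalBytes.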
+func (c *ControlMessageIPPacketInfo) MarshalUnsafe(dst []byte) {
+    if c.DestinationAddr.Packed() && c.LocalAddr.Packed() {
+        gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(c), uintptr(c.SizeBytes()))
+    } else {
+        // Type ControlMessageIPPacketInfo doesn't have a packed layout in memory, fallback to MarshalBytes.
+        c.MarshalBytes(dst)
+    }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *ControlMessageIPPacketInfo) UnmarshalUnsafe(src []byte) {
+    if c.DestinationAddr.Packed() && c.LocalAddr.Packed() {
+        gohacks.Memmove(unsafe.Pointer(c), unsafe.Pointer(&src[0]), uintptr(c.SizeBytes()))
+    } else {
+        // Type ControlMessageIPPacketInfo doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+        c.UnmarshalBytes(src)
+    }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (c *ControlMessageIPPacketInfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+    if !c.DestinationAddr.Packed() || !c.LocalAddr.Packed() {
+        // Type ControlMessageIPPacketInfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+        buf := cc.CopyScratchBuffer(c.SizeBytes()) // escapes: okay.
+        c.MarshalBytes(buf) // escapes: fallback.
+        return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+    hdr.Len = c.SizeBytes()
+    hdr.Cap = c.SizeBytes()
+
+    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that c
+    // must live until the use above.
+    runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *ControlMessageIPPacketInfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *ControlMessageIPPacketInfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    if !c.DestinationAddr.Packed() || !c.LocalAddr.Packed() {
+        // Type ControlMessageIPPacketInfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+        buf := cc.CopyScratchBuffer(c.SizeBytes()) // escapes: okay.
+        length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+        // Unmarshal unconditionally. If we had a short copy-in, this results in a
+        // partially unmarshalled struct.
+        c.UnmarshalBytes(buf) // escapes: fallback.
+        return length, err
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+    hdr.Len = c.SizeBytes()
+    hdr.Cap = c.SizeBytes()
+
+    length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that c
+    // must live until the use above.
+    runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *ControlMessageIPPacketInfo) WriteTo(writer io.Writer) (int64, error) {
+    if !c.DestinationAddr.Packed() || !c.LocalAddr.Packed() {
+        // Type ControlMessageIPPacketInfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+        buf := make([]byte, c.SizeBytes())
+        c.MarshalBytes(buf)
+        length, err := writer.Write(buf)
+        return int64(length), err
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+    hdr.Len = c.SizeBytes()
+    hdr.Cap = c.SizeBytes()
+
+    length, err := writer.Write(buf)
+    // Since we bypassed the compiler's escape analysis, indicate that c
+    // must live until the use above.
+    runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+    return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
 //go:nosplit
 func (i *Inet6Addr) SizeBytes() int {
     return 1 * 16
@@ -8487,6 +11180,135 @@ func (i *Inet6Addr) WriteTo(w io.Writer) (int64, error) {
 }

 // SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *Inet6MulticastRequest) SizeBytes() int {
+    return 4 +
+        (*Inet6Addr)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *Inet6MulticastRequest) MarshalBytes(dst []byte) {
+    i.MulticastAddr.MarshalBytes(dst[:i.MulticastAddr.SizeBytes()])
+    dst = dst[i.MulticastAddr.SizeBytes():]
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.InterfaceIndex))
+    dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *Inet6MulticastRequest) UnmarshalBytes(src []byte) {
+    i.MulticastAddr.UnmarshalBytes(src[:i.MulticastAddr.SizeBytes()])
+    src = src[i.MulticastAddr.SizeBytes():]
+    i.InterfaceIndex = int32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *Inet6MulticastRequest) Packed() bool {
+    return i.MulticastAddr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *Inet6MulticastRequest) MarshalUnsafe(dst []byte) {
+    if i.MulticastAddr.Packed() {
+        gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+    } else {
+        // Type Inet6MulticastRequest doesn't have a packed layout in memory, fallback to MarshalBytes.
+        i.MarshalBytes(dst)
+    }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *Inet6MulticastRequest) UnmarshalUnsafe(src []byte) {
+    if i.MulticastAddr.Packed() {
+        gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+    } else {
+        // Type Inet6MulticastRequest doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+        i.UnmarshalBytes(src)
+    }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *Inet6MulticastRequest) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+    if !i.MulticastAddr.Packed() {
+        // Type Inet6MulticastRequest doesn't have a packed layout in memory, fall back to MarshalBytes.
+        buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+        i.MarshalBytes(buf) // escapes: fallback.
+        return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *Inet6MulticastRequest) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *Inet6MulticastRequest) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    if !i.MulticastAddr.Packed() {
+        // Type Inet6MulticastRequest doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+        buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+        length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+        // Unmarshal unconditionally. If we had a short copy-in, this results in a
+        // partially unmarshalled struct.
+        i.UnmarshalBytes(buf) // escapes: fallback.
+        return length, err
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *Inet6MulticastRequest) WriteTo(writer io.Writer) (int64, error) {
+    if !i.MulticastAddr.Packed() {
+        // Type Inet6MulticastRequest doesn't have a packed layout in memory, fall back to MarshalBytes.
+        buf := make([]byte, i.SizeBytes())
+        i.MarshalBytes(buf)
+        length, err := writer.Write(buf)
+        return int64(length), err
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := writer.Write(buf)
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
 //go:nosplit
 func (i *InetAddr) SizeBytes() int {
     return 1 * 4
@@ -8582,6 +11404,265 @@ func (i *InetAddr) WriteTo(w io.Writer) (int64, error) {
 }

 // SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *InetMulticastRequest) SizeBytes() int {
+    return 0 +
+        (*InetAddr)(nil).SizeBytes() +
+        (*InetAddr)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *InetMulticastRequest) MarshalBytes(dst []byte) {
+    i.MulticastAddr.MarshalBytes(dst[:i.MulticastAddr.SizeBytes()])
+    dst = dst[i.MulticastAddr.SizeBytes():]
+    i.InterfaceAddr.MarshalBytes(dst[:i.InterfaceAddr.SizeBytes()])
+    dst = dst[i.InterfaceAddr.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *InetMulticastRequest) UnmarshalBytes(src []byte) {
+    i.MulticastAddr.UnmarshalBytes(src[:i.MulticastAddr.SizeBytes()])
+    src = src[i.MulticastAddr.SizeBytes():]
+    i.InterfaceAddr.UnmarshalBytes(src[:i.InterfaceAddr.SizeBytes()])
+    src = src[i.InterfaceAddr.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *InetMulticastRequest) Packed() bool {
+    return i.InterfaceAddr.Packed() && i.MulticastAddr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
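+// InetMulticastRequest is two 4-byte addresses back to back (8 bytes,
+// matching struct ip_mreq), so the packed path reduces to one memmove.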
+func (i *InetMulticastRequest) MarshalUnsafe(dst []byte) {
+    if i.InterfaceAddr.Packed() && i.MulticastAddr.Packed() {
+        gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+    } else {
+        // Type InetMulticastRequest doesn't have a packed layout in memory, fallback to MarshalBytes.
+        i.MarshalBytes(dst)
+    }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *InetMulticastRequest) UnmarshalUnsafe(src []byte) {
+    if i.InterfaceAddr.Packed() && i.MulticastAddr.Packed() {
+        gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+    } else {
+        // Type InetMulticastRequest doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+        i.UnmarshalBytes(src)
+    }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *InetMulticastRequest) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+    if !i.InterfaceAddr.Packed() || !i.MulticastAddr.Packed() {
+        // Type InetMulticastRequest doesn't have a packed layout in memory, fall back to MarshalBytes.
+        buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+        i.MarshalBytes(buf) // escapes: fallback.
+        return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *InetMulticastRequest) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *InetMulticastRequest) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    if !i.InterfaceAddr.Packed() || !i.MulticastAddr.Packed() {
+        // Type InetMulticastRequest doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+        buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+        length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+        // Unmarshal unconditionally. If we had a short copy-in, this results in a
+        // partially unmarshalled struct.
+        i.UnmarshalBytes(buf) // escapes: fallback.
+        return length, err
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *InetMulticastRequest) WriteTo(writer io.Writer) (int64, error) {
+    if !i.InterfaceAddr.Packed() || !i.MulticastAddr.Packed() {
+        // Type InetMulticastRequest doesn't have a packed layout in memory, fall back to MarshalBytes.
+        buf := make([]byte, i.SizeBytes())
+        i.MarshalBytes(buf)
+        length, err := writer.Write(buf)
+        return int64(length), err
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := writer.Write(buf)
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *InetMulticastRequestWithNIC) SizeBytes() int {
+    return 4 +
+        (*InetMulticastRequest)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *InetMulticastRequestWithNIC) MarshalBytes(dst []byte) {
+    i.InetMulticastRequest.MarshalBytes(dst[:i.InetMulticastRequest.SizeBytes()])
+    dst = dst[i.InetMulticastRequest.SizeBytes():]
+    hostarch.ByteOrder.PutUint32(dst[:4], uint32(i.InterfaceIndex))
+    dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *InetMulticastRequestWithNIC) UnmarshalBytes(src []byte) {
+    i.InetMulticastRequest.UnmarshalBytes(src[:i.InetMulticastRequest.SizeBytes()])
+    src = src[i.InetMulticastRequest.SizeBytes():]
+    i.InterfaceIndex = int32(hostarch.ByteOrder.Uint32(src[:4]))
+    src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *InetMulticastRequestWithNIC) Packed() bool {
+    return i.InetMulticastRequest.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *InetMulticastRequestWithNIC) MarshalUnsafe(dst []byte) {
+    if i.InetMulticastRequest.Packed() {
+        gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(i), uintptr(i.SizeBytes()))
+    } else {
+        // Type InetMulticastRequestWithNIC doesn't have a packed layout in memory, fallback to MarshalBytes.
+        i.MarshalBytes(dst)
+    }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *InetMulticastRequestWithNIC) UnmarshalUnsafe(src []byte) {
+    if i.InetMulticastRequest.Packed() {
+        gohacks.Memmove(unsafe.Pointer(i), unsafe.Pointer(&src[0]), uintptr(i.SizeBytes()))
+    } else {
+        // Type InetMulticastRequestWithNIC doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+        i.UnmarshalBytes(src)
+    }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *InetMulticastRequestWithNIC) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
+    if !i.InetMulticastRequest.Packed() {
+        // Type InetMulticastRequestWithNIC doesn't have a packed layout in memory, fall back to MarshalBytes.
+        buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+        i.MarshalBytes(buf) // escapes: fallback.
+        return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
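+// CopyOut delegates to CopyOutN with the full 12-byte size: the embedded
+// 8-byte ip_mreq followed by the 4-byte InterfaceIndex.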
+//go:nosplit
+func (i *InetMulticastRequestWithNIC) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *InetMulticastRequestWithNIC) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
+    if !i.InetMulticastRequest.Packed() {
+        // Type InetMulticastRequestWithNIC doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+        buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+        length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+        // Unmarshal unconditionally. If we had a short copy-in, this results in a
+        // partially unmarshalled struct.
+        i.UnmarshalBytes(buf) // escapes: fallback.
+        return length, err
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *InetMulticastRequestWithNIC) WriteTo(writer io.Writer) (int64, error) {
+    if !i.InetMulticastRequest.Packed() {
+        // Type InetMulticastRequestWithNIC doesn't have a packed layout in memory, fall back to MarshalBytes.
+        buf := make([]byte, i.SizeBytes())
+        i.MarshalBytes(buf)
+        length, err := writer.Write(buf)
+        return int64(length), err
+    }
+
+    // Construct a slice backed by dst's underlying memory.
+    var buf []byte
+    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+    hdr.Len = i.SizeBytes()
+    hdr.Cap = i.SizeBytes()
+
+    length, err := writer.Write(buf)
+    // Since we bypassed the compiler's escape analysis, indicate that i
+    // must live until the use above.
+    runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+    return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
 func (l *Linger) SizeBytes() int {
     return 8
 }
diff --git a/pkg/abi/linux/netfilter.go b/pkg/abi/linux/netfilter.go
index 35c632168..3fd05483a 100644
--- a/pkg/abi/linux/netfilter.go
+++ b/pkg/abi/linux/netfilter.go
@@ -245,6 +245,8 @@ const SizeOfXTCounters = 16
 // include/uapi/linux/netfilter/x_tables.h. That struct contains a union
 // exposing different data to the user and kernel, but this struct holds only
 // the user data.
+//
+// +marshal
 type XTEntryMatch struct {
     MatchSize uint16
     Name      ExtensionName
@@ -284,6 +286,8 @@ const SizeOfXTGetRevision = 30
 // include/uapi/linux/netfilter/x_tables.h. That struct contains a union
 // exposing different data to the user and kernel, but this struct holds only
 // the user data.
+//
+// +marshal
 type XTEntryTarget struct {
     TargetSize uint16
     Name       ExtensionName
@@ -306,6 +310,8 @@ type KernelXTEntryTarget struct {
 // XTStandardTarget is a built-in target, one of ACCEPT, DROP, JUMP, QUEUE,
 // RETURN, or jump. It corresponds to struct xt_standard_target in
 // include/uapi/linux/netfilter/x_tables.h.
+//
+// +marshal
 type XTStandardTarget struct {
     Target XTEntryTarget
     // A positive verdict indicates a jump, and is the offset from the
@@ -322,6 +328,8 @@ const SizeOfXTStandardTarget = 40
 // beginning of user-defined chains by putting the name of the chain in
 // ErrorName. It corresponds to struct xt_error_target in
 // include/uapi/linux/netfilter/x_tables.h.
+//
+// +marshal
 type XTErrorTarget struct {
     Target XTEntryTarget
     Name   ErrorName
@@ -349,6 +357,8 @@ const (
 // NfNATIPV4Range corresponds to struct nf_nat_ipv4_range
 // in include/uapi/linux/netfilter/nf_nat.h. The fields are in
 // network byte order.
+//
+// +marshal
 type NfNATIPV4Range struct {
     Flags uint32
     MinIP [4]byte
@@ -359,6 +369,8 @@ type NfNATIPV4Range struct {

 // NfNATIPV4MultiRangeCompat corresponds to struct
 // nf_nat_ipv4_multi_range_compat in include/uapi/linux/netfilter/nf_nat.h.
+//
+// +marshal
 type NfNATIPV4MultiRangeCompat struct {
     RangeSize uint32
     RangeIPV4 NfNATIPV4Range
@@ -366,6 +378,8 @@ type NfNATIPV4MultiRangeCompat struct {

 // XTRedirectTarget triggers a redirect when reached.
 // Adding 4 bytes of padding to make the struct 8 byte aligned.
+//
+// +marshal
 type XTRedirectTarget struct {
     Target  XTEntryTarget
     NfRange NfNATIPV4MultiRangeCompat
@@ -377,6 +391,8 @@ const SizeOfXTRedirectTarget = 56

 // XTSNATTarget triggers Source NAT when reached.
 // Adding 4 bytes of padding to make the struct 8 byte aligned.
+//
+// +marshal
 type XTSNATTarget struct {
     Target  XTEntryTarget
     NfRange NfNATIPV4MultiRangeCompat
@@ -463,6 +479,8 @@ var _ marshal.Marshallable = (*KernelIPTGetEntries)(nil)
 // IPTReplace is the argument for the IPT_SO_SET_REPLACE sockopt. It
 // corresponds to struct ipt_replace in
 // include/uapi/linux/netfilter_ipv4/ip_tables.h.
+//
+// +marshal
 type IPTReplace struct {
     Name       TableName
     ValidHooks uint32
@@ -502,6 +520,8 @@ func (tn TableName) String() string {

 // ErrorName holds the name of a netfilter error. These can also hold
 // user-defined chains.
+//
+// +marshal
 type ErrorName [XT_FUNCTION_MAXNAMELEN]byte

 // String implements fmt.Stringer.
@@ -520,6 +540,8 @@ func goString(cstring []byte) string {

 // XTTCP holds data for matching TCP packets. It corresponds to struct xt_tcp
 // in include/uapi/linux/netfilter/xt_tcpudp.h.
+//
+// +marshal
 type XTTCP struct {
     // SourcePortStart specifies the inclusive start of the range of source
     // ports to which the matcher applies.
@@ -573,6 +595,8 @@ const (

 // XTUDP holds data for matching UDP packets. It corresponds to struct xt_udp
 // in include/uapi/linux/netfilter/xt_tcpudp.h.
+//
+// +marshal
 type XTUDP struct {
     // SourcePortStart is the inclusive start of the range of source ports
     // to which the matcher applies.
@@ -613,6 +637,8 @@ const (

 // IPTOwnerInfo holds data for matching packets with owner. It corresponds
 // to struct ipt_owner_info in libxt_owner.c of iptables binary.
+//
+// +marshal
 type IPTOwnerInfo struct {
     // UID is user id which created the packet.
     UID uint32
@@ -634,7 +660,7 @@ type IPTOwnerInfo struct {
     Match uint8

     // Invert flips the meaning of Match field.
-    Invert uint8
+    Invert uint8 `marshal:"unaligned"`
 }

 // SizeOfIPTOwnerInfo is the size of an XTOwnerMatchInfo.
diff --git a/pkg/abi/linux/netfilter_ipv6.go b/pkg/abi/linux/netfilter_ipv6.go
index f7c70b430..b088b207c 100644
--- a/pkg/abi/linux/netfilter_ipv6.go
+++ b/pkg/abi/linux/netfilter_ipv6.go
@@ -264,6 +264,8 @@ const (
 // NFNATRange corresponds to struct nf_nat_range in
 // include/uapi/linux/netfilter/nf_nat.h.
+// +// +marshal type NFNATRange struct { Flags uint32 MinAddr Inet6Addr diff --git a/pkg/abi/linux/netlink.go b/pkg/abi/linux/netlink.go index b41f94a69..232fee67e 100644 --- a/pkg/abi/linux/netlink.go +++ b/pkg/abi/linux/netlink.go @@ -53,6 +53,8 @@ type SockAddrNetlink struct { const SockAddrNetlinkSize = 12 // NetlinkMessageHeader is struct nlmsghdr, from uapi/linux/netlink.h. +// +// +marshal type NetlinkMessageHeader struct { Length uint32 Type uint16 @@ -99,6 +101,8 @@ const NLMSG_ALIGNTO = 4 // NetlinkAttrHeader is the header of a netlink attribute, followed by payload. // // This is struct nlattr, from uapi/linux/netlink.h. +// +// +marshal type NetlinkAttrHeader struct { Length uint16 Type uint16 @@ -126,6 +130,8 @@ const ( ) // NetlinkErrorMessage is struct nlmsgerr, from uapi/linux/netlink.h. +// +// +marshal type NetlinkErrorMessage struct { Error int32 Header NetlinkMessageHeader diff --git a/pkg/abi/linux/netlink_route.go b/pkg/abi/linux/netlink_route.go index ceda0a8d3..581a11b24 100644 --- a/pkg/abi/linux/netlink_route.go +++ b/pkg/abi/linux/netlink_route.go @@ -85,6 +85,8 @@ const ( ) // InterfaceInfoMessage is struct ifinfomsg, from uapi/linux/rtnetlink.h. +// +// +marshal type InterfaceInfoMessage struct { Family uint8 _ uint8 @@ -164,6 +166,8 @@ const ( ) // InterfaceAddrMessage is struct ifaddrmsg, from uapi/linux/if_addr.h. +// +// +marshal type InterfaceAddrMessage struct { Family uint8 PrefixLen uint8 @@ -193,6 +197,8 @@ const ( ) // RouteMessage is struct rtmsg, from uapi/linux/rtnetlink.h. +// +// +marshal type RouteMessage struct { Family uint8 DstLen uint8 diff --git a/pkg/abi/linux/socket.go b/pkg/abi/linux/socket.go index 185eee0bb..95871b8a5 100644 --- a/pkg/abi/linux/socket.go +++ b/pkg/abi/linux/socket.go @@ -15,7 +15,6 @@ package linux import ( - "gvisor.dev/gvisor/pkg/binary" "gvisor.dev/gvisor/pkg/marshal" ) @@ -251,18 +250,24 @@ type SockAddrInet struct { } // Inet6MulticastRequest is struct ipv6_mreq, from uapi/linux/in6.h. +// +// +marshal type Inet6MulticastRequest struct { MulticastAddr Inet6Addr InterfaceIndex int32 } // InetMulticastRequest is struct ip_mreq, from uapi/linux/in.h. +// +// +marshal type InetMulticastRequest struct { MulticastAddr InetAddr InterfaceAddr InetAddr } // InetMulticastRequestWithNIC is struct ip_mreqn, from uapi/linux/in.h. +// +// +marshal type InetMulticastRequestWithNIC struct { InetMulticastRequest InterfaceIndex int32 @@ -491,7 +496,7 @@ type TCPInfo struct { } // SizeOfTCPInfo is the binary size of a TCPInfo struct. -var SizeOfTCPInfo = int(binary.Size(TCPInfo{})) +var SizeOfTCPInfo = (*TCPInfo)(nil).SizeBytes() // Control message types, from linux/socket.h. const ( @@ -502,6 +507,8 @@ const ( // A ControlMessageHeader is the header for a socket control message. // // ControlMessageHeader represents struct cmsghdr from linux/socket.h. +// +// +marshal type ControlMessageHeader struct { Length uint64 Level int32 @@ -510,7 +517,7 @@ type ControlMessageHeader struct { // SizeOfControlMessageHeader is the binary size of a ControlMessageHeader // struct. -var SizeOfControlMessageHeader = int(binary.Size(ControlMessageHeader{})) +var SizeOfControlMessageHeader = (*ControlMessageHeader)(nil).SizeBytes() // A ControlMessageCredentials is an SCM_CREDENTIALS socket control message. // @@ -527,6 +534,7 @@ type ControlMessageCredentials struct { // // ControlMessageIPPacketInfo represents struct in_pktinfo from linux/in.h. 
// +// +marshal // +stateify savable type ControlMessageIPPacketInfo struct { NIC int32 @@ -536,7 +544,7 @@ type ControlMessageIPPacketInfo struct { // SizeOfControlMessageCredentials is the binary size of a // ControlMessageCredentials struct. -var SizeOfControlMessageCredentials = int(binary.Size(ControlMessageCredentials{})) +var SizeOfControlMessageCredentials = (*ControlMessageCredentials)(nil).SizeBytes() // A ControlMessageRights is an SCM_RIGHTS socket control message. type ControlMessageRights []int32 diff --git a/pkg/bits/bits.go b/pkg/bits/bits.go index a26433ad6..d16448c3d 100644 --- a/pkg/bits/bits.go +++ b/pkg/bits/bits.go @@ -14,3 +14,13 @@ // Package bits includes all bit related types and operations. package bits + +// AlignUp rounds a length up to an alignment. align must be a power of 2. +func AlignUp(length int, align uint) int { + return (length + int(align) - 1) & ^(int(align) - 1) +} + +// AlignDown rounds a length down to an alignment. align must be a power of 2. +func AlignDown(length int, align uint) int { + return length & ^(int(align) - 1) +} diff --git a/pkg/marshal/primitive/primitive.go b/pkg/marshal/primitive/primitive.go index 32c8ed138..6f38992b7 100644 --- a/pkg/marshal/primitive/primitive.go +++ b/pkg/marshal/primitive/primitive.go @@ -125,6 +125,81 @@ func (b *ByteSlice) WriteTo(w io.Writer) (int64, error) { var _ marshal.Marshallable = (*ByteSlice)(nil) +// The following functions are convenient shorthands for wrapping a +// built-in type in a marshallable primitive type. For example: +// +// func useMarshallable(m marshal.Marshallable) { ... } +// +// // Compare: +// +// buf = []byte{...} +// // useMarshallable(&primitive.ByteSlice(buf)) // Not allowed, can't address temp value. +// bufP := primitive.ByteSlice(buf) +// useMarshallable(&bufP) +// +// // Vs: +// +// useMarshallable(AsByteSlice(buf)) +// +// Note that the argument to these functions escapes, so avoid using them on very +// hot code paths. But generally if a function accepts an interface as an +// argument, the argument escapes anyway. + +// AllocateInt8 returns x as a marshallable. +func AllocateInt8(x int8) marshal.Marshallable { + p := Int8(x) + return &p +} + +// AllocateUint8 returns x as a marshallable. +func AllocateUint8(x uint8) marshal.Marshallable { + p := Uint8(x) + return &p +} + +// AllocateInt16 returns x as a marshallable. +func AllocateInt16(x int16) marshal.Marshallable { + p := Int16(x) + return &p +} + +// AllocateUint16 returns x as a marshallable. +func AllocateUint16(x uint16) marshal.Marshallable { + p := Uint16(x) + return &p +} + +// AllocateInt32 returns x as a marshallable. +func AllocateInt32(x int32) marshal.Marshallable { + p := Int32(x) + return &p +} + +// AllocateUint32 returns x as a marshallable. +func AllocateUint32(x uint32) marshal.Marshallable { + p := Uint32(x) + return &p +} + +// AllocateInt64 returns x as a marshallable. +func AllocateInt64(x int64) marshal.Marshallable { + p := Int64(x) + return &p +} + +// AllocateUint64 returns x as a marshallable. +func AllocateUint64(x uint64) marshal.Marshallable { + p := Uint64(x) + return &p +} + +// AsByteSlice returns b as a marshallable. Note that this allocates a new slice +// header, but does not copy the slice contents. +func AsByteSlice(b []byte) marshal.Marshallable { + bs := ByteSlice(b) + return &bs +} + // Below, we define some convenience functions for marshalling primitive types // using the newtypes above, without requiring superfluous casts.
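The bits.AlignUp/AlignDown helpers above are the standard power-of-two mask trick, and they are what the control-message and netlink code later in this diff switches to in place of the removed binary.Align* functions. A standalone sketch (mirroring the definitions above rather than importing gvisor.dev packages) to make the rounding concrete:

package main

import "fmt"

// alignUp mirrors bits.AlignUp above: rounds length up to the next
// multiple of align, which must be a power of 2.
func alignUp(length int, align uint) int {
	return (length + int(align) - 1) & ^(int(align) - 1)
}

// alignDown mirrors bits.AlignDown above: rounds length down.
func alignDown(length int, align uint) int {
	return length & ^(int(align) - 1)
}

func main() {
	fmt.Println(alignUp(13, 8), alignDown(13, 8)) // 16 8
	fmt.Println(alignUp(16, 8), alignDown(16, 8)) // 16 16
}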
diff --git a/pkg/marshal/util.go b/pkg/marshal/util.go new file mode 100644 index 000000000..c1e5475bd --- /dev/null +++ b/pkg/marshal/util.go @@ -0,0 +1,23 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package marshal + +// Marshal returns the serialized contents of m in a newly allocated +// byte slice. +func Marshal(m Marshallable) []byte { + buf := make([]byte, m.SizeBytes()) + m.MarshalUnsafe(buf) + return buf +} diff --git a/pkg/sentry/socket/control/control.go b/pkg/sentry/socket/control/control.go index 45a05cd63..235b9c306 100644 --- a/pkg/sentry/socket/control/control.go +++ b/pkg/sentry/socket/control/control.go @@ -18,9 +18,11 @@ package control import ( "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" + "gvisor.dev/gvisor/pkg/bits" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/hostarch" + "gvisor.dev/gvisor/pkg/marshal" + "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" @@ -193,7 +195,7 @@ func putUint32(buf []byte, n uint32) []byte { // putCmsg writes a control message header and as much data as will fit into // the unused capacity of a buffer. func putCmsg(buf []byte, flags int, msgType uint32, align uint, data []int32) ([]byte, int) { - space := binary.AlignDown(cap(buf)-len(buf), 4) + space := bits.AlignDown(cap(buf)-len(buf), 4) // We can't write to space that doesn't exist, so if we are going to align // the available space, we must align down. @@ -230,7 +232,7 @@ func putCmsg(buf []byte, flags int, msgType uint32, align uint, data []int32) ([ return alignSlice(buf, align), flags } -func putCmsgStruct(buf []byte, msgLevel, msgType uint32, align uint, data interface{}) []byte { +func putCmsgStruct(buf []byte, msgLevel, msgType uint32, align uint, data marshal.Marshallable) []byte { if cap(buf)-len(buf) < linux.SizeOfControlMessageHeader { return buf } @@ -241,8 +243,7 @@ func putCmsgStruct(buf []byte, msgLevel, msgType uint32, align uint, data interf buf = putUint32(buf, msgType) hdrBuf := buf - - buf = binary.Marshal(buf, hostarch.ByteOrder, data) + buf = append(buf, marshal.Marshal(data)...) // If the control message data brought us over capacity, omit it. if cap(buf) != cap(ob) { @@ -288,7 +289,7 @@ func PackCredentials(t *kernel.Task, creds SCMCredentials, buf []byte, flags int // alignSlice extends a slice's length (up to the capacity) to align it. func alignSlice(buf []byte, align uint) []byte { - aligned := binary.AlignUp(len(buf), align) + aligned := bits.AlignUp(len(buf), align) if aligned > cap(buf) { // Linux allows unaligned data if there isn't room for alignment. // Since there isn't room for alignment, there isn't room for any @@ -300,12 +301,13 @@ func alignSlice(buf []byte, align uint) []byte { // PackTimestamp packs a SO_TIMESTAMP socket control message. 
func PackTimestamp(t *kernel.Task, timestamp int64, buf []byte) []byte { + timestampP := linux.NsecToTimeval(timestamp) return putCmsgStruct( buf, linux.SOL_SOCKET, linux.SO_TIMESTAMP, t.Arch().Width(), - linux.NsecToTimeval(timestamp), + &timestampP, ) } @@ -316,7 +318,7 @@ func PackInq(t *kernel.Task, inq int32, buf []byte) []byte { linux.SOL_TCP, linux.TCP_INQ, t.Arch().Width(), - inq, + primitive.AllocateInt32(inq), ) } @@ -327,7 +329,7 @@ func PackTOS(t *kernel.Task, tos uint8, buf []byte) []byte { linux.SOL_IP, linux.IP_TOS, t.Arch().Width(), - tos, + primitive.AllocateUint8(tos), ) } @@ -338,7 +340,7 @@ func PackTClass(t *kernel.Task, tClass uint32, buf []byte) []byte { linux.SOL_IPV6, linux.IPV6_TCLASS, t.Arch().Width(), - tClass, + primitive.AllocateUint32(tClass), ) } @@ -423,7 +425,7 @@ func PackControlMessages(t *kernel.Task, cmsgs socket.ControlMessages, buf []byt // cmsgSpace is equivalent to CMSG_SPACE in Linux. func cmsgSpace(t *kernel.Task, dataLen int) int { - return linux.SizeOfControlMessageHeader + binary.AlignUp(dataLen, t.Arch().Width()) + return linux.SizeOfControlMessageHeader + bits.AlignUp(dataLen, t.Arch().Width()) } // CmsgsSpace returns the number of bytes needed to fit the control messages @@ -475,7 +477,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) } var h linux.ControlMessageHeader - binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageHeader], hostarch.ByteOrder, &h) + h.UnmarshalUnsafe(buf[i : i+linux.SizeOfControlMessageHeader]) if h.Length < uint64(linux.SizeOfControlMessageHeader) { return socket.ControlMessages{}, syserror.EINVAL @@ -491,7 +493,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) case linux.SOL_SOCKET: switch h.Type { case linux.SCM_RIGHTS: - rightsSize := binary.AlignDown(length, linux.SizeOfControlMessageRight) + rightsSize := bits.AlignDown(length, linux.SizeOfControlMessageRight) numRights := rightsSize / linux.SizeOfControlMessageRight if len(fds)+numRights > linux.SCM_MAX_FD { @@ -502,7 +504,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) fds = append(fds, int32(hostarch.ByteOrder.Uint32(buf[j:j+linux.SizeOfControlMessageRight]))) } - i += binary.AlignUp(length, width) + i += bits.AlignUp(length, width) case linux.SCM_CREDENTIALS: if length < linux.SizeOfControlMessageCredentials { @@ -510,23 +512,23 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) } var creds linux.ControlMessageCredentials - binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageCredentials], hostarch.ByteOrder, &creds) + creds.UnmarshalUnsafe(buf[i : i+linux.SizeOfControlMessageCredentials]) scmCreds, err := NewSCMCredentials(t, creds) if err != nil { return socket.ControlMessages{}, err } cmsgs.Unix.Credentials = scmCreds - i += binary.AlignUp(length, width) + i += bits.AlignUp(length, width) case linux.SO_TIMESTAMP: if length < linux.SizeOfTimeval { return socket.ControlMessages{}, syserror.EINVAL } var ts linux.Timeval - binary.Unmarshal(buf[i:i+linux.SizeOfTimeval], hostarch.ByteOrder, &ts) + ts.UnmarshalUnsafe(buf[i : i+linux.SizeOfTimeval]) cmsgs.IP.Timestamp = ts.ToNsecCapped() cmsgs.IP.HasTimestamp = true - i += binary.AlignUp(length, width) + i += bits.AlignUp(length, width) default: // Unknown message type.
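cmsgSpace above keeps the CMSG_SPACE semantics: a fixed-size cmsghdr plus the payload rounded up to the task's pointer width, which is also the stride Parse uses to step to the next control message. A standalone sketch with assumed 64-bit sizes (16-byte header, 8-byte width):

package main

import "fmt"

func alignUp(length int, align uint) int {
	return (length + int(align) - 1) & ^(int(align) - 1)
}

// cmsgSpace mirrors the function above for an assumed 64-bit task: a
// 16-byte cmsghdr plus the payload padded to an 8-byte boundary.
func cmsgSpace(dataLen int) int {
	return 16 + alignUp(dataLen, 8)
}

func main() {
	fmt.Println(cmsgSpace(4))  // 24: one SCM_RIGHTS fd, matches CMSG_SPACE(4) on 64-bit Linux.
	fmt.Println(cmsgSpace(12)) // 32: an SCM_CREDENTIALS payload (struct ucred).
}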
@@ -539,8 +541,10 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) return socket.ControlMessages{}, syserror.EINVAL } cmsgs.IP.HasTOS = true - binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageTOS], hostarch.ByteOrder, &cmsgs.IP.TOS) - i += binary.AlignUp(length, width) + var tos primitive.Uint8 + tos.UnmarshalUnsafe(buf[i : i+linux.SizeOfControlMessageTOS]) + cmsgs.IP.TOS = uint8(tos) + i += bits.AlignUp(length, width) case linux.IP_PKTINFO: if length < linux.SizeOfControlMessageIPPacketInfo { @@ -549,19 +553,19 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) cmsgs.IP.HasIPPacketInfo = true var packetInfo linux.ControlMessageIPPacketInfo - binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageIPPacketInfo], hostarch.ByteOrder, &packetInfo) + packetInfo.UnmarshalUnsafe(buf[i : i+linux.SizeOfControlMessageIPPacketInfo]) cmsgs.IP.PacketInfo = packetInfo - i += binary.AlignUp(length, width) + i += bits.AlignUp(length, width) case linux.IP_RECVORIGDSTADDR: var addr linux.SockAddrInet if length < addr.SizeBytes() { return socket.ControlMessages{}, syserror.EINVAL } - binary.Unmarshal(buf[i:i+addr.SizeBytes()], hostarch.ByteOrder, &addr) + addr.UnmarshalUnsafe(buf[i : i+addr.SizeBytes()]) cmsgs.IP.OriginalDstAddress = &addr - i += binary.AlignUp(length, width) + i += bits.AlignUp(length, width) case linux.IP_RECVERR: var errCmsg linux.SockErrCMsgIPv4 @@ -571,7 +575,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) errCmsg.UnmarshalBytes(buf[i : i+errCmsg.SizeBytes()]) cmsgs.IP.SockErr = &errCmsg - i += binary.AlignUp(length, width) + i += bits.AlignUp(length, width) default: return socket.ControlMessages{}, syserror.EINVAL @@ -583,17 +587,19 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) return socket.ControlMessages{}, syserror.EINVAL } cmsgs.IP.HasTClass = true - binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageTClass], hostarch.ByteOrder, &cmsgs.IP.TClass) - i += binary.AlignUp(length, width) + var tclass primitive.Uint32 + tclass.UnmarshalUnsafe(buf[i : i+linux.SizeOfControlMessageTClass]) + cmsgs.IP.TClass = uint32(tclass) + i += bits.AlignUp(length, width) case linux.IPV6_RECVORIGDSTADDR: var addr linux.SockAddrInet6 if length < addr.SizeBytes() { return socket.ControlMessages{}, syserror.EINVAL } - binary.Unmarshal(buf[i:i+addr.SizeBytes()], hostarch.ByteOrder, &addr) + addr.UnmarshalUnsafe(buf[i : i+addr.SizeBytes()]) cmsgs.IP.OriginalDstAddress = &addr - i += binary.AlignUp(length, width) + i += bits.AlignUp(length, width) case linux.IPV6_RECVERR: var errCmsg linux.SockErrCMsgIPv6 @@ -603,7 +609,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) errCmsg.UnmarshalBytes(buf[i : i+errCmsg.SizeBytes()]) cmsgs.IP.SockErr = &errCmsg - i += binary.AlignUp(length, width) + i += bits.AlignUp(length, width) default: return socket.ControlMessages{}, syserror.EINVAL diff --git a/pkg/sentry/socket/hostinet/socket.go b/pkg/sentry/socket/hostinet/socket.go index 0d3b23643..52ae4bc9c 100644 --- a/pkg/sentry/socket/hostinet/socket.go +++ b/pkg/sentry/socket/hostinet/socket.go @@ -19,7 +19,6 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/fdnotifier" "gvisor.dev/gvisor/pkg/hostarch" @@ -529,7 +528,7 @@ func parseUnixControlMessages(unixControlMessages []unix.SocketControlMessage) s case linux.SO_TIMESTAMP: 
controlMessages.IP.HasTimestamp = true ts := linux.Timeval{} - ts.UnmarshalBytes(unixCmsg.Data[:linux.SizeOfTimeval]) + ts.UnmarshalUnsafe(unixCmsg.Data[:linux.SizeOfTimeval]) controlMessages.IP.Timestamp = ts.ToNsecCapped() } @@ -537,17 +536,19 @@ func parseUnixControlMessages(unixControlMessages []unix.SocketControlMessage) s switch unixCmsg.Header.Type { case linux.IP_TOS: controlMessages.IP.HasTOS = true - binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageTOS], hostarch.ByteOrder, &controlMessages.IP.TOS) + var tos primitive.Uint8 + tos.UnmarshalUnsafe(unixCmsg.Data[:tos.SizeBytes()]) + controlMessages.IP.TOS = uint8(tos) case linux.IP_PKTINFO: controlMessages.IP.HasIPPacketInfo = true var packetInfo linux.ControlMessageIPPacketInfo - binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageIPPacketInfo], hostarch.ByteOrder, &packetInfo) + packetInfo.UnmarshalUnsafe(unixCmsg.Data[:packetInfo.SizeBytes()]) controlMessages.IP.PacketInfo = packetInfo case linux.IP_RECVORIGDSTADDR: var addr linux.SockAddrInet - binary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], hostarch.ByteOrder, &addr) + addr.UnmarshalUnsafe(unixCmsg.Data[:addr.SizeBytes()]) controlMessages.IP.OriginalDstAddress = &addr case unix.IP_RECVERR: @@ -560,11 +561,13 @@ func parseUnixControlMessages(unixControlMessages []unix.SocketControlMessage) s switch unixCmsg.Header.Type { case linux.IPV6_TCLASS: controlMessages.IP.HasTClass = true - binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageTClass], hostarch.ByteOrder, &controlMessages.IP.TClass) + var tclass primitive.Uint32 + tclass.UnmarshalUnsafe(unixCmsg.Data[:tclass.SizeBytes()]) + controlMessages.IP.TClass = uint32(tclass) case linux.IPV6_RECVORIGDSTADDR: var addr linux.SockAddrInet6 - binary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], hostarch.ByteOrder, &addr) + addr.UnmarshalUnsafe(unixCmsg.Data[:addr.SizeBytes()]) controlMessages.IP.OriginalDstAddress = &addr case unix.IPV6_RECVERR: @@ -577,7 +580,9 @@ func parseUnixControlMessages(unixControlMessages []unix.SocketControlMessage) s switch unixCmsg.Header.Type { case linux.TCP_INQ: controlMessages.IP.HasInq = true - binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageInq], hostarch.ByteOrder, &controlMessages.IP.Inq) + var inq primitive.Int32 + inq.UnmarshalUnsafe(unixCmsg.Data[:linux.SizeOfControlMessageInq]) + controlMessages.IP.Inq = int32(inq) } } } @@ -691,7 +696,7 @@ func (s *socketOpsCommon) State() uint32 { return 0 } - binary.Unmarshal(buf, hostarch.ByteOrder, &info) + info.UnmarshalUnsafe(buf[:info.SizeBytes()]) return uint32(info.State) } diff --git a/pkg/sentry/socket/hostinet/stack.go b/pkg/sentry/socket/hostinet/stack.go index 26e8ae17a..393a1ab3a 100644 --- a/pkg/sentry/socket/hostinet/stack.go +++ b/pkg/sentry/socket/hostinet/stack.go @@ -15,6 +15,7 @@ package hostinet import ( + "encoding/binary" "fmt" "io" "io/ioutil" @@ -26,10 +27,10 @@ import ( "syscall" "golang.org/x/sys/unix" - "gvisor.dev/gvisor/pkg/binary" + "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" - "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/inet" "gvisor.dev/gvisor/pkg/syserr" "gvisor.dev/gvisor/pkg/syserror" @@ -147,8 +148,8 @@ func ExtractHostInterfaces(links []syscall.NetlinkMessage, addrs []syscall.Netli if len(link.Data) < unix.SizeofIfInfomsg { return fmt.Errorf("RTM_GETLINK returned RTM_NEWLINK message with invalid data length (%d bytes, expected at least %d bytes)", len(link.Data), 
unix.SizeofIfInfomsg) } - var ifinfo unix.IfInfomsg - binary.Unmarshal(link.Data[:unix.SizeofIfInfomsg], hostarch.ByteOrder, &ifinfo) + var ifinfo linux.InterfaceInfoMessage + ifinfo.UnmarshalUnsafe(link.Data[:ifinfo.SizeBytes()]) inetIF := inet.Interface{ DeviceType: ifinfo.Type, Flags: ifinfo.Flags, @@ -178,11 +179,11 @@ func ExtractHostInterfaces(links []syscall.NetlinkMessage, addrs []syscall.Netli if len(addr.Data) < unix.SizeofIfAddrmsg { return fmt.Errorf("RTM_GETADDR returned RTM_NEWADDR message with invalid data length (%d bytes, expected at least %d bytes)", len(addr.Data), unix.SizeofIfAddrmsg) } - var ifaddr unix.IfAddrmsg - binary.Unmarshal(addr.Data[:unix.SizeofIfAddrmsg], hostarch.ByteOrder, &ifaddr) + var ifaddr linux.InterfaceAddrMessage + ifaddr.UnmarshalUnsafe(addr.Data[:ifaddr.SizeBytes()]) inetAddr := inet.InterfaceAddr{ Family: ifaddr.Family, - PrefixLen: ifaddr.Prefixlen, + PrefixLen: ifaddr.PrefixLen, Flags: ifaddr.Flags, } attrs, err := syscall.ParseNetlinkRouteAttr(&addr) @@ -210,13 +211,13 @@ func ExtractHostRoutes(routeMsgs []syscall.NetlinkMessage) ([]inet.Route, error) continue } - var ifRoute unix.RtMsg - binary.Unmarshal(routeMsg.Data[:unix.SizeofRtMsg], hostarch.ByteOrder, &ifRoute) + var ifRoute linux.RouteMessage + ifRoute.UnmarshalUnsafe(routeMsg.Data[:ifRoute.SizeBytes()]) inetRoute := inet.Route{ Family: ifRoute.Family, - DstLen: ifRoute.Dst_len, - SrcLen: ifRoute.Src_len, - TOS: ifRoute.Tos, + DstLen: ifRoute.DstLen, + SrcLen: ifRoute.SrcLen, + TOS: ifRoute.TOS, Table: ifRoute.Table, Protocol: ifRoute.Protocol, Scope: ifRoute.Scope, @@ -245,7 +246,9 @@ func ExtractHostRoutes(routeMsgs []syscall.NetlinkMessage) ([]inet.Route, error) if len(attr.Value) != expected { return nil, fmt.Errorf("RTM_GETROUTE returned RTM_NEWROUTE message with invalid attribute data length (%d bytes, expected %d bytes)", len(attr.Value), expected) } - binary.Unmarshal(attr.Value, hostarch.ByteOrder, &inetRoute.OutputInterface) + var outputIF primitive.Int32 + outputIF.UnmarshalUnsafe(attr.Value) + inetRoute.OutputInterface = int32(outputIF) } } diff --git a/pkg/sentry/socket/netfilter/extensions.go b/pkg/sentry/socket/netfilter/extensions.go index 4bd305a44..6fc7781ad 100644 --- a/pkg/sentry/socket/netfilter/extensions.go +++ b/pkg/sentry/socket/netfilter/extensions.go @@ -18,8 +18,7 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" - "gvisor.dev/gvisor/pkg/hostarch" + "gvisor.dev/gvisor/pkg/bits" "gvisor.dev/gvisor/pkg/syserr" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/stack" @@ -79,7 +78,7 @@ func marshalEntryMatch(name string, data []byte) []byte { nflog("marshaling matcher %q", name) // We have to pad this struct size to a multiple of 8 bytes. - size := binary.AlignUp(linux.SizeOfXTEntryMatch+len(data), 8) + size := bits.AlignUp(linux.SizeOfXTEntryMatch+len(data), 8) matcher := linux.KernelXTEntryMatch{ XTEntryMatch: linux.XTEntryMatch{ MatchSize: uint16(size), @@ -88,9 +87,11 @@ func marshalEntryMatch(name string, data []byte) []byte { } copy(matcher.Name[:], name) - buf := make([]byte, 0, size) - buf = binary.Marshal(buf, hostarch.ByteOrder, matcher) - return append(buf, make([]byte, size-len(buf))...) 
+ buf := make([]byte, size) + entryLen := matcher.XTEntryMatch.SizeBytes() + matcher.XTEntryMatch.MarshalUnsafe(buf[:entryLen]) + copy(buf[entryLen:], matcher.Data) + return buf } func unmarshalMatcher(match linux.XTEntryMatch, filter stack.IPHeaderFilter, buf []byte) (stack.Matcher, error) { diff --git a/pkg/sentry/socket/netfilter/ipv4.go b/pkg/sentry/socket/netfilter/ipv4.go index 1fc4cb651..cb78ef60b 100644 --- a/pkg/sentry/socket/netfilter/ipv4.go +++ b/pkg/sentry/socket/netfilter/ipv4.go @@ -18,8 +18,6 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" - "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/syserr" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" @@ -141,10 +139,9 @@ func modifyEntries4(stk *stack.Stack, optVal []byte, replace *linux.IPTReplace, return nil, syserr.ErrInvalidArgument } var entry linux.IPTEntry - buf := optVal[:linux.SizeOfIPTEntry] - binary.Unmarshal(buf, hostarch.ByteOrder, &entry) + entry.UnmarshalUnsafe(optVal[:entry.SizeBytes()]) initialOptValLen := len(optVal) - optVal = optVal[linux.SizeOfIPTEntry:] + optVal = optVal[entry.SizeBytes():] if entry.TargetOffset < linux.SizeOfIPTEntry { nflog("entry has too-small target offset %d", entry.TargetOffset) diff --git a/pkg/sentry/socket/netfilter/ipv6.go b/pkg/sentry/socket/netfilter/ipv6.go index 67a52b628..5cb7fe4aa 100644 --- a/pkg/sentry/socket/netfilter/ipv6.go +++ b/pkg/sentry/socket/netfilter/ipv6.go @@ -18,8 +18,6 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" - "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/syserr" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" @@ -144,10 +142,9 @@ func modifyEntries6(stk *stack.Stack, optVal []byte, replace *linux.IPTReplace, return nil, syserr.ErrInvalidArgument } var entry linux.IP6TEntry - buf := optVal[:linux.SizeOfIP6TEntry] - binary.Unmarshal(buf, hostarch.ByteOrder, &entry) + entry.UnmarshalUnsafe(optVal[:entry.SizeBytes()]) initialOptValLen := len(optVal) - optVal = optVal[linux.SizeOfIP6TEntry:] + optVal = optVal[entry.SizeBytes():] if entry.TargetOffset < linux.SizeOfIP6TEntry { nflog("entry has too-small target offset %d", entry.TargetOffset) diff --git a/pkg/sentry/socket/netfilter/netfilter.go b/pkg/sentry/socket/netfilter/netfilter.go index c6fa3fd16..f42d73178 100644 --- a/pkg/sentry/socket/netfilter/netfilter.go +++ b/pkg/sentry/socket/netfilter/netfilter.go @@ -22,7 +22,6 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/kernel" @@ -121,7 +120,7 @@ func GetEntries4(t *kernel.Task, stack *stack.Stack, outPtr hostarch.Addr, outLe nflog("couldn't read entries: %v", err) return linux.KernelIPTGetEntries{}, syserr.ErrInvalidArgument } - if binary.Size(entries) > uintptr(outLen) { + if entries.SizeBytes() > outLen { nflog("insufficient GetEntries output size: %d", uintptr(outLen)) return linux.KernelIPTGetEntries{}, syserr.ErrInvalidArgument } @@ -146,7 +145,7 @@ func GetEntries6(t *kernel.Task, stack *stack.Stack, outPtr hostarch.Addr, outLe nflog("couldn't read entries: %v", err) return linux.KernelIP6TGetEntries{}, syserr.ErrInvalidArgument } - if binary.Size(entries) > uintptr(outLen) { + if entries.SizeBytes() > outLen { nflog("insufficient GetEntries output size: %d", uintptr(outLen)) return linux.KernelIP6TGetEntries{}, syserr.ErrInvalidArgument } @@ -179,7 +178,7 @@ func SetEntries(stk *stack.Stack, 
optVal []byte, ipv6 bool) *syserr.Error { var replace linux.IPTReplace replaceBuf := optVal[:linux.SizeOfIPTReplace] optVal = optVal[linux.SizeOfIPTReplace:] - binary.Unmarshal(replaceBuf, hostarch.ByteOrder, &replace) + replace.UnmarshalBytes(replaceBuf) // TODO(gvisor.dev/issue/170): Support other tables. var table stack.Table @@ -309,8 +308,8 @@ func parseMatchers(filter stack.IPHeaderFilter, optVal []byte) ([]stack.Matcher, return nil, fmt.Errorf("optVal has insufficient size for entry match: %d", len(optVal)) } var match linux.XTEntryMatch - buf := optVal[:linux.SizeOfXTEntryMatch] - binary.Unmarshal(buf, hostarch.ByteOrder, &match) + buf := optVal[:match.SizeBytes()] + match.UnmarshalUnsafe(buf) nflog("set entries: parsed entry match %q: %+v", match.Name.String(), match) // Check some invariants. diff --git a/pkg/sentry/socket/netfilter/netfilter_abi_autogen_unsafe.go b/pkg/sentry/socket/netfilter/netfilter_abi_autogen_unsafe.go new file mode 100644 index 000000000..ea4abc34f --- /dev/null +++ b/pkg/sentry/socket/netfilter/netfilter_abi_autogen_unsafe.go @@ -0,0 +1,156 @@ +// Automatically generated marshal implementation. See tools/go_marshal. + +// If there are issues with build tag aggregation, see +// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here +// come from the input set of files used to generate this file. This input set +// is filtered based on pre-defined file suffixes related to build tags, see +// tools/defs.bzl:calculate_sets(). + +package netfilter + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/gohacks" + "gvisor.dev/gvisor/pkg/hostarch" + "gvisor.dev/gvisor/pkg/marshal" + "io" + "reflect" + "runtime" + "unsafe" +) + +// Marshallable types used by this file. +var _ marshal.Marshallable = (*linux.NFNATRange)(nil) +var _ marshal.Marshallable = (*linux.XTEntryTarget)(nil) +var _ marshal.Marshallable = (*nfNATTarget)(nil) + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (n *nfNATTarget) SizeBytes() int { + return 0 + + (*linux.XTEntryTarget)(nil).SizeBytes() + + (*linux.NFNATRange)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (n *nfNATTarget) MarshalBytes(dst []byte) { + n.Target.MarshalBytes(dst[:n.Target.SizeBytes()]) + dst = dst[n.Target.SizeBytes():] + n.Range.MarshalBytes(dst[:n.Range.SizeBytes()]) + dst = dst[n.Range.SizeBytes():] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (n *nfNATTarget) UnmarshalBytes(src []byte) { + n.Target.UnmarshalBytes(src[:n.Target.SizeBytes()]) + src = src[n.Target.SizeBytes():] + n.Range.UnmarshalBytes(src[:n.Range.SizeBytes()]) + src = src[n.Range.SizeBytes():] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (n *nfNATTarget) Packed() bool { + return n.Range.Packed() && n.Target.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (n *nfNATTarget) MarshalUnsafe(dst []byte) { + if n.Range.Packed() && n.Target.Packed() { + gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(n), uintptr(n.SizeBytes())) + } else { + // Type nfNATTarget doesn't have a packed layout in memory, fallback to MarshalBytes. + n.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. 
+func (n *nfNATTarget) UnmarshalUnsafe(src []byte) { + if n.Range.Packed() && n.Target.Packed() { + gohacks.Memmove(unsafe.Pointer(n), unsafe.Pointer(&src[0]), uintptr(n.SizeBytes())) + } else { + // Type nfNATTarget doesn't have a packed layout in memory, fallback to UnmarshalBytes. + n.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (n *nfNATTarget) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { + if !n.Range.Packed() || !n.Target.Packed() { + // Type nfNATTarget doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay. + n.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n))) + hdr.Len = n.SizeBytes() + hdr.Cap = n.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that n + // must live until the use above. + runtime.KeepAlive(n) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (n *nfNATTarget) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + return n.CopyOutN(cc, addr, n.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (n *nfNATTarget) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) { + if !n.Range.Packed() || !n.Target.Packed() { + // Type nfNATTarget doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(n.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + n.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n))) + hdr.Len = n.SizeBytes() + hdr.Cap = n.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that n + // must live until the use above. + runtime.KeepAlive(n) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (n *nfNATTarget) WriteTo(writer io.Writer) (int64, error) { + if !n.Range.Packed() || !n.Target.Packed() { + // Type nfNATTarget doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, n.SizeBytes()) + n.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(n))) + hdr.Len = n.SizeBytes() + hdr.Cap = n.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that n + // must live until the use above. + runtime.KeepAlive(n) // escapes: replaced by intrinsic.
+ return int64(length), err +} + diff --git a/pkg/sentry/socket/netfilter/owner_matcher.go b/pkg/sentry/socket/netfilter/owner_matcher.go index b2cc6be20..60845cab3 100644 --- a/pkg/sentry/socket/netfilter/owner_matcher.go +++ b/pkg/sentry/socket/netfilter/owner_matcher.go @@ -18,8 +18,7 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" - "gvisor.dev/gvisor/pkg/hostarch" + "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/tcpip/stack" ) @@ -59,8 +58,8 @@ func (ownerMarshaler) marshal(mr matcher) []byte { } } - buf := make([]byte, 0, linux.SizeOfIPTOwnerInfo) - return marshalEntryMatch(matcherNameOwner, binary.Marshal(buf, hostarch.ByteOrder, iptOwnerInfo)) + buf := marshal.Marshal(&iptOwnerInfo) + return marshalEntryMatch(matcherNameOwner, buf) } // unmarshal implements matchMaker.unmarshal. @@ -72,7 +71,7 @@ func (ownerMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack. // For alignment reasons, the match's total size may // exceed what's strictly necessary to hold matchData. var matchData linux.IPTOwnerInfo - binary.Unmarshal(buf[:linux.SizeOfIPTOwnerInfo], hostarch.ByteOrder, &matchData) + matchData.UnmarshalUnsafe(buf[:linux.SizeOfIPTOwnerInfo]) nflog("parseMatchers: parsed IPTOwnerInfo: %+v", matchData) var owner OwnerMatcher diff --git a/pkg/sentry/socket/netfilter/targets.go b/pkg/sentry/socket/netfilter/targets.go index 4ae1592b2..e94aceb92 100644 --- a/pkg/sentry/socket/netfilter/targets.go +++ b/pkg/sentry/socket/netfilter/targets.go @@ -15,11 +15,12 @@ package netfilter import ( + "encoding/binary" "fmt" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" "gvisor.dev/gvisor/pkg/hostarch" + "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/syserr" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" @@ -189,8 +190,7 @@ func (*standardTargetMaker) marshal(target target) []byte { Verdict: verdict, } - ret := make([]byte, 0, linux.SizeOfXTStandardTarget) - return binary.Marshal(ret, hostarch.ByteOrder, xt) + return marshal.Marshal(&xt) } func (*standardTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) { @@ -199,8 +199,7 @@ func (*standardTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) ( return nil, syserr.ErrInvalidArgument } var standardTarget linux.XTStandardTarget - buf = buf[:linux.SizeOfXTStandardTarget] - binary.Unmarshal(buf, hostarch.ByteOrder, &standardTarget) + standardTarget.UnmarshalUnsafe(buf[:standardTarget.SizeBytes()]) if standardTarget.Verdict < 0 { // A Verdict < 0 indicates a non-jump verdict. @@ -245,8 +244,7 @@ func (*errorTargetMaker) marshal(target target) []byte { copy(xt.Name[:], errorName) copy(xt.Target.Name[:], ErrorTargetName) - ret := make([]byte, 0, linux.SizeOfXTErrorTarget) - return binary.Marshal(ret, hostarch.ByteOrder, xt) + return marshal.Marshal(&xt) } func (*errorTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) { @@ -256,7 +254,7 @@ func (*errorTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (tar } var errTgt linux.XTErrorTarget buf = buf[:linux.SizeOfXTErrorTarget] - binary.Unmarshal(buf, hostarch.ByteOrder, &errTgt) + errTgt.UnmarshalUnsafe(buf) // Error targets are used in 2 cases: // * An actual error case. 
These rules have an error named @@ -299,12 +297,11 @@ func (*redirectTargetMaker) marshal(target target) []byte { } copy(xt.Target.Name[:], RedirectTargetName) - ret := make([]byte, 0, linux.SizeOfXTRedirectTarget) xt.NfRange.RangeSize = 1 xt.NfRange.RangeIPV4.Flags |= linux.NF_NAT_RANGE_PROTO_SPECIFIED xt.NfRange.RangeIPV4.MinPort = htons(rt.Port) xt.NfRange.RangeIPV4.MaxPort = xt.NfRange.RangeIPV4.MinPort - return binary.Marshal(ret, hostarch.ByteOrder, xt) + return marshal.Marshal(&xt) } func (*redirectTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) { @@ -320,7 +317,7 @@ func (*redirectTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) ( var rt linux.XTRedirectTarget buf = buf[:linux.SizeOfXTRedirectTarget] - binary.Unmarshal(buf, hostarch.ByteOrder, &rt) + rt.UnmarshalUnsafe(buf) // Copy linux.XTRedirectTarget to stack.RedirectTarget. target := redirectTarget{RedirectTarget: stack.RedirectTarget{ @@ -359,6 +356,7 @@ func (*redirectTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) ( return &target, nil } +// +marshal type nfNATTarget struct { Target linux.XTEntryTarget Range linux.NFNATRange @@ -394,8 +392,7 @@ func (*nfNATTargetMaker) marshal(target target) []byte { nt.Range.MinProto = htons(rt.Port) nt.Range.MaxProto = nt.Range.MinProto - ret := make([]byte, 0, nfNATMarshalledSize) - return binary.Marshal(ret, hostarch.ByteOrder, nt) + return marshal.Marshal(&nt) } func (*nfNATTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) { @@ -411,7 +408,7 @@ func (*nfNATTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (tar var natRange linux.NFNATRange buf = buf[linux.SizeOfXTEntryTarget:nfNATMarshalledSize] - binary.Unmarshal(buf, hostarch.ByteOrder, &natRange) + natRange.UnmarshalUnsafe(buf) // We don't support port or address ranges. if natRange.MinAddr != natRange.MaxAddr { @@ -468,8 +465,7 @@ func (*snatTargetMakerV4) marshal(target target) []byte { xt.NfRange.RangeIPV4.MaxPort = xt.NfRange.RangeIPV4.MinPort copy(xt.NfRange.RangeIPV4.MinIP[:], st.Addr) copy(xt.NfRange.RangeIPV4.MaxIP[:], st.Addr) - ret := make([]byte, 0, linux.SizeOfXTSNATTarget) - return binary.Marshal(ret, hostarch.ByteOrder, xt) + return marshal.Marshal(&xt) } func (*snatTargetMakerV4) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) { @@ -485,7 +481,7 @@ func (*snatTargetMakerV4) unmarshal(buf []byte, filter stack.IPHeaderFilter) (ta var st linux.XTSNATTarget buf = buf[:linux.SizeOfXTSNATTarget] - binary.Unmarshal(buf, hostarch.ByteOrder, &st) + st.UnmarshalUnsafe(buf) // Copy linux.XTSNATTarget to stack.SNATTarget. target := snatTarget{SNATTarget: stack.SNATTarget{ @@ -550,8 +546,7 @@ func (*snatTargetMakerV6) marshal(target target) []byte { nt.Range.MinProto = htons(st.Port) nt.Range.MaxProto = nt.Range.MinProto - ret := make([]byte, 0, nfNATMarshalledSize) - return binary.Marshal(ret, hostarch.ByteOrder, nt) + return marshal.Marshal(&nt) } func (*snatTargetMakerV6) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) { @@ -567,7 +562,7 @@ func (*snatTargetMakerV6) unmarshal(buf []byte, filter stack.IPHeaderFilter) (ta var natRange linux.NFNATRange buf = buf[linux.SizeOfXTEntryTarget:nfNATMarshalledSize] - binary.Unmarshal(buf, hostarch.ByteOrder, &natRange) + natRange.UnmarshalUnsafe(buf) // TODO(gvisor.dev/issue/5689): Support port or address ranges. 
if natRange.MinAddr != natRange.MaxAddr { @@ -631,8 +626,7 @@ func parseTarget(filter stack.IPHeaderFilter, optVal []byte, ipv6 bool) (stack.T return nil, syserr.ErrInvalidArgument } var target linux.XTEntryTarget - buf := optVal[:linux.SizeOfXTEntryTarget] - binary.Unmarshal(buf, hostarch.ByteOrder, &target) + target.UnmarshalUnsafe(optVal[:target.SizeBytes()]) return unmarshalTarget(target, filter, optVal) } diff --git a/pkg/sentry/socket/netfilter/tcp_matcher.go b/pkg/sentry/socket/netfilter/tcp_matcher.go index 69557f515..95bb9826e 100644 --- a/pkg/sentry/socket/netfilter/tcp_matcher.go +++ b/pkg/sentry/socket/netfilter/tcp_matcher.go @@ -18,8 +18,7 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" - "gvisor.dev/gvisor/pkg/hostarch" + "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" ) @@ -47,8 +46,7 @@ func (tcpMarshaler) marshal(mr matcher) []byte { DestinationPortStart: matcher.destinationPortStart, DestinationPortEnd: matcher.destinationPortEnd, } - buf := make([]byte, 0, linux.SizeOfXTTCP) - return marshalEntryMatch(matcherNameTCP, binary.Marshal(buf, hostarch.ByteOrder, xttcp)) + return marshalEntryMatch(matcherNameTCP, marshal.Marshal(&xttcp)) } // unmarshal implements matchMaker.unmarshal. @@ -60,7 +58,7 @@ func (tcpMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.Ma // For alignment reasons, the match's total size may // exceed what's strictly necessary to hold matchData. var matchData linux.XTTCP - binary.Unmarshal(buf[:linux.SizeOfXTTCP], hostarch.ByteOrder, &matchData) + matchData.UnmarshalUnsafe(buf[:matchData.SizeBytes()]) nflog("parseMatchers: parsed XTTCP: %+v", matchData) if matchData.Option != 0 || diff --git a/pkg/sentry/socket/netfilter/udp_matcher.go b/pkg/sentry/socket/netfilter/udp_matcher.go index 6a60e6bd6..fb8be27e6 100644 --- a/pkg/sentry/socket/netfilter/udp_matcher.go +++ b/pkg/sentry/socket/netfilter/udp_matcher.go @@ -18,8 +18,7 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" - "gvisor.dev/gvisor/pkg/hostarch" + "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" ) @@ -47,8 +46,7 @@ func (udpMarshaler) marshal(mr matcher) []byte { DestinationPortStart: matcher.destinationPortStart, DestinationPortEnd: matcher.destinationPortEnd, } - buf := make([]byte, 0, linux.SizeOfXTUDP) - return marshalEntryMatch(matcherNameUDP, binary.Marshal(buf, hostarch.ByteOrder, xtudp)) + return marshalEntryMatch(matcherNameUDP, marshal.Marshal(&xtudp)) } // unmarshal implements matchMaker.unmarshal. @@ -60,7 +58,7 @@ func (udpMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.Ma // For alignment reasons, the match's total size may exceed what's // strictly necessary to hold matchData. 
var matchData linux.XTUDP - binary.Unmarshal(buf[:linux.SizeOfXTUDP], hostarch.ByteOrder, &matchData) + matchData.UnmarshalUnsafe(buf[:matchData.SizeBytes()]) nflog("parseMatchers: parsed XTUDP: %+v", matchData) if matchData.InverseFlags != 0 { diff --git a/pkg/sentry/socket/netlink/message.go b/pkg/sentry/socket/netlink/message.go index ab0e68af7..80385bfdc 100644 --- a/pkg/sentry/socket/netlink/message.go +++ b/pkg/sentry/socket/netlink/message.go @@ -19,15 +19,17 @@ import ( "math" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" + "gvisor.dev/gvisor/pkg/bits" "gvisor.dev/gvisor/pkg/hostarch" + "gvisor.dev/gvisor/pkg/marshal" + "gvisor.dev/gvisor/pkg/marshal/primitive" ) // alignPad returns the length of padding required for alignment. // // Preconditions: align is a power of two. func alignPad(length int, align uint) int { - return binary.AlignUp(length, align) - length + return bits.AlignUp(length, align) - length } // Message contains a complete serialized netlink message. @@ -42,7 +44,7 @@ type Message struct { func NewMessage(hdr linux.NetlinkMessageHeader) *Message { return &Message{ hdr: hdr, - buf: binary.Marshal(nil, hostarch.ByteOrder, hdr), + buf: marshal.Marshal(&hdr), } } @@ -58,7 +60,7 @@ func ParseMessage(buf []byte) (msg *Message, rest []byte, ok bool) { return } var hdr linux.NetlinkMessageHeader - binary.Unmarshal(hdrBytes, hostarch.ByteOrder, &hdr) + hdr.UnmarshalUnsafe(hdrBytes) // Msg portion. totalMsgLen := int(hdr.Length) @@ -92,7 +94,7 @@ func (m *Message) Header() linux.NetlinkMessageHeader { // GetData unmarshals the payload message header from this netlink message, and // returns the attributes portion. -func (m *Message) GetData(msg interface{}) (AttrsView, bool) { +func (m *Message) GetData(msg marshal.Marshallable) (AttrsView, bool) { b := BytesView(m.buf) _, ok := b.Extract(linux.NetlinkMessageHeaderSize) @@ -100,12 +102,12 @@ func (m *Message) GetData(msg interface{}) (AttrsView, bool) { return nil, false } - size := int(binary.Size(msg)) + size := msg.SizeBytes() msgBytes, ok := b.Extract(size) if !ok { return nil, false } - binary.Unmarshal(msgBytes, hostarch.ByteOrder, msg) + msg.UnmarshalUnsafe(msgBytes) numPad := alignPad(linux.NetlinkMessageHeaderSize+size, linux.NLMSG_ALIGNTO) // Linux permits the last message not being aligned, just consume all of it. @@ -131,7 +133,7 @@ func (m *Message) Finalize() []byte { // Align the message. Note that the message length in the header (set // above) is the useful length of the message, not the total aligned // length. See net/netlink/af_netlink.c:__nlmsg_put. - aligned := binary.AlignUp(len(m.buf), linux.NLMSG_ALIGNTO) + aligned := bits.AlignUp(len(m.buf), linux.NLMSG_ALIGNTO) m.putZeros(aligned - len(m.buf)) return m.buf } @@ -145,45 +147,45 @@ func (m *Message) putZeros(n int) { } // Put serializes v into the message. -func (m *Message) Put(v interface{}) { - m.buf = binary.Marshal(m.buf, hostarch.ByteOrder, v) +func (m *Message) Put(v marshal.Marshallable) { + m.buf = append(m.buf, marshal.Marshal(v)...) } // PutAttr adds v to the message as a netlink attribute. // // Preconditions: The serialized attribute (linux.NetlinkAttrHeaderSize + -// binary.Size(v) fits in math.MaxUint16 bytes. -func (m *Message) PutAttr(atype uint16, v interface{}) { - l := linux.NetlinkAttrHeaderSize + int(binary.Size(v)) +// v.SizeBytes()) fits in math.MaxUint16 bytes. 
+func (m *Message) PutAttr(atype uint16, v marshal.Marshallable) { + l := linux.NetlinkAttrHeaderSize + v.SizeBytes() if l > math.MaxUint16 { panic(fmt.Sprintf("attribute too large: %d", l)) } - m.Put(linux.NetlinkAttrHeader{ + m.Put(&linux.NetlinkAttrHeader{ Type: atype, Length: uint16(l), }) m.Put(v) // Align the attribute. - aligned := binary.AlignUp(l, linux.NLA_ALIGNTO) + aligned := bits.AlignUp(l, linux.NLA_ALIGNTO) m.putZeros(aligned - l) } // PutAttrString adds s to the message as a netlink attribute. func (m *Message) PutAttrString(atype uint16, s string) { l := linux.NetlinkAttrHeaderSize + len(s) + 1 - m.Put(linux.NetlinkAttrHeader{ + m.Put(&linux.NetlinkAttrHeader{ Type: atype, Length: uint16(l), }) // String + NUL-termination. - m.Put([]byte(s)) + m.Put(primitive.AsByteSlice([]byte(s))) m.putZeros(1) // Align the attribute. - aligned := binary.AlignUp(l, linux.NLA_ALIGNTO) + aligned := bits.AlignUp(l, linux.NLA_ALIGNTO) m.putZeros(aligned - l) } @@ -251,7 +253,7 @@ func (v AttrsView) ParseFirst() (hdr linux.NetlinkAttrHeader, value []byte, rest if !ok { return } - binary.Unmarshal(hdrBytes, hostarch.ByteOrder, &hdr) + hdr.UnmarshalUnsafe(hdrBytes) value, ok = b.Extract(int(hdr.Length) - linux.NetlinkAttrHeaderSize) if !ok { diff --git a/pkg/sentry/socket/netlink/route/protocol.go b/pkg/sentry/socket/netlink/route/protocol.go index 5a2255db3..86f6419dc 100644 --- a/pkg/sentry/socket/netlink/route/protocol.go +++ b/pkg/sentry/socket/netlink/route/protocol.go @@ -21,6 +21,7 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/inet" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" @@ -167,7 +168,7 @@ func addNewLinkMessage(ms *netlink.MessageSet, idx int32, i inet.Interface) { Type: linux.RTM_NEWLINK, }) - m.Put(linux.InterfaceInfoMessage{ + m.Put(&linux.InterfaceInfoMessage{ Family: linux.AF_UNSPEC, Type: i.DeviceType, Index: idx, @@ -175,7 +176,7 @@ func addNewLinkMessage(ms *netlink.MessageSet, idx int32, i inet.Interface) { }) m.PutAttrString(linux.IFLA_IFNAME, i.Name) - m.PutAttr(linux.IFLA_MTU, i.MTU) + m.PutAttr(linux.IFLA_MTU, primitive.AllocateUint32(i.MTU)) mac := make([]byte, 6) brd := mac @@ -183,8 +184,8 @@ func addNewLinkMessage(ms *netlink.MessageSet, idx int32, i inet.Interface) { mac = i.Addr brd = bytes.Repeat([]byte{0xff}, len(i.Addr)) } - m.PutAttr(linux.IFLA_ADDRESS, mac) - m.PutAttr(linux.IFLA_BROADCAST, brd) + m.PutAttr(linux.IFLA_ADDRESS, primitive.AsByteSlice(mac)) + m.PutAttr(linux.IFLA_BROADCAST, primitive.AsByteSlice(brd)) // TODO(gvisor.dev/issue/578): There are many more attributes. } @@ -216,14 +217,15 @@ func (p *Protocol) dumpAddrs(ctx context.Context, msg *netlink.Message, ms *netl Type: linux.RTM_NEWADDR, }) - m.Put(linux.InterfaceAddrMessage{ + m.Put(&linux.InterfaceAddrMessage{ Family: a.Family, PrefixLen: a.PrefixLen, Index: uint32(id), }) - m.PutAttr(linux.IFA_LOCAL, []byte(a.Addr)) - m.PutAttr(linux.IFA_ADDRESS, []byte(a.Addr)) + addr := primitive.ByteSlice([]byte(a.Addr)) + m.PutAttr(linux.IFA_LOCAL, &addr) + m.PutAttr(linux.IFA_ADDRESS, &addr) // TODO(gvisor.dev/issue/578): There are many more attributes. 
} @@ -366,7 +368,7 @@ func (p *Protocol) dumpRoutes(ctx context.Context, msg *netlink.Message, ms *net Type: linux.RTM_NEWROUTE, }) - m.Put(linux.RouteMessage{ + m.Put(&linux.RouteMessage{ Family: rt.Family, DstLen: rt.DstLen, SrcLen: rt.SrcLen, @@ -382,18 +384,18 @@ func (p *Protocol) dumpRoutes(ctx context.Context, msg *netlink.Message, ms *net Flags: rt.Flags, }) - m.PutAttr(254, []byte{123}) + m.PutAttr(254, primitive.AsByteSlice([]byte{123})) if rt.DstLen > 0 { - m.PutAttr(linux.RTA_DST, rt.DstAddr) + m.PutAttr(linux.RTA_DST, primitive.AsByteSlice(rt.DstAddr)) } if rt.SrcLen > 0 { - m.PutAttr(linux.RTA_SRC, rt.SrcAddr) + m.PutAttr(linux.RTA_SRC, primitive.AsByteSlice(rt.SrcAddr)) } if rt.OutputInterface != 0 { - m.PutAttr(linux.RTA_OIF, rt.OutputInterface) + m.PutAttr(linux.RTA_OIF, primitive.AllocateInt32(rt.OutputInterface)) } if len(rt.GatewayAddr) > 0 { - m.PutAttr(linux.RTA_GATEWAY, rt.GatewayAddr) + m.PutAttr(linux.RTA_GATEWAY, primitive.AsByteSlice(rt.GatewayAddr)) } // TODO(gvisor.dev/issue/578): There are many more attributes. @@ -503,7 +505,7 @@ func (p *Protocol) ProcessMessage(ctx context.Context, msg *netlink.Message, ms hdr := msg.Header() // All messages start with a 1 byte protocol family. - var family uint8 + var family primitive.Uint8 if _, ok := msg.GetData(&family); !ok { // Linux ignores messages missing the protocol family. See // net/core/rtnetlink.c:rtnetlink_rcv_msg. diff --git a/pkg/sentry/socket/netlink/socket.go b/pkg/sentry/socket/netlink/socket.go index 30c297149..d75a2879f 100644 --- a/pkg/sentry/socket/netlink/socket.go +++ b/pkg/sentry/socket/netlink/socket.go @@ -20,7 +20,6 @@ import ( "math" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/binary" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal" @@ -223,7 +222,7 @@ func ExtractSockAddr(b []byte) (*linux.SockAddrNetlink, *syserr.Error) { } var sa linux.SockAddrNetlink - binary.Unmarshal(b[:linux.SockAddrNetlinkSize], hostarch.ByteOrder, &sa) + sa.UnmarshalUnsafe(b[:sa.SizeBytes()]) if sa.Family != linux.AF_NETLINK { return nil, syserr.ErrInvalidArgument @@ -338,16 +337,14 @@ func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, outPtr } s.mu.Lock() defer s.mu.Unlock() - sendBufferSizeP := primitive.Int32(s.sendBufferSize) - return &sendBufferSizeP, nil + return primitive.AllocateInt32(int32(s.sendBufferSize)), nil case linux.SO_RCVBUF: if outLen < sizeOfInt32 { return nil, syserr.ErrInvalidArgument } // We don't have limit on receiving size. - recvBufferSizeP := primitive.Int32(math.MaxInt32) - return &recvBufferSizeP, nil + return primitive.AllocateInt32(math.MaxInt32), nil case linux.SO_PASSCRED: if outLen < sizeOfInt32 { @@ -484,7 +481,7 @@ func (s *socketOpsCommon) GetSockName(t *kernel.Task) (linux.SockAddr, uint32, * Family: linux.AF_NETLINK, PortID: uint32(s.portID), } - return sa, uint32(binary.Size(sa)), nil + return sa, uint32(sa.SizeBytes()), nil } // GetPeerName implements socket.Socket.GetPeerName. @@ -495,7 +492,7 @@ func (s *socketOpsCommon) GetPeerName(t *kernel.Task) (linux.SockAddr, uint32, * // must be the kernel. PortID: 0, } - return sa, uint32(binary.Size(sa)), nil + return sa, uint32(sa.SizeBytes()), nil } // RecvMsg implements socket.Socket.RecvMsg. 
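The var family primitive.Uint8 change above is the pattern this CL applies throughout: GetData and Put now take a marshal.Marshallable, so bare built-ins are wrapped in primitive newtypes that implement the interface. A cut-down standalone sketch of the idea (local stand-in types, not the real marshal package):

package main

import "fmt"

// unmarshaler is a stand-in for the relevant part of marshal.Marshallable.
type unmarshaler interface {
	SizeBytes() int
	UnmarshalBytes(src []byte)
}

// uint8Prim mirrors primitive.Uint8: a newtype that lets a built-in
// integer pass through interfaces expecting an unmarshaler.
type uint8Prim uint8

func (u *uint8Prim) SizeBytes() int { return 1 }

func (u *uint8Prim) UnmarshalBytes(src []byte) { *u = uint8Prim(src[0]) }

// getData mimics Message.GetData: decode a fixed-size payload from the
// front of a buffer and return the remainder.
func getData(buf []byte, msg unmarshaler) ([]byte, bool) {
	size := msg.SizeBytes()
	if len(buf) < size {
		return nil, false
	}
	msg.UnmarshalBytes(buf[:size])
	return buf[size:], true
}

func main() {
	var family uint8Prim
	rest, ok := getData([]byte{2 /* AF_INET */, 0xaa, 0xbb}, &family)
	fmt.Println(uint8(family), rest, ok) // 2 [170 187] true
}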
@@ -504,7 +501,7 @@ func (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags
 		Family: linux.AF_NETLINK,
 		PortID: 0,
 	}
-	fromLen := uint32(binary.Size(from))
+	fromLen := uint32(from.SizeBytes())

 	trunc := flags&linux.MSG_TRUNC != 0
@@ -640,7 +637,7 @@ func (s *socketOpsCommon) sendResponse(ctx context.Context, ms *MessageSet) *sys
 	})

 	// Add the dump_done_errno payload.
-	m.Put(int64(0))
+	m.Put(primitive.AllocateInt64(0))

 	_, notify, err := s.connection.Send(ctx, [][]byte{m.Finalize()}, cms, tcpip.FullAddress{})
 	if err != nil && err != syserr.ErrWouldBlock {
@@ -658,7 +655,7 @@ func dumpErrorMesage(hdr linux.NetlinkMessageHeader, ms *MessageSet, err *syserr
 	m := ms.AddMessage(linux.NetlinkMessageHeader{
 		Type: linux.NLMSG_ERROR,
 	})
-	m.Put(linux.NetlinkErrorMessage{
+	m.Put(&linux.NetlinkErrorMessage{
 		Error:  int32(-err.ToLinux().Number()),
 		Header: hdr,
 	})
@@ -668,7 +665,7 @@ func dumpAckMesage(hdr linux.NetlinkMessageHeader, ms *MessageSet) {
 	m := ms.AddMessage(linux.NetlinkMessageHeader{
 		Type: linux.NLMSG_ERROR,
 	})
-	m.Put(linux.NetlinkErrorMessage{
+	m.Put(&linux.NetlinkErrorMessage{
 		Error:  0,
 		Header: hdr,
 	})
diff --git a/pkg/sentry/socket/netstack/netstack.go b/pkg/sentry/socket/netstack/netstack.go
index 312f5f85a..264f8d926 100644
--- a/pkg/sentry/socket/netstack/netstack.go
+++ b/pkg/sentry/socket/netstack/netstack.go
@@ -26,6 +26,7 @@ package netstack
 import (
 	"bytes"
+	"encoding/binary"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -35,7 +36,6 @@ import (
 	"golang.org/x/sys/unix"

 	"gvisor.dev/gvisor/pkg/abi/linux"
-	"gvisor.dev/gvisor/pkg/binary"
 	"gvisor.dev/gvisor/pkg/context"
 	"gvisor.dev/gvisor/pkg/hostarch"
 	"gvisor.dev/gvisor/pkg/log"
@@ -375,9 +375,9 @@ func New(t *kernel.Task, family int, skType linux.SockType, protocol int, queue
 	}), nil
 }

-var sockAddrInetSize = int(binary.Size(linux.SockAddrInet{}))
-var sockAddrInet6Size = int(binary.Size(linux.SockAddrInet6{}))
-var sockAddrLinkSize = int(binary.Size(linux.SockAddrLink{}))
+var sockAddrInetSize = (*linux.SockAddrInet)(nil).SizeBytes()
+var sockAddrInet6Size = (*linux.SockAddrInet6)(nil).SizeBytes()
+var sockAddrLinkSize = (*linux.SockAddrLink)(nil).SizeBytes()

 // bytesToIPAddress converts an IPv4 or IPv6 address from the user to the
 // netstack representation taking any addresses into account.
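The sockaddr sizes are now computed by calling the generated SizeBytes on a typed nil pointer, which is safe because the generated method never dereferences its receiver. A self-contained sketch with a hypothetical stand-in type:

package main

import "fmt"

// sockAddrInet is an illustrative stand-in for a go-marshal generated type;
// SizeBytes depends only on the static layout, never on receiver data.
type sockAddrInet struct {
	Family uint16
	Port   uint16
	Addr   [4]byte
	Zero   [8]uint8
}

func (*sockAddrInet) SizeBytes() int {
	return 2 + 2 + 4 + 8
}

func main() {
	size := (*sockAddrInet)(nil).SizeBytes() // no dereference, no panic
	fmt.Println("sockaddr_in size:", size)   // 16
}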
@@ -613,7 +613,7 @@ func (s *socketOpsCommon) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error {
 		if len(sockaddr) < sockAddrLinkSize {
 			return syserr.ErrInvalidArgument
 		}
-		binary.Unmarshal(sockaddr[:sockAddrLinkSize], hostarch.ByteOrder, &a)
+		a.UnmarshalBytes(sockaddr[:sockAddrLinkSize])

 		if a.Protocol != uint16(s.protocol) {
 			return syserr.ErrInvalidArgument
@@ -1312,7 +1312,7 @@ func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name
 		return &v, nil

 	case linux.IP6T_ORIGINAL_DST:
-		if outLen < int(binary.Size(linux.SockAddrInet6{})) {
+		if outLen < sockAddrInet6Size {
 			return nil, syserr.ErrInvalidArgument
 		}
@@ -1509,7 +1509,7 @@ func getSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name in
 		return &v, nil

 	case linux.SO_ORIGINAL_DST:
-		if outLen < int(binary.Size(linux.SockAddrInet{})) {
+		if outLen < sockAddrInetSize {
 			return nil, syserr.ErrInvalidArgument
 		}
@@ -1742,7 +1742,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
 		}

 		var v linux.Timeval
-		binary.Unmarshal(optVal[:linux.SizeOfTimeval], hostarch.ByteOrder, &v)
+		v.UnmarshalBytes(optVal[:linux.SizeOfTimeval])
 		if v.Usec < 0 || v.Usec >= int64(time.Second/time.Microsecond) {
 			return syserr.ErrDomain
 		}
@@ -1755,7 +1755,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
 		}

 		var v linux.Timeval
-		binary.Unmarshal(optVal[:linux.SizeOfTimeval], hostarch.ByteOrder, &v)
+		v.UnmarshalBytes(optVal[:linux.SizeOfTimeval])
 		if v.Usec < 0 || v.Usec >= int64(time.Second/time.Microsecond) {
 			return syserr.ErrDomain
 		}
@@ -1791,7 +1791,11 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
 		}

 		var v linux.Linger
-		binary.Unmarshal(optVal[:linux.SizeOfLinger], hostarch.ByteOrder, &v)
+		v.UnmarshalBytes(optVal[:linux.SizeOfLinger])
+
+		if v != (linux.Linger{}) {
+			socket.SetSockOptEmitUnimplementedEvent(t, name)
+		}

 		ep.SocketOptions().SetLinger(tcpip.LingerOption{
 			Enabled: v.OnOff != 0,
@@ -2090,9 +2094,9 @@ func setSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name
 }

 var (
-	inetMulticastRequestSize        = int(binary.Size(linux.InetMulticastRequest{}))
-	inetMulticastRequestWithNICSize = int(binary.Size(linux.InetMulticastRequestWithNIC{}))
-	inet6MulticastRequestSize       = int(binary.Size(linux.Inet6MulticastRequest{}))
+	inetMulticastRequestSize        = (*linux.InetMulticastRequest)(nil).SizeBytes()
+	inetMulticastRequestWithNICSize = (*linux.InetMulticastRequestWithNIC)(nil).SizeBytes()
+	inet6MulticastRequestSize       = (*linux.Inet6MulticastRequest)(nil).SizeBytes()
 )

 // copyInMulticastRequest copies in a variable-size multicast request.
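The SO_SNDTIMEO/SO_RCVTIMEO hunks keep the same validation after switching to UnmarshalBytes: microsecond values outside [0, 1e6) are rejected with EDOM. A standalone sketch of that decode-then-validate step (gVisor decodes in host byte order; little-endian is assumed here, and the type is a stand-in for linux.Timeval):

package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

type timeval struct {
	Sec  int64
	Usec int64
}

// unmarshalBytes decodes {sec, usec} from a 16-byte buffer.
func (tv *timeval) unmarshalBytes(b []byte) {
	tv.Sec = int64(binary.LittleEndian.Uint64(b[0:8]))
	tv.Usec = int64(binary.LittleEndian.Uint64(b[8:16]))
}

func main() {
	buf := make([]byte, 16)
	binary.LittleEndian.PutUint64(buf[8:], 1_000_000) // usec == 1s: out of range
	var tv timeval
	tv.unmarshalBytes(buf)
	if tv.Usec < 0 || tv.Usec >= int64(time.Second/time.Microsecond) {
		fmt.Println("EDOM: usec out of range")
	}
}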
@@ -2117,12 +2121,12 @@ func copyInMulticastRequest(optVal []byte, allowAddr bool) (linux.InetMulticastR
 	if len(optVal) >= inetMulticastRequestWithNICSize {
 		var req linux.InetMulticastRequestWithNIC
-		binary.Unmarshal(optVal[:inetMulticastRequestWithNICSize], hostarch.ByteOrder, &req)
+		req.UnmarshalUnsafe(optVal[:inetMulticastRequestWithNICSize])
 		return req, nil
 	}

 	var req linux.InetMulticastRequestWithNIC
-	binary.Unmarshal(optVal[:inetMulticastRequestSize], hostarch.ByteOrder, &req.InetMulticastRequest)
+	req.InetMulticastRequest.UnmarshalUnsafe(optVal[:inetMulticastRequestSize])
 	return req, nil
 }

@@ -2132,7 +2136,7 @@ func copyInMulticastV6Request(optVal []byte) (linux.Inet6MulticastRequest, *syse
 	}

 	var req linux.Inet6MulticastRequest
-	binary.Unmarshal(optVal[:inet6MulticastRequestSize], hostarch.ByteOrder, &req)
+	req.UnmarshalUnsafe(optVal[:inet6MulticastRequestSize])
 	return req, nil
 }

@@ -3101,8 +3105,8 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe
 			continue
 		}
 		// Populate ifr.ifr_netmask (type sockaddr).
-		hostarch.ByteOrder.PutUint16(ifr.Data[0:2], uint16(linux.AF_INET))
-		hostarch.ByteOrder.PutUint16(ifr.Data[2:4], 0)
+		hostarch.ByteOrder.PutUint16(ifr.Data[0:], uint16(linux.AF_INET))
+		hostarch.ByteOrder.PutUint16(ifr.Data[2:], 0)
 		var mask uint32 = 0xffffffff << (32 - addr.PrefixLen)
 		// Netmask is expected to be returned as a big endian
 		// value.
diff --git a/pkg/sentry/socket/socket.go b/pkg/sentry/socket/socket.go
index 4c3d48096..9e56487a6 100644
--- a/pkg/sentry/socket/socket.go
+++ b/pkg/sentry/socket/socket.go
@@ -24,7 +24,6 @@ import (
 	"golang.org/x/sys/unix"

 	"gvisor.dev/gvisor/pkg/abi/linux"
-	"gvisor.dev/gvisor/pkg/binary"
 	"gvisor.dev/gvisor/pkg/context"
 	"gvisor.dev/gvisor/pkg/hostarch"
 	"gvisor.dev/gvisor/pkg/marshal"
@@ -572,19 +571,19 @@ func UnmarshalSockAddr(family int, data []byte) linux.SockAddr {
 	switch family {
 	case unix.AF_INET:
 		var addr linux.SockAddrInet
-		binary.Unmarshal(data[:unix.SizeofSockaddrInet4], hostarch.ByteOrder, &addr)
+		addr.UnmarshalUnsafe(data[:addr.SizeBytes()])
 		return &addr
 	case unix.AF_INET6:
 		var addr linux.SockAddrInet6
-		binary.Unmarshal(data[:unix.SizeofSockaddrInet6], hostarch.ByteOrder, &addr)
+		addr.UnmarshalUnsafe(data[:addr.SizeBytes()])
 		return &addr
 	case unix.AF_UNIX:
 		var addr linux.SockAddrUnix
-		binary.Unmarshal(data[:unix.SizeofSockaddrUnix], hostarch.ByteOrder, &addr)
+		addr.UnmarshalUnsafe(data[:addr.SizeBytes()])
 		return &addr
 	case unix.AF_NETLINK:
 		var addr linux.SockAddrNetlink
-		binary.Unmarshal(data[:unix.SizeofSockaddrNetlink], hostarch.ByteOrder, &addr)
+		addr.UnmarshalUnsafe(data[:addr.SizeBytes()])
 		return &addr
 	default:
 		panic(fmt.Sprintf("Unsupported socket family %v", family))
@@ -716,7 +715,7 @@ func AddressAndFamily(addr []byte) (tcpip.FullAddress, uint16, *syserr.Error) {
 		if len(addr) < sockAddrInetSize {
 			return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument
 		}
-		binary.Unmarshal(addr[:sockAddrInetSize], hostarch.ByteOrder, &a)
+		a.UnmarshalUnsafe(addr[:sockAddrInetSize])

 		out := tcpip.FullAddress{
 			Addr: BytesToIPAddress(a.Addr[:]),
@@ -729,7 +728,7 @@ func AddressAndFamily(addr []byte) (tcpip.FullAddress, uint16, *syserr.Error) {
 		if len(addr) < sockAddrInet6Size {
 			return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument
 		}
-		binary.Unmarshal(addr[:sockAddrInet6Size], hostarch.ByteOrder, &a)
+		a.UnmarshalUnsafe(addr[:sockAddrInet6Size])

 		out := tcpip.FullAddress{
 			Addr: BytesToIPAddress(a.Addr[:]),
@@ -745,7 +744,7 @@ func AddressAndFamily(addr []byte) (tcpip.FullAddress, uint16, *syserr.Error) {
 		if len(addr) < sockAddrLinkSize {
 			return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument
 		}
-		binary.Unmarshal(addr[:sockAddrLinkSize], hostarch.ByteOrder, &a)
+		a.UnmarshalUnsafe(addr[:sockAddrLinkSize])

 		if a.Family != linux.AF_PACKET || a.HardwareAddrLen != header.EthernetAddressSize {
 			return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument
 		}
diff --git a/pkg/sentry/strace/socket.go b/pkg/sentry/strace/socket.go
index e5b7f9b96..f4aab25b0 100644
--- a/pkg/sentry/strace/socket.go
+++ b/pkg/sentry/strace/socket.go
@@ -20,14 +20,13 @@ import (
 	"gvisor.dev/gvisor/pkg/abi"
 	"gvisor.dev/gvisor/pkg/abi/linux"
-	"gvisor.dev/gvisor/pkg/binary"
+	"gvisor.dev/gvisor/pkg/bits"
+	"gvisor.dev/gvisor/pkg/hostarch"
 	"gvisor.dev/gvisor/pkg/marshal/primitive"
 	"gvisor.dev/gvisor/pkg/sentry/kernel"
 	"gvisor.dev/gvisor/pkg/sentry/socket"
 	"gvisor.dev/gvisor/pkg/sentry/socket/netlink"
 	slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
-
-	"gvisor.dev/gvisor/pkg/hostarch"
 )

 // SocketFamily are the possible socket(2) families.
@@ -162,6 +161,15 @@ var controlMessageType = map[int32]string{
 	linux.SO_TIMESTAMP: "SO_TIMESTAMP",
 }

+func unmarshalControlMessageRights(src []byte) linux.ControlMessageRights {
+	count := len(src) / linux.SizeOfControlMessageRight
+	cmr := make(linux.ControlMessageRights, count)
+	for i := range cmr {
+		cmr[i] = int32(hostarch.ByteOrder.Uint32(src[i*linux.SizeOfControlMessageRight:]))
+	}
+	return cmr
+}
+
 func cmsghdr(t *kernel.Task, addr hostarch.Addr, length uint64, maxBytes uint64) string {
 	if length > maxBytes {
 		return fmt.Sprintf("%#x (error decoding control: invalid length (%d))", addr, length)
@@ -181,7 +189,7 @@ func cmsghdr(t *kernel.Task, addr hostarch.Addr, length uint64, maxBytes uint64)
 		}

 		var h linux.ControlMessageHeader
-		binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageHeader], hostarch.ByteOrder, &h)
+		h.UnmarshalUnsafe(buf[i : i+linux.SizeOfControlMessageHeader])

 		var skipData bool
 		level := "SOL_SOCKET"
@@ -221,18 +229,14 @@ func cmsghdr(t *kernel.Task, addr hostarch.Addr, length uint64, maxBytes uint64)
 		if skipData {
 			strs = append(strs, fmt.Sprintf("{level=%s, type=%s, length=%d}", level, typ, h.Length))
-			i += binary.AlignUp(length, width)
+			i += bits.AlignUp(length, width)
 			continue
 		}

 		switch h.Type {
 		case linux.SCM_RIGHTS:
-			rightsSize := binary.AlignDown(length, linux.SizeOfControlMessageRight)
-
-			numRights := rightsSize / linux.SizeOfControlMessageRight
-			fds := make(linux.ControlMessageRights, numRights)
-			binary.Unmarshal(buf[i:i+rightsSize], hostarch.ByteOrder, &fds)
-
+			rightsSize := bits.AlignDown(length, linux.SizeOfControlMessageRight)
+			fds := unmarshalControlMessageRights(buf[i : i+rightsSize])
 			rights := make([]string, 0, len(fds))
 			for _, fd := range fds {
 				rights = append(rights, fmt.Sprint(fd))
@@ -258,7 +262,7 @@ func cmsghdr(t *kernel.Task, addr hostarch.Addr, length uint64, maxBytes uint64)
 			}

 			var creds linux.ControlMessageCredentials
-			binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageCredentials], hostarch.ByteOrder, &creds)
+			creds.UnmarshalUnsafe(buf[i : i+linux.SizeOfControlMessageCredentials])

 			strs = append(strs, fmt.Sprintf(
 				"{level=%s, type=%s, length=%d, pid: %d, uid: %d, gid: %d}",
@@ -282,7 +286,7 @@ func cmsghdr(t *kernel.Task, addr hostarch.Addr, length uint64, maxBytes uint64)
 			}

 			var tv linux.Timeval
-			binary.Unmarshal(buf[i:i+linux.SizeOfTimeval], hostarch.ByteOrder, &tv)
+			tv.UnmarshalUnsafe(buf[i : i+linux.SizeOfTimeval])

 			strs = append(strs, fmt.Sprintf(
 				"{level=%s, type=%s, length=%d, Sec: %d, Usec: %d}",
@@ -296,7 +300,7 @@ func cmsghdr(t *kernel.Task, addr hostarch.Addr, length uint64, maxBytes uint64)
 		default:
 			panic("unreachable")
 		}
-		i += binary.AlignUp(length, width)
+		i += bits.AlignUp(length, width)
 	}

 	return fmt.Sprintf("%#x %s", addr, strings.Join(strs, ", "))
diff --git a/pkg/sentry/syscalls/linux/sys_socket.go b/pkg/sentry/syscalls/linux/sys_socket.go
index 5e9e940df..e07917613 100644
--- a/pkg/sentry/syscalls/linux/sys_socket.go
+++ b/pkg/sentry/syscalls/linux/sys_socket.go
@@ -463,8 +463,7 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
 		return 0, nil, e.ToError()
 	}

-	vLen := int32(v.SizeBytes())
-	if _, err := primitive.CopyInt32Out(t, optLenAddr, vLen); err != nil {
+	if _, err := primitive.CopyInt32Out(t, optLenAddr, int32(v.SizeBytes())); err != nil {
 		return 0, nil, err
 	}
diff --git a/pkg/sentry/syscalls/linux/vfs2/socket.go b/pkg/sentry/syscalls/linux/vfs2/socket.go
index 6edde0ed1..69f69e3af 100644
--- a/pkg/sentry/syscalls/linux/vfs2/socket.go
+++ b/pkg/sentry/syscalls/linux/vfs2/socket.go
@@ -467,8 +467,7 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
 		return 0, nil, e.ToError()
 	}

-	vLen := int32(v.SizeBytes())
-	if _, err := primitive.CopyInt32Out(t, optLenAddr, vLen); err != nil {
+	if _, err := primitive.CopyInt32Out(t, optLenAddr, int32(v.SizeBytes())); err != nil {
 		return 0, nil, err
 	}
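The new unmarshalControlMessageRights helper in strace replaces a reflection-based binary.Unmarshal with an explicit per-FD decode. A runnable sketch of the same loop (constants and names are illustrative, and little-endian stands in for the host byte order):

package main

import (
	"encoding/binary"
	"fmt"
)

const sizeOfControlMessageRight = 4 // each SCM_RIGHTS entry is a 4-byte FD

func unmarshalRights(src []byte) []int32 {
	fds := make([]int32, len(src)/sizeOfControlMessageRight)
	for i := range fds {
		fds[i] = int32(binary.LittleEndian.Uint32(src[i*sizeOfControlMessageRight:]))
	}
	return fds
}

func main() {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint32(buf[0:], 3)
	binary.LittleEndian.PutUint32(buf[4:], 7)
	fmt.Println(unmarshalRights(buf)) // [3 7]
}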
diff --git a/pkg/tcpip/link/fdbased/endpoint.go b/pkg/tcpip/link/fdbased/endpoint.go
index feb79fe0e..6fe2dd33c 100644
--- a/pkg/tcpip/link/fdbased/endpoint.go
+++ b/pkg/tcpip/link/fdbased/endpoint.go
@@ -45,7 +45,6 @@ import (
 	"sync/atomic"

 	"golang.org/x/sys/unix"
-	"gvisor.dev/gvisor/pkg/binary"
 	"gvisor.dev/gvisor/pkg/iovec"
 	"gvisor.dev/gvisor/pkg/sync"
 	"gvisor.dev/gvisor/pkg/tcpip"
@@ -403,6 +402,35 @@ type virtioNetHdr struct {
 	csumOffset uint16
 }

+// marshal serializes h to a newly-allocated byte slice, in little-endian byte
+// order.
+//
+// Note: Virtio v1.0 onwards specifies little-endian as the byte ordering used
+// for general serialization. This makes it difficult to use go-marshal for
+// virtio types, as go-marshal implicitly uses the native byte ordering.
+func (h *virtioNetHdr) marshal() []byte {
+	buf := [virtioNetHdrSize]byte{
+		0: byte(h.flags),
+		1: byte(h.gsoType),
+
+		// Manually lay out the fields in little-endian byte order. Little endian
+		// => least significant bit goes to the lower address.
+
+		2: byte(h.hdrLen),
+		3: byte(h.hdrLen >> 8),
+
+		4: byte(h.gsoSize),
+		5: byte(h.gsoSize >> 8),
+
+		6: byte(h.csumStart),
+		7: byte(h.csumStart >> 8),
+
+		8: byte(h.csumOffset),
+		9: byte(h.csumOffset >> 8),
+	}
+	return buf[:]
+}
+
 // These constants are declared in linux/virtio_net.h.
 const (
 	_VIRTIO_NET_HDR_F_NEEDS_CSUM = 1
@@ -463,7 +491,7 @@ func (e *endpoint) WritePacket(r stack.RouteInfo, protocol tcpip.NetworkProtocol
 			}
 		}

-		vnetHdrBuf := binary.Marshal(make([]byte, 0, virtioNetHdrSize), binary.LittleEndian, vnetHdr)
+		vnetHdrBuf := vnetHdr.marshal()
 		builder.Add(vnetHdrBuf)
 	}
@@ -503,7 +531,7 @@ func (e *endpoint) sendBatch(batchFD int, batch []*stack.PacketBuffer) (int, tcp
 				vnetHdr.gsoSize = pkt.GSOOptions.MSS
 			}
 		}
-		vnetHdrBuf = binary.Marshal(make([]byte, 0, virtioNetHdrSize), binary.LittleEndian, vnetHdr)
+		vnetHdrBuf = vnetHdr.marshal()
 	}

 	var builder iovec.Builder
diff --git a/pkg/tcpip/tcpip.go b/pkg/tcpip/tcpip.go
index 0ba71b62e..2e426ffe7 100644
--- a/pkg/tcpip/tcpip.go
+++ b/pkg/tcpip/tcpip.go
@@ -1107,6 +1107,7 @@ const (
 // LingerOption is used by SetSockOpt/GetSockOpt to set/get the
 // duration for which a socket lingers before returning from Close.
 //
+// +marshal
 // +stateify savable
 type LingerOption struct {
 	Enabled bool
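The hand-rolled virtioNetHdr.marshal above exists because virtio mandates little-endian wire order while go-marshal emits native order. For comparison, a standalone sketch producing the same ten bytes with encoding/binary (the struct here is a stand-in, not the in-tree type):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

type virtioNetHdr struct {
	Flags      uint8
	GSOType    uint8
	HdrLen     uint16
	GSOSize    uint16
	CSumStart  uint16
	CSumOffset uint16
}

func main() {
	h := virtioNetHdr{HdrLen: 0x1234}
	var buf bytes.Buffer
	// encoding/binary encodes fixed-size structs field by field in the
	// requested byte order, regardless of the host's byte order.
	if err := binary.Write(&buf, binary.LittleEndian, h); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // 00 00 34 12 00 00 00 00 00 00
}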