path: root/pkg/abi/linux
author     gVisor bot <gvisor-bot@google.com>  2020-10-02 19:27:31 +0000
committer  gVisor bot <gvisor-bot@google.com>  2020-10-02 19:27:31 +0000
commit     a0b63cf9de4a2ccac12b5697e7e0f749656da396 (patch)
tree       ec187a7c2bbb4b2ea1bb2de67e0ed6d26ed62722 /pkg/abi/linux
parent     a0712b6e67d589768f80acbef2776b06962ade27 (diff)
parent     4f462b0ed9912fa19b3a3eab6d2f08a98d364574 (diff)
Merge release-20200921.0-102-g4f462b0ed (automated)
Diffstat (limited to 'pkg/abi/linux')
-rw-r--r--  pkg/abi/linux/linux_abi_autogen_unsafe.go  248
-rw-r--r--  pkg/abi/linux/seccomp.go                     19
-rw-r--r--  pkg/abi/linux/signalfd.go                     4
3 files changed, 270 insertions, 1 deletion
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index b6f87d678..b6b61a128 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -66,6 +66,7 @@ var _ marshal.Marshallable = (*PollFD)(nil)
var _ marshal.Marshallable = (*RSeqCriticalSection)(nil)
var _ marshal.Marshallable = (*RobustListHead)(nil)
var _ marshal.Marshallable = (*Rusage)(nil)
+var _ marshal.Marshallable = (*SeccompData)(nil)
var _ marshal.Marshallable = (*Sembuf)(nil)
var _ marshal.Marshallable = (*SemidDS)(nil)
var _ marshal.Marshallable = (*ShmInfo)(nil)
@@ -73,6 +74,7 @@ var _ marshal.Marshallable = (*ShmParams)(nil)
var _ marshal.Marshallable = (*ShmidDS)(nil)
var _ marshal.Marshallable = (*Sigevent)(nil)
var _ marshal.Marshallable = (*SignalSet)(nil)
+var _ marshal.Marshallable = (*SignalfdSiginfo)(nil)
var _ marshal.Marshallable = (*SockAddrInet)(nil)
var _ marshal.Marshallable = (*SockAddrInet6)(nil)
var _ marshal.Marshallable = (*SockAddrLink)(nil)
@@ -6592,6 +6594,112 @@ func (r *Rusage) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SeccompData) SizeBytes() int {
+ return 16 +
+ 8*6
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SeccompData) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Nr))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Arch))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.InstructionPointer))
+ dst = dst[8:]
+ for idx := 0; idx < 6; idx++ {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Args[idx]))
+ dst = dst[8:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SeccompData) UnmarshalBytes(src []byte) {
+ s.Nr = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Arch = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.InstructionPointer = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 6; idx++ {
+ s.Args[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SeccompData) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SeccompData) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SeccompData) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SeccompData) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SeccompData) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SeccompData) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SeccompData) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *SemidDS) SizeBytes() int {
return 24 +
(*IPCPerm)(nil).SizeBytes() +
@@ -7504,6 +7612,146 @@ func (s *Sigevent) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalfdSiginfo) SizeBytes() int {
+ return 82 +
+ 1*48
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalfdSiginfo) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.PID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.FD))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.TID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Band))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Overrun))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.TrapNo))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Status))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Int))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Ptr))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.UTime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.STime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Addr))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.AddrLSB))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint8)*48] ~= [48]uint8{0}
+ dst = dst[1*(48):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalfdSiginfo) UnmarshalBytes(src []byte) {
+ s.Signo = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Errno = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Code = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.PID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.FD = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.TID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Band = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Overrun = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.TrapNo = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Status = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Int = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Ptr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.UTime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.STime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Addr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.AddrLSB = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: ~ copy([48]uint8(s._), src[:sizeof(uint8)*48])
+ src = src[1*(48):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalfdSiginfo) Packed() bool {
+ return false
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalfdSiginfo) MarshalUnsafe(dst []byte) {
+ // Type SignalfdSiginfo doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalfdSiginfo) UnmarshalUnsafe(src []byte) {
+ // Type SignalfdSiginfo doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalfdSiginfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Type SignalfdSiginfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalfdSiginfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalfdSiginfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Type SignalfdSiginfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalfdSiginfo) WriteTo(writer io.Writer) (int64, error) {
+ // Type SignalfdSiginfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
func (i *InetAddr) SizeBytes() int {
return 1 * 4
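
The generated fast paths for SeccompData above (MarshalUnsafe, CopyOutN, CopyIn, WriteTo) share one trick: because the type is packed, its in-memory representation is viewed directly as a []byte instead of being marshalled field by field, and runtime.KeepAlive pins the struct until the copy has used that view. A minimal standalone sketch of the same technique, using a local stand-in type and unsafe.Slice in place of the reflect.SliceHeader/gohacks.Noescape construction from the generated code:

package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

// seccompData is a local stand-in with the same layout as linux.SeccompData.
type seccompData struct {
	Nr                 int32
	Arch               uint32
	InstructionPointer uint64
	Args               [6]uint64
}

// asBytes returns a []byte view over s's memory without copying it.
func asBytes(s *seccompData) []byte {
	return unsafe.Slice((*byte)(unsafe.Pointer(s)), unsafe.Sizeof(*s))
}

func main() {
	var d seccompData
	buf := asBytes(&d)
	fmt.Println(len(buf)) // 64, matching the generated SizeBytes (16 + 8*6)
	runtime.KeepAlive(&d) // keep d live past the last use of buf, as the generated code does
}
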
diff --git a/pkg/abi/linux/seccomp.go b/pkg/abi/linux/seccomp.go
index b07cafe12..5be3f10f9 100644
--- a/pkg/abi/linux/seccomp.go
+++ b/pkg/abi/linux/seccomp.go
@@ -83,3 +83,22 @@ type SockFprog struct {
pad [6]byte
Filter *BPFInstruction
}
+
+// SeccompData is equivalent to struct seccomp_data, which contains the data
+// passed to seccomp-bpf filters.
+//
+// +marshal
+type SeccompData struct {
+ // Nr is the system call number.
+ Nr int32
+
+ // Arch is an AUDIT_ARCH_* value indicating the system call convention.
+ Arch uint32
+
+ // InstructionPointer is the value of the instruction pointer at the time
+ // of the system call.
+ InstructionPointer uint64
+
+ // Args contains the first 6 system call arguments.
+ Args [6]uint64
+}
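
For reference, the marshalled layout of this struct (and the layout struct seccomp_data presents to seccomp-bpf filters) puts Nr at offset 0, Arch at 4, InstructionPointer at 8 and Args[i] at 16 + 8*i, for 64 bytes in total. A small illustrative sketch with encoding/binary and a local copy of the type, assuming little-endian byte order for usermem.ByteOrder (as on x86_64 and ARM64):

package main

import (
	"encoding/binary"
	"fmt"
)

// seccompData is a local copy of linux.SeccompData, for illustration only.
type seccompData struct {
	Nr                 int32
	Arch               uint32
	InstructionPointer uint64
	Args               [6]uint64
}

// marshal mirrors the generated MarshalBytes: 16 fixed bytes plus 8*6 for Args.
func marshal(s *seccompData) []byte {
	buf := make([]byte, 16+8*6)
	le := binary.LittleEndian
	le.PutUint32(buf[0:4], uint32(s.Nr))
	le.PutUint32(buf[4:8], s.Arch)
	le.PutUint64(buf[8:16], s.InstructionPointer)
	for i, a := range s.Args {
		le.PutUint64(buf[16+8*i:24+8*i], a)
	}
	return buf
}

func main() {
	d := seccompData{Nr: 59, Arch: 0xc000003e} // execve on amd64, AUDIT_ARCH_X86_64
	buf := marshal(&d)
	fmt.Println(len(buf), binary.LittleEndian.Uint32(buf[0:4])) // 64 59
}
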
diff --git a/pkg/abi/linux/signalfd.go b/pkg/abi/linux/signalfd.go
index 85fad9956..468c6a387 100644
--- a/pkg/abi/linux/signalfd.go
+++ b/pkg/abi/linux/signalfd.go
@@ -23,6 +23,8 @@ const (
)
// SignalfdSiginfo is the siginfo encoding for signalfds.
+//
+// +marshal
type SignalfdSiginfo struct {
Signo uint32
Errno int32
@@ -41,5 +43,5 @@ type SignalfdSiginfo struct {
STime uint64
Addr uint64
AddrLSB uint16
- _ [48]uint8
+ _ [48]uint8 `marshal:"unaligned"`
}
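
Unlike SeccompData, the generated code above reports SignalfdSiginfo as not packed and falls back to MarshalBytes/UnmarshalBytes: the marshalled size is 82 + 48 = 130 bytes, while the Go struct occupies 136 bytes in memory (rounded up to its 8-byte alignment), so a raw memory copy would not match the wire format, which is presumably why the unsafe fast path is not generated here. The `marshal:"unaligned"` tag on the trailing padding field appears to be what tells go_marshal to accept this layout anyway. A standalone sketch with a local copy of the type, showing the size mismatch:

package main

import (
	"fmt"
	"unsafe"
)

// signalfdSiginfo is a local copy of linux.SignalfdSiginfo, for illustration only.
type signalfdSiginfo struct {
	Signo   uint32
	Errno   int32
	Code    int32
	PID     uint32
	UID     uint32
	FD      int32
	TID     uint32
	Band    uint32
	Overrun uint32
	TrapNo  uint32
	Status  int32
	Int     int32
	Ptr     uint64
	UTime   uint64
	STime   uint64
	Addr    uint64
	AddrLSB uint16
	_       [48]uint8
}

func main() {
	fmt.Println(unsafe.Sizeof(signalfdSiginfo{})) // 136: Go pads the struct to a multiple of 8
	fmt.Println(82 + 1*48)                        // 130: size written by the generated MarshalBytes
}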