author    gVisor bot <gvisor-bot@google.com>  2020-08-20 20:36:02 +0000
committer gVisor bot <gvisor-bot@google.com>  2020-08-20 20:36:02 +0000
commit    8c844b60d240b3b00a45620044be39e8797294f7 (patch)
tree      0961bd86800ea4a7f39f348ce57a3d012a72cc20 /pkg
parent    38234fef22552b4b44bbac542abd68212690378e (diff)
parent    129018ab3d945758b800aa5e6fe1a01d0e99446c (diff)
Merge release-20200810.0-74-g129018ab3 (automated)
Diffstat (limited to 'pkg')
-rw-r--r-- pkg/abi/linux/linux_abi_autogen_unsafe.go | 16
-rw-r--r-- pkg/fdnotifier/poll_unsafe.go | 3
-rw-r--r-- pkg/flipcall/flipcall.go | 31
-rw-r--r-- pkg/metric/metric.go | 6
-rw-r--r-- pkg/safemem/seq_unsafe.go | 7
-rw-r--r-- pkg/sentry/fs/copy_up.go | 13
-rw-r--r-- pkg/sentry/fs/dirent.go | 12
-rw-r--r-- pkg/sentry/fs/file_operations.go | 5
-rw-r--r-- pkg/sentry/fs/fsutil/dirty_set_impl.go | 32
-rw-r--r-- pkg/sentry/fs/fsutil/file_range_set.go | 10
-rw-r--r-- pkg/sentry/fs/fsutil/file_range_set_impl.go | 32
-rw-r--r-- pkg/sentry/fs/fsutil/frame_ref_set_impl.go | 32
-rw-r--r-- pkg/sentry/fs/fsutil/host_file_mapper.go | 12
-rw-r--r-- pkg/sentry/fs/fsutil/inode_cached.go | 4
-rw-r--r-- pkg/sentry/fs/lock/lock_set.go | 32
-rw-r--r-- pkg/sentry/fs/overlay.go | 20
-rw-r--r-- pkg/sentry/fs/tty/queue.go | 9
-rw-r--r-- pkg/sentry/fsimpl/devpts/queue.go | 9
-rw-r--r-- pkg/sentry/fsimpl/gofer/directory.go | 21
-rw-r--r-- pkg/sentry/fsimpl/gofer/filesystem.go | 38
-rw-r--r-- pkg/sentry/fsimpl/gofer/gofer.go | 4
-rw-r--r-- pkg/sentry/fsimpl/gofer/time.go | 15
-rw-r--r-- pkg/sentry/fsimpl/kernfs/filesystem.go | 21
-rw-r--r-- pkg/sentry/fsimpl/overlay/directory.go | 4
-rw-r--r-- pkg/sentry/fsimpl/overlay/filesystem.go | 32
-rw-r--r-- pkg/sentry/fsimpl/overlay/overlay.go | 4
-rw-r--r-- pkg/sentry/fsimpl/tmpfs/directory.go | 5
-rw-r--r-- pkg/sentry/fsimpl/tmpfs/filesystem.go | 13
-rw-r--r-- pkg/sentry/fsimpl/tmpfs/named_pipe.go | 4
-rw-r--r-- pkg/sentry/fsimpl/tmpfs/tmpfs.go | 15
-rw-r--r-- pkg/sentry/kernel/auth/id_map_set.go | 32
-rw-r--r-- pkg/sentry/kernel/kernel.go | 10
-rw-r--r-- pkg/sentry/kernel/ptrace.go | 25
-rw-r--r-- pkg/sentry/kernel/rseq.go | 31
-rw-r--r-- pkg/sentry/kernel/task_exec.go | 7
-rw-r--r-- pkg/sentry/kernel/task_sched.go | 11
-rw-r--r-- pkg/sentry/kernel/task_signals.go | 12
-rw-r--r-- pkg/sentry/kernel/task_stop.go | 14
-rw-r--r-- pkg/sentry/kernel/task_usermem.go | 12
-rw-r--r-- pkg/sentry/kernel/time/time.go | 6
-rw-r--r-- pkg/sentry/kernel/vdso.go | 3
-rw-r--r-- pkg/sentry/loader/elf.go | 13
-rw-r--r-- pkg/sentry/loader/loader.go | 4
-rw-r--r-- pkg/sentry/memmap/mapping_set.go | 4
-rw-r--r-- pkg/sentry/memmap/mapping_set_impl.go | 32
-rw-r--r-- pkg/sentry/memmap/memmap.go | 59
-rw-r--r-- pkg/sentry/mm/address_space.go | 8
-rw-r--r-- pkg/sentry/mm/file_refcount_set.go | 32
-rw-r--r-- pkg/sentry/mm/io.go | 9
-rw-r--r-- pkg/sentry/mm/pma.go | 85
-rw-r--r-- pkg/sentry/mm/pma_set.go | 32
-rw-r--r-- pkg/sentry/mm/syscalls.go | 9
-rw-r--r-- pkg/sentry/mm/vma.go | 42
-rw-r--r-- pkg/sentry/mm/vma_set.go | 32
-rw-r--r-- pkg/sentry/pgalloc/evictable_range_set.go | 32
-rw-r--r-- pkg/sentry/pgalloc/pgalloc.go | 10
-rw-r--r-- pkg/sentry/pgalloc/reclaim_set.go | 32
-rw-r--r-- pkg/sentry/pgalloc/usage_set.go | 32
-rw-r--r-- pkg/sentry/platform/interrupt/interrupt.go | 5
-rw-r--r-- pkg/sentry/platform/platform.go | 13
-rw-r--r-- pkg/sentry/vfs/dentry.go | 5
-rw-r--r-- pkg/sentry/vfs/file_description.go | 10
-rw-r--r-- pkg/sentry/vfs/filesystem.go | 41
-rw-r--r-- pkg/sentry/vfs/mount.go | 24
-rw-r--r-- pkg/sentry/vfs/mount_unsafe.go | 18
-rw-r--r-- pkg/state/addr_set.go | 32
-rw-r--r-- pkg/tcpip/stack/conntrack.go | 4
-rw-r--r-- pkg/tcpip/stack/iptables.go | 12
-rw-r--r-- pkg/unet/unet.go | 2
-rw-r--r-- pkg/usermem/addr_range_seq_unsafe.go | 6
-rw-r--r-- pkg/usermem/usermem.go | 77
71 files changed, 797 insertions(+), 543 deletions(-)
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index a53fc398d..760511bd3 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -167,7 +167,7 @@ func (s *Statx) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *Statx) UnmarshalUnsafe(src []byte) {
- if s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
+ if s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
safecopy.CopyOut(unsafe.Pointer(s), src)
} else {
// Type Statx doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -178,7 +178,7 @@ func (s *Statx) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (s *Statx) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
- if !s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() {
+ if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
s.MarshalBytes(buf) // escapes: fallback.
@@ -208,7 +208,7 @@ func (s *Statx) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (s *Statx) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
- if !s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() {
+ if !s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
// Type Statx doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
@@ -2223,7 +2223,7 @@ func (i *IPTIP) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (i *IPTIP) UnmarshalUnsafe(src []byte) {
- if i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
+ if i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
safecopy.CopyOut(unsafe.Pointer(i), src)
} else {
// Type IPTIP doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -2234,7 +2234,7 @@ func (i *IPTIP) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (i *IPTIP) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
- if !i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
+ if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
// Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
i.MarshalBytes(buf) // escapes: fallback.
@@ -3004,7 +3004,7 @@ func (i *IP6TEntry) Packed() bool {
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (i *IP6TEntry) MarshalUnsafe(dst []byte) {
- if i.IPv6.Packed() && i.Counters.Packed() {
+ if i.Counters.Packed() && i.IPv6.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(i))
} else {
// Type IP6TEntry doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -3081,7 +3081,7 @@ func (i *IP6TEntry) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
// WriteTo implements io.WriterTo.WriteTo.
func (i *IP6TEntry) WriteTo(w io.Writer) (int64, error) {
- if !i.IPv6.Packed() && i.Counters.Packed() {
+ if !i.Counters.Packed() && i.IPv6.Packed() {
// Type IP6TEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, i.SizeBytes())
i.MarshalBytes(buf)
@@ -3222,7 +3222,7 @@ func (i *IP6TIP) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (i *IP6TIP) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
- if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if !i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
// Type IP6TIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
i.MarshalBytes(buf) // escapes: fallback.
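
Every hunk in this generated file merely reorders the conjuncts of the Packed() checks; the marshalling logic is unchanged. For readers unfamiliar with the pattern, the following self-contained sketch shows what the generated fast path does. All names here (timespec, stat) are hypothetical stand-ins, and the bulk-copy branch assumes a little-endian host, unlike the real safecopy-based code:

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// timespec stands in for a nested field such as linux.StatxTimestamp.
type timespec struct {
	Sec  int64
	Nsec int64
}

// packed reports whether the layout has no padding, mirroring the generated
// Packed() predicates that gate the fast path above.
func (timespec) packed() bool {
	return unsafe.Sizeof(timespec{}) == 16
}

// stat stands in for a marshallable type with several nested fields.
type stat struct {
	Atime timespec
	Mtime timespec
}

// unmarshal mimics UnmarshalUnsafe: one bulk copy when every field is
// packed, otherwise a field-by-field fallback like UnmarshalBytes.
func (s *stat) unmarshal(src []byte) {
	if (timespec{}).packed() && unsafe.Sizeof(*s) == 32 {
		// Fast path: the in-memory layout matches the wire layout.
		copy(unsafe.Slice((*byte)(unsafe.Pointer(s)), 32), src[:32])
		return
	}
	// Fallback: decode each field explicitly.
	s.Atime.Sec = int64(binary.LittleEndian.Uint64(src[0:]))
	s.Atime.Nsec = int64(binary.LittleEndian.Uint64(src[8:]))
	s.Mtime.Sec = int64(binary.LittleEndian.Uint64(src[16:]))
	s.Mtime.Nsec = int64(binary.LittleEndian.Uint64(src[24:]))
}

func main() {
	buf := make([]byte, 32)
	binary.LittleEndian.PutUint64(buf[0:], 1)
	binary.LittleEndian.PutUint64(buf[24:], 42)
	var s stat
	s.unmarshal(buf)
	fmt.Println(s.Atime.Sec, s.Mtime.Nsec) // 1 42
}
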
diff --git a/pkg/fdnotifier/poll_unsafe.go b/pkg/fdnotifier/poll_unsafe.go
index 4225b04dd..ec2f997a2 100644
--- a/pkg/fdnotifier/poll_unsafe.go
+++ b/pkg/fdnotifier/poll_unsafe.go
@@ -65,8 +65,7 @@ func NonBlockingPoll(fd int32, mask waiter.EventMask) waiter.EventMask {
// epollWait performs a blocking wait on epfd.
//
-// Preconditions:
-// * len(events) > 0
+// Preconditions: len(events) > 0
func epollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) {
if len(events) == 0 {
panic("Empty events passed to EpollWait")
diff --git a/pkg/flipcall/flipcall.go b/pkg/flipcall/flipcall.go
index ec742c091..c4a3366ce 100644
--- a/pkg/flipcall/flipcall.go
+++ b/pkg/flipcall/flipcall.go
@@ -179,8 +179,10 @@ const (
// Connect blocks until the peer Endpoint has called Endpoint.RecvFirst().
//
-// Preconditions: ep is a client Endpoint. ep.Connect(), ep.RecvFirst(),
-// ep.SendRecv(), and ep.SendLast() have never been called.
+// Preconditions:
+// * ep is a client Endpoint.
+// * ep.Connect(), ep.RecvFirst(), ep.SendRecv(), and ep.SendLast() have never
+// been called.
func (ep *Endpoint) Connect() error {
err := ep.ctrlConnect()
if err == nil {
@@ -192,8 +194,9 @@ func (ep *Endpoint) Connect() error {
// RecvFirst blocks until the peer Endpoint calls Endpoint.SendRecv(), then
// returns the datagram length specified by that call.
//
-// Preconditions: ep is a server Endpoint. ep.SendRecv(), ep.RecvFirst(), and
-// ep.SendLast() have never been called.
+// Preconditions:
+// * ep is a server Endpoint.
+// * ep.SendRecv(), ep.RecvFirst(), and ep.SendLast() have never been called.
func (ep *Endpoint) RecvFirst() (uint32, error) {
if err := ep.ctrlWaitFirst(); err != nil {
return 0, err
@@ -211,10 +214,12 @@ func (ep *Endpoint) RecvFirst() (uint32, error) {
// datagram length, then blocks until the peer Endpoint calls
// Endpoint.SendRecv() or Endpoint.SendLast().
//
-// Preconditions: dataLen <= ep.DataCap(). No previous call to ep.SendRecv() or
-// ep.RecvFirst() has returned an error. ep.SendLast() has never been called.
-// If ep is a client Endpoint, ep.Connect() has previously been called and
-// returned nil.
+// Preconditions:
+// * dataLen <= ep.DataCap().
+// * No previous call to ep.SendRecv() or ep.RecvFirst() has returned an error.
+// * ep.SendLast() has never been called.
+// * If ep is a client Endpoint, ep.Connect() has previously been called and
+// returned nil.
func (ep *Endpoint) SendRecv(dataLen uint32) (uint32, error) {
if dataLen > ep.dataCap {
panic(fmt.Sprintf("attempting to send packet with datagram length %d (maximum %d)", dataLen, ep.dataCap))
@@ -240,10 +245,12 @@ func (ep *Endpoint) SendRecv(dataLen uint32) (uint32, error) {
// SendLast causes the peer Endpoint's call to Endpoint.SendRecv() or
// Endpoint.RecvFirst() to return with the given datagram length.
//
-// Preconditions: dataLen <= ep.DataCap(). No previous call to ep.SendRecv() or
-// ep.RecvFirst() has returned an error. ep.SendLast() has never been called.
-// If ep is a client Endpoint, ep.Connect() has previously been called and
-// returned nil.
+// Preconditions:
+// * dataLen <= ep.DataCap().
+// * No previous call to ep.SendRecv() or ep.RecvFirst() has returned an error.
+// * ep.SendLast() has never been called.
+// * If ep is a client Endpoint, ep.Connect() has previously been called and
+// returned nil.
func (ep *Endpoint) SendLast(dataLen uint32) error {
if dataLen > ep.dataCap {
panic(fmt.Sprintf("attempting to send packet with datagram length %d (maximum %d)", dataLen, ep.dataCap))
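
Taken together, the rewritten preconditions above pin down a strict call ordering: the client calls Connect() once and then alternates SendRecv() with the server, the server calls RecvFirst() once and then SendRecv(), and either side finishes the conversation with SendLast(). A toy model of that alternation, with unbuffered channels standing in for the shared-memory packet window (everything below is hypothetical, not the flipcall API):

package main

import "fmt"

func main() {
	toServer := make(chan uint32)
	toClient := make(chan uint32)

	go func() {
		// Server side: RecvFirst() once, then SendRecv() for each reply.
		n := <-toServer // RecvFirst
		fmt.Println("server received", n)
		toClient <- 128 // SendRecv: reply, then block for the next request
		n = <-toServer
		fmt.Println("server received", n)
		toClient <- 0 // SendLast: final reply, no further receive
	}()

	// Client side: Connect() would precede the first send; SendRecv() is
	// never called again after the peer's SendLast().
	toServer <- 64
	fmt.Println("client received", <-toClient)
	toServer <- 32
	fmt.Println("client received", <-toClient)
}
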
diff --git a/pkg/metric/metric.go b/pkg/metric/metric.go
index 64aa365ce..d012c5734 100644
--- a/pkg/metric/metric.go
+++ b/pkg/metric/metric.go
@@ -106,8 +106,8 @@ type customUint64Metric struct {
// after Initialized.
//
// Preconditions:
-// * name must be globally unique.
-// * Initialize/Disable have not been called.
+// * name must be globally unique.
+// * Initialize/Disable have not been called.
func RegisterCustomUint64Metric(name string, cumulative, sync bool, units pb.MetricMetadata_Units, description string, value func() uint64) error {
if initialized {
return ErrInitializationDone
@@ -221,7 +221,7 @@ var (
// EmitMetricUpdate is thread-safe.
//
// Preconditions:
-// * Initialize has been called.
+// * Initialize has been called.
func EmitMetricUpdate() {
emitMu.Lock()
defer emitMu.Unlock()
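
The two hunks above tighten the registration contract: RegisterCustomUint64Metric must run before Initialize (it returns ErrInitializationDone afterwards, per the context at the top of the hunk), and EmitMetricUpdate only after. A self-contained toy registry illustrating that gate (all names below are invented for the sketch):

package main

import (
	"errors"
	"fmt"
)

var (
	initialized bool
	metrics     = map[string]func() uint64{}
	errInitDone = errors.New("initialization done")
)

// register models RegisterCustomUint64Metric's precondition: registration
// is rejected once initialization has completed.
func register(name string, value func() uint64) error {
	if initialized {
		return errInitDone
	}
	metrics[name] = value
	return nil
}

func initialize() { initialized = true }

// emit models EmitMetricUpdate: only meaningful after initialize().
func emit() {
	for name, value := range metrics {
		fmt.Printf("%s = %d\n", name, value())
	}
}

func main() {
	_ = register("/toy/counter", func() uint64 { return 42 })
	initialize()
	if err := register("/toy/late", nil); err != nil {
		fmt.Println("late registration rejected:", err)
	}
	emit()
}
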
diff --git a/pkg/safemem/seq_unsafe.go b/pkg/safemem/seq_unsafe.go
index f5f0574f8..fc4049eeb 100644
--- a/pkg/safemem/seq_unsafe.go
+++ b/pkg/safemem/seq_unsafe.go
@@ -91,9 +91,10 @@ func BlockSeqFromSlice(slice []Block) BlockSeq {
return blockSeqFromSliceLimited(slice, limit)
}
-// Preconditions: The combined length of all Blocks in slice <= limit. If
-// len(slice) != 0, the first Block in slice has non-zero length, and limit >
-// 0.
+// Preconditions:
+// * The combined length of all Blocks in slice <= limit.
+// * If len(slice) != 0, the first Block in slice has non-zero length and
+// limit > 0.
func blockSeqFromSliceLimited(slice []Block, limit uint64) BlockSeq {
switch len(slice) {
case 0:
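
The reworded bullets are easier to check mechanically. A small sketch of a validator enforcing exactly those two conditions (block is a stand-in for safemem.Block):

package main

import "fmt"

type block struct{ length uint64 }

// validate enforces blockSeqFromSliceLimited's preconditions: the combined
// length must not exceed limit, and a non-empty slice must start with a
// non-zero-length block and have limit > 0.
func validate(blocks []block, limit uint64) error {
	var total uint64
	for _, b := range blocks {
		total += b.length
	}
	if total > limit {
		return fmt.Errorf("combined length %d exceeds limit %d", total, limit)
	}
	if len(blocks) != 0 && (blocks[0].length == 0 || limit == 0) {
		return fmt.Errorf("non-empty slice with empty first block or zero limit")
	}
	return nil
}

func main() {
	fmt.Println(validate([]block{{4096}, {512}}, 8192)) // <nil>
	fmt.Println(validate([]block{{0}, {512}}, 8192))    // error
}
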
diff --git a/pkg/sentry/fs/copy_up.go b/pkg/sentry/fs/copy_up.go
index 735452b07..ff2fe6712 100644
--- a/pkg/sentry/fs/copy_up.go
+++ b/pkg/sentry/fs/copy_up.go
@@ -107,8 +107,7 @@ func copyUp(ctx context.Context, d *Dirent) error {
// leave the upper filesystem filled with any number of parent directories
// but the upper filesystem will never be in an inconsistent state.
//
-// Preconditions:
-// - d.Inode.overlay is non-nil.
+// Preconditions: d.Inode.overlay is non-nil.
func copyUpLockedForRename(ctx context.Context, d *Dirent) error {
for {
// Did we race with another copy up or does there
@@ -183,12 +182,12 @@ func doCopyUp(ctx context.Context, d *Dirent) error {
// Returns a generic error on failure.
//
// Preconditions:
-// - parent.Inode.overlay.upper must be non-nil.
-// - next.Inode.overlay.copyMu must be locked writable.
-// - next.Inode.overlay.lower must be non-nil.
-// - next.Inode.overlay.lower.StableAttr.Type must be RegularFile, Directory,
+// * parent.Inode.overlay.upper must be non-nil.
+// * next.Inode.overlay.copyMu must be locked writable.
+// * next.Inode.overlay.lower must be non-nil.
+// * next.Inode.overlay.lower.StableAttr.Type must be RegularFile, Directory,
// or Symlink.
-// - upper filesystem must support setting file ownership and timestamps.
+// * upper filesystem must support setting file ownership and timestamps.
func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
// Extract the attributes of the file we wish to copy.
attrs, err := next.Inode.overlay.lower.UnstableAttr(ctx)
diff --git a/pkg/sentry/fs/dirent.go b/pkg/sentry/fs/dirent.go
index a2f751068..00c526b03 100644
--- a/pkg/sentry/fs/dirent.go
+++ b/pkg/sentry/fs/dirent.go
@@ -413,9 +413,9 @@ func (d *Dirent) descendantOf(p *Dirent) bool {
// Inode.Lookup, otherwise walk will keep d.mu locked.
//
// Preconditions:
-// - renameMu must be held for reading.
-// - d.mu must be held.
-// - name must must not contain "/"s.
+// * renameMu must be held for reading.
+// * d.mu must be held.
+// * name must not contain "/"s.
func (d *Dirent) walk(ctx context.Context, root *Dirent, name string, walkMayUnlock bool) (*Dirent, error) {
if !IsDir(d.Inode.StableAttr) {
return nil, syscall.ENOTDIR
@@ -577,9 +577,9 @@ func (d *Dirent) Walk(ctx context.Context, root *Dirent, name string) (*Dirent,
// exists returns true if name exists in relation to d.
//
// Preconditions:
-// - renameMu must be held for reading.
-// - d.mu must be held.
-// - name must must not contain "/"s.
+// * renameMu must be held for reading.
+// * d.mu must be held.
+// * name must not contain "/"s.
func (d *Dirent) exists(ctx context.Context, root *Dirent, name string) bool {
child, err := d.walk(ctx, root, name, false /* may unlock */)
if err != nil {
diff --git a/pkg/sentry/fs/file_operations.go b/pkg/sentry/fs/file_operations.go
index 305c0f840..6ec721022 100644
--- a/pkg/sentry/fs/file_operations.go
+++ b/pkg/sentry/fs/file_operations.go
@@ -159,8 +159,9 @@ type FileOperations interface {
// io provides access to the virtual memory space to which pointers in args
// refer.
//
- // Preconditions: The AddressSpace (if any) that io refers to is activated.
- // Must only be called from a task goroutine.
+ // Preconditions:
+ // * The AddressSpace (if any) that io refers to is activated.
+ // * Must only be called from a task goroutine.
Ioctl(ctx context.Context, file *File, io usermem.IO, args arch.SyscallArguments) (uintptr, error)
}
diff --git a/pkg/sentry/fs/fsutil/dirty_set_impl.go b/pkg/sentry/fs/fsutil/dirty_set_impl.go
index 8d462c412..9f1463389 100644
--- a/pkg/sentry/fs/fsutil/dirty_set_impl.go
+++ b/pkg/sentry/fs/fsutil/dirty_set_impl.go
@@ -347,7 +347,9 @@ func (s *DirtySet) InsertWithoutMerging(gap DirtyGapIterator, r __generics_impor
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *DirtySet) InsertWithoutMergingUnchecked(gap DirtyGapIterator, r __generics_imported0.MappableRange, val DirtyInfo) DirtyIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := DirtytrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1101,12 +1103,10 @@ func (seg DirtyIterator) End() uint64 {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg DirtyIterator) SetRangeUnchecked(r __generics_imported0.MappableRange) {
seg.node.keys[seg.index] = r
}
@@ -1131,8 +1131,9 @@ func (seg DirtyIterator) SetRange(r __generics_imported0.MappableRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg DirtyIterator) SetStartUnchecked(start uint64) {
seg.node.keys[seg.index].Start = start
}
@@ -1154,8 +1155,9 @@ func (seg DirtyIterator) SetStart(start uint64) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg DirtyIterator) SetEndUnchecked(end uint64) {
seg.node.keys[seg.index].End = end
}
@@ -1575,9 +1577,11 @@ func (s *DirtySet) ExportSortedSlices() *DirtySegmentDataSlices {
// ImportSortedSlice initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *DirtySet) ImportSortedSlices(sds *DirtySegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
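
This bullet structure repeats across every generated segment set in this change (dirty_set_impl.go, file_range_set_impl.go, frame_ref_set_impl.go, lock_set.go, and so on below). The *Unchecked mutators document preconditions that their checked counterparts (SetRange, SetStart, SetEnd, visible in the surrounding context) are expected to enforce. A self-contained sketch of the checked/unchecked split for SetStart, using a plain slice instead of the real B-tree-backed set:

package main

import "fmt"

type segment struct{ start, end uint64 }

// setStartChecked enforces SetStartUnchecked's documented preconditions
// before mutating: start < seg.End(), and start >= PrevSegment().End()
// whenever a predecessor exists.
func setStartChecked(segs []segment, i int, start uint64) {
	if start >= segs[i].end {
		panic("new start would empty or invert the segment")
	}
	if i > 0 && start < segs[i-1].end {
		panic("new start would overlap the previous segment")
	}
	segs[i].start = start // the "unchecked" mutation itself
}

func main() {
	segs := []segment{{0, 4}, {8, 12}}
	setStartChecked(segs, 1, 6) // legal: 6 < 12 and 6 >= 4
	fmt.Println(segs)           // [{0 4} {6 12}]
}
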
diff --git a/pkg/sentry/fs/fsutil/file_range_set.go b/pkg/sentry/fs/fsutil/file_range_set.go
index bbafebf03..9197aeb88 100644
--- a/pkg/sentry/fs/fsutil/file_range_set.go
+++ b/pkg/sentry/fs/fsutil/file_range_set.go
@@ -70,7 +70,9 @@ func (seg FileRangeIterator) FileRange() memmap.FileRange {
// FileRangeOf returns the FileRange mapped by mr.
//
-// Preconditions: seg.Range().IsSupersetOf(mr). mr.Length() != 0.
+// Preconditions:
+// * seg.Range().IsSupersetOf(mr).
+// * mr.Length() != 0.
func (seg FileRangeIterator) FileRangeOf(mr memmap.MappableRange) memmap.FileRange {
frstart := seg.Value() + (mr.Start - seg.Start())
return memmap.FileRange{frstart, frstart + mr.Length()}
@@ -88,8 +90,10 @@ func (seg FileRangeIterator) FileRangeOf(mr memmap.MappableRange) memmap.FileRan
// outside of optional. It returns a non-nil error if any error occurs, even
// if the error only affects offsets in optional, but not in required.
//
-// Preconditions: required.Length() > 0. optional.IsSupersetOf(required).
-// required and optional must be page-aligned.
+// Preconditions:
+// * required.Length() > 0.
+// * optional.IsSupersetOf(required).
+// * required and optional must be page-aligned.
func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.MappableRange, mf *pgalloc.MemoryFile, kind usage.MemoryKind, readAt func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) error {
gap := frs.LowerBoundGap(required.Start)
for gap.Ok() && gap.Start() < required.End {
diff --git a/pkg/sentry/fs/fsutil/file_range_set_impl.go b/pkg/sentry/fs/fsutil/file_range_set_impl.go
index e5b6d1041..374ed79b7 100644
--- a/pkg/sentry/fs/fsutil/file_range_set_impl.go
+++ b/pkg/sentry/fs/fsutil/file_range_set_impl.go
@@ -347,7 +347,9 @@ func (s *FileRangeSet) InsertWithoutMerging(gap FileRangeGapIterator, r __generi
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *FileRangeSet) InsertWithoutMergingUnchecked(gap FileRangeGapIterator, r __generics_imported0.MappableRange, val uint64) FileRangeIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := FileRangetrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1101,12 +1103,10 @@ func (seg FileRangeIterator) End() uint64 {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg FileRangeIterator) SetRangeUnchecked(r __generics_imported0.MappableRange) {
seg.node.keys[seg.index] = r
}
@@ -1131,8 +1131,9 @@ func (seg FileRangeIterator) SetRange(r __generics_imported0.MappableRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg FileRangeIterator) SetStartUnchecked(start uint64) {
seg.node.keys[seg.index].Start = start
}
@@ -1154,8 +1155,9 @@ func (seg FileRangeIterator) SetStart(start uint64) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg FileRangeIterator) SetEndUnchecked(end uint64) {
seg.node.keys[seg.index].End = end
}
@@ -1575,9 +1577,11 @@ func (s *FileRangeSet) ExportSortedSlices() *FileRangeSegmentDataSlices {
// ImportSortedSlice initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *FileRangeSet) ImportSortedSlices(sds *FileRangeSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
diff --git a/pkg/sentry/fs/fsutil/frame_ref_set_impl.go b/pkg/sentry/fs/fsutil/frame_ref_set_impl.go
index e799c6b71..619246875 100644
--- a/pkg/sentry/fs/fsutil/frame_ref_set_impl.go
+++ b/pkg/sentry/fs/fsutil/frame_ref_set_impl.go
@@ -347,7 +347,9 @@ func (s *FrameRefSet) InsertWithoutMerging(gap FrameRefGapIterator, r __generics
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *FrameRefSet) InsertWithoutMergingUnchecked(gap FrameRefGapIterator, r __generics_imported0.FileRange, val uint64) FrameRefIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := FrameReftrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1101,12 +1103,10 @@ func (seg FrameRefIterator) End() uint64 {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg FrameRefIterator) SetRangeUnchecked(r __generics_imported0.FileRange) {
seg.node.keys[seg.index] = r
}
@@ -1131,8 +1131,9 @@ func (seg FrameRefIterator) SetRange(r __generics_imported0.FileRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg FrameRefIterator) SetStartUnchecked(start uint64) {
seg.node.keys[seg.index].Start = start
}
@@ -1154,8 +1155,9 @@ func (seg FrameRefIterator) SetStart(start uint64) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg FrameRefIterator) SetEndUnchecked(end uint64) {
seg.node.keys[seg.index].End = end
}
@@ -1575,9 +1577,11 @@ func (s *FrameRefSet) ExportSortedSlices() *FrameRefSegmentDataSlices {
// ImportSortedSlice initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *FrameRefSet) ImportSortedSlices(sds *FrameRefSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
diff --git a/pkg/sentry/fs/fsutil/host_file_mapper.go b/pkg/sentry/fs/fsutil/host_file_mapper.go
index ef0113b52..1390a9a7f 100644
--- a/pkg/sentry/fs/fsutil/host_file_mapper.go
+++ b/pkg/sentry/fs/fsutil/host_file_mapper.go
@@ -80,7 +80,9 @@ func NewHostFileMapper() *HostFileMapper {
// IncRefOn increments the reference count on all offsets in mr.
//
-// Preconditions: mr.Length() != 0. mr.Start and mr.End must be page-aligned.
+// Preconditions:
+// * mr.Length() != 0.
+// * mr.Start and mr.End must be page-aligned.
func (f *HostFileMapper) IncRefOn(mr memmap.MappableRange) {
f.refsMu.Lock()
defer f.refsMu.Unlock()
@@ -97,7 +99,9 @@ func (f *HostFileMapper) IncRefOn(mr memmap.MappableRange) {
// DecRefOn decrements the reference count on all offsets in mr.
//
-// Preconditions: mr.Length() != 0. mr.Start and mr.End must be page-aligned.
+// Preconditions:
+// * mr.Length() != 0.
+// * mr.Start and mr.End must be page-aligned.
func (f *HostFileMapper) DecRefOn(mr memmap.MappableRange) {
f.refsMu.Lock()
defer f.refsMu.Unlock()
@@ -204,7 +208,9 @@ func (f *HostFileMapper) UnmapAll() {
}
}
-// Preconditions: f.mapsMu must be locked. f.mappings[chunkStart] == m.
+// Preconditions:
+// * f.mapsMu must be locked.
+// * f.mappings[chunkStart] == m.
func (f *HostFileMapper) unmapAndRemoveLocked(chunkStart uint64, m mapping) {
if _, _, errno := syscall.Syscall(syscall.SYS_MUNMAP, m.addr, chunkSize, 0); errno != 0 {
// This leaks address space and is unexpected, but is otherwise
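
IncRefOn and DecRefOn now state their alignment requirements as separate bullets. A sketch of the corresponding validation a caller (or a debug assertion) could perform; the 4 KiB constant is an assumption standing in for usermem.PageSize:

package main

import "fmt"

const pageSize = 4096 // assumption: usermem.PageSize on common platforms

// checkRange mirrors the IncRefOn/DecRefOn preconditions: a non-empty,
// page-aligned [start, end) range.
func checkRange(start, end uint64) error {
	if start >= end {
		return fmt.Errorf("empty range [%#x, %#x)", start, end)
	}
	if start%pageSize != 0 || end%pageSize != 0 {
		return fmt.Errorf("range [%#x, %#x) is not page-aligned", start, end)
	}
	return nil
}

func main() {
	fmt.Println(checkRange(0x1000, 0x3000)) // <nil>
	fmt.Println(checkRange(0x1000, 0x2800)) // not page-aligned
}
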
diff --git a/pkg/sentry/fs/fsutil/inode_cached.go b/pkg/sentry/fs/fsutil/inode_cached.go
index fe8b0b6ac..9eb6f522e 100644
--- a/pkg/sentry/fs/fsutil/inode_cached.go
+++ b/pkg/sentry/fs/fsutil/inode_cached.go
@@ -684,7 +684,9 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
// maybeGrowFile grows the file's size if data has been written past the old
// size.
//
-// Preconditions: rw.c.attrMu and rw.c.dataMu bust be locked.
+// Preconditions:
+// * rw.c.attrMu must be locked.
+// * rw.c.dataMu must be locked.
func (rw *inodeReadWriter) maybeGrowFile() {
// If the write ends beyond the file's previous size, it causes the
// file to grow.
diff --git a/pkg/sentry/fs/lock/lock_set.go b/pkg/sentry/fs/lock/lock_set.go
index 5356f5791..37c216b95 100644
--- a/pkg/sentry/fs/lock/lock_set.go
+++ b/pkg/sentry/fs/lock/lock_set.go
@@ -343,7 +343,9 @@ func (s *LockSet) InsertWithoutMerging(gap LockGapIterator, r LockRange, val Loc
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *LockSet) InsertWithoutMergingUnchecked(gap LockGapIterator, r LockRange, val Lock) LockIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := LocktrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1097,12 +1099,10 @@ func (seg LockIterator) End() uint64 {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg LockIterator) SetRangeUnchecked(r LockRange) {
seg.node.keys[seg.index] = r
}
@@ -1127,8 +1127,9 @@ func (seg LockIterator) SetRange(r LockRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg LockIterator) SetStartUnchecked(start uint64) {
seg.node.keys[seg.index].Start = start
}
@@ -1150,8 +1151,9 @@ func (seg LockIterator) SetStart(start uint64) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg LockIterator) SetEndUnchecked(end uint64) {
seg.node.keys[seg.index].End = end
}
@@ -1571,9 +1573,11 @@ func (s *LockSet) ExportSortedSlices() *LockSegmentDataSlices {
// ImportSortedSlice initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *LockSet) ImportSortedSlices(sds *LockSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
diff --git a/pkg/sentry/fs/overlay.go b/pkg/sentry/fs/overlay.go
index 35013a21b..01a1235b8 100644
--- a/pkg/sentry/fs/overlay.go
+++ b/pkg/sentry/fs/overlay.go
@@ -86,13 +86,12 @@ func isXattrOverlay(name string) bool {
// NewOverlayRoot produces the root of an overlay.
//
// Preconditions:
-//
-// - upper and lower must be non-nil.
-// - upper must not be an overlay.
-// - lower should not expose character devices, pipes, or sockets, because
+// * upper and lower must be non-nil.
+// * upper must not be an overlay.
+// * lower should not expose character devices, pipes, or sockets, because
// copying up these types of files is not supported.
-// - lower must not require that file objects be revalidated.
-// - lower must not have dynamic file/directory content.
+// * lower must not require that file objects be revalidated.
+// * lower must not have dynamic file/directory content.
func NewOverlayRoot(ctx context.Context, upper *Inode, lower *Inode, flags MountSourceFlags) (*Inode, error) {
if !IsDir(upper.StableAttr) {
return nil, fmt.Errorf("upper Inode is a %v, not a directory", upper.StableAttr.Type)
@@ -117,12 +116,11 @@ func NewOverlayRoot(ctx context.Context, upper *Inode, lower *Inode, flags Mount
// NewOverlayRootFile produces the root of an overlay that points to a file.
//
// Preconditions:
-//
-// - lower must be non-nil.
-// - lower should not expose character devices, pipes, or sockets, because
+// * lower must be non-nil.
+// * lower should not expose character devices, pipes, or sockets, because
// copying up these types of files is not supported. Nor can it be a directory.
-// - lower must not require that file objects be revalidated.
-// - lower must not have dynamic file/directory content.
+// * lower must not require that file objects be revalidated.
+// * lower must not have dynamic file/directory content.
func NewOverlayRootFile(ctx context.Context, upperMS *MountSource, lower *Inode, flags MountSourceFlags) (*Inode, error) {
if !IsRegular(lower.StableAttr) {
return nil, fmt.Errorf("lower Inode is not a regular file")
diff --git a/pkg/sentry/fs/tty/queue.go b/pkg/sentry/fs/tty/queue.go
index ceabb9b1e..c5d7ec717 100644
--- a/pkg/sentry/fs/tty/queue.go
+++ b/pkg/sentry/fs/tty/queue.go
@@ -104,8 +104,7 @@ func (q *queue) readableSize(ctx context.Context, io usermem.IO, args arch.Sysca
// as whether the read caused more readable data to become available (whether
// data was pushed from the wait buffer to the read buffer).
//
-// Preconditions:
-// * l.termiosMu must be held for reading.
+// Preconditions: l.termiosMu must be held for reading.
func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipline) (int64, bool, error) {
q.mu.Lock()
defer q.mu.Unlock()
@@ -145,8 +144,7 @@ func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipl
// write writes to q from userspace.
//
-// Preconditions:
-// * l.termiosMu must be held for reading.
+// Preconditions: l.termiosMu must be held for reading.
func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscipline) (int64, error) {
q.mu.Lock()
defer q.mu.Unlock()
@@ -188,8 +186,7 @@ func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscip
// writeBytes writes to q from b.
//
-// Preconditions:
-// * l.termiosMu must be held for reading.
+// Preconditions: l.termiosMu must be held for reading.
func (q *queue) writeBytes(b []byte, l *lineDiscipline) {
q.mu.Lock()
defer q.mu.Unlock()
diff --git a/pkg/sentry/fsimpl/devpts/queue.go b/pkg/sentry/fsimpl/devpts/queue.go
index dffb4232c..331c13997 100644
--- a/pkg/sentry/fsimpl/devpts/queue.go
+++ b/pkg/sentry/fsimpl/devpts/queue.go
@@ -102,8 +102,7 @@ func (q *queue) readableSize(ctx context.Context, io usermem.IO, args arch.Sysca
// as whether the read caused more readable data to become available (whether
// data was pushed from the wait buffer to the read buffer).
//
-// Preconditions:
-// * l.termiosMu must be held for reading.
+// Preconditions: l.termiosMu must be held for reading.
func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipline) (int64, bool, error) {
q.mu.Lock()
defer q.mu.Unlock()
@@ -143,8 +142,7 @@ func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipl
// write writes to q from userspace.
//
-// Preconditions:
-// * l.termiosMu must be held for reading.
+// Preconditions: l.termiosMu must be held for reading.
func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscipline) (int64, error) {
q.mu.Lock()
defer q.mu.Unlock()
@@ -186,8 +184,7 @@ func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscip
// writeBytes writes to q from b.
//
-// Preconditions:
-// * l.termiosMu must be held for reading.
+// Preconditions: l.termiosMu must be held for reading.
func (q *queue) writeBytes(b []byte, l *lineDiscipline) {
q.mu.Lock()
defer q.mu.Unlock()
diff --git a/pkg/sentry/fsimpl/gofer/directory.go b/pkg/sentry/fsimpl/gofer/directory.go
index 40dce553e..91d2ae199 100644
--- a/pkg/sentry/fsimpl/gofer/directory.go
+++ b/pkg/sentry/fsimpl/gofer/directory.go
@@ -34,8 +34,11 @@ func (d *dentry) isDir() bool {
return d.fileType() == linux.S_IFDIR
}
-// Preconditions: filesystem.renameMu must be locked. d.dirMu must be locked.
-// d.isDir(). child must be a newly-created dentry that has never had a parent.
+// Preconditions:
+// * filesystem.renameMu must be locked.
+// * d.dirMu must be locked.
+// * d.isDir().
+// * child must be a newly-created dentry that has never had a parent.
func (d *dentry) cacheNewChildLocked(child *dentry, name string) {
d.IncRef() // reference held by child on its parent
child.parent = d
@@ -46,7 +49,9 @@ func (d *dentry) cacheNewChildLocked(child *dentry, name string) {
d.children[name] = child
}
-// Preconditions: d.dirMu must be locked. d.isDir().
+// Preconditions:
+// * d.dirMu must be locked.
+// * d.isDir().
func (d *dentry) cacheNegativeLookupLocked(name string) {
// Don't cache negative lookups if InteropModeShared is in effect (since
// this makes remote lookup unavoidable), or if d.isSynthetic() (in which
@@ -79,8 +84,10 @@ type createSyntheticOpts struct {
// createSyntheticChildLocked creates a synthetic file with the given name
// in d.
//
-// Preconditions: d.dirMu must be locked. d.isDir(). d does not already contain
-// a child with the given name.
+// Preconditions:
+// * d.dirMu must be locked.
+// * d.isDir().
+// * d does not already contain a child with the given name.
func (d *dentry) createSyntheticChildLocked(opts *createSyntheticOpts) {
child := &dentry{
refs: 1, // held by d
@@ -151,7 +158,9 @@ func (fd *directoryFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallba
return nil
}
-// Preconditions: d.isDir(). There exists at least one directoryFD representing d.
+// Preconditions:
+// * d.isDir().
+// * There exists at least one directoryFD representing d.
func (d *dentry) getDirents(ctx context.Context) ([]vfs.Dirent, error) {
// NOTE(b/135560623): 9P2000.L's readdir does not specify behavior in the
// presence of concurrent mutation of an iterated directory, so
diff --git a/pkg/sentry/fsimpl/gofer/filesystem.go b/pkg/sentry/fsimpl/gofer/filesystem.go
index 1b6fa4e14..4d581fc29 100644
--- a/pkg/sentry/fsimpl/gofer/filesystem.go
+++ b/pkg/sentry/fsimpl/gofer/filesystem.go
@@ -115,9 +115,12 @@ func putDentrySlice(ds *[]*dentry) {
// Dentries which may become cached as a result of the traversal are appended
// to *ds.
//
-// Preconditions: fs.renameMu must be locked. d.dirMu must be locked.
-// !rp.Done(). If !d.cachedMetadataAuthoritative(), then d's cached metadata
-// must be up to date.
+// Preconditions:
+// * fs.renameMu must be locked.
+// * d.dirMu must be locked.
+// * !rp.Done().
+// * If !d.cachedMetadataAuthoritative(), then d's cached metadata must be up
+// to date.
//
// Postconditions: The returned dentry's cached metadata is up to date.
func (fs *filesystem) stepLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry, mayFollowSymlinks bool, ds **[]*dentry) (*dentry, error) {
@@ -185,8 +188,11 @@ afterSymlink:
// getChildLocked returns a dentry representing the child of parent with the
// given name. If no such child exists, getChildLocked returns (nil, nil).
//
-// Preconditions: fs.renameMu must be locked. parent.dirMu must be locked.
-// parent.isDir(). name is not "." or "..".
+// Preconditions:
+// * fs.renameMu must be locked.
+// * parent.dirMu must be locked.
+// * parent.isDir().
+// * name is not "." or "..".
//
// Postconditions: If getChildLocked returns a non-nil dentry, its cached
// metadata is up to date.
@@ -206,7 +212,8 @@ func (fs *filesystem) getChildLocked(ctx context.Context, vfsObj *vfs.VirtualFil
return fs.revalidateChildLocked(ctx, vfsObj, parent, name, child, ds)
}
-// Preconditions: As for getChildLocked. !parent.isSynthetic().
+// Preconditions: Same as getChildLocked, plus:
+// * !parent.isSynthetic().
func (fs *filesystem) revalidateChildLocked(ctx context.Context, vfsObj *vfs.VirtualFilesystem, parent *dentry, name string, child *dentry, ds **[]*dentry) (*dentry, error) {
if child != nil {
// Need to lock child.metadataMu because we might be updating child
@@ -279,9 +286,11 @@ func (fs *filesystem) revalidateChildLocked(ctx context.Context, vfsObj *vfs.Vir
// rp.Start().Impl().(*dentry)). It does not check that the returned directory
// is searchable by the provider of rp.
//
-// Preconditions: fs.renameMu must be locked. !rp.Done(). If
-// !d.cachedMetadataAuthoritative(), then d's cached metadata must be up to
-// date.
+// Preconditions:
+// * fs.renameMu must be locked.
+// * !rp.Done().
+// * If !d.cachedMetadataAuthoritative(), then d's cached metadata must be up
+// to date.
func (fs *filesystem) walkParentDirLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry, ds **[]*dentry) (*dentry, error) {
for !rp.Final() {
d.dirMu.Lock()
@@ -328,8 +337,9 @@ func (fs *filesystem) resolveLocked(ctx context.Context, rp *vfs.ResolvingPath,
// createInRemoteDir (if the parent directory is a real remote directory) or
// createInSyntheticDir (if the parent directory is synthetic) to do so.
//
-// Preconditions: !rp.Done(). For the final path component in rp,
-// !rp.ShouldFollowSymlink().
+// Preconditions:
+// * !rp.Done().
+// * For the final path component in rp, !rp.ShouldFollowSymlink().
func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir bool, createInRemoteDir func(parent *dentry, name string, ds **[]*dentry) error, createInSyntheticDir func(parent *dentry, name string) error) error {
var ds *[]*dentry
fs.renameMu.RLock()
@@ -1087,8 +1097,10 @@ retry:
return &fd.vfsfd, nil
}
-// Preconditions: d.fs.renameMu must be locked. d.dirMu must be locked.
-// !d.isSynthetic().
+// Preconditions:
+// * d.fs.renameMu must be locked.
+// * d.dirMu must be locked.
+// * !d.isSynthetic().
func (d *dentry) createAndOpenChildLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.OpenOptions, ds **[]*dentry) (*vfs.FileDescription, error) {
if err := d.checkPermissions(rp.Credentials(), vfs.MayWrite); err != nil {
return nil, err
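
stepLocked, getChildLocked, and walkParentDirLocked above split path resolution into per-component steps: walkParentDirLocked consumes every component except the last (the for !rp.Final() loop), leaving the final one for the caller. A deliberately simplified sketch of that split, ignoring symlinks, mount points, and ".." handling:

package main

import (
	"fmt"
	"strings"
)

// walkParent resolves every path component except the last, returning the
// parent directory and the final component, like walkParentDirLocked.
func walkParent(path string) (parent, name string) {
	comps := strings.Split(strings.Trim(path, "/"), "/")
	return "/" + strings.Join(comps[:len(comps)-1], "/"), comps[len(comps)-1]
}

func main() {
	parent, name := walkParent("/a/b/c")
	fmt.Println(parent, name) // /a/b c
}
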
diff --git a/pkg/sentry/fsimpl/gofer/gofer.go b/pkg/sentry/fsimpl/gofer/gofer.go
index 63e589859..c6696b9d8 100644
--- a/pkg/sentry/fsimpl/gofer/gofer.go
+++ b/pkg/sentry/fsimpl/gofer/gofer.go
@@ -1418,7 +1418,9 @@ func (d *dentry) userXattrSupported() bool {
return filetype == linux.ModeRegular || filetype == linux.ModeDirectory
}
-// Preconditions: !d.isSynthetic(). d.isRegularFile() || d.isDir().
+// Preconditions:
+// * !d.isSynthetic().
+// * d.isRegularFile() || d.isDir().
func (d *dentry) ensureSharedHandle(ctx context.Context, read, write, trunc bool) error {
// O_TRUNC unconditionally requires us to obtain a new handle (opened with
// O_TRUNC).
diff --git a/pkg/sentry/fsimpl/gofer/time.go b/pkg/sentry/fsimpl/gofer/time.go
index e59d07e90..98733253d 100644
--- a/pkg/sentry/fsimpl/gofer/time.go
+++ b/pkg/sentry/fsimpl/gofer/time.go
@@ -52,8 +52,9 @@ func (d *dentry) touchAtime(mnt *vfs.Mount) {
mnt.EndWrite()
}
-// Preconditions: d.cachedMetadataAuthoritative() == true. The caller has
-// successfully called vfs.Mount.CheckBeginWrite().
+// Preconditions:
+// * d.cachedMetadataAuthoritative() == true.
+// * The caller has successfully called vfs.Mount.CheckBeginWrite().
func (d *dentry) touchCtime() {
now := d.fs.clock.Now().Nanoseconds()
d.metadataMu.Lock()
@@ -61,8 +62,9 @@ func (d *dentry) touchCtime() {
d.metadataMu.Unlock()
}
-// Preconditions: d.cachedMetadataAuthoritative() == true. The caller has
-// successfully called vfs.Mount.CheckBeginWrite().
+// Preconditions:
+// * d.cachedMetadataAuthoritative() == true.
+// * The caller has successfully called vfs.Mount.CheckBeginWrite().
func (d *dentry) touchCMtime() {
now := d.fs.clock.Now().Nanoseconds()
d.metadataMu.Lock()
@@ -72,8 +74,9 @@ func (d *dentry) touchCMtime() {
d.metadataMu.Unlock()
}
-// Preconditions: d.cachedMetadataAuthoritative() == true. The caller has
-// locked d.metadataMu.
+// Preconditions:
+// * d.cachedMetadataAuthoritative() == true.
+// * The caller has locked d.metadataMu.
func (d *dentry) touchCMtimeLocked() {
now := d.fs.clock.Now().Nanoseconds()
atomic.StoreInt64(&d.mtime, now)
diff --git a/pkg/sentry/fsimpl/kernfs/filesystem.go b/pkg/sentry/fsimpl/kernfs/filesystem.go
index 3e5192edd..e5d6b5c35 100644
--- a/pkg/sentry/fsimpl/kernfs/filesystem.go
+++ b/pkg/sentry/fsimpl/kernfs/filesystem.go
@@ -32,7 +32,9 @@ import (
//
// stepExistingLocked is loosely analogous to fs/namei.c:walk_component().
//
-// Preconditions: Filesystem.mu must be locked for at least reading. !rp.Done().
+// Preconditions:
+// * Filesystem.mu must be locked for at least reading.
+// * !rp.Done().
//
// Postcondition: Caller must call fs.processDeferredDecRefs*.
func (fs *Filesystem) stepExistingLocked(ctx context.Context, rp *vfs.ResolvingPath, vfsd *vfs.Dentry, mayFollowSymlinks bool) (*vfs.Dentry, error) {
@@ -107,8 +109,11 @@ afterSymlink:
// or vfs.ResolvingPath.ResolveChild(name) returns childVFSD (which may be
// nil) to verify that the returned child (or lack thereof) is correct.
//
-// Preconditions: Filesystem.mu must be locked for at least reading.
-// parent.dirMu must be locked. parent.isDir(). name is not "." or "..".
+// Preconditions:
+// * Filesystem.mu must be locked for at least reading.
+// * parent.dirMu must be locked.
+// * parent.isDir().
+// * name is not "." or "..".
//
// Postconditions: Caller must call fs.processDeferredDecRefs*.
func (fs *Filesystem) revalidateChildLocked(ctx context.Context, vfsObj *vfs.VirtualFilesystem, parent *Dentry, name string, child *Dentry) (*Dentry, error) {
@@ -171,7 +176,9 @@ func (fs *Filesystem) walkExistingLocked(ctx context.Context, rp *vfs.ResolvingP
// walkParentDirLocked is loosely analogous to Linux's
// fs/namei.c:path_parentat().
//
-// Preconditions: Filesystem.mu must be locked for at least reading. !rp.Done().
+// Preconditions:
+// * Filesystem.mu must be locked for at least reading.
+// * !rp.Done().
//
// Postconditions: Caller must call fs.processDeferredDecRefs*.
func (fs *Filesystem) walkParentDirLocked(ctx context.Context, rp *vfs.ResolvingPath) (*vfs.Dentry, Inode, error) {
@@ -193,8 +200,10 @@ func (fs *Filesystem) walkParentDirLocked(ctx context.Context, rp *vfs.Resolving
// checkCreateLocked checks that a file named rp.Component() may be created in
// directory parentVFSD, then returns rp.Component().
//
-// Preconditions: Filesystem.mu must be locked for at least reading. parentInode
-// == parentVFSD.Impl().(*Dentry).Inode. isDir(parentInode) == true.
+// Preconditions:
+// * Filesystem.mu must be locked for at least reading.
+// * parentInode == parentVFSD.Impl().(*Dentry).Inode.
+// * isDir(parentInode) == true.
func checkCreateLocked(ctx context.Context, rp *vfs.ResolvingPath, parentVFSD *vfs.Dentry, parentInode Inode) (string, error) {
if err := parentInode.CheckPermissions(ctx, rp.Credentials(), vfs.MayWrite|vfs.MayExec); err != nil {
return "", err
diff --git a/pkg/sentry/fsimpl/overlay/directory.go b/pkg/sentry/fsimpl/overlay/directory.go
index 6a79f7ffe..b1b292e83 100644
--- a/pkg/sentry/fsimpl/overlay/directory.go
+++ b/pkg/sentry/fsimpl/overlay/directory.go
@@ -29,7 +29,9 @@ func (d *dentry) isDir() bool {
return atomic.LoadUint32(&d.mode)&linux.S_IFMT == linux.S_IFDIR
}
-// Preconditions: d.dirMu must be locked. d.isDir().
+// Preconditions:
+// * d.dirMu must be locked.
+// * d.isDir().
func (d *dentry) collectWhiteoutsForRmdirLocked(ctx context.Context) (map[string]bool, error) {
vfsObj := d.fs.vfsfs.VirtualFilesystem()
var readdirErr error
diff --git a/pkg/sentry/fsimpl/overlay/filesystem.go b/pkg/sentry/fsimpl/overlay/filesystem.go
index 86d0164b4..a3cee4047 100644
--- a/pkg/sentry/fsimpl/overlay/filesystem.go
+++ b/pkg/sentry/fsimpl/overlay/filesystem.go
@@ -110,8 +110,10 @@ func (fs *filesystem) renameMuUnlockAndCheckDrop(ctx context.Context, ds **[]*de
// Dentries which may have a reference count of zero, and which therefore
// should be dropped once traversal is complete, are appended to ds.
//
-// Preconditions: fs.renameMu must be locked. d.dirMu must be locked.
-// !rp.Done().
+// Preconditions:
+// * fs.renameMu must be locked.
+// * d.dirMu must be locked.
+// * !rp.Done().
func (fs *filesystem) stepLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry, mayFollowSymlinks bool, ds **[]*dentry) (*dentry, error) {
if !d.isDir() {
return nil, syserror.ENOTDIR
@@ -159,7 +161,9 @@ afterSymlink:
return child, nil
}
-// Preconditions: fs.renameMu must be locked. d.dirMu must be locked.
+// Preconditions:
+// * fs.renameMu must be locked.
+// * d.dirMu must be locked.
func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name string, ds **[]*dentry) (*dentry, error) {
if child, ok := parent.children[name]; ok {
return child, nil
@@ -177,7 +181,9 @@ func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name s
return child, nil
}
-// Preconditions: fs.renameMu must be locked. parent.dirMu must be locked.
+// Preconditions:
+// * fs.renameMu must be locked.
+// * parent.dirMu must be locked.
func (fs *filesystem) lookupLocked(ctx context.Context, parent *dentry, name string) (*dentry, error) {
childPath := fspath.Parse(name)
child := fs.newDentry()
@@ -300,7 +306,9 @@ func (fs *filesystem) lookupLocked(ctx context.Context, parent *dentry, name str
// lookupLayerLocked is similar to lookupLocked, but only returns information
// about the file rather than a dentry.
//
-// Preconditions: fs.renameMu must be locked. parent.dirMu must be locked.
+// Preconditions:
+// * fs.renameMu must be locked.
+// * parent.dirMu must be locked.
func (fs *filesystem) lookupLayerLocked(ctx context.Context, parent *dentry, name string) (lookupLayer, error) {
childPath := fspath.Parse(name)
lookupLayer := lookupLayerNone
@@ -385,7 +393,9 @@ func (ll lookupLayer) existsInOverlay() bool {
// rp.Start().Impl().(*dentry)). It does not check that the returned directory
// is searchable by the provider of rp.
//
-// Preconditions: fs.renameMu must be locked. !rp.Done().
+// Preconditions:
+// * fs.renameMu must be locked.
+// * !rp.Done().
func (fs *filesystem) walkParentDirLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry, ds **[]*dentry) (*dentry, error) {
for !rp.Final() {
d.dirMu.Lock()
@@ -425,8 +435,9 @@ func (fs *filesystem) resolveLocked(ctx context.Context, rp *vfs.ResolvingPath,
// doCreateAt checks that creating a file at rp is permitted, then invokes
// create to do so.
//
-// Preconditions: !rp.Done(). For the final path component in rp,
-// !rp.ShouldFollowSymlink().
+// Preconditions:
+// * !rp.Done().
+// * For the final path component in rp, !rp.ShouldFollowSymlink().
func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir bool, create func(parent *dentry, name string, haveUpperWhiteout bool) error) error {
var ds *[]*dentry
fs.renameMu.RLock()
@@ -851,8 +862,9 @@ func (d *dentry) openCopiedUp(ctx context.Context, rp *vfs.ResolvingPath, opts *
return &fd.vfsfd, nil
}
-// Preconditions: parent.dirMu must be locked. parent does not already contain
-// a child named rp.Component().
+// Preconditions:
+// * parent.dirMu must be locked.
+// * parent does not already contain a child named rp.Component().
func (fs *filesystem) createAndOpenLocked(ctx context.Context, rp *vfs.ResolvingPath, parent *dentry, opts *vfs.OpenOptions, ds **[]*dentry) (*vfs.FileDescription, error) {
creds := rp.Credentials()
if err := parent.checkPermissions(creds, vfs.MayWrite); err != nil {
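
lookupLocked and lookupLayerLocked above resolve a name against the layer stack: the upper layer wins, a whiteout in the upper layer hides the lower layers (compare haveUpperWhiteout and collectWhiteoutsForRmdirLocked elsewhere in this change), and otherwise the lower layer is consulted. A toy model of that precedence, with maps standing in for the real per-layer lookups:

package main

import "fmt"

// lookup models overlay name resolution: upper entries shadow lower ones,
// and an upper-layer whiteout hides any lower-layer file.
func lookup(upper, lower map[string]string, whiteouts map[string]bool, name string) (string, bool) {
	if v, ok := upper[name]; ok {
		return v, true
	}
	if whiteouts[name] {
		return "", false // hidden by a whiteout; cf. lookupLayerNone
	}
	v, ok := lower[name]
	return v, ok
}

func main() {
	upper := map[string]string{"a": "upper-a"}
	lower := map[string]string{"a": "lower-a", "b": "lower-b", "c": "lower-c"}
	whiteouts := map[string]bool{"c": true}
	fmt.Println(lookup(upper, lower, whiteouts, "a")) // upper-a true
	fmt.Println(lookup(upper, lower, whiteouts, "b")) // lower-b true
	fmt.Println(lookup(upper, lower, whiteouts, "c")) // hidden: false
}
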
diff --git a/pkg/sentry/fsimpl/overlay/overlay.go b/pkg/sentry/fsimpl/overlay/overlay.go
index 75cc006bf..4b3dfbc01 100644
--- a/pkg/sentry/fsimpl/overlay/overlay.go
+++ b/pkg/sentry/fsimpl/overlay/overlay.go
@@ -482,7 +482,9 @@ func (d *dentry) checkDropLocked(ctx context.Context) {
// destroyLocked destroys the dentry.
//
-// Preconditions: d.fs.renameMu must be locked for writing. d.refs == 0.
+// Preconditions:
+// * d.fs.renameMu must be locked for writing.
+// * d.refs == 0.
func (d *dentry) destroyLocked(ctx context.Context) {
switch atomic.LoadInt64(&d.refs) {
case 0:
diff --git a/pkg/sentry/fsimpl/tmpfs/directory.go b/pkg/sentry/fsimpl/tmpfs/directory.go
index 78b4fc5be..070c75e68 100644
--- a/pkg/sentry/fsimpl/tmpfs/directory.go
+++ b/pkg/sentry/fsimpl/tmpfs/directory.go
@@ -57,8 +57,9 @@ func (fs *filesystem) newDirectory(kuid auth.KUID, kgid auth.KGID, mode linux.Fi
return dir
}
-// Preconditions: filesystem.mu must be locked for writing. dir must not
-// already contain a child with the given name.
+// Preconditions:
+// * filesystem.mu must be locked for writing.
+// * dir must not already contain a child with the given name.
func (dir *directory) insertChildLocked(child *dentry, name string) {
child.parent = &dir.dentry
child.name = name
diff --git a/pkg/sentry/fsimpl/tmpfs/filesystem.go b/pkg/sentry/fsimpl/tmpfs/filesystem.go
index b0ec177e6..7924a0911 100644
--- a/pkg/sentry/fsimpl/tmpfs/filesystem.go
+++ b/pkg/sentry/fsimpl/tmpfs/filesystem.go
@@ -39,7 +39,9 @@ func (fs *filesystem) Sync(ctx context.Context) error {
//
// stepLocked is loosely analogous to fs/namei.c:walk_component().
//
-// Preconditions: filesystem.mu must be locked. !rp.Done().
+// Preconditions:
+// * filesystem.mu must be locked.
+// * !rp.Done().
func stepLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry) (*dentry, error) {
dir, ok := d.inode.impl.(*directory)
if !ok {
@@ -97,7 +99,9 @@ afterSymlink:
// walkParentDirLocked is loosely analogous to Linux's
// fs/namei.c:path_parentat().
//
-// Preconditions: filesystem.mu must be locked. !rp.Done().
+// Preconditions:
+// * filesystem.mu must be locked.
+// * !rp.Done().
func walkParentDirLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry) (*directory, error) {
for !rp.Final() {
next, err := stepLocked(ctx, rp, d)
@@ -139,8 +143,9 @@ func resolveLocked(ctx context.Context, rp *vfs.ResolvingPath) (*dentry, error)
// doCreateAt is loosely analogous to a conjunction of Linux's
// fs/namei.c:filename_create() and done_path_create().
//
-// Preconditions: !rp.Done(). For the final path component in rp,
-// !rp.ShouldFollowSymlink().
+// Preconditions:
+// * !rp.Done().
+// * For the final path component in rp, !rp.ShouldFollowSymlink().
func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir bool, create func(parentDir *directory, name string) error) error {
fs.mu.Lock()
defer fs.mu.Unlock()
diff --git a/pkg/sentry/fsimpl/tmpfs/named_pipe.go b/pkg/sentry/fsimpl/tmpfs/named_pipe.go
index 739350cf0..5b0471ff4 100644
--- a/pkg/sentry/fsimpl/tmpfs/named_pipe.go
+++ b/pkg/sentry/fsimpl/tmpfs/named_pipe.go
@@ -28,8 +28,8 @@ type namedPipe struct {
}
// Preconditions:
-// * fs.mu must be locked.
-// * rp.Mount().CheckBeginWrite() has been called successfully.
+// * fs.mu must be locked.
+// * rp.Mount().CheckBeginWrite() has been called successfully.
func (fs *filesystem) newNamedPipe(kuid auth.KUID, kgid auth.KGID, mode linux.FileMode) *inode {
file := &namedPipe{pipe: pipe.NewVFSPipe(true /* isNamed */, pipe.DefaultPipeSize, usermem.PageSize)}
file.inode.init(file, fs, kuid, kgid, linux.S_IFIFO|mode)
diff --git a/pkg/sentry/fsimpl/tmpfs/tmpfs.go b/pkg/sentry/fsimpl/tmpfs/tmpfs.go
index de2af6d01..428f62aaa 100644
--- a/pkg/sentry/fsimpl/tmpfs/tmpfs.go
+++ b/pkg/sentry/fsimpl/tmpfs/tmpfs.go
@@ -340,8 +340,10 @@ func (i *inode) init(impl interface{}, fs *filesystem, kuid auth.KUID, kgid auth
// incLinksLocked increments i's link count.
//
-// Preconditions: filesystem.mu must be locked for writing. i.nlink != 0.
-// i.nlink < maxLinks.
+// Preconditions:
+// * filesystem.mu must be locked for writing.
+// * i.nlink != 0.
+// * i.nlink < maxLinks.
func (i *inode) incLinksLocked() {
if i.nlink == 0 {
panic("tmpfs.inode.incLinksLocked() called with no existing links")
@@ -355,7 +357,9 @@ func (i *inode) incLinksLocked() {
// decLinksLocked decrements i's link count. If the link count reaches 0, we
// remove a reference on i as well.
//
-// Preconditions: filesystem.mu must be locked for writing. i.nlink != 0.
+// Preconditions:
+// * filesystem.mu must be locked for writing.
+// * i.nlink != 0.
func (i *inode) decLinksLocked(ctx context.Context) {
if i.nlink == 0 {
panic("tmpfs.inode.decLinksLocked() called with no existing links")
@@ -594,8 +598,9 @@ func (i *inode) touchCMtime() {
i.mu.Unlock()
}
-// Preconditions: The caller has called vfs.Mount.CheckBeginWrite() and holds
-// inode.mu.
+// Preconditions:
+// * The caller has called vfs.Mount.CheckBeginWrite().
+// * inode.mu must be locked.
func (i *inode) touchCMtimeLocked() {
now := i.fs.clock.Now().Nanoseconds()
atomic.StoreInt64(&i.mtime, now)
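
The tmpfs hunks above enforce their nlink preconditions with explicit panics rather than returned errors: a violated precondition is a programming bug, not a runtime condition. A reduced sketch of that style, using a hypothetical inode type and an illustrative maxLinks cap (not tmpfs's actual values):

package main

import "fmt"

const maxLinks = 65535 // illustrative cap, standing in for tmpfs's maxLinks

type inode struct{ nlink uint32 }

// incLinksLocked mirrors the precondition style above: violations are
// programming errors, so they panic rather than return an error.
//
// Preconditions:
// * i.nlink != 0.
// * i.nlink < maxLinks.
func (i *inode) incLinksLocked() {
	if i.nlink == 0 {
		panic("incLinksLocked called with no existing links")
	}
	if i.nlink == maxLinks {
		panic("incLinksLocked called beyond max link count")
	}
	i.nlink++
}

func main() {
	i := &inode{nlink: 1}
	i.incLinksLocked()
	fmt.Println(i.nlink) // 2
}
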
diff --git a/pkg/sentry/kernel/auth/id_map_set.go b/pkg/sentry/kernel/auth/id_map_set.go
index 983a036c5..d8a05ce46 100644
--- a/pkg/sentry/kernel/auth/id_map_set.go
+++ b/pkg/sentry/kernel/auth/id_map_set.go
@@ -343,7 +343,9 @@ func (s *idMapSet) InsertWithoutMerging(gap idMapGapIterator, r idMapRange, val
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *idMapSet) InsertWithoutMergingUnchecked(gap idMapGapIterator, r idMapRange, val uint32) idMapIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := idMaptrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1097,12 +1099,10 @@ func (seg idMapIterator) End() uint32 {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+//   * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+//   * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg idMapIterator) SetRangeUnchecked(r idMapRange) {
seg.node.keys[seg.index] = r
}
@@ -1127,8 +1127,9 @@ func (seg idMapIterator) SetRange(r idMapRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg idMapIterator) SetStartUnchecked(start uint32) {
seg.node.keys[seg.index].Start = start
}
@@ -1150,8 +1151,9 @@ func (seg idMapIterator) SetStart(start uint32) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg idMapIterator) SetEndUnchecked(end uint32) {
seg.node.keys[seg.index].End = end
}
@@ -1571,9 +1573,11 @@ func (s *idMapSet) ExportSortedSlices() *idMapSegmentDataSlices {
// ImportSortedSlices initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *idMapSet) ImportSortedSlices(sds *idMapSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
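
The checked/unchecked split above is why these generated sets carry such precise precondition lists: the Unchecked variants skip validation entirely for speed, so the burden moves to the caller. A minimal sketch of the pattern, assuming hypothetical Range and segment types rather than gVisor's generated code:

package main

import "fmt"

// Range is a stand-in for the generated range types (e.g. idMapRange).
type Range struct{ Start, End uint32 }

func (r Range) Length() uint32 { return r.End - r.Start }

type segment struct{ r Range }

// setRangeUnchecked mutates the segment without validation.
//
// Preconditions:
// * r.Length() > 0.
// * prevEnd <= r.Start and r.End <= nextStart (no overlap with neighbors).
func (s *segment) setRangeUnchecked(r Range) {
	s.r = r
}

// setRange validates the documented preconditions, then calls the fast path.
func (s *segment) setRange(r Range, prevEnd, nextStart uint32) error {
	if r.Length() == 0 {
		return fmt.Errorf("empty range %+v", r)
	}
	if r.Start < prevEnd || r.End > nextStart {
		return fmt.Errorf("range %+v overlaps a neighboring segment", r)
	}
	s.setRangeUnchecked(r)
	return nil
}

func main() {
	var s segment
	if err := s.setRange(Range{Start: 10, End: 20}, 5, 30); err != nil {
		panic(err)
	}
	fmt.Println(s.r) // {10 20}
}
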
diff --git a/pkg/sentry/kernel/kernel.go b/pkg/sentry/kernel/kernel.go
index 1028d13c6..2e0175e36 100644
--- a/pkg/sentry/kernel/kernel.go
+++ b/pkg/sentry/kernel/kernel.go
@@ -1067,8 +1067,9 @@ func (k *Kernel) Start() error {
// pauseTimeLocked pauses all Timers and Timekeeper updates.
//
-// Preconditions: Any task goroutines running in k must be stopped. k.extMu
-// must be locked.
+// Preconditions:
+// * Any task goroutines running in k must be stopped.
+// * k.extMu must be locked.
func (k *Kernel) pauseTimeLocked(ctx context.Context) {
// k.cpuClockTicker may be nil since Kernel.SaveTo() may be called before
// Kernel.Start().
@@ -1111,8 +1112,9 @@ func (k *Kernel) pauseTimeLocked(ctx context.Context) {
// pauseTimeLocked has not been previously called, resumeTimeLocked has no
// effect.
//
-// Preconditions: Any task goroutines running in k must be stopped. k.extMu
-// must be locked.
+// Preconditions:
+// * Any task goroutines running in k must be stopped.
+// * k.extMu must be locked.
func (k *Kernel) resumeTimeLocked(ctx context.Context) {
if k.cpuClockTicker != nil {
k.cpuClockTicker.Resume()
diff --git a/pkg/sentry/kernel/ptrace.go b/pkg/sentry/kernel/ptrace.go
index 619b0cb7c..50df179c3 100644
--- a/pkg/sentry/kernel/ptrace.go
+++ b/pkg/sentry/kernel/ptrace.go
@@ -224,8 +224,9 @@ func (s *ptraceStop) Killable() bool {
// beginPtraceStopLocked does not signal t's tracer or wake it if it is
// waiting.
//
-// Preconditions: The TaskSet mutex must be locked. The caller must be running
-// on the task goroutine.
+// Preconditions:
+// * The TaskSet mutex must be locked.
+// * The caller must be running on the task goroutine.
func (t *Task) beginPtraceStopLocked() bool {
t.tg.signalHandlers.mu.Lock()
defer t.tg.signalHandlers.mu.Unlock()
@@ -270,8 +271,9 @@ func (t *Task) ptraceTrapLocked(code int32) {
// ptraceStop, temporarily preventing it from being removed by a concurrent
// Task.Kill, and returns true. Otherwise it returns false.
//
-// Preconditions: The TaskSet mutex must be locked. The caller must be running
-// on the task goroutine of t's tracer.
+// Preconditions:
+// * The TaskSet mutex must be locked.
+// * The caller must be running on the task goroutine of t's tracer.
func (t *Task) ptraceFreeze() bool {
t.tg.signalHandlers.mu.Lock()
defer t.tg.signalHandlers.mu.Unlock()
@@ -301,8 +303,9 @@ func (t *Task) ptraceUnfreeze() {
t.ptraceUnfreezeLocked()
}
-// Preconditions: t must be in a frozen ptraceStop. t's signal mutex must be
-// locked.
+// Preconditions:
+// * t must be in a frozen ptraceStop.
+// * t's signal mutex must be locked.
func (t *Task) ptraceUnfreezeLocked() {
// Do this even if the task has been killed to ensure a panic if t.stop is
// nil or not a ptraceStop.
@@ -497,8 +500,9 @@ func (t *Task) forgetTracerLocked() {
// ptraceSignalLocked is called after signal dequeueing to check if t should
// enter ptrace signal-delivery-stop.
//
-// Preconditions: The signal mutex must be locked. The caller must be running
-// on the task goroutine.
+// Preconditions:
+// * The signal mutex must be locked.
+// * The caller must be running on the task goroutine.
func (t *Task) ptraceSignalLocked(info *arch.SignalInfo) bool {
if linux.Signal(info.Signo) == linux.SIGKILL {
return false
@@ -828,8 +832,9 @@ func (t *Task) ptraceInterrupt(target *Task) error {
return nil
}
-// Preconditions: The TaskSet mutex must be locked for writing. t must have a
-// tracer.
+// Preconditions:
+// * The TaskSet mutex must be locked for writing.
+// * t must have a tracer.
func (t *Task) ptraceSetOptionsLocked(opts uintptr) error {
const valid = uintptr(linux.PTRACE_O_EXITKILL |
linux.PTRACE_O_TRACESYSGOOD |
diff --git a/pkg/sentry/kernel/rseq.go b/pkg/sentry/kernel/rseq.go
index 18416643b..2a9023fdf 100644
--- a/pkg/sentry/kernel/rseq.go
+++ b/pkg/sentry/kernel/rseq.go
@@ -173,8 +173,10 @@ func (t *Task) OldRSeqCPUAddr() usermem.Addr {
// SetOldRSeqCPUAddr replaces the address that old rseq will keep updated with
// t's CPU number.
//
-// Preconditions: t.RSeqAvailable() == true. The caller must be running on the
-// task goroutine. t's AddressSpace must be active.
+// Preconditions:
+// * t.RSeqAvailable() == true.
+// * The caller must be running on the task goroutine.
+// * t's AddressSpace must be active.
func (t *Task) SetOldRSeqCPUAddr(addr usermem.Addr) error {
t.oldRSeqCPUAddr = addr
@@ -189,8 +191,9 @@ func (t *Task) SetOldRSeqCPUAddr(addr usermem.Addr) error {
return nil
}
-// Preconditions: The caller must be running on the task goroutine. t's
-// AddressSpace must be active.
+// Preconditions:
+// * The caller must be running on the task goroutine.
+// * t's AddressSpace must be active.
func (t *Task) rseqUpdateCPU() error {
if t.rseqAddr == 0 && t.oldRSeqCPUAddr == 0 {
t.rseqCPU = -1
@@ -209,8 +212,9 @@ func (t *Task) rseqUpdateCPU() error {
return oerr
}
-// Preconditions: The caller must be running on the task goroutine. t's
-// AddressSpace must be active.
+// Preconditions:
+// * The caller must be running on the task goroutine.
+// * t's AddressSpace must be active.
func (t *Task) oldRSeqCopyOutCPU() error {
if t.oldRSeqCPUAddr == 0 {
return nil
@@ -222,8 +226,9 @@ func (t *Task) oldRSeqCopyOutCPU() error {
return err
}
-// Preconditions: The caller must be running on the task goroutine. t's
-// AddressSpace must be active.
+// Preconditions:
+// * The caller must be running on the task goroutine.
+// * t's AddressSpace must be active.
func (t *Task) rseqCopyOutCPU() error {
if t.rseqAddr == 0 {
return nil
@@ -240,8 +245,9 @@ func (t *Task) rseqCopyOutCPU() error {
return err
}
-// Preconditions: The caller must be running on the task goroutine. t's
-// AddressSpace must be active.
+// Preconditions:
+// * The caller must be running on the task goroutine.
+// * t's AddressSpace must be active.
func (t *Task) rseqClearCPU() error {
buf := t.CopyScratchBuffer(8)
// CPUIDStart and CPUID are the first two fields in linux.RSeq.
@@ -269,8 +275,9 @@ func (t *Task) rseqClearCPU() error {
//
// See kernel/rseq.c:rseq_ip_fixup for reference.
//
-// Preconditions: The caller must be running on the task goroutine. t's
-// AddressSpace must be active.
+// Preconditions:
+// * The caller must be running on the task goroutine.
+// * t's AddressSpace must be active.
func (t *Task) rseqAddrInterrupt() {
if t.rseqAddr == 0 {
return
diff --git a/pkg/sentry/kernel/task_exec.go b/pkg/sentry/kernel/task_exec.go
index 5e4fb3e3a..412d471d3 100644
--- a/pkg/sentry/kernel/task_exec.go
+++ b/pkg/sentry/kernel/task_exec.go
@@ -237,9 +237,10 @@ func (r *runSyscallAfterExecStop) execute(t *Task) taskRunState {
// promoteLocked makes t the leader of its thread group. If t is already the
// thread group leader, promoteLocked is a no-op.
//
-// Preconditions: All other tasks in t's thread group, including the existing
-// leader (if it is not t), have reached TaskExitZombie. The TaskSet mutex must
-// be locked for writing.
+// Preconditions:
+// * All other tasks in t's thread group, including the existing leader (if it
+// is not t), have reached TaskExitZombie.
+// * The TaskSet mutex must be locked for writing.
func (t *Task) promoteLocked() {
oldLeader := t.tg.leader
if t == oldLeader {
diff --git a/pkg/sentry/kernel/task_sched.go b/pkg/sentry/kernel/task_sched.go
index 09366b60c..52c55d13d 100644
--- a/pkg/sentry/kernel/task_sched.go
+++ b/pkg/sentry/kernel/task_sched.go
@@ -133,9 +133,10 @@ func (t *Task) accountTaskGoroutineEnter(state TaskGoroutineState) {
}
}
-// Preconditions: The caller must be running on the task goroutine, and leaving
-// a state indicated by a previous call to
-// t.accountTaskGoroutineEnter(state).
+// Preconditions:
+// * The caller must be running on the task goroutine.
+// * The caller must be leaving a state indicated by a previous call to
+// t.accountTaskGoroutineEnter(state).
func (t *Task) accountTaskGoroutineLeave(state TaskGoroutineState) {
if state != TaskGoroutineRunningApp {
// Task is unblocking/continuing.
@@ -191,8 +192,8 @@ func (tg *ThreadGroup) CPUStats() usage.CPUStats {
return tg.cpuStatsAtLocked(tg.leader.k.CPUClockNow())
}
-// Preconditions: As for TaskGoroutineSchedInfo.userTicksAt. The TaskSet mutex
-// must be locked.
+// Preconditions: Same as TaskGoroutineSchedInfo.userTicksAt, plus:
+// * The TaskSet mutex must be locked.
func (tg *ThreadGroup) cpuStatsAtLocked(now uint64) usage.CPUStats {
stats := tg.exitedCPUStats
// Account for live tasks.
diff --git a/pkg/sentry/kernel/task_signals.go b/pkg/sentry/kernel/task_signals.go
index d6a2040bc..feaa38596 100644
--- a/pkg/sentry/kernel/task_signals.go
+++ b/pkg/sentry/kernel/task_signals.go
@@ -319,8 +319,9 @@ func (t *Task) SignalReturn(rt bool) (*SyscallControl, error) {
// Sigtimedwait implements the semantics of sigtimedwait(2).
//
-// Preconditions: The caller must be running on the task goroutine. t.exitState
-// < TaskExitZombie.
+// Preconditions:
+// * The caller must be running on the task goroutine.
+// * t.exitState < TaskExitZombie.
func (t *Task) Sigtimedwait(set linux.SignalSet, timeout time.Duration) (*arch.SignalInfo, error) {
// set is the set of signals we're interested in; invert it to get the set
// of signals to block.
@@ -584,8 +585,9 @@ func (t *Task) SignalMask() linux.SignalSet {
// SetSignalMask sets t's signal mask.
//
-// Preconditions: SetSignalMask can only be called by the task goroutine.
-// t.exitState < TaskExitZombie.
+// Preconditions:
+// * The caller must be running on the task goroutine.
+// * t.exitState < TaskExitZombie.
func (t *Task) SetSignalMask(mask linux.SignalSet) {
// By precondition, t prevents t.tg from completing an execve and mutating
// t.tg.signalHandlers, so we can skip the TaskSet mutex.
@@ -631,7 +633,7 @@ func (t *Task) setSignalMaskLocked(mask linux.SignalSet) {
// SetSavedSignalMask sets the saved signal mask (see Task.savedSignalMask's
// comment).
//
-// Preconditions: SetSavedSignalMask can only be called by the task goroutine.
+// Preconditions: The caller must be running on the task goroutine.
func (t *Task) SetSavedSignalMask(mask linux.SignalSet) {
t.savedSignalMask = mask
t.haveSavedSignalMask = true
diff --git a/pkg/sentry/kernel/task_stop.go b/pkg/sentry/kernel/task_stop.go
index 296735d32..a35948a5f 100644
--- a/pkg/sentry/kernel/task_stop.go
+++ b/pkg/sentry/kernel/task_stop.go
@@ -99,8 +99,9 @@ type TaskStop interface {
// beginInternalStop indicates the start of an internal stop that applies to t.
//
-// Preconditions: The task must not already be in an internal stop (i.e. t.stop
-// == nil). The caller must be running on the task goroutine.
+// Preconditions:
+// * The caller must be running on the task goroutine.
+// * The task must not already be in an internal stop (i.e. t.stop == nil).
func (t *Task) beginInternalStop(s TaskStop) {
t.tg.pidns.owner.mu.RLock()
defer t.tg.pidns.owner.mu.RUnlock()
@@ -109,8 +110,8 @@ func (t *Task) beginInternalStop(s TaskStop) {
t.beginInternalStopLocked(s)
}
-// Preconditions: The signal mutex must be locked. All preconditions for
-// Task.beginInternalStop also apply.
+// Preconditions: Same as beginInternalStop, plus:
+// * The signal mutex must be locked.
func (t *Task) beginInternalStopLocked(s TaskStop) {
if t.stop != nil {
panic(fmt.Sprintf("Attempting to enter internal stop %#v when already in internal stop %#v", s, t.stop))
@@ -128,8 +129,9 @@ func (t *Task) beginInternalStopLocked(s TaskStop) {
// t.stop, which is why there is no endInternalStop that locks the signal mutex
// for you.
//
-// Preconditions: The signal mutex must be locked. The task must be in an
-// internal stop (i.e. t.stop != nil).
+// Preconditions:
+// * The signal mutex must be locked.
+// * The task must be in an internal stop (i.e. t.stop != nil).
func (t *Task) endInternalStopLocked() {
if t.stop == nil {
panic("Attempting to leave non-existent internal stop")
diff --git a/pkg/sentry/kernel/task_usermem.go b/pkg/sentry/kernel/task_usermem.go
index b02044ad2..4550b9f89 100644
--- a/pkg/sentry/kernel/task_usermem.go
+++ b/pkg/sentry/kernel/task_usermem.go
@@ -143,8 +143,9 @@ func (t *Task) CopyInVector(addr usermem.Addr, maxElemSize, maxTotalSize int) ([
// CopyOutIovecs converts src to an array of struct iovecs and copies it to the
// memory mapped at addr.
//
-// Preconditions: As for usermem.IO.CopyOut. The caller must be running on the
-// task goroutine. t's AddressSpace must be active.
+// Preconditions: Same as usermem.IO.CopyOut, plus:
+// * The caller must be running on the task goroutine.
+// * t's AddressSpace must be active.
func (t *Task) CopyOutIovecs(addr usermem.Addr, src usermem.AddrRangeSeq) error {
switch t.Arch().Width() {
case 8:
@@ -191,8 +192,9 @@ func (t *Task) CopyOutIovecs(addr usermem.Addr, src usermem.AddrRangeSeq) error
// combined length of all AddrRanges would otherwise exceed this amount, ranges
// beyond MAX_RW_COUNT are silently truncated.
//
-// Preconditions: As for usermem.IO.CopyIn. The caller must be running on the
-// task goroutine. t's AddressSpace must be active.
+// Preconditions: Same as usermem.IO.CopyIn, plus:
+// * The caller must be running on the task goroutine.
+// * t's AddressSpace must be active.
func (t *Task) CopyInIovecs(addr usermem.Addr, numIovecs int) (usermem.AddrRangeSeq, error) {
if numIovecs == 0 {
return usermem.AddrRangeSeq{}, nil
@@ -284,7 +286,7 @@ func (t *Task) SingleIOSequence(addr usermem.Addr, length int, opts usermem.IOOp
//
// IovecsIOSequence is analogous to Linux's lib/iov_iter.c:import_iovec().
//
-// Preconditions: As for Task.CopyInIovecs.
+// Preconditions: Same as Task.CopyInIovecs.
func (t *Task) IovecsIOSequence(addr usermem.Addr, iovcnt int, opts usermem.IOOpts) (usermem.IOSequence, error) {
if iovcnt < 0 || iovcnt > linux.UIO_MAXIOV {
return usermem.IOSequence{}, syserror.EINVAL
diff --git a/pkg/sentry/kernel/time/time.go b/pkg/sentry/kernel/time/time.go
index e959700f2..f61a8e164 100644
--- a/pkg/sentry/kernel/time/time.go
+++ b/pkg/sentry/kernel/time/time.go
@@ -616,8 +616,10 @@ func (t *Timer) Swap(s Setting) (Time, Setting) {
// Timer's Clock) at which the Setting was changed. Setting s.Enabled to true
// starts the timer, while setting s.Enabled to false stops it.
//
-// Preconditions: The Timer must not be paused. f cannot call any Timer methods
-// since it is called with the Timer mutex locked.
+// Preconditions:
+// * The Timer must not be paused.
+// * f cannot call any Timer methods since it is called with the Timer mutex
+// locked.
func (t *Timer) SwapAnd(s Setting, f func()) (Time, Setting) {
now := t.clock.Now()
t.mu.Lock()
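
The second SwapAnd precondition exists because sync.Mutex is not reentrant: f runs with the Timer mutex held, so any Timer method that re-acquires it would block forever. A toy illustration, with a hypothetical timer type standing in for kernel/time.Timer:

package main

import (
	"fmt"
	"sync"
)

type timer struct{ mu sync.Mutex }

// swapAnd invokes f while holding t.mu, mirroring Timer.SwapAnd above.
//
// Preconditions: f must not call any timer methods.
func (t *timer) swapAnd(f func()) {
	t.mu.Lock()
	defer t.mu.Unlock()
	f()
}

// get takes t.mu; calling it from within f would self-deadlock.
func (t *timer) get() {
	t.mu.Lock()
	defer t.mu.Unlock()
}

func main() {
	t := &timer{}
	t.swapAnd(func() {
		// Calling t.get() here would deadlock: sync.Mutex is not reentrant.
	})
	t.get() // Fine once swapAnd has released the mutex.
	fmt.Println("ok")
}
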
diff --git a/pkg/sentry/kernel/vdso.go b/pkg/sentry/kernel/vdso.go
index 290c32466..e44a139b3 100644
--- a/pkg/sentry/kernel/vdso.go
+++ b/pkg/sentry/kernel/vdso.go
@@ -73,13 +73,10 @@ type VDSOParamPage struct {
// NewVDSOParamPage returns a VDSOParamPage.
//
// Preconditions:
-//
// * fr is a single page allocated from mfp.MemoryFile(). VDSOParamPage does
// not take ownership of fr; it must remain allocated for the lifetime of the
// VDSOParamPage.
-//
// * VDSOParamPage must be the only writer to fr.
-//
// * mfp.MemoryFile().MapInternal(fr) must return a single safemem.Block.
func NewVDSOParamPage(mfp pgalloc.MemoryFileProvider, fr memmap.FileRange) *VDSOParamPage {
return &VDSOParamPage{mfp: mfp, fr: fr}
diff --git a/pkg/sentry/loader/elf.go b/pkg/sentry/loader/elf.go
index 20dd1cc21..d4610ec3b 100644
--- a/pkg/sentry/loader/elf.go
+++ b/pkg/sentry/loader/elf.go
@@ -402,8 +402,7 @@ type loadedELF struct {
//
// It does not load the ELF interpreter, or return any auxv entries.
//
-// Preconditions:
-// * f is an ELF file
+// Preconditions: f is an ELF file.
func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, info elfInfo, sharedLoadOffset usermem.Addr) (loadedELF, error) {
first := true
var start, end usermem.Addr
@@ -571,8 +570,8 @@ func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, in
// It does not load the ELF interpreter, or return any auxv entries.
//
// Preconditions:
-// * f is an ELF file
-// * f is the first ELF loaded into m
+// * f is an ELF file.
+// * f is the first ELF loaded into m.
func loadInitialELF(ctx context.Context, m *mm.MemoryManager, fs *cpuid.FeatureSet, f fsbridge.File) (loadedELF, arch.Context, error) {
info, err := parseHeader(ctx, f)
if err != nil {
@@ -609,8 +608,7 @@ func loadInitialELF(ctx context.Context, m *mm.MemoryManager, fs *cpuid.FeatureS
//
// It does not return any auxv entries.
//
-// Preconditions:
-// * f is an ELF file
+// Preconditions: f is an ELF file.
func loadInterpreterELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, initial loadedELF) (loadedELF, error) {
info, err := parseHeader(ctx, f)
if err != nil {
@@ -640,8 +638,7 @@ func loadInterpreterELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.Fil
// If loadELF returns ErrSwitchFile it should be called again with the returned
// path and argv.
//
-// Preconditions:
-// * args.File is an ELF file
+// Preconditions: args.File is an ELF file.
func loadELF(ctx context.Context, args LoadArgs) (loadedELF, arch.Context, error) {
bin, ac, err := loadInitialELF(ctx, args.MemoryManager, args.Features, args.File)
if err != nil {
diff --git a/pkg/sentry/loader/loader.go b/pkg/sentry/loader/loader.go
index 8d6802ea3..15c88aa7c 100644
--- a/pkg/sentry/loader/loader.go
+++ b/pkg/sentry/loader/loader.go
@@ -215,8 +215,8 @@ func loadExecutable(ctx context.Context, args LoadArgs) (loadedELF, arch.Context
// path and argv.
//
// Preconditions:
-// * The Task MemoryManager is empty.
-// * Load is called on the Task goroutine.
+// * The Task MemoryManager is empty.
+// * Load is called on the Task goroutine.
func Load(ctx context.Context, args LoadArgs, extraAuxv []arch.AuxEntry, vdso *VDSO) (abi.OS, arch.Context, string, *syserr.Error) {
// Load the executable itself.
loaded, ac, file, newArgv, err := loadExecutable(ctx, args)
diff --git a/pkg/sentry/memmap/mapping_set.go b/pkg/sentry/memmap/mapping_set.go
index d609c1ae0..457ed87f8 100644
--- a/pkg/sentry/memmap/mapping_set.go
+++ b/pkg/sentry/memmap/mapping_set.go
@@ -177,7 +177,7 @@ func subsetMapping(wholeRange, subsetRange MappableRange, ms MappingSpace, addr
// AddMapping adds the given mapping and returns the set of MappableRanges that
// previously had no mappings.
//
-// Preconditions: As for Mappable.AddMapping.
+// Preconditions: Same as Mappable.AddMapping.
func (s *MappingSet) AddMapping(ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) []MappableRange {
mr := MappableRange{offset, offset + uint64(ar.Length())}
var mapped []MappableRange
@@ -204,7 +204,7 @@ func (s *MappingSet) AddMapping(ms MappingSpace, ar usermem.AddrRange, offset ui
// RemoveMapping removes the given mapping and returns the set of
// MappableRanges that now have no mappings.
//
-// Preconditions: As for Mappable.RemoveMapping.
+// Preconditions: Same as Mappable.RemoveMapping.
func (s *MappingSet) RemoveMapping(ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) []MappableRange {
mr := MappableRange{offset, offset + uint64(ar.Length())}
var unmapped []MappableRange
diff --git a/pkg/sentry/memmap/mapping_set_impl.go b/pkg/sentry/memmap/mapping_set_impl.go
index cb4281950..eda4579e8 100644
--- a/pkg/sentry/memmap/mapping_set_impl.go
+++ b/pkg/sentry/memmap/mapping_set_impl.go
@@ -343,7 +343,9 @@ func (s *MappingSet) InsertWithoutMerging(gap MappingGapIterator, r MappableRang
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *MappingSet) InsertWithoutMergingUnchecked(gap MappingGapIterator, r MappableRange, val MappingsOfRange) MappingIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := MappingtrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1097,12 +1099,10 @@ func (seg MappingIterator) End() uint64 {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+//   * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+//   * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg MappingIterator) SetRangeUnchecked(r MappableRange) {
seg.node.keys[seg.index] = r
}
@@ -1127,8 +1127,9 @@ func (seg MappingIterator) SetRange(r MappableRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg MappingIterator) SetStartUnchecked(start uint64) {
seg.node.keys[seg.index].Start = start
}
@@ -1150,8 +1151,9 @@ func (seg MappingIterator) SetStart(start uint64) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg MappingIterator) SetEndUnchecked(end uint64) {
seg.node.keys[seg.index].End = end
}
@@ -1571,9 +1573,11 @@ func (s *MappingSet) ExportSortedSlices() *MappingSegmentDataSlices {
// ImportSortedSlices initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *MappingSet) ImportSortedSlices(sds *MappingSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
diff --git a/pkg/sentry/memmap/memmap.go b/pkg/sentry/memmap/memmap.go
index 65d83096f..a44fa2b95 100644
--- a/pkg/sentry/memmap/memmap.go
+++ b/pkg/sentry/memmap/memmap.go
@@ -28,9 +28,9 @@ import (
//
// See mm/mm.go for Mappable's place in the lock order.
//
-// Preconditions: For all Mappable methods, usermem.AddrRanges and
-// MappableRanges must be non-empty (Length() != 0), and usermem.Addrs and
-// Mappable offsets must be page-aligned.
+// All Mappable methods have the following preconditions:
+// * usermem.AddrRanges and MappableRanges must be non-empty (Length() != 0).
+// * usermem.Addrs and Mappable offsets must be page-aligned.
type Mappable interface {
// AddMapping notifies the Mappable of a mapping from addresses ar in ms to
// offsets [offset, offset+ar.Length()) in this Mappable.
@@ -48,8 +48,10 @@ type Mappable interface {
// addresses ar in ms to offsets [offset, offset+ar.Length()) in this
// Mappable.
//
- // Preconditions: offset+ar.Length() does not overflow. The removed mapping
- // must exist. writable must match the corresponding call to AddMapping.
+ // Preconditions:
+ // * offset+ar.Length() does not overflow.
+	// * The removed mapping must exist.
+	// * writable must match the corresponding call to AddMapping.
RemoveMapping(ctx context.Context, ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool)
// CopyMapping notifies the Mappable of an attempt to copy a mapping in ms
@@ -60,9 +62,10 @@ type Mappable interface {
// CopyMapping is only called when a mapping is copied within a given
// MappingSpace; it is analogous to Linux's vm_operations_struct::mremap.
//
- // Preconditions: offset+srcAR.Length() and offset+dstAR.Length() do not
- // overflow. The mapping at srcAR must exist. writable must match the
- // corresponding call to AddMapping.
+ // Preconditions:
+ // * offset+srcAR.Length() and offset+dstAR.Length() do not overflow.
+	// * The mapping at srcAR must exist.
+	// * writable must match the corresponding call to AddMapping.
CopyMapping(ctx context.Context, ms MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error
// Translate returns the Mappable's current mappings for at least the range
@@ -77,11 +80,14 @@ type Mappable interface {
// reference is held on all pages in a File that may be the result
// of a valid Translation.
//
- // Preconditions: required.Length() > 0. optional.IsSupersetOf(required).
- // required and optional must be page-aligned. The caller must have
- // established a mapping for all of the queried offsets via a previous call
- // to AddMapping. The caller is responsible for ensuring that calls to
- // Translate synchronize with invalidation.
+ // Preconditions:
+ // * required.Length() > 0.
+ // * optional.IsSupersetOf(required).
+ // * required and optional must be page-aligned.
+ // * The caller must have established a mapping for all of the queried
+ // offsets via a previous call to AddMapping.
+ // * The caller is responsible for ensuring that calls to Translate
+ // synchronize with invalidation.
//
// Postconditions: See CheckTranslateResult.
Translate(ctx context.Context, required, optional MappableRange, at usermem.AccessType) ([]Translation, error)
@@ -118,7 +124,7 @@ func (t Translation) FileRange() FileRange {
// CheckTranslateResult returns an error if (ts, terr) does not satisfy all
// postconditions for Mappable.Translate(required, optional, at).
//
-// Preconditions: As for Mappable.Translate.
+// Preconditions: Same as Mappable.Translate.
func CheckTranslateResult(required, optional MappableRange, at usermem.AccessType, ts []Translation, terr error) error {
// Verify that the inputs to Mappable.Translate were valid.
if !required.WellFormed() || required.Length() <= 0 {
@@ -214,7 +220,9 @@ type MappingSpace interface {
// Invalidate must not take any locks preceding mm.MemoryManager.activeMu
// in the lock order.
//
- // Preconditions: ar.Length() != 0. ar must be page-aligned.
+ // Preconditions:
+ // * ar.Length() != 0.
+ // * ar must be page-aligned.
Invalidate(ar usermem.AddrRange, opts InvalidateOpts)
}
@@ -375,16 +383,20 @@ type File interface {
// IncRef increments the reference count on all pages in fr.
//
- // Preconditions: fr.Start and fr.End must be page-aligned. fr.Length() >
- // 0. At least one reference must be held on all pages in fr. (The File
- // interface does not provide a way to acquire an initial reference;
- // implementors may define mechanisms for doing so.)
+ // Preconditions:
+ // * fr.Start and fr.End must be page-aligned.
+ // * fr.Length() > 0.
+ // * At least one reference must be held on all pages in fr. (The File
+ // interface does not provide a way to acquire an initial reference;
+ // implementors may define mechanisms for doing so.)
IncRef(fr FileRange)
// DecRef decrements the reference count on all pages in fr.
//
- // Preconditions: fr.Start and fr.End must be page-aligned. fr.Length() >
- // 0. At least one reference must be held on all pages in fr.
+ // Preconditions:
+ // * fr.Start and fr.End must be page-aligned.
+ // * fr.Length() > 0.
+ // * At least one reference must be held on all pages in fr.
DecRef(fr FileRange)
// MapInternal returns a mapping of the given file offsets in the invoking
@@ -392,8 +404,9 @@ type File interface {
//
// Note that fr.Start and fr.End need not be page-aligned.
//
- // Preconditions: fr.Length() > 0. At least one reference must be held on
- // all pages in fr.
+ // Preconditions:
+ // * fr.Length() > 0.
+ // * At least one reference must be held on all pages in fr.
//
// Postconditions: The returned mapping is valid as long as at least one
// reference is held on the mapped pages.
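
A caller-side sketch of the Translate preconditions enumerated above (non-empty required range, optional a superset of it, both page-aligned). MappableRange and pageSize here are simplified stand-ins for the real memmap types:

package main

import "fmt"

const pageSize = 4096

type MappableRange struct{ Start, End uint64 }

func (r MappableRange) Length() uint64 { return r.End - r.Start }

func (r MappableRange) IsSupersetOf(o MappableRange) bool {
	return r.Start <= o.Start && o.End <= r.End
}

func (r MappableRange) isPageAligned() bool {
	return r.Start%pageSize == 0 && r.End%pageSize == 0
}

// checkTranslateArgs re-states the documented preconditions as code.
func checkTranslateArgs(required, optional MappableRange) error {
	switch {
	case required.Length() == 0:
		return fmt.Errorf("required %+v is empty", required)
	case !optional.IsSupersetOf(required):
		return fmt.Errorf("optional %+v does not contain required %+v", optional, required)
	case !required.isPageAligned() || !optional.isPageAligned():
		return fmt.Errorf("ranges must be page-aligned")
	}
	return nil
}

func main() {
	required := MappableRange{Start: 4096, End: 8192}
	optional := MappableRange{Start: 0, End: 16384}
	fmt.Println(checkTranslateArgs(required, optional)) // <nil>
}
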
diff --git a/pkg/sentry/mm/address_space.go b/pkg/sentry/mm/address_space.go
index 5c667117c..a93e76c75 100644
--- a/pkg/sentry/mm/address_space.go
+++ b/pkg/sentry/mm/address_space.go
@@ -166,8 +166,12 @@ func (mm *MemoryManager) Deactivate() {
// mapASLocked maps addresses in ar into mm.as. If precommit is true, mappings
// for all addresses in ar should be precommitted.
//
-// Preconditions: mm.activeMu must be locked. mm.as != nil. ar.Length() != 0.
-// ar must be page-aligned. pseg == mm.pmas.LowerBoundSegment(ar.Start).
+// Preconditions:
+// * mm.activeMu must be locked.
+// * mm.as != nil.
+// * ar.Length() != 0.
+// * ar must be page-aligned.
+// * pseg == mm.pmas.LowerBoundSegment(ar.Start).
func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar usermem.AddrRange, precommit bool) error {
// By default, map entire pmas at a time, under the assumption that there
// is no cost to mapping more of a pma than necessary.
diff --git a/pkg/sentry/mm/file_refcount_set.go b/pkg/sentry/mm/file_refcount_set.go
index b47c74391..1d956b4b4 100644
--- a/pkg/sentry/mm/file_refcount_set.go
+++ b/pkg/sentry/mm/file_refcount_set.go
@@ -347,7 +347,9 @@ func (s *fileRefcountSet) InsertWithoutMerging(gap fileRefcountGapIterator, r __
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *fileRefcountSet) InsertWithoutMergingUnchecked(gap fileRefcountGapIterator, r __generics_imported0.FileRange, val int32) fileRefcountIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := fileRefcounttrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1101,12 +1103,10 @@ func (seg fileRefcountIterator) End() uint64 {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+//   * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+//   * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg fileRefcountIterator) SetRangeUnchecked(r __generics_imported0.FileRange) {
seg.node.keys[seg.index] = r
}
@@ -1131,8 +1131,9 @@ func (seg fileRefcountIterator) SetRange(r __generics_imported0.FileRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg fileRefcountIterator) SetStartUnchecked(start uint64) {
seg.node.keys[seg.index].Start = start
}
@@ -1154,8 +1155,9 @@ func (seg fileRefcountIterator) SetStart(start uint64) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg fileRefcountIterator) SetEndUnchecked(end uint64) {
seg.node.keys[seg.index].End = end
}
@@ -1575,9 +1577,11 @@ func (s *fileRefcountSet) ExportSortedSlices() *fileRefcountSegmentDataSlices {
// ImportSortedSlices initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *fileRefcountSet) ImportSortedSlices(sds *fileRefcountSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
diff --git a/pkg/sentry/mm/io.go b/pkg/sentry/mm/io.go
index fa776f9c6..a8ac48080 100644
--- a/pkg/sentry/mm/io.go
+++ b/pkg/sentry/mm/io.go
@@ -441,7 +441,10 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts
// handleASIOFault handles a page fault at address addr for an AddressSpaceIO
// operation spanning ioar.
//
-// Preconditions: mm.as != nil. ioar.Length() != 0. ioar.Contains(addr).
+// Preconditions:
+// * mm.as != nil.
+// * ioar.Length() != 0.
+// * ioar.Contains(addr).
func (mm *MemoryManager) handleASIOFault(ctx context.Context, addr usermem.Addr, ioar usermem.AddrRange, at usermem.AccessType) error {
// Try to map all remaining pages in the I/O operation. This RoundUp can't
// overflow because otherwise it would have been caught by CheckIORange.
@@ -629,7 +632,9 @@ func (mm *MemoryManager) withVecInternalMappings(ctx context.Context, ars userme
// at most address end on AddrRange arsit.Head(). It is used in vector I/O paths to
// truncate usermem.AddrRangeSeq when errors occur.
//
-// Preconditions: !arsit.IsEmpty(). end <= arsit.Head().End.
+// Preconditions:
+// * !arsit.IsEmpty().
+// * end <= arsit.Head().End.
func truncatedAddrRangeSeq(ars, arsit usermem.AddrRangeSeq, end usermem.Addr) usermem.AddrRangeSeq {
ar := arsit.Head()
if end <= ar.Start {
diff --git a/pkg/sentry/mm/pma.go b/pkg/sentry/mm/pma.go
index 930ec895f..30facebf7 100644
--- a/pkg/sentry/mm/pma.go
+++ b/pkg/sentry/mm/pma.go
@@ -31,7 +31,9 @@ import (
// iterator to the pma containing ar.Start. Otherwise it returns a terminal
// iterator.
//
-// Preconditions: mm.activeMu must be locked. ar.Length() != 0.
+// Preconditions:
+// * mm.activeMu must be locked.
+// * ar.Length() != 0.
func (mm *MemoryManager) existingPMAsLocked(ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool, needInternalMappings bool) pmaIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 {
@@ -89,10 +91,13 @@ func (mm *MemoryManager) existingVecPMAsLocked(ars usermem.AddrRangeSeq, at user
//
// - An error that is non-nil if pmas exist for only a subset of ar.
//
-// Preconditions: mm.mappingMu must be locked. mm.activeMu must be locked for
-// writing. ar.Length() != 0. vseg.Range().Contains(ar.Start). vmas must exist
-// for all addresses in ar, and support accesses of type at (i.e. permission
-// checks must have been performed against vmas).
+// Preconditions:
+// * mm.mappingMu must be locked.
+// * mm.activeMu must be locked for writing.
+// * ar.Length() != 0.
+// * vseg.Range().Contains(ar.Start).
+// * vmas must exist for all addresses in ar, and support accesses of type at
+// (i.e. permission checks must have been performed against vmas).
func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, at usermem.AccessType) (pmaIterator, pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 {
@@ -135,9 +140,11 @@ func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar
// exist. If this is not equal to ars, it returns a non-nil error explaining
// why.
//
-// Preconditions: mm.mappingMu must be locked. mm.activeMu must be locked for
-// writing. vmas must exist for all addresses in ars, and support accesses of
-// type at (i.e. permission checks must have been performed against vmas).
+// Preconditions:
+// * mm.mappingMu must be locked.
+// * mm.activeMu must be locked for writing.
+// * vmas must exist for all addresses in ars, and support accesses of type at
+// (i.e. permission checks must have been performed against vmas).
func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType) (usermem.AddrRangeSeq, error) {
for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
ar := arsit.Head()
@@ -518,8 +525,10 @@ func privateAligned(ar usermem.AddrRange) usermem.AddrRange {
// the memory it maps, isPMACopyOnWriteLocked will take ownership of the memory
// and update the pma to indicate that it does not require copy-on-write.
//
-// Preconditions: vseg.Range().IsSupersetOf(pseg.Range()). mm.mappingMu must be
-// locked. mm.activeMu must be locked for writing.
+// Preconditions:
+// * vseg.Range().IsSupersetOf(pseg.Range()).
+// * mm.mappingMu must be locked.
+// * mm.activeMu must be locked for writing.
func (mm *MemoryManager) isPMACopyOnWriteLocked(vseg vmaIterator, pseg pmaIterator) bool {
pma := pseg.ValuePtr()
if !pma.needCOW {
@@ -568,8 +577,10 @@ func (mm *MemoryManager) Invalidate(ar usermem.AddrRange, opts memmap.Invalidate
// invalidateLocked removes pmas and AddressSpace mappings of those pmas for
// addresses in ar.
//
-// Preconditions: mm.activeMu must be locked for writing. ar.Length() != 0. ar
-// must be page-aligned.
+// Preconditions:
+// * mm.activeMu must be locked for writing.
+// * ar.Length() != 0.
+// * ar must be page-aligned.
func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivate, invalidateShared bool) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() {
@@ -613,7 +624,9 @@ func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivat
// most I/O. It should only be used in contexts that would use get_user_pages()
// in the Linux kernel.
//
-// Preconditions: ar.Length() != 0. ar must be page-aligned.
+// Preconditions:
+// * ar.Length() != 0.
+// * ar must be page-aligned.
func (mm *MemoryManager) Pin(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool) ([]PinnedRange, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() {
@@ -693,9 +706,13 @@ func Unpin(prs []PinnedRange) {
// movePMAsLocked moves all pmas in oldAR to newAR.
//
-// Preconditions: mm.activeMu must be locked for writing. oldAR.Length() != 0.
-// oldAR.Length() <= newAR.Length(). !oldAR.Overlaps(newAR).
-// mm.pmas.IsEmptyRange(newAR). oldAR and newAR must be page-aligned.
+// Preconditions:
+// * mm.activeMu must be locked for writing.
+// * oldAR.Length() != 0.
+// * oldAR.Length() <= newAR.Length().
+// * !oldAR.Overlaps(newAR).
+// * mm.pmas.IsEmptyRange(newAR).
+// * oldAR and newAR must be page-aligned.
func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
if checkInvariants {
if !oldAR.WellFormed() || oldAR.Length() <= 0 || !oldAR.IsPageAligned() {
@@ -751,9 +768,11 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) {
// - An error that is non-nil if internal mappings exist for only a subset of
// ar.
//
-// Preconditions: mm.activeMu must be locked for writing.
-// pseg.Range().Contains(ar.Start). pmas must exist for all addresses in ar.
-// ar.Length() != 0.
+// Preconditions:
+// * mm.activeMu must be locked for writing.
+// * pseg.Range().Contains(ar.Start).
+// * pmas must exist for all addresses in ar.
+// * ar.Length() != 0.
//
// Postconditions: getPMAInternalMappingsLocked does not invalidate iterators
// into mm.pmas.
@@ -783,8 +802,9 @@ func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar userm
// internal mappings exist. If this is not equal to ars, it returns a non-nil
// error explaining why.
//
-// Preconditions: mm.activeMu must be locked for writing. pmas must exist for
-// all addresses in ar.
+// Preconditions:
+// * mm.activeMu must be locked for writing.
+// * pmas must exist for all addresses in ar.
//
// Postconditions: getVecPMAInternalMappingsLocked does not invalidate iterators
// into mm.pmas.
@@ -803,9 +823,12 @@ func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars usermem.AddrRangeSe
// internalMappingsLocked returns internal mappings for addresses in ar.
//
-// Preconditions: mm.activeMu must be locked. Internal mappings must have been
-// previously established for all addresses in ar. ar.Length() != 0.
-// pseg.Range().Contains(ar.Start).
+// Preconditions:
+// * mm.activeMu must be locked.
+// * Internal mappings must have been previously established for all addresses
+// in ar.
+// * ar.Length() != 0.
+// * pseg.Range().Contains(ar.Start).
func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar usermem.AddrRange) safemem.BlockSeq {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 {
@@ -839,8 +862,10 @@ func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar usermem.Add
// vecInternalMappingsLocked returns internal mappings for addresses in ars.
//
-// Preconditions: mm.activeMu must be locked. Internal mappings must have been
-// previously established for all addresses in ars.
+// Preconditions:
+// * mm.activeMu must be locked.
+// * Internal mappings must have been previously established for all addresses
+// in ars.
func (mm *MemoryManager) vecInternalMappingsLocked(ars usermem.AddrRangeSeq) safemem.BlockSeq {
var ims []safemem.Block
for ; !ars.IsEmpty(); ars = ars.Tail() {
@@ -969,7 +994,9 @@ func (pmaSetFunctions) Split(ar usermem.AddrRange, p pma, split usermem.Addr) (p
// findOrSeekPrevUpperBoundPMA returns mm.pmas.UpperBoundSegment(addr), but may do
// so by scanning linearly backward from pgap.
//
-// Preconditions: mm.activeMu must be locked. addr <= pgap.Start().
+// Preconditions:
+// * mm.activeMu must be locked.
+// * addr <= pgap.Start().
func (mm *MemoryManager) findOrSeekPrevUpperBoundPMA(addr usermem.Addr, pgap pmaGapIterator) pmaIterator {
if checkInvariants {
if !pgap.Ok() {
@@ -1015,7 +1042,9 @@ func (pseg pmaIterator) fileRange() memmap.FileRange {
return pseg.fileRangeOf(pseg.Range())
}
-// Preconditions: pseg.Range().IsSupersetOf(ar). ar.Length != 0.
+// Preconditions:
+// * pseg.Range().IsSupersetOf(ar).
+// * ar.Length() != 0.
func (pseg pmaIterator) fileRangeOf(ar usermem.AddrRange) memmap.FileRange {
if checkInvariants {
if !pseg.Ok() {
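
Throughout pkg/sentry/mm, each documented precondition is mirrored by a guard under the checkInvariants flag, so debug builds panic instead of silently corrupting state. A self-contained miniature of that pattern (AddrRange and invalidate are illustrative, not the real mm types):

package main

import "fmt"

// checkInvariants plays the role of mm's debug constant in this sketch.
const checkInvariants = true

type AddrRange struct{ Start, End uint64 }

func (ar AddrRange) WellFormed() bool { return ar.Start <= ar.End }
func (ar AddrRange) Length() uint64   { return ar.End - ar.Start }

// invalidate drops state for ar.
//
// Preconditions: ar.Length() != 0.
func invalidate(ar AddrRange) {
	if checkInvariants {
		if !ar.WellFormed() || ar.Length() == 0 {
			panic(fmt.Sprintf("invalid ar %+v", ar))
		}
	}
	// Real invalidation work would happen here.
}

func main() {
	invalidate(AddrRange{Start: 0x1000, End: 0x2000})
	fmt.Println("ok")
}
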
diff --git a/pkg/sentry/mm/pma_set.go b/pkg/sentry/mm/pma_set.go
index d0cc1f9d3..09fe6b05f 100644
--- a/pkg/sentry/mm/pma_set.go
+++ b/pkg/sentry/mm/pma_set.go
@@ -347,7 +347,9 @@ func (s *pmaSet) InsertWithoutMerging(gap pmaGapIterator, r __generics_imported0
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *pmaSet) InsertWithoutMergingUnchecked(gap pmaGapIterator, r __generics_imported0.AddrRange, val pma) pmaIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := pmatrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1101,12 +1103,10 @@ func (seg pmaIterator) End() __generics_imported0.Addr {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+//   * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+//   * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg pmaIterator) SetRangeUnchecked(r __generics_imported0.AddrRange) {
seg.node.keys[seg.index] = r
}
@@ -1131,8 +1131,9 @@ func (seg pmaIterator) SetRange(r __generics_imported0.AddrRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg pmaIterator) SetStartUnchecked(start __generics_imported0.Addr) {
seg.node.keys[seg.index].Start = start
}
@@ -1154,8 +1155,9 @@ func (seg pmaIterator) SetStart(start __generics_imported0.Addr) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg pmaIterator) SetEndUnchecked(end __generics_imported0.Addr) {
seg.node.keys[seg.index].End = end
}
@@ -1575,9 +1577,11 @@ func (s *pmaSet) ExportSortedSlices() *pmaSegmentDataSlices {
// ImportSortedSlices initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *pmaSet) ImportSortedSlices(sds *pmaSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
index e74d4e1c1..4c9a575e7 100644
--- a/pkg/sentry/mm/syscalls.go
+++ b/pkg/sentry/mm/syscalls.go
@@ -166,7 +166,9 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (userme
// populateVMA obtains pmas for addresses in ar in the given vma, and maps them
// into mm.as if it is active.
//
-// Preconditions: mm.mappingMu must be locked. vseg.Range().IsSupersetOf(ar).
+// Preconditions:
+// * mm.mappingMu must be locked.
+// * vseg.Range().IsSupersetOf(ar).
func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, precommit bool) {
if !vseg.ValuePtr().effectivePerms.Any() {
// Linux doesn't populate inaccessible pages. See
@@ -208,8 +210,9 @@ func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar u
// preferable to populateVMA since it unlocks mm.mappingMu before performing
// expensive operations that don't require it to be locked.
//
-// Preconditions: mm.mappingMu must be locked for writing.
-// vseg.Range().IsSupersetOf(ar).
+// Preconditions:
+// * mm.mappingMu must be locked for writing.
+// * vseg.Range().IsSupersetOf(ar).
//
// Postconditions: mm.mappingMu will be unlocked.
func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, precommit bool) {
diff --git a/pkg/sentry/mm/vma.go b/pkg/sentry/mm/vma.go
index c4e1989ed..f769d8294 100644
--- a/pkg/sentry/mm/vma.go
+++ b/pkg/sentry/mm/vma.go
@@ -27,8 +27,9 @@ import (
"gvisor.dev/gvisor/pkg/usermem"
)
-// Preconditions: mm.mappingMu must be locked for writing. opts must be valid
-// as defined by the checks in MMap.
+// Preconditions:
+// * mm.mappingMu must be locked for writing.
+// * opts must be valid as defined by the checks in MMap.
func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOpts) (vmaIterator, usermem.AddrRange, error) {
if opts.MaxPerms != opts.MaxPerms.Effective() {
panic(fmt.Sprintf("Non-effective MaxPerms %s cannot be enforced", opts.MaxPerms))
@@ -260,8 +261,9 @@ func (mm *MemoryManager) mlockedBytesRangeLocked(ar usermem.AddrRange) uint64 {
//
// - An error that is non-nil if vmas exist for only a subset of ar.
//
-// Preconditions: mm.mappingMu must be locked for reading; it may be
-// temporarily unlocked. ar.Length() != 0.
+// Preconditions:
+// * mm.mappingMu must be locked for reading; it may be temporarily unlocked.
+// * ar.Length() != 0.
func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool) (vmaIterator, vmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 {
@@ -342,8 +344,10 @@ const guardBytes = 256 * usermem.PageSize
// unmapLocked unmaps all addresses in ar and returns the resulting gap in
// mm.vmas.
//
-// Preconditions: mm.mappingMu must be locked for writing. ar.Length() != 0.
-// ar must be page-aligned.
+// Preconditions:
+// * mm.mappingMu must be locked for writing.
+// * ar.Length() != 0.
+// * ar must be page-aligned.
func (mm *MemoryManager) unmapLocked(ctx context.Context, ar usermem.AddrRange) vmaGapIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() {
@@ -361,8 +365,10 @@ func (mm *MemoryManager) unmapLocked(ctx context.Context, ar usermem.AddrRange)
// gap in mm.vmas. It does not remove pmas or AddressSpace mappings; clients
// must do so before calling removeVMAsLocked.
//
-// Preconditions: mm.mappingMu must be locked for writing. ar.Length() != 0. ar
-// must be page-aligned.
+// Preconditions:
+// * mm.mappingMu must be locked for writing.
+// * ar.Length() != 0.
+// * ar must be page-aligned.
func (mm *MemoryManager) removeVMAsLocked(ctx context.Context, ar usermem.AddrRange) vmaGapIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() {
@@ -467,7 +473,9 @@ func (vmaSetFunctions) Split(ar usermem.AddrRange, v vma, split usermem.Addr) (v
return v, v2
}
-// Preconditions: vseg.ValuePtr().mappable != nil. vseg.Range().Contains(addr).
+// Preconditions:
+// * vseg.ValuePtr().mappable != nil.
+// * vseg.Range().Contains(addr).
func (vseg vmaIterator) mappableOffsetAt(addr usermem.Addr) uint64 {
if checkInvariants {
if !vseg.Ok() {
@@ -491,8 +499,10 @@ func (vseg vmaIterator) mappableRange() memmap.MappableRange {
return vseg.mappableRangeOf(vseg.Range())
}
-// Preconditions: vseg.ValuePtr().mappable != nil.
-// vseg.Range().IsSupersetOf(ar). ar.Length() != 0.
+// Preconditions:
+// * vseg.ValuePtr().mappable != nil.
+// * vseg.Range().IsSupersetOf(ar).
+// * ar.Length() != 0.
func (vseg vmaIterator) mappableRangeOf(ar usermem.AddrRange) memmap.MappableRange {
if checkInvariants {
if !vseg.Ok() {
@@ -514,8 +524,10 @@ func (vseg vmaIterator) mappableRangeOf(ar usermem.AddrRange) memmap.MappableRan
return memmap.MappableRange{vma.off + uint64(ar.Start-vstart), vma.off + uint64(ar.End-vstart)}
}
-// Preconditions: vseg.ValuePtr().mappable != nil.
-// vseg.mappableRange().IsSupersetOf(mr). mr.Length() != 0.
+// Preconditions:
+// * vseg.ValuePtr().mappable != nil.
+// * vseg.mappableRange().IsSupersetOf(mr).
+// * mr.Length() != 0.
func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange {
if checkInvariants {
if !vseg.Ok() {
@@ -540,7 +552,9 @@ func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange {
// seekNextLowerBound returns mm.vmas.LowerBoundSegment(addr), but does so by
// scanning linearly forward from vseg.
//
-// Preconditions: mm.mappingMu must be locked. addr >= vseg.Start().
+// Preconditions:
+// * mm.mappingMu must be locked.
+// * addr >= vseg.Start().
func (vseg vmaIterator) seekNextLowerBound(addr usermem.Addr) vmaIterator {
if checkInvariants {
if !vseg.Ok() {
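The functions in this file pair their bulleted preconditions with a checkInvariants block that panics when a caller violates them. A minimal sketch of that pairing, with a toy range type standing in for usermem.AddrRange and checkInvariants reduced to a constant:

package main

import "fmt"

// checkInvariants stands in for gVisor's build-time flag; a constant here
// so the sketch compiles on its own.
const checkInvariants = true

type addrRange struct{ Start, End uintptr }

func (r addrRange) WellFormed() bool    { return r.Start <= r.End }
func (r addrRange) Length() uintptr     { return r.End - r.Start }
func (r addrRange) IsPageAligned() bool { return r.Start%4096 == 0 && r.End%4096 == 0 }

// unmap documents its preconditions as bullets and enforces them when
// checkInvariants is set, in the same style as unmapLocked above.
//
// Preconditions:
// * ar.Length() != 0.
// * ar must be page-aligned.
func unmap(ar addrRange) {
	if checkInvariants {
		if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
			panic(fmt.Sprintf("invalid unmap range: %+v", ar))
		}
	}
	// ... actual unmapping elided ...
}

func main() {
	unmap(addrRange{Start: 0x1000, End: 0x3000})
	fmt.Println("ok")
}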
diff --git a/pkg/sentry/mm/vma_set.go b/pkg/sentry/mm/vma_set.go
index e515ef105..3b32d3cb5 100644
--- a/pkg/sentry/mm/vma_set.go
+++ b/pkg/sentry/mm/vma_set.go
@@ -347,7 +347,9 @@ func (s *vmaSet) InsertWithoutMerging(gap vmaGapIterator, r __generics_imported0
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *vmaSet) InsertWithoutMergingUnchecked(gap vmaGapIterator, r __generics_imported0.AddrRange, val vma) vmaIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := vmatrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1101,12 +1103,10 @@ func (seg vmaIterator) End() __generics_imported0.Addr {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg vmaIterator) SetRangeUnchecked(r __generics_imported0.AddrRange) {
seg.node.keys[seg.index] = r
}
@@ -1131,8 +1131,9 @@ func (seg vmaIterator) SetRange(r __generics_imported0.AddrRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg vmaIterator) SetStartUnchecked(start __generics_imported0.Addr) {
seg.node.keys[seg.index].Start = start
}
@@ -1154,8 +1155,9 @@ func (seg vmaIterator) SetStart(start __generics_imported0.Addr) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg vmaIterator) SetEndUnchecked(end __generics_imported0.Addr) {
seg.node.keys[seg.index].End = end
}
@@ -1575,9 +1577,11 @@ func (s *vmaSet) ExportSortedSlices() *vmaSegmentDataSlices {
// ImportSortedSlices initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *vmaSet) ImportSortedSlices(sds *vmaSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
diff --git a/pkg/sentry/pgalloc/evictable_range_set.go b/pkg/sentry/pgalloc/evictable_range_set.go
index 335446b4f..7619b106e 100644
--- a/pkg/sentry/pgalloc/evictable_range_set.go
+++ b/pkg/sentry/pgalloc/evictable_range_set.go
@@ -343,7 +343,9 @@ func (s *evictableRangeSet) InsertWithoutMerging(gap evictableRangeGapIterator,
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *evictableRangeSet) InsertWithoutMergingUnchecked(gap evictableRangeGapIterator, r EvictableRange, val evictableRangeSetValue) evictableRangeIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := evictableRangetrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1097,12 +1099,10 @@ func (seg evictableRangeIterator) End() uint64 {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg evictableRangeIterator) SetRangeUnchecked(r EvictableRange) {
seg.node.keys[seg.index] = r
}
@@ -1127,8 +1127,9 @@ func (seg evictableRangeIterator) SetRange(r EvictableRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg evictableRangeIterator) SetStartUnchecked(start uint64) {
seg.node.keys[seg.index].Start = start
}
@@ -1150,8 +1151,9 @@ func (seg evictableRangeIterator) SetStart(start uint64) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg evictableRangeIterator) SetEndUnchecked(end uint64) {
seg.node.keys[seg.index].End = end
}
@@ -1571,9 +1573,11 @@ func (s *evictableRangeSet) ExportSortedSlices() *evictableRangeSegmentDataSlice
// ImportSortedSlices initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *evictableRangeSet) ImportSortedSlices(sds *evictableRangeSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
diff --git a/pkg/sentry/pgalloc/pgalloc.go b/pkg/sentry/pgalloc/pgalloc.go
index 46d3be58c..626d1eaa4 100644
--- a/pkg/sentry/pgalloc/pgalloc.go
+++ b/pkg/sentry/pgalloc/pgalloc.go
@@ -507,7 +507,9 @@ func findAvailableRange(usage *usageSet, fileSize int64, length, alignment uint6
// nearest page. If this is shorter than length bytes due to an error returned
// by r.ReadToBlocks(), it returns that error.
//
-// Preconditions: length > 0. length must be page-aligned.
+// Preconditions:
+// * length > 0.
+// * length must be page-aligned.
func (f *MemoryFile) AllocateAndFill(length uint64, kind usage.MemoryKind, r safemem.Reader) (memmap.FileRange, error) {
fr, err := f.Allocate(length, kind)
if err != nil {
@@ -1167,8 +1169,10 @@ func (f *MemoryFile) startEvictionsLocked() bool {
return startedAny
}
-// Preconditions: info == f.evictable[user]. !info.evicting. f.mu must be
-// locked.
+// Preconditions:
+// * info == f.evictable[user].
+// * !info.evicting.
+// * f.mu must be locked.
func (f *MemoryFile) startEvictionGoroutineLocked(user EvictableMemoryUser, info *evictableMemoryUserInfo) {
info.evicting = true
f.evictionWG.Add(1)
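AllocateAndFill's contract (fill as much as the reader provides, then round the successfully filled prefix up to a page and report the reader's error if it fell short) can be illustrated with io.Reader standing in for safemem.Reader. The page size and helper names are assumptions of this sketch:

package main

import (
	"fmt"
	"io"
	"strings"
)

const pageSize = 4096 // assumption: 4 KiB pages

func roundUpPage(n uint64) uint64 { return (n + pageSize - 1) &^ (pageSize - 1) }

// allocateAndFill fills a fresh buffer of the given page-aligned length
// from r. On a short read it returns the filled prefix rounded up to a
// page together with the error, echoing AllocateAndFill above.
func allocateAndFill(length uint64, r io.Reader) (uint64, []byte, error) {
	buf := make([]byte, length)
	n, err := io.ReadFull(r, buf)
	done := roundUpPage(uint64(n))
	if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
		return done, buf[:done], err
	}
	if uint64(n) < length {
		return done, buf[:done], io.ErrUnexpectedEOF
	}
	return length, buf, nil
}

func main() {
	n, _, err := allocateAndFill(2*pageSize, strings.NewReader(strings.Repeat("x", 3000)))
	fmt.Println(n, err) // 4096 unexpected EOF
}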
diff --git a/pkg/sentry/pgalloc/reclaim_set.go b/pkg/sentry/pgalloc/reclaim_set.go
index cd8d6ab89..5c62c03fe 100644
--- a/pkg/sentry/pgalloc/reclaim_set.go
+++ b/pkg/sentry/pgalloc/reclaim_set.go
@@ -347,7 +347,9 @@ func (s *reclaimSet) InsertWithoutMerging(gap reclaimGapIterator, r __generics_i
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *reclaimSet) InsertWithoutMergingUnchecked(gap reclaimGapIterator, r __generics_imported0.FileRange, val reclaimSetValue) reclaimIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := reclaimtrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1101,12 +1103,10 @@ func (seg reclaimIterator) End() uint64 {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg reclaimIterator) SetRangeUnchecked(r __generics_imported0.FileRange) {
seg.node.keys[seg.index] = r
}
@@ -1131,8 +1131,9 @@ func (seg reclaimIterator) SetRange(r __generics_imported0.FileRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg reclaimIterator) SetStartUnchecked(start uint64) {
seg.node.keys[seg.index].Start = start
}
@@ -1154,8 +1155,9 @@ func (seg reclaimIterator) SetStart(start uint64) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg reclaimIterator) SetEndUnchecked(end uint64) {
seg.node.keys[seg.index].End = end
}
@@ -1575,9 +1577,11 @@ func (s *reclaimSet) ExportSortedSlices() *reclaimSegmentDataSlices {
// ImportSortedSlices initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *reclaimSet) ImportSortedSlices(sds *reclaimSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
diff --git a/pkg/sentry/pgalloc/usage_set.go b/pkg/sentry/pgalloc/usage_set.go
index 08f4762f5..edabff0d8 100644
--- a/pkg/sentry/pgalloc/usage_set.go
+++ b/pkg/sentry/pgalloc/usage_set.go
@@ -347,7 +347,9 @@ func (s *usageSet) InsertWithoutMerging(gap usageGapIterator, r __generics_impor
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *usageSet) InsertWithoutMergingUnchecked(gap usageGapIterator, r __generics_imported0.FileRange, val usageInfo) usageIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := usagetrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1101,12 +1103,10 @@ func (seg usageIterator) End() uint64 {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg usageIterator) SetRangeUnchecked(r __generics_imported0.FileRange) {
seg.node.keys[seg.index] = r
}
@@ -1131,8 +1131,9 @@ func (seg usageIterator) SetRange(r __generics_imported0.FileRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg usageIterator) SetStartUnchecked(start uint64) {
seg.node.keys[seg.index].Start = start
}
@@ -1154,8 +1155,9 @@ func (seg usageIterator) SetStart(start uint64) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg usageIterator) SetEndUnchecked(end uint64) {
seg.node.keys[seg.index].End = end
}
@@ -1575,9 +1577,11 @@ func (s *usageSet) ExportSortedSlices() *usageSegmentDataSlices {
// ImportSortedSlices initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *usageSet) ImportSortedSlices(sds *usageSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
diff --git a/pkg/sentry/platform/interrupt/interrupt.go b/pkg/sentry/platform/interrupt/interrupt.go
index 57be41647..9dfac3eae 100644
--- a/pkg/sentry/platform/interrupt/interrupt.go
+++ b/pkg/sentry/platform/interrupt/interrupt.go
@@ -54,8 +54,9 @@ type Forwarder struct {
// }
// defer f.Disable()
//
-// Preconditions: r must not be nil. f must not already be forwarding
-// interrupts to a Receiver.
+// Preconditions:
+// * r must not be nil.
+// * f must not already be forwarding interrupts to a Receiver.
func (f *Forwarder) Enable(r Receiver) bool {
if r == nil {
panic("nil Receiver")
diff --git a/pkg/sentry/platform/platform.go b/pkg/sentry/platform/platform.go
index ba031516a..530e779b0 100644
--- a/pkg/sentry/platform/platform.go
+++ b/pkg/sentry/platform/platform.go
@@ -245,14 +245,19 @@ type AddressSpace interface {
// physical memory) to the mapping. The precommit flag is advisory and
// implementations may choose to ignore it.
//
- // Preconditions: addr and fr must be page-aligned. fr.Length() > 0.
- // at.Any() == true. At least one reference must be held on all pages in
- // fr, and must continue to be held as long as pages are mapped.
+ // Preconditions:
+ // * addr and fr must be page-aligned.
+ // * fr.Length() > 0.
+ // * at.Any() == true.
+ // * At least one reference must be held on all pages in fr, and must
+ // continue to be held as long as pages are mapped.
MapFile(addr usermem.Addr, f memmap.File, fr memmap.FileRange, at usermem.AccessType, precommit bool) error
// Unmap unmaps the given range.
//
- // Preconditions: addr is page-aligned. length > 0.
+ // Preconditions:
+ // * addr is page-aligned.
+ // * length > 0.
Unmap(addr usermem.Addr, length uint64)
// Release releases this address space. After releasing, a new AddressSpace
diff --git a/pkg/sentry/vfs/dentry.go b/pkg/sentry/vfs/dentry.go
index bc7ea93ea..a69a5b2f1 100644
--- a/pkg/sentry/vfs/dentry.go
+++ b/pkg/sentry/vfs/dentry.go
@@ -242,8 +242,9 @@ func (vfs *VirtualFilesystem) InvalidateDentry(ctx context.Context, d *Dentry) {
// caller must call AbortRenameDentry, CommitRenameReplaceDentry, or
// CommitRenameExchangeDentry depending on the rename's outcome.
//
-// Preconditions: If to is not nil, it must be a child Dentry from the same
-// Filesystem. from != to.
+// Preconditions:
+// * If to is not nil, it must be a child Dentry from the same Filesystem.
+// * from != to.
func (vfs *VirtualFilesystem) PrepareRenameDentry(mntns *MountNamespace, from, to *Dentry) error {
vfs.mountMu.Lock()
if mntns.mountpoints[from] != 0 {
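PrepareRenameDentry is one half of a prepare/commit-or-abort protocol. A stripped-down sketch of that shape, with toy dentries and a single lock standing in for the real VFS locking:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type dentry struct{ name string }

type fs struct {
	mu sync.Mutex // stands in for the locks taken by Prepare
}

// prepareRename locks and validates; on success the caller must then call
// exactly one of commitRename or abortRename, as with PrepareRenameDentry.
//
// Preconditions: from != to.
func (f *fs) prepareRename(from, to *dentry) error {
	if from == to {
		return errors.New("rename to self")
	}
	f.mu.Lock()
	return nil
}

func (f *fs) commitRename(from, to *dentry) {
	to.name, from.name = from.name, "" // toy "replace" step
	f.mu.Unlock()
}

func (f *fs) abortRename() { f.mu.Unlock() }

func main() {
	f := &fs{}
	a, b := &dentry{"old"}, &dentry{"victim"}
	if err := f.prepareRename(a, b); err != nil {
		fmt.Println("not prepared:", err)
		return
	}
	f.commitRename(a, b)
	fmt.Println(b.name) // old
}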
diff --git a/pkg/sentry/vfs/file_description.go b/pkg/sentry/vfs/file_description.go
index dcafffe57..d3abe28ee 100644
--- a/pkg/sentry/vfs/file_description.go
+++ b/pkg/sentry/vfs/file_description.go
@@ -371,8 +371,9 @@ type FileDescriptionImpl interface {
//
// - If opts.Flags specifies unsupported options, PRead returns EOPNOTSUPP.
//
- // Preconditions: The FileDescription was opened for reading.
- // FileDescriptionOptions.DenyPRead == false.
+ // Preconditions:
+ // * The FileDescription was opened for reading.
+ // * FileDescriptionOptions.DenyPRead == false.
PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts ReadOptions) (int64, error)
// Read is similar to PRead, but does not specify an offset.
@@ -403,8 +404,9 @@ type FileDescriptionImpl interface {
// - If opts.Flags specifies unsupported options, PWrite returns
// EOPNOTSUPP.
//
- // Preconditions: The FileDescription was opened for writing.
- // FileDescriptionOptions.DenyPWrite == false.
+ // Preconditions:
+ // * The FileDescription was opened for writing.
+ // * FileDescriptionOptions.DenyPWrite == false.
PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts WriteOptions) (int64, error)
// Write is similar to PWrite, but does not specify an offset, which is
diff --git a/pkg/sentry/vfs/filesystem.go b/pkg/sentry/vfs/filesystem.go
index df3758fd1..2c60cfab2 100644
--- a/pkg/sentry/vfs/filesystem.go
+++ b/pkg/sentry/vfs/filesystem.go
@@ -212,8 +212,9 @@ type FilesystemImpl interface {
// ENOENT. Equivalently, if vd represents a file with a link count of 0 not
// created by open(O_TMPFILE) without O_EXCL, LinkAt returns ENOENT.
//
- // Preconditions: !rp.Done(). For the final path component in rp,
- // !rp.ShouldFollowSymlink().
+ // Preconditions:
+ // * !rp.Done().
+ // * For the final path component in rp, !rp.ShouldFollowSymlink().
//
// Postconditions: If LinkAt returns an error returned by
// ResolvingPath.Resolve*(), then !rp.Done().
@@ -231,8 +232,9 @@ type FilesystemImpl interface {
// - If the directory in which the new directory would be created has been
// removed by RmdirAt or RenameAt, MkdirAt returns ENOENT.
//
- // Preconditions: !rp.Done(). For the final path component in rp,
- // !rp.ShouldFollowSymlink().
+ // Preconditions:
+ // * !rp.Done().
+ // * For the final path component in rp, !rp.ShouldFollowSymlink().
//
// Postconditions: If MkdirAt returns an error returned by
// ResolvingPath.Resolve*(), then !rp.Done().
@@ -253,8 +255,9 @@ type FilesystemImpl interface {
// - If the directory in which the file would be created has been removed
// by RmdirAt or RenameAt, MknodAt returns ENOENT.
//
- // Preconditions: !rp.Done(). For the final path component in rp,
- // !rp.ShouldFollowSymlink().
+ // Preconditions:
+ // * !rp.Done().
+ // * For the final path component in rp, !rp.ShouldFollowSymlink().
//
// Postconditions: If MknodAt returns an error returned by
// ResolvingPath.Resolve*(), then !rp.Done().
@@ -345,11 +348,12 @@ type FilesystemImpl interface {
// - If renaming would replace a non-empty directory, RenameAt returns
// ENOTEMPTY.
//
- // Preconditions: !rp.Done(). For the final path component in rp,
- // !rp.ShouldFollowSymlink(). oldParentVD.Dentry() was obtained from a
- // previous call to
- // oldParentVD.Mount().Filesystem().Impl().GetParentDentryAt(). oldName is
- // not "." or "..".
+ // Preconditions:
+ // * !rp.Done().
+ // * For the final path component in rp, !rp.ShouldFollowSymlink().
+ // * oldParentVD.Dentry() was obtained from a previous call to
+ // oldParentVD.Mount().Filesystem().Impl().GetParentDentryAt().
+ // * oldName is not "." or "..".
//
// Postconditions: If RenameAt returns an error returned by
// ResolvingPath.Resolve*(), then !rp.Done().
@@ -372,8 +376,9 @@ type FilesystemImpl interface {
// - If the file at rp exists but is not a directory, RmdirAt returns
// ENOTDIR.
//
- // Preconditions: !rp.Done(). For the final path component in rp,
- // !rp.ShouldFollowSymlink().
+ // Preconditions:
+ // * !rp.Done().
+ // * For the final path component in rp, !rp.ShouldFollowSymlink().
//
// Postconditions: If RmdirAt returns an error returned by
// ResolvingPath.Resolve*(), then !rp.Done().
@@ -410,8 +415,9 @@ type FilesystemImpl interface {
// - If the directory in which the symbolic link would be created has been
// removed by RmdirAt or RenameAt, SymlinkAt returns ENOENT.
//
- // Preconditions: !rp.Done(). For the final path component in rp,
- // !rp.ShouldFollowSymlink().
+ // Preconditions:
+ // * !rp.Done().
+ // * For the final path component in rp, !rp.ShouldFollowSymlink().
//
// Postconditions: If SymlinkAt returns an error returned by
// ResolvingPath.Resolve*(), then !rp.Done().
@@ -431,8 +437,9 @@ type FilesystemImpl interface {
//
// - If the file at rp exists but is a directory, UnlinkAt returns EISDIR.
//
- // Preconditions: !rp.Done(). For the final path component in rp,
- // !rp.ShouldFollowSymlink().
+ // Preconditions:
+ // * !rp.Done().
+ // * For the final path component in rp, !rp.ShouldFollowSymlink().
//
// Postconditions: If UnlinkAt returns an error returned by
// ResolvingPath.Resolve*(), then !rp.Done().
diff --git a/pkg/sentry/vfs/mount.go b/pkg/sentry/vfs/mount.go
index 67dfba986..714af6907 100644
--- a/pkg/sentry/vfs/mount.go
+++ b/pkg/sentry/vfs/mount.go
@@ -369,8 +369,9 @@ type umountRecursiveOptions struct {
//
// umountRecursiveLocked is analogous to Linux's fs/namespace.c:umount_tree().
//
-// Preconditions: vfs.mountMu must be locked. vfs.mounts.seq must be in a
-// writer critical section.
+// Preconditions:
+// * vfs.mountMu must be locked.
+// * vfs.mounts.seq must be in a writer critical section.
func (vfs *VirtualFilesystem) umountRecursiveLocked(mnt *Mount, opts *umountRecursiveOptions, vdsToDecRef []VirtualDentry, mountsToDecRef []*Mount) ([]VirtualDentry, []*Mount) {
if !mnt.umounted {
mnt.umounted = true
@@ -399,9 +400,11 @@ func (vfs *VirtualFilesystem) umountRecursiveLocked(mnt *Mount, opts *umountRecu
// connectLocked makes vd the mount parent/point for mnt. It consumes
// references held by vd.
//
-// Preconditions: vfs.mountMu must be locked. vfs.mounts.seq must be in a
-// writer critical section. d.mu must be locked. mnt.parent() == nil, i.e. mnt
-// must not already be connected.
+// Preconditions:
+// * vfs.mountMu must be locked.
+// * vfs.mounts.seq must be in a writer critical section.
+// * d.mu must be locked.
+// * mnt.parent() == nil, i.e. mnt must not already be connected.
func (vfs *VirtualFilesystem) connectLocked(mnt *Mount, vd VirtualDentry, mntns *MountNamespace) {
if checkInvariants {
if mnt.parent() != nil {
@@ -429,8 +432,10 @@ func (vfs *VirtualFilesystem) connectLocked(mnt *Mount, vd VirtualDentry, mntns
// disconnectLocked makes vd have no mount parent/point and returns its old
// mount parent/point with a reference held.
//
-// Preconditions: vfs.mountMu must be locked. vfs.mounts.seq must be in a
-// writer critical section. mnt.parent() != nil.
+// Preconditions:
+// * vfs.mountMu must be locked.
+// * vfs.mounts.seq must be in a writer critical section.
+// * mnt.parent() != nil.
func (vfs *VirtualFilesystem) disconnectLocked(mnt *Mount) VirtualDentry {
vd := mnt.loadKey()
if checkInvariants {
@@ -576,8 +581,9 @@ retryFirst:
// mnt. It takes a reference on the returned VirtualDentry. If no such mount
// point exists (i.e. mnt is a root mount), getMountpointAt returns (nil, nil).
//
-// Preconditions: References are held on mnt and root. vfsroot is not (mnt,
-// mnt.root).
+// Preconditions:
+// * References are held on mnt and root.
+// * vfsroot is not (mnt, mnt.root).
func (vfs *VirtualFilesystem) getMountpointAt(ctx context.Context, mnt *Mount, vfsroot VirtualDentry) VirtualDentry {
// The first mount is special-cased:
//
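umountRecursiveLocked appends everything that needs a reference drop to caller-provided slices instead of releasing inline, so the actual DecRefs can happen after vfs.mountMu is dropped. A sketch of that accumulate-under-lock pattern with a toy mount tree:

package main

import (
	"fmt"
	"sync"
)

type mount struct {
	name     string
	children []*mount
}

var mu sync.Mutex // stands in for vfs.mountMu

// umountRecursiveLocked collects mounts to release rather than releasing
// them while the lock is held.
//
// Preconditions: mu must be locked.
func umountRecursiveLocked(m *mount, toDecRef []*mount) []*mount {
	toDecRef = append(toDecRef, m)
	for _, c := range m.children {
		toDecRef = umountRecursiveLocked(c, toDecRef)
	}
	return toDecRef
}

func main() {
	root := &mount{name: "/", children: []*mount{{name: "/proc"}, {name: "/tmp"}}}
	mu.Lock()
	toDecRef := umountRecursiveLocked(root, nil)
	mu.Unlock()
	for _, m := range toDecRef {
		fmt.Println("DecRef", m.name) // deferred until after unlock
	}
}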
diff --git a/pkg/sentry/vfs/mount_unsafe.go b/pkg/sentry/vfs/mount_unsafe.go
index 70f850ca4..777d631cb 100644
--- a/pkg/sentry/vfs/mount_unsafe.go
+++ b/pkg/sentry/vfs/mount_unsafe.go
@@ -217,8 +217,9 @@ func (mt *mountTable) Insert(mount *Mount) {
// insertSeqed inserts the given mount into mt.
//
-// Preconditions: mt.seq must be in a writer critical section. mt must not
-// already contain a Mount with the same mount point and parent.
+// Preconditions:
+// * mt.seq must be in a writer critical section.
+// * mt must not already contain a Mount with the same mount point and parent.
func (mt *mountTable) insertSeqed(mount *Mount) {
hash := memhash(unsafe.Pointer(&mount.key), uintptr(mt.seed), mountKeyBytes)
@@ -269,9 +270,11 @@ func (mt *mountTable) insertSeqed(mount *Mount) {
atomic.StorePointer(&mt.slots, newSlots)
}
-// Preconditions: There are no concurrent mutators of the table (slots, cap).
-// If the table is visible to readers, then mt.seq must be in a writer critical
-// section. cap must be a power of 2.
+// Preconditions:
+// * There are no concurrent mutators of the table (slots, cap).
+// * If the table is visible to readers, then mt.seq must be in a writer
+// critical section.
+// * cap must be a power of 2.
func mtInsertLocked(slots unsafe.Pointer, cap uintptr, value unsafe.Pointer, hash uintptr) {
mask := cap - 1
off := (hash & mask) * mountSlotBytes
@@ -313,8 +316,9 @@ func (mt *mountTable) Remove(mount *Mount) {
// removeSeqed removes the given mount from mt.
//
-// Preconditions: mt.seq must be in a writer critical section. mt must contain
-// mount.
+// Preconditions:
+// * mt.seq must be in a writer critical section.
+// * mt must contain mount.
func (mt *mountTable) removeSeqed(mount *Mount) {
hash := memhash(unsafe.Pointer(&mount.key), uintptr(mt.seed), mountKeyBytes)
tcap := uintptr(1) << (mt.size & mtSizeOrderMask)
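The "cap must be a power of 2" precondition exists because mtInsertLocked computes slot offsets with a mask (hash & (cap-1)) rather than a modulo. A self-contained linear-probing sketch of the same trick (a toy table, not gVisor's mountTable):

package main

import "fmt"

// table is a minimal open-addressed set of nonzero uint64 hashes; the
// zero value marks an empty slot (an assumption of this sketch).
type table struct {
	slots []uint64
}

func newTable(capacity int) *table {
	if capacity&(capacity-1) != 0 {
		panic("capacity must be a power of 2")
	}
	return &table{slots: make([]uint64, capacity)}
}

// insert linearly probes from hash & mask. It assumes a free slot exists,
// just as insertSeqed grows the table before inserting.
func (t *table) insert(hash uint64) {
	mask := uint64(len(t.slots) - 1)
	for i := hash & mask; ; i = (i + 1) & mask {
		if t.slots[i] == 0 {
			t.slots[i] = hash
			return
		}
	}
}

func main() {
	t := newTable(8)
	t.insert(42)
	t.insert(50) // collides with 42 at index 2; probes to index 3
	fmt.Println(t.slots)
}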
diff --git a/pkg/state/addr_set.go b/pkg/state/addr_set.go
index 3cc0161b5..591af5292 100644
--- a/pkg/state/addr_set.go
+++ b/pkg/state/addr_set.go
@@ -343,7 +343,9 @@ func (s *addrSet) InsertWithoutMerging(gap addrGapIterator, r addrRange, val *ob
// and returns an iterator to the inserted segment. All existing iterators
// (including gap, but not including the returned iterator) are invalidated.
//
-// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+// Preconditions:
+// * r.Start >= gap.Start().
+// * r.End <= gap.End().
func (s *addrSet) InsertWithoutMergingUnchecked(gap addrGapIterator, r addrRange, val *objectEncodeState) addrIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
splitMaxGap := addrtrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
@@ -1097,12 +1099,10 @@ func (seg addrIterator) End() uintptr {
// does not invalidate any iterators.
//
// Preconditions:
-//
-// - r.Length() > 0.
-//
-// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
-// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
-// r.start >= seg.PrevSegment().End().
+// * r.Length() > 0.
+// * The new range must not overlap an existing one:
+// * If seg.NextSegment().Ok(), then r.end <= seg.NextSegment().Start().
+// * If seg.PrevSegment().Ok(), then r.start >= seg.PrevSegment().End().
func (seg addrIterator) SetRangeUnchecked(r addrRange) {
seg.node.keys[seg.index] = r
}
@@ -1127,8 +1127,9 @@ func (seg addrIterator) SetRange(r addrRange) {
// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
-// Preconditions: The new start must be valid: start < seg.End(); if
-// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+// Preconditions: The new start must be valid:
+// * start < seg.End().
+// * If seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg addrIterator) SetStartUnchecked(start uintptr) {
seg.node.keys[seg.index].Start = start
}
@@ -1150,8 +1151,9 @@ func (seg addrIterator) SetStart(start uintptr) {
// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
-// Preconditions: The new end must be valid: end > seg.Start(); if
-// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+// Preconditions: The new end must be valid:
+// * end > seg.Start().
+// * If seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg addrIterator) SetEndUnchecked(end uintptr) {
seg.node.keys[seg.index].End = end
}
@@ -1571,9 +1573,11 @@ func (s *addrSet) ExportSortedSlices() *addrSegmentDataSlices {
// ImportSortedSlices initializes the given set from the given slice.
//
-// Preconditions: s must be empty. sds must represent a valid set (the segments
-// in sds must have valid lengths that do not overlap). The segments in sds
-// must be sorted in ascending key order.
+// Preconditions:
+// * s must be empty.
+// * sds must represent a valid set (the segments in sds must have valid
+// lengths that do not overlap).
+// * The segments in sds must be sorted in ascending key order.
func (s *addrSet) ImportSortedSlices(sds *addrSegmentDataSlices) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
diff --git a/pkg/tcpip/stack/conntrack.go b/pkg/tcpip/stack/conntrack.go
index 7dd344b4f..836682ea0 100644
--- a/pkg/tcpip/stack/conntrack.go
+++ b/pkg/tcpip/stack/conntrack.go
@@ -572,7 +572,9 @@ func (ct *ConnTrack) reapUnused(start int, prevInterval time.Duration) (int, tim
// reapTupleLocked tries to remove tuple and its reply from the table. It
// returns whether the tuple's connection has timed out.
//
-// Preconditions: ct.mu is locked for reading and bucket is locked.
+// Preconditions:
+// * ct.mu is locked for reading.
+// * bucket is locked.
func (ct *ConnTrack) reapTupleLocked(tuple *tuple, bucket int, now time.Time) bool {
if !tuple.conn.timedOut(now) {
return false
diff --git a/pkg/tcpip/stack/iptables.go b/pkg/tcpip/stack/iptables.go
index c37da814f..41ef4236b 100644
--- a/pkg/tcpip/stack/iptables.go
+++ b/pkg/tcpip/stack/iptables.go
@@ -315,8 +315,8 @@ func (it *IPTables) startReaper(interval time.Duration) {
// should not go forward.
//
// Preconditions:
-// - pkt is a IPv4 packet of at least length header.IPv4MinimumSize.
-// - pkt.NetworkHeader is not nil.
+// * pkt is an IPv4 packet of at least length header.IPv4MinimumSize.
+// * pkt.NetworkHeader is not nil.
//
// NOTE: unlike the Check API, the returned map contains packets that should be
// dropped.
@@ -341,8 +341,8 @@ func (it *IPTables) CheckPackets(hook Hook, pkts PacketBufferList, gso *GSO, r *
}
// Preconditions:
-// - pkt is a IPv4 packet of at least length header.IPv4MinimumSize.
-// - pkt.NetworkHeader is not nil.
+// * pkt is an IPv4 packet of at least length header.IPv4MinimumSize.
+// * pkt.NetworkHeader is not nil.
func (it *IPTables) checkChain(hook Hook, pkt *PacketBuffer, table Table, ruleIdx int, gso *GSO, r *Route, address tcpip.Address, nicName string) chainVerdict {
// Start from ruleIdx and walk the list of rules until a rule gives us
// a verdict.
@@ -388,8 +388,8 @@ func (it *IPTables) checkChain(hook Hook, pkt *PacketBuffer, table Table, ruleId
}
// Preconditions:
-// - pkt is a IPv4 packet of at least length header.IPv4MinimumSize.
-// - pkt.NetworkHeader is not nil.
+// * pkt is an IPv4 packet of at least length header.IPv4MinimumSize.
+// * pkt.NetworkHeader is not nil.
func (it *IPTables) checkRule(hook Hook, pkt *PacketBuffer, table Table, ruleIdx int, gso *GSO, r *Route, address tcpip.Address, nicName string) (RuleVerdict, int) {
rule := table.Rules[ruleIdx]
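checkChain and checkRule walk a rule list until some rule produces a terminal verdict. A minimal table walk with toy verdict types (not the real Check API):

package main

import "fmt"

type verdict int

const (
	verdictContinue verdict = iota // keep evaluating rules
	verdictAccept
	verdictDrop
)

type rule func(pkt []byte) verdict

// checkChain walks rules from the first entry until one returns a
// terminal verdict; if none does, the chain's policy applies.
func checkChain(rules []rule, policy verdict, pkt []byte) verdict {
	for _, r := range rules {
		if v := r(pkt); v != verdictContinue {
			return v
		}
	}
	return policy
}

func main() {
	dropShort := func(pkt []byte) verdict {
		if len(pkt) < 20 { // e.g. shorter than an IPv4 header
			return verdictDrop
		}
		return verdictContinue
	}
	fmt.Println(checkChain([]rule{dropShort}, verdictAccept, make([]byte, 8)))  // 2 (drop)
	fmt.Println(checkChain([]rule{dropShort}, verdictAccept, make([]byte, 40))) // 1 (accept)
}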
diff --git a/pkg/unet/unet.go b/pkg/unet/unet.go
index d843f19cf..c976d7230 100644
--- a/pkg/unet/unet.go
+++ b/pkg/unet/unet.go
@@ -522,7 +522,7 @@ func (s *ServerSocket) Listen() error {
// This is always blocking.
//
// Preconditions:
-// * ServerSocket is listening (Listen called).
+// * ServerSocket is listening (Listen called).
func (s *ServerSocket) Accept() (*Socket, error) {
fd, ok := s.socket.enterFD()
if !ok {
diff --git a/pkg/usermem/addr_range_seq_unsafe.go b/pkg/usermem/addr_range_seq_unsafe.go
index c09337c15..495896ded 100644
--- a/pkg/usermem/addr_range_seq_unsafe.go
+++ b/pkg/usermem/addr_range_seq_unsafe.go
@@ -81,8 +81,10 @@ func AddrRangeSeqFromSlice(slice []AddrRange) AddrRangeSeq {
return addrRangeSeqFromSliceLimited(slice, limit)
}
-// Preconditions: The combined length of all AddrRanges in slice <= limit.
-// limit >= 0. If len(slice) != 0, then limit > 0.
+// Preconditions:
+// * The combined length of all AddrRanges in slice <= limit.
+// * limit >= 0.
+// * If len(slice) != 0, then limit > 0.
func addrRangeSeqFromSliceLimited(slice []AddrRange, limit int64) AddrRangeSeq {
switch len(slice) {
case 0:
diff --git a/pkg/usermem/usermem.go b/pkg/usermem/usermem.go
index cd6a0ea6b..27279b409 100644
--- a/pkg/usermem/usermem.go
+++ b/pkg/usermem/usermem.go
@@ -54,8 +54,10 @@ type IO interface {
// of bytes zeroed. If the number of bytes zeroed is < toZero, it returns a
// non-nil error explaining why.
//
- // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
- // any following locks in the lock order. toZero >= 0.
+ // Preconditions:
+ // * The caller must not hold mm.MemoryManager.mappingMu or any
+ // following locks in the lock order.
+ // * toZero >= 0.
ZeroOut(ctx context.Context, addr Addr, toZero int64, opts IOOpts) (int64, error)
// CopyOutFrom copies ars.NumBytes() bytes from src to the memory mapped at
@@ -66,9 +68,11 @@ type IO interface {
//
// CopyOutFrom calls src.ReadToBlocks at most once.
//
- // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
- // any following locks in the lock order. src.ReadToBlocks must not block
- // on mm.MemoryManager.activeMu or any preceding locks in the lock order.
+ // Preconditions:
+ // * The caller must not hold mm.MemoryManager.mappingMu or any
+ // following locks in the lock order.
+ // * src.ReadToBlocks must not block on mm.MemoryManager.activeMu or
+ // any preceding locks in the lock order.
CopyOutFrom(ctx context.Context, ars AddrRangeSeq, src safemem.Reader, opts IOOpts) (int64, error)
// CopyInTo copies ars.NumBytes() bytes from the memory mapped at ars to
@@ -78,10 +82,11 @@ type IO interface {
//
// CopyInTo calls dst.WriteFromBlocks at most once.
//
- // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
- // any following locks in the lock order. dst.WriteFromBlocks must not
- // block on mm.MemoryManager.activeMu or any preceding locks in the lock
- // order.
+ // Preconditions:
+ // * The caller must not hold mm.MemoryManager.mappingMu or any
+ // following locks in the lock order.
+ // * dst.WriteFromBlocks must not block on mm.MemoryManager.activeMu or
+ // any preceding locks in the lock order.
CopyInTo(ctx context.Context, ars AddrRangeSeq, dst safemem.Writer, opts IOOpts) (int64, error)
// TODO(jamieliu): The requirement that CopyOutFrom/CopyInTo call src/dst
@@ -93,25 +98,28 @@ type IO interface {
// SwapUint32 atomically sets the uint32 value at addr to new and
// returns the previous value.
//
- // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
- // any following locks in the lock order. addr must be aligned to a 4-byte
- // boundary.
+ // Preconditions:
+ // * The caller must not hold mm.MemoryManager.mappingMu or any
+ // following locks in the lock order.
+ // * addr must be aligned to a 4-byte boundary.
SwapUint32(ctx context.Context, addr Addr, new uint32, opts IOOpts) (uint32, error)
// CompareAndSwapUint32 atomically compares the uint32 value at addr to
// old; if they are equal, the value in memory is replaced by new. In
// either case, the previous value stored in memory is returned.
//
- // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
- // any following locks in the lock order. addr must be aligned to a 4-byte
- // boundary.
+ // Preconditions:
+ // * The caller must not hold mm.MemoryManager.mappingMu or any
+ // following locks in the lock order.
+ // * addr must be aligned to a 4-byte boundary.
CompareAndSwapUint32(ctx context.Context, addr Addr, old, new uint32, opts IOOpts) (uint32, error)
// LoadUint32 atomically loads the uint32 value at addr and returns it.
//
- // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or
- // any following locks in the lock order. addr must be aligned to a 4-byte
- // boundary.
+ // Preconditions:
+ // * The caller must not hold mm.MemoryManager.mappingMu or any
+ // following locks in the lock order.
+ // * addr must be aligned to a 4-byte boundary.
LoadUint32(ctx context.Context, addr Addr, opts IOOpts) (uint32, error)
}
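CompareAndSwapUint32's contract (return the previous value whether or not the swap happened) can be reproduced with sync/atomic plus a load loop. A host-memory sketch that ignores the guest-address and alignment concerns above:

package main

import (
	"fmt"
	"sync/atomic"
)

// compareAndSwap compares *p to old and, if equal, replaces it with new.
// In either case it returns the previous value, retrying if a concurrent
// writer races between the load and the CAS.
func compareAndSwap(p *uint32, old, new uint32) uint32 {
	for {
		prev := atomic.LoadUint32(p)
		if prev != old {
			return prev
		}
		if atomic.CompareAndSwapUint32(p, old, new) {
			return old
		}
	}
}

func main() {
	x := uint32(7)
	fmt.Println(compareAndSwap(&x, 7, 9), x) // 7 9
	fmt.Println(compareAndSwap(&x, 7, 1), x) // 9 9
}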
@@ -183,7 +191,7 @@ func (rw *IOReadWriter) Write(src []byte) (int, error) {
// CopyObjectOut must use reflection to encode src; performance-sensitive
// clients should do encoding manually and use uio.CopyOut directly.
//
-// Preconditions: As for IO.CopyOut.
+// Preconditions: Same as IO.CopyOut.
func CopyObjectOut(ctx context.Context, uio IO, addr Addr, src interface{}, opts IOOpts) (int, error) {
w := &IOReadWriter{
Ctx: ctx,
@@ -205,7 +213,7 @@ func CopyObjectOut(ctx context.Context, uio IO, addr Addr, src interface{}, opts
// CopyObjectIn must use reflection to decode dst; performance-sensitive
// clients should use uio.CopyIn directly and do decoding manually.
//
-// Preconditions: As for IO.CopyIn.
+// Preconditions: Same as IO.CopyIn.
func CopyObjectIn(ctx context.Context, uio IO, addr Addr, dst interface{}, opts IOOpts) (int, error) {
r := &IOReadWriter{
Ctx: ctx,
@@ -233,7 +241,8 @@ const (
// would exceed maxlen, CopyStringIn returns the string truncated to maxlen and
// ENAMETOOLONG.
//
-// Preconditions: As for IO.CopyFromUser. maxlen >= 0.
+// Preconditions: Same as IO.CopyFromUser, plus:
+// * maxlen >= 0.
func CopyStringIn(ctx context.Context, uio IO, addr Addr, maxlen int, opts IOOpts) (string, error) {
initLen := maxlen
if initLen > copyStringMaxInitBufLen {
@@ -287,7 +296,7 @@ func CopyStringIn(ctx context.Context, uio IO, addr Addr, maxlen int, opts IOOpt
// less. CopyOutVec returns the number of bytes copied; if this is less than
// the maximum, it returns a non-nil error explaining why.
//
-// Preconditions: As for IO.CopyOut.
+// Preconditions: Same as IO.CopyOut.
func CopyOutVec(ctx context.Context, uio IO, ars AddrRangeSeq, src []byte, opts IOOpts) (int, error) {
var done int
for !ars.IsEmpty() && done < len(src) {
@@ -311,7 +320,7 @@ func CopyOutVec(ctx context.Context, uio IO, ars AddrRangeSeq, src []byte, opts
// less. CopyInVec returns the number of bytes copied; if this is less than the
// maximum, it returns a non-nil error explaining why.
//
-// Preconditions: As for IO.CopyIn.
+// Preconditions: Same as IO.CopyIn.
func CopyInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dst []byte, opts IOOpts) (int, error) {
var done int
for !ars.IsEmpty() && done < len(dst) {
@@ -335,7 +344,7 @@ func CopyInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dst []byte, opts I
// ZeroOutVec returns the number of bytes written; if this is less than the
// maximum, it returns a non-nil error explaining why.
//
-// Preconditions: As for IO.ZeroOut.
+// Preconditions: Same as IO.ZeroOut.
func ZeroOutVec(ctx context.Context, uio IO, ars AddrRangeSeq, toZero int64, opts IOOpts) (int64, error) {
var done int64
for !ars.IsEmpty() && done < toZero {
@@ -388,7 +397,7 @@ func isASCIIWhitespace(b byte) bool {
//
// - CopyInt32StringsInVec returns EINVAL if ars.NumBytes() == 0.
//
-// Preconditions: As for CopyInVec.
+// Preconditions: Same as CopyInVec.
func CopyInt32StringsInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dsts []int32, opts IOOpts) (int64, error) {
if len(dsts) == 0 {
return 0, nil
@@ -481,28 +490,28 @@ func (s IOSequence) NumBytes() int64 {
// DropFirst returns a copy of s with s.Addrs.DropFirst(n).
//
-// Preconditions: As for AddrRangeSeq.DropFirst.
+// Preconditions: Same as AddrRangeSeq.DropFirst.
func (s IOSequence) DropFirst(n int) IOSequence {
return IOSequence{s.IO, s.Addrs.DropFirst(n), s.Opts}
}
// DropFirst64 returns a copy of s with s.Addrs.DropFirst64(n).
//
-// Preconditions: As for AddrRangeSeq.DropFirst64.
+// Preconditions: Same as AddrRangeSeq.DropFirst64.
func (s IOSequence) DropFirst64(n int64) IOSequence {
return IOSequence{s.IO, s.Addrs.DropFirst64(n), s.Opts}
}
// TakeFirst returns a copy of s with s.Addrs.TakeFirst(n).
//
-// Preconditions: As for AddrRangeSeq.TakeFirst.
+// Preconditions: Same as AddrRangeSeq.TakeFirst.
func (s IOSequence) TakeFirst(n int) IOSequence {
return IOSequence{s.IO, s.Addrs.TakeFirst(n), s.Opts}
}
// TakeFirst64 returns a copy of s with s.Addrs.TakeFirst64(n).
//
-// Preconditions: As for AddrRangeSeq.TakeFirst64.
+// Preconditions: Same as AddrRangeSeq.TakeFirst64.
func (s IOSequence) TakeFirst64(n int64) IOSequence {
return IOSequence{s.IO, s.Addrs.TakeFirst64(n), s.Opts}
}
@@ -512,7 +521,7 @@ func (s IOSequence) TakeFirst64(n int64) IOSequence {
// As with CopyOutVec, if s.NumBytes() < len(src), the copy will be truncated
// to s.NumBytes(), and a nil error will be returned.
//
-// Preconditions: As for CopyOutVec.
+// Preconditions: Same as CopyOutVec.
func (s IOSequence) CopyOut(ctx context.Context, src []byte) (int, error) {
return CopyOutVec(ctx, s.IO, s.Addrs, src, s.Opts)
}
@@ -522,7 +531,7 @@ func (s IOSequence) CopyOut(ctx context.Context, src []byte) (int, error) {
// As with CopyInVec, if s.NumBytes() < len(dst), the copy will be truncated to
// s.NumBytes(), and a nil error will be returned.
//
-// Preconditions: As for CopyInVec.
+// Preconditions: Same as CopyInVec.
func (s IOSequence) CopyIn(ctx context.Context, dst []byte) (int, error) {
return CopyInVec(ctx, s.IO, s.Addrs, dst, s.Opts)
}
@@ -532,21 +541,21 @@ func (s IOSequence) CopyIn(ctx context.Context, dst []byte) (int, error) {
// As with ZeroOutVec, if s.NumBytes() < toZero, the write will be truncated
// to s.NumBytes(), and a nil error will be returned.
//
-// Preconditions: As for ZeroOutVec.
+// Preconditions: Same as ZeroOutVec.
func (s IOSequence) ZeroOut(ctx context.Context, toZero int64) (int64, error) {
return ZeroOutVec(ctx, s.IO, s.Addrs, toZero, s.Opts)
}
// CopyOutFrom invokes s.CopyOutFrom over s.Addrs.
//
-// Preconditions: As for IO.CopyOutFrom.
+// Preconditions: Same as IO.CopyOutFrom.
func (s IOSequence) CopyOutFrom(ctx context.Context, src safemem.Reader) (int64, error) {
return s.IO.CopyOutFrom(ctx, s.Addrs, src, s.Opts)
}
// CopyInTo invokes s.CopyInTo over s.Addrs.
//
-// Preconditions: As for IO.CopyInTo.
+// Preconditions: Same as IO.CopyInTo.
func (s IOSequence) CopyInTo(ctx context.Context, dst safemem.Writer) (int64, error) {
return s.IO.CopyInTo(ctx, s.Addrs, dst, s.Opts)
}
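The IOSequence methods above are thin value-type wrappers that rebuild the struct around a transformed address sequence. The same shape with a byte slice standing in for AddrRangeSeq (a sketch; the real TakeFirst clamps n rather than requiring it to fit):

package main

import "fmt"

type opts struct{ IgnorePermissions bool }

// seq mimics IOSequence: a value type whose transforms return copies.
type seq struct {
	data []byte
	opts opts
}

// dropFirst returns a copy of s without its first n bytes, like
// IOSequence.DropFirst.
//
// Preconditions: 0 <= n <= len(s.data).
func (s seq) dropFirst(n int) seq { return seq{s.data[n:], s.opts} }

// takeFirst returns a copy of s limited to its first n bytes, like
// IOSequence.TakeFirst.
//
// Preconditions: 0 <= n <= len(s.data).
func (s seq) takeFirst(n int) seq { return seq{s.data[:n], s.opts} }

func main() {
	s := seq{data: []byte("preconditions")}
	fmt.Println(string(s.dropFirst(3).takeFirst(4).data)) // "cond"
}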