author | gVisor bot <gvisor-bot@google.com> | 2020-06-16 23:19:55 +0000
---|---|---
committer | gVisor bot <gvisor-bot@google.com> | 2020-06-16 23:19:55 +0000
commit | 76f80d4109e576df2ffa2051955a58c5f9cd918d (patch)
tree | 96a88e0fec457354eb6ea1c9a4c4a34496cde5f7 /pkg
parent | a207d3ce91f86bd4b33c556a34da9f01bd59a67c (diff)
parent | e61acfb5eaec4450587116573f708284a0fe7849 (diff)
Merge release-20200608.0-66-ge61acfb5e (automated)
Diffstat (limited to 'pkg')
-rw-r--r-- | pkg/abi/linux/linux_abi_autogen_unsafe.go | 6
-rw-r--r-- | pkg/sentry/pgalloc/pgalloc.go | 60
2 files changed, 37 insertions, 29 deletions
```diff
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 14d7634ab..33ab5518b 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -124,12 +124,12 @@ func (s *Statx) UnmarshalBytes(src []byte) {
 // Packed implements marshal.Marshallable.Packed.
 //go:nosplit
 func (s *Statx) Packed() bool {
-	return s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed()
+	return s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed()
 }
 
 // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
 func (s *Statx) MarshalUnsafe(dst []byte) {
-	if s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() && s.Mtime.Packed() {
+	if s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
 		safecopy.CopyIn(dst, unsafe.Pointer(s))
 	} else {
 		s.MarshalBytes(dst)
@@ -178,7 +178,7 @@ func (s *Statx) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
 // CopyIn implements marshal.Marshallable.CopyIn.
 //go:nosplit
 func (s *Statx) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
-	if !s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
+	if !s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() {
 		// Type Statx doesn't have a packed layout in memory, fall back to UnmarshalBytes.
 		buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
 		length, err := task.CopyInBytes(addr, buf) // escapes: okay.
diff --git a/pkg/sentry/pgalloc/pgalloc.go b/pkg/sentry/pgalloc/pgalloc.go
index 46f19d218..afab97c0a 100644
--- a/pkg/sentry/pgalloc/pgalloc.go
+++ b/pkg/sentry/pgalloc/pgalloc.go
@@ -441,53 +441,61 @@ func (f *MemoryFile) Allocate(length uint64, kind usage.MemoryKind) (platform.Fi
 // Precondition: alignment must be a power of 2.
 func findAvailableRange(usage *usageSet, fileSize int64, length, alignment uint64) (platform.FileRange, bool) {
 	alignmentMask := alignment - 1
-	for gap := usage.UpperBoundGap(uint64(fileSize)); gap.Ok(); gap = gap.PrevLargeEnoughGap(length) {
-		// Start searching only at end of file.
+
+	// Search for space in existing gaps, starting at the current end of the
+	// file and working backward.
+	lastGap := usage.LastGap()
+	gap := lastGap
+	for {
 		end := gap.End()
 		if end > uint64(fileSize) {
 			end = uint64(fileSize)
 		}
 
-		// Start at the top and align downwards.
-		start := end - length
-		if start > end {
-			break // Underflow.
+		// Try to allocate from the end of this gap, with the start of the
+		// allocated range aligned down to alignment.
+		unalignedStart := end - length
+		if unalignedStart > end {
+			// Negative overflow: this and all preceding gaps are too small to
+			// accommodate length.
+			break
 		}
-		start &^= alignmentMask
-
-		// Is the gap still sufficient?
-		if start < gap.Start() {
-			continue
+		if start := unalignedStart &^ alignmentMask; start >= gap.Start() {
+			return platform.FileRange{start, start + length}, true
 		}
 
-		// Allocate in the given gap.
-		return platform.FileRange{start, start + length}, true
+		gap = gap.PrevLargeEnoughGap(length)
+		if !gap.Ok() {
+			break
+		}
 	}
 
 	// Check that it's possible to fit this allocation at the end of a file of any size.
-	min := usage.LastGap().Start()
+	min := lastGap.Start()
 	min = (min + alignmentMask) &^ alignmentMask
 	if min+length < min {
-		// Overflow.
+		// Overflow: allocation would exceed the range of uint64.
 		return platform.FileRange{}, false
 	}
 
 	// Determine the minimum file size required to fit this allocation at its end.
 	for {
-		if fileSize >= 2*fileSize {
-			// Is this because it's initially empty?
-			if fileSize == 0 {
-				fileSize += chunkSize
-			} else {
-				// fileSize overflow.
+		newFileSize := 2 * fileSize
+		if newFileSize <= fileSize {
+			if fileSize != 0 {
+				// Overflow: allocation would exceed the range of int64.
 				return platform.FileRange{}, false
 			}
-		} else {
-			// Double the current fileSize.
-			fileSize *= 2
+			newFileSize = chunkSize
+		}
+		fileSize = newFileSize
+
+		unalignedStart := uint64(fileSize) - length
+		if unalignedStart > uint64(fileSize) {
+			// Negative overflow: fileSize is still inadequate.
+			continue
 		}
-		start := (uint64(fileSize) - length) &^ alignmentMask
-		if start >= min {
+		if start := unalignedStart &^ alignmentMask; start >= min {
 			return platform.FileRange{start, start + length}, true
 		}
 	}
```
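The pgalloc hunk above rewrites findAvailableRange() so that the backward scan over free gaps and the file-growth loop both check explicitly for integer overflow, and so the chosen start is aligned down with `start &^ (alignment - 1)`. The following standalone sketch is not gVisor's implementation: `gap`, `fileRange`, `findRange`, and the `chunkSize` value here are simplified stand-ins for the usageSet gap iterator, platform.FileRange, and the real constants, kept only to show the same arithmetic in a runnable form.

```go
// Minimal sketch of the allocation arithmetic used in the diff above.
// Assumes gaps are sorted by address and alignment is a power of 2.
package main

import "fmt"

// gap is a hypothetical stand-in for a free extent in the usage set.
type gap struct{ start, end uint64 }

// fileRange mirrors platform.FileRange: the half-open interval [Start, End).
type fileRange struct{ Start, End uint64 }

// chunkSize is a hypothetical minimum file size used when growing from zero.
const chunkSize = 1 << 24

// findRange tries the highest-addressed gap below fileSize first, aligning the
// candidate start down; if nothing fits, it doubles fileSize (with overflow
// checks) until an aligned range of the requested length fits at the end.
// It returns the range, the possibly grown file size, and success.
func findRange(gaps []gap, fileSize int64, length, alignment uint64) (fileRange, int64, bool) {
	alignmentMask := alignment - 1

	// Walk gaps from the end of the file backward.
	for i := len(gaps) - 1; i >= 0; i-- {
		end := gaps[i].end
		if end > uint64(fileSize) {
			end = uint64(fileSize)
		}
		unalignedStart := end - length
		if unalignedStart > end {
			// Negative overflow: even the top of this gap is below length,
			// and earlier gaps end lower still, so nothing can fit.
			break
		}
		if start := unalignedStart &^ alignmentMask; start >= gaps[i].start {
			return fileRange{start, start + length}, fileSize, true
		}
	}

	// No existing gap fits; compute the lowest aligned start usable at the
	// end of the file, then grow the file geometrically until it fits.
	min := gaps[len(gaps)-1].start
	min = (min + alignmentMask) &^ alignmentMask
	if min+length < min {
		return fileRange{}, fileSize, false // uint64 overflow
	}
	for {
		newFileSize := 2 * fileSize
		if newFileSize <= fileSize {
			if fileSize != 0 {
				return fileRange{}, fileSize, false // int64 overflow
			}
			newFileSize = chunkSize
		}
		fileSize = newFileSize

		unalignedStart := uint64(fileSize) - length
		if unalignedStart > uint64(fileSize) {
			continue // file is still smaller than length
		}
		if start := unalignedStart &^ alignmentMask; start >= min {
			return fileRange{start, start + length}, fileSize, true
		}
	}
}

func main() {
	// One hypothetical free gap spanning a 64 KiB file.
	gaps := []gap{{start: 0, end: 1 << 16}}

	// A 4 KiB, 4 KiB-aligned request fits at the top of the existing gap.
	fr, size, ok := findRange(gaps, 1<<16, 4096, 4096)
	fmt.Println(fr, size, ok) // {61440 65536} 65536 true

	// A 1 MiB request forces the file size to double until it fits.
	fr, size, ok = findRange(gaps, 1<<16, 1<<20, 4096)
	fmt.Println(fr, size, ok) // {0 1048576} 1048576 true
}
```

Both underflow checks rely on uint64 wraparound: `end - length` (or `uint64(fileSize) - length`) can only exceed the value it was subtracted from when length was larger than the available space, so comparing the difference against the original value detects the underflow without a separate branch on length.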