author    Nicolas Lacasse <nlacasse@google.com>  2019-04-29 14:03:04 -0700
committer Shentubot <shentubot@google.com>       2019-04-29 14:04:14 -0700
commit    f4ce43e1f426148d99c28c1b0e5c43ddda17a8cb (patch)
tree      ef64d18350874742742599c8b059b333eb060920 /pkg/sentry/mm
parent    38e627644756400413fffe7222cdd5200dc4eccf (diff)
Allow and document bug ids in gVisor codebase.
PiperOrigin-RevId: 245818639
Change-Id: I03703ef0fb9b6675955637b9fe2776204c545789
Diffstat (limited to 'pkg/sentry/mm')
 pkg/sentry/mm/aio_context.go      |  2 +-
 pkg/sentry/mm/procfs.go           | 10 +++++-----
 pkg/sentry/mm/special_mappable.go |  2 +-
 pkg/sentry/mm/syscalls.go         |  6 +++---
 pkg/sentry/mm/vma.go              |  2 +-
 5 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/pkg/sentry/mm/aio_context.go b/pkg/sentry/mm/aio_context.go
index f7ff06de0..7075792e0 100644
--- a/pkg/sentry/mm/aio_context.go
+++ b/pkg/sentry/mm/aio_context.go
@@ -331,7 +331,7 @@ func (mm *MemoryManager) NewAIOContext(ctx context.Context, events uint32) (uint
Length: aioRingBufferSize,
MappingIdentity: m,
Mappable: m,
- // TODO: Linux does "do_mmap_pgoff(..., PROT_READ |
+ // TODO(fvoznika): Linux does "do_mmap_pgoff(..., PROT_READ |
// PROT_WRITE, ...)" in fs/aio.c:aio_setup_ring(); why do we make this
// mapping read-only?
Perms: usermem.Read,
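If the question in the TODO were answered by simply matching Linux, the change would presumably be confined to the permission fields of the options literal above; a minimal sketch, assuming usermem.ReadWrite (the combined read/write AccessType defined alongside usermem.Read) and the MaxPerms field of memmap.MMapOpts:

    // Sketch only, not the gVisor implementation: how the options might look
    // if they mirrored Linux's PROT_READ|PROT_WRITE in fs/aio.c:aio_setup_ring().
    opts := memmap.MMapOpts{
        Length:          aioRingBufferSize,
        MappingIdentity: m,
        Mappable:        m,
        Perms:           usermem.ReadWrite, // instead of usermem.Read
        MaxPerms:        usermem.ReadWrite,
    }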
diff --git a/pkg/sentry/mm/procfs.go b/pkg/sentry/mm/procfs.go
index 0c4b8895d..7cdbf6e25 100644
--- a/pkg/sentry/mm/procfs.go
+++ b/pkg/sentry/mm/procfs.go
@@ -69,7 +69,7 @@ func (mm *MemoryManager) ReadMapsSeqFileData(ctx context.Context, handle seqfile
start = *handle.(*usermem.Addr)
}
for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() {
- // FIXME: If we use a usermem.Addr for the handle, we get
+ // FIXME(b/30793614): If we use a usermem.Addr for the handle, we get
// "panic: autosave error: type usermem.Addr is not registered".
vmaAddr := vseg.End()
data = append(data, seqfile.SeqData{
@@ -88,7 +88,7 @@ func (mm *MemoryManager) ReadMapsSeqFileData(ctx context.Context, handle seqfile
//
// Artificially adjust the seqfile handle so we only output the vsyscall entry once.
if start != vsyscallEnd {
- // FIXME: Can't get a pointer to constant vsyscallEnd.
+ // FIXME(b/30793614): Can't get a pointer to constant vsyscallEnd.
vmaAddr := vsyscallEnd
data = append(data, seqfile.SeqData{
Buf: []byte(vsyscallMapsEntry),
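The workaround in both hunks above is ordinary Go: a constant is not addressable, so it must first be copied into a local variable whose address can then be handed to the seqfile machinery. A standalone illustration (the address is an example value, not taken from this file):

    const vsyscallEnd usermem.Addr = 0xffffffffff601000 // example value only
    // handle := &vsyscallEnd // does not compile: cannot take the address of a constant
    vmaAddr := vsyscallEnd // copy into an addressable local first...
    handle := &vmaAddr     // ...then point the seqfile handle at the copy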
@@ -134,7 +134,7 @@ func (mm *MemoryManager) appendVMAMapsEntryLocked(ctx context.Context, vseg vmaI
if vma.hint != "" {
s = vma.hint
} else if vma.id != nil {
- // FIXME: We are holding mm.mappingMu here, which is
+ // FIXME(jamieliu): We are holding mm.mappingMu here, which is
// consistent with Linux's holding mmap_sem in
// fs/proc/task_mmu.c:show_map_vma() => fs/seq_file.c:seq_file_path().
// However, it's not clear that fs.File.MappedName() is actually
@@ -162,7 +162,7 @@ func (mm *MemoryManager) ReadSmapsSeqFileData(ctx context.Context, handle seqfil
start = *handle.(*usermem.Addr)
}
for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() {
- // FIXME: If we use a usermem.Addr for the handle, we get
+ // FIXME(b/30793614): If we use a usermem.Addr for the handle, we get
// "panic: autosave error: type usermem.Addr is not registered".
vmaAddr := vseg.End()
data = append(data, seqfile.SeqData{
@@ -174,7 +174,7 @@ func (mm *MemoryManager) ReadSmapsSeqFileData(ctx context.Context, handle seqfil
// We always emulate vsyscall, so advertise it here. See
// ReadMapsSeqFileData for additional commentary.
if start != vsyscallEnd {
- // FIXME: Can't get a pointer to constant vsyscallEnd.
+ // FIXME(b/30793614): Can't get a pointer to constant vsyscallEnd.
vmaAddr := vsyscallEnd
data = append(data, seqfile.SeqData{
Buf: []byte(vsyscallSmapsEntry),
diff --git a/pkg/sentry/mm/special_mappable.go b/pkg/sentry/mm/special_mappable.go
index cfbf7a104..3b5161998 100644
--- a/pkg/sentry/mm/special_mappable.go
+++ b/pkg/sentry/mm/special_mappable.go
@@ -136,7 +136,7 @@ func (m *SpecialMappable) Length() uint64 {
// NewSharedAnonMappable returns a SpecialMappable that implements the
// semantics of mmap(MAP_SHARED|MAP_ANONYMOUS) and mappings of /dev/zero.
//
-// TODO: The use of SpecialMappable is a lazy code reuse hack. Linux
+// TODO(jamieliu): The use of SpecialMappable is a lazy code reuse hack. Linux
// uses an ephemeral file created by mm/shmem.c:shmem_zero_setup(); we should
// do the same to get non-zero device and inode IDs.
func NewSharedAnonMappable(length uint64, mfp pgalloc.MemoryFileProvider) (*SpecialMappable, error) {
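A hypothetical call site for this constructor, assuming the MemoryFileProvider is recovered from the context via pgalloc.MemoryFileProviderFromContext (everything here other than the signature above is an assumption):

    mfp := pgalloc.MemoryFileProviderFromContext(ctx) // assumed lookup helper
    m, err := NewSharedAnonMappable(length, mfp)
    if err != nil {
        return nil, err
    }
    // m now serves as both the Mappable and the MappingIdentity for the new
    // MAP_SHARED|MAP_ANONYMOUS region, with the zero device and inode IDs the
    // TODO above complains about.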
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
index cc7eb76d2..7b675b9b5 100644
--- a/pkg/sentry/mm/syscalls.go
+++ b/pkg/sentry/mm/syscalls.go
@@ -137,7 +137,7 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (userme
return 0, err
}
- // TODO: In Linux, VM_LOCKONFAULT (which may be set on the new
+ // TODO(jamieliu): In Linux, VM_LOCKONFAULT (which may be set on the new
// vma by mlockall(MCL_FUTURE|MCL_ONFAULT) => mm_struct::def_flags) appears
// to effectively disable MAP_POPULATE by unsetting FOLL_POPULATE in
// mm/util.c:vm_mmap_pgoff() => mm/gup.c:__mm_populate() =>
@@ -148,7 +148,7 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (userme
mm.populateVMAAndUnlock(ctx, vseg, ar, true)
case opts.Mappable == nil && length <= privateAllocUnit:
- // NOTE: Get pmas and map eagerly in the hope
+ // NOTE(b/63077076, b/63360184): Get pmas and map eagerly in the hope
// that doing so will save on future page faults. We only do this for
// anonymous mappings, since otherwise the cost of
// memmap.Mappable.Translate is unknown; and only for small mappings,
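Read together, the two changed comments above sit in a switch that decides whether MMap eagerly populates the new vma. A sketch of that shape (the first case's condition is an assumption; only the populateVMAAndUnlock calls appear in the diff):

    switch {
    case opts.Precommit: // assumed condition; cf. the MAP_POPULATE caveat above
        // Get pmas and map eagerly, committing memory immediately.
        mm.populateVMAAndUnlock(ctx, vseg, ar, true)
    case opts.Mappable == nil && length <= privateAllocUnit:
        // Small anonymous mapping: populate without precommit, in the hope
        // of saving future page faults.
        mm.populateVMAAndUnlock(ctx, vseg, ar, false)
        // (other cases omitted)
    }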
@@ -698,7 +698,7 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Ad
return mm.brk.End, syserror.EINVAL
}
- // TODO: This enforces RLIMIT_DATA, but is
+ // TODO(gvisor.dev/issue/156): This enforces RLIMIT_DATA, but is
// slightly more permissive than the usual data limit. In particular,
// this only limits the size of the heap; a true RLIMIT_DATA limits the
// size of heap + data + bss. The segment sizes need to be plumbed from
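If the segment sizes were plumbed through as the TODO suggests, the check might compare heap plus data plus bss against the limit. A hypothetical sketch, where dataSize and bssSize stand for the assumed plumbed-in values:

    // Hypothetical: a true RLIMIT_DATA check rather than the heap-only one.
    rlimitData := limits.FromContext(ctx).Get(limits.Data).Cur
    if uint64(addr-mm.brk.Start)+dataSize+bssSize > rlimitData {
        return mm.brk.End, syserror.ENOMEM
    }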
diff --git a/pkg/sentry/mm/vma.go b/pkg/sentry/mm/vma.go
index e9c9a80ea..931995254 100644
--- a/pkg/sentry/mm/vma.go
+++ b/pkg/sentry/mm/vma.go
@@ -274,7 +274,7 @@ func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar usermem.AddrRange
// Loop invariants: vgap = vseg.PrevGap(); addr < vseg.End().
vma := vseg.ValuePtr()
if addr < vseg.Start() {
- // TODO: Implement vma.growsDown here.
+ // TODO(jamieliu): Implement vma.growsDown here.
return vbegin, vgap, syserror.EFAULT
}
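For reference, a heavily hedged sketch of where growsDown handling would slot in; the stack-limit and guard-gap checks Linux performs in mm/mmap.c:expand_downwards are only alluded to in comments:

    if addr < vseg.Start() {
        if vma.growsDown {
            // Hypothetical: extend the vma's start down to addr here,
            // subject to the stack rlimit and guard gap, as Linux does in
            // mm/mmap.c:expand_downwards, instead of faulting.
        }
        return vbegin, vgap, syserror.EFAULT
    }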