-rw-r--r--  nogo.yaml  2
-rw-r--r--  pkg/sentry/fsimpl/gofer/gofer.go  28
-rw-r--r--  pkg/sentry/fsimpl/gofer/regular_file.go  39
-rw-r--r--  pkg/sentry/kernel/semaphore/semaphore.go  10
-rw-r--r--  pkg/sentry/socket/hostinet/socket_unsafe.go  2
-rw-r--r--  runsc/boot/filter/config.go  4
-rw-r--r--  runsc/cmd/chroot.go  25
-rw-r--r--  runsc/cmd/gofer.go  14
-rw-r--r--  test/perf/BUILD  14
-rw-r--r--  test/perf/linux/BUILD  38
-rw-r--r--  test/perf/linux/send_recv_benchmark.cc  21
-rw-r--r--  test/perf/linux/verity_open_benchmark.cc  71
-rw-r--r--  test/perf/linux/verity_read_benchmark.cc  69
-rw-r--r--  test/root/chroot_test.go  10
-rw-r--r--  test/runner/defs.bzl  6
-rw-r--r--  test/syscalls/linux/itimer.cc  7
-rw-r--r--  test/syscalls/linux/semaphore.cc  11
-rw-r--r--  test/syscalls/linux/socket_inet_loopback.cc  10
-rw-r--r--  test/syscalls/linux/verity_getdents.cc  6
-rw-r--r--  test/syscalls/linux/verity_ioctl.cc  22
-rw-r--r--  test/syscalls/linux/verity_mmap.cc  8
-rw-r--r--  test/syscalls/linux/verity_symlink.cc  18
-rw-r--r--  test/util/verity_util.cc  13
-rw-r--r--  test/util/verity_util.h  1
-rw-r--r--  tools/constraintutil/BUILD  18
-rw-r--r--  tools/constraintutil/constraintutil.go  169
-rw-r--r--  tools/constraintutil/constraintutil_test.go  138
-rw-r--r--  tools/go_generics/go_merge/BUILD  2
-rw-r--r--  tools/go_generics/go_merge/main.go  13
-rw-r--r--  tools/go_marshal/gomarshal/BUILD  2
-rw-r--r--  tools/go_marshal/gomarshal/generator.go  33
-rw-r--r--  tools/go_stateify/BUILD  2
-rw-r--r--  tools/go_stateify/main.go  11
-rw-r--r--  tools/nogo/BUILD  8
-rw-r--r--  tools/nogo/config.go  18
-rw-r--r--  tools/nogo/config_test.go  301
-rw-r--r--  tools/tags/BUILD  11
-rw-r--r--  tools/nogo/findings.go  2
-rw-r--r--  tools/tags/tags.go  89
39 files changed, 1045 insertions, 221 deletions
diff --git a/nogo.yaml b/nogo.yaml
index 9b7fc5c8f..d7a3ad956 100644
--- a/nogo.yaml
+++ b/nogo.yaml
@@ -130,6 +130,8 @@ analyzers:
external: # Enabled.
unreachable:
external: # Enabled.
+ exclude:
+ - ".*protobuf/.*.go"
unsafeptr:
internal:
exclude:
diff --git a/pkg/sentry/fsimpl/gofer/gofer.go b/pkg/sentry/fsimpl/gofer/gofer.go
index ec8d58cc9..25d2e39d6 100644
--- a/pkg/sentry/fsimpl/gofer/gofer.go
+++ b/pkg/sentry/fsimpl/gofer/gofer.go
@@ -1161,6 +1161,13 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, opts *vfs
if !d.isSynthetic() {
if stat.Mask != 0 {
+ if stat.Mask&linux.STATX_SIZE != 0 {
+ // d.dataMu must be held around the update to both the remote
+ // file's size and d.size to serialize with writeback (which
+ // might otherwise write data back up to the old d.size after
+ // the remote file has been truncated).
+ d.dataMu.Lock()
+ }
if err := d.file.setAttr(ctx, p9.SetAttrMask{
Permissions: stat.Mask&linux.STATX_MODE != 0,
UID: stat.Mask&linux.STATX_UID != 0,
@@ -1180,13 +1187,16 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, opts *vfs
MTimeSeconds: uint64(stat.Mtime.Sec),
MTimeNanoSeconds: uint64(stat.Mtime.Nsec),
}); err != nil {
+ if stat.Mask&linux.STATX_SIZE != 0 {
+ d.dataMu.Unlock() // +checklocksforce: locked conditionally above
+ }
return err
}
if stat.Mask&linux.STATX_SIZE != 0 {
// d.size should be kept up to date, and privatized
// copy-on-write mappings of truncated pages need to be
// invalidated, even if InteropModeShared is in effect.
- d.updateSizeLocked(stat.Size)
+ d.updateSizeAndUnlockDataMuLocked(stat.Size) // +checklocksforce: locked conditionally above
}
}
if d.fs.opts.interop == InteropModeShared {
@@ -1249,6 +1259,14 @@ func (d *dentry) doAllocate(ctx context.Context, offset, length uint64, allocate
// Preconditions: d.metadataMu must be locked.
func (d *dentry) updateSizeLocked(newSize uint64) {
d.dataMu.Lock()
+ d.updateSizeAndUnlockDataMuLocked(newSize)
+}
+
+// Preconditions: d.metadataMu and d.dataMu must be locked.
+//
+// Postconditions: d.dataMu is unlocked.
+// +checklocksrelease:d.dataMu
+func (d *dentry) updateSizeAndUnlockDataMuLocked(newSize uint64) {
oldSize := d.size
atomic.StoreUint64(&d.size, newSize)
// d.dataMu must be unlocked to lock d.mapsMu and invalidate mappings
@@ -1257,9 +1275,9 @@ func (d *dentry) updateSizeLocked(newSize uint64) {
// contents beyond the new d.size. (We are still holding d.metadataMu,
// so we can't race with Write or another truncate.)
d.dataMu.Unlock()
- if d.size < oldSize {
+ if newSize < oldSize {
oldpgend, _ := hostarch.PageRoundUp(oldSize)
- newpgend, _ := hostarch.PageRoundUp(d.size)
+ newpgend, _ := hostarch.PageRoundUp(newSize)
if oldpgend != newpgend {
d.mapsMu.Lock()
d.mappings.Invalidate(memmap.MappableRange{newpgend, oldpgend}, memmap.InvalidateOpts{
@@ -1275,8 +1293,8 @@ func (d *dentry) updateSizeLocked(newSize uint64) {
// truncated pages have been removed from the remote file, they
// should be dropped without being written back.
d.dataMu.Lock()
- d.cache.Truncate(d.size, d.fs.mfp.MemoryFile())
- d.dirty.KeepClean(memmap.MappableRange{d.size, oldpgend})
+ d.cache.Truncate(newSize, d.fs.mfp.MemoryFile())
+ d.dirty.KeepClean(memmap.MappableRange{newSize, oldpgend})
d.dataMu.Unlock()
}
}
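
The hunks above change the locking contract for truncation: setStat now takes d.dataMu before the remote setattr when STATX_SIZE is set, and updateSizeAndUnlockDataMuLocked both updates d.size and releases d.dataMu, which is what the +checklocksrelease annotation tells the checklocks analyzer. A minimal, self-contained sketch of that "caller locks, callee unlocks" shape, using hypothetical names that only mirror the pattern:

package main

import (
	"fmt"
	"sync"
)

type counter struct {
	mu    sync.Mutex
	value uint64
}

// update stores v; it acquires c.mu itself and delegates the unlock.
func (c *counter) update(v uint64) {
	c.mu.Lock()
	c.updateAndUnlockLocked(v)
}

// Preconditions: c.mu must be locked.
// Postconditions: c.mu is unlocked.
// +checklocksrelease:c.mu
func (c *counter) updateAndUnlockLocked(v uint64) {
	c.value = v
	// Drop c.mu before doing work that must not hold it (the gofer code
	// takes d.mapsMu and invalidates mappings at this point).
	c.mu.Unlock()
}

func main() {
	c := &counter{}
	c.update(42)
	fmt.Println(c.value)
}

Delegating the unlock lets the callee guarantee the mutex is released before it acquires other locks, while checklocks still verifies the pairing.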
diff --git a/pkg/sentry/fsimpl/gofer/regular_file.go b/pkg/sentry/fsimpl/gofer/regular_file.go
index 91405fe66..947dbe05f 100644
--- a/pkg/sentry/fsimpl/gofer/regular_file.go
+++ b/pkg/sentry/fsimpl/gofer/regular_file.go
@@ -79,17 +79,22 @@ func (fd *regularFileFD) OnClose(ctx context.Context) error {
if !fd.vfsfd.IsWritable() {
return nil
}
- // Skip flushing if there are client-buffered writes, since (as with the
- // VFS1 client) we don't flush buffered writes on close anyway.
d := fd.dentry()
- if d.fs.opts.interop != InteropModeExclusive {
- return nil
- }
- d.dataMu.RLock()
- haveDirtyPages := !d.dirty.IsEmpty()
- d.dataMu.RUnlock()
- if haveDirtyPages {
- return nil
+ if d.fs.opts.interop == InteropModeExclusive {
+ // d may have dirty pages that we won't write back now (and wouldn't
+ // have in VFS1), making a flushf RPC ineffective. If this is the case,
+ // skip the flushf.
+ //
+ // Note that it's also possible to have dirty pages under other interop
+ // modes if forcePageCache is in effect; we conservatively assume that
+ // applications have some way of tolerating this and still want the
+ // flushf.
+ d.dataMu.RLock()
+ haveDirtyPages := !d.dirty.IsEmpty()
+ d.dataMu.RUnlock()
+ if haveDirtyPages {
+ return nil
+ }
}
d.handleMu.RLock()
defer d.handleMu.RUnlock()
@@ -707,14 +712,8 @@ func (fd *regularFileFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpt
return vfs.GenericConfigureMMap(&fd.vfsfd, d, opts)
}
-func (d *dentry) mayCachePages() bool {
- if d.fs.opts.forcePageCache {
- return true
- }
- if d.fs.opts.interop == InteropModeShared {
- return false
- }
- return atomic.LoadInt32(&d.mmapFD) >= 0
+func (fs *filesystem) mayCachePagesInMemoryFile() bool {
+ return fs.opts.forcePageCache || fs.opts.interop != InteropModeShared
}
// AddMapping implements memmap.Mappable.AddMapping.
@@ -726,7 +725,7 @@ func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar host
for _, r := range mapped {
d.pf.hostFileMapper.IncRefOn(r)
}
- if d.mayCachePages() {
+ if d.fs.mayCachePagesInMemoryFile() {
// d.Evict() will refuse to evict memory-mapped pages, so tell the
// MemoryFile to not bother trying.
mf := d.fs.mfp.MemoryFile()
@@ -745,7 +744,7 @@ func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar h
for _, r := range unmapped {
d.pf.hostFileMapper.DecRefOn(r)
}
- if d.mayCachePages() {
+ if d.fs.mayCachePagesInMemoryFile() {
// Pages that are no longer referenced by any application memory
// mappings are now considered unused; allow MemoryFile to evict them
// when necessary.
diff --git a/pkg/sentry/kernel/semaphore/semaphore.go b/pkg/sentry/kernel/semaphore/semaphore.go
index b7879d284..8610d3fc1 100644
--- a/pkg/sentry/kernel/semaphore/semaphore.go
+++ b/pkg/sentry/kernel/semaphore/semaphore.go
@@ -214,15 +214,14 @@ func (r *Registry) Remove(id ipc.ID, creds *auth.Credentials) error {
r.mu.Lock()
defer r.mu.Unlock()
- r.reg.Remove(id, creds)
-
index, found := r.findIndexByID(id)
if !found {
- // Inconsistent state.
- panic(fmt.Sprintf("unable to find an index for ID: %d", id))
+ return linuxerr.EINVAL
}
delete(r.indexes, index)
+ r.reg.Remove(id, creds)
+
return nil
}
@@ -245,7 +244,8 @@ func (r *Registry) newSetLocked(ctx context.Context, key ipc.Key, creator fs.Fil
index, found := r.findFirstAvailableIndex()
if !found {
- panic("unable to find an available index")
+ // See linux, ipc/sem.c:newary().
+ return nil, linuxerr.ENOSPC
}
r.indexes[index] = set.obj.ID
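
Both hunks replace panics with the errors Linux itself returns: removing an unknown or already-removed ID yields EINVAL, and running out of set indexes yields ENOSPC (see ipc/sem.c:newary()). A small sketch of the same error-returning registry shape, where errNotFound and the registry type are stand-ins rather than the real semaphore types:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNotFound = errors.New("no such ID") // stands in for linuxerr.EINVAL

type registry struct {
	mu      sync.Mutex
	indexes map[int32]int32 // index -> ID
}

func (r *registry) remove(id int32) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	for index, got := range r.indexes {
		if got == id {
			delete(r.indexes, index)
			return nil
		}
	}
	// A repeated removal or a bogus ID is a caller error, not an
	// inconsistent registry, so report it instead of panicking.
	return errNotFound
}

func main() {
	r := &registry{indexes: map[int32]int32{0: 7}}
	fmt.Println(r.remove(7)) // <nil>
	fmt.Println(r.remove(7)) // no such ID
}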
diff --git a/pkg/sentry/socket/hostinet/socket_unsafe.go b/pkg/sentry/socket/hostinet/socket_unsafe.go
index ccf4f534d..587f479eb 100644
--- a/pkg/sentry/socket/hostinet/socket_unsafe.go
+++ b/pkg/sentry/socket/hostinet/socket_unsafe.go
@@ -67,7 +67,7 @@ func ioctl(ctx context.Context, fd int, io usermem.IO, args arch.SyscallArgument
AddressSpaceActive: true,
})
return 0, err
- case unix.SIOCGIFFLAGS:
+ case unix.SIOCGIFFLAGS, unix.SIOCGIFCONF:
cc := &usermem.IOCopyContext{
Ctx: ctx,
IO: io,
diff --git a/runsc/boot/filter/config.go b/runsc/boot/filter/config.go
index 33e738efc..703f34827 100644
--- a/runsc/boot/filter/config.go
+++ b/runsc/boot/filter/config.go
@@ -463,6 +463,10 @@ func hostInetFilters() seccomp.SyscallRules {
seccomp.MatchAny{},
seccomp.EqualTo(unix.SIOCGIFFLAGS),
},
+ {
+ seccomp.MatchAny{},
+ seccomp.EqualTo(unix.SIOCGIFCONF),
+ },
},
unix.SYS_LISTEN: {},
unix.SYS_READV: {},
diff --git a/runsc/cmd/chroot.go b/runsc/cmd/chroot.go
index 7b11b3367..1fe9c6435 100644
--- a/runsc/cmd/chroot.go
+++ b/runsc/cmd/chroot.go
@@ -59,6 +59,23 @@ func pivotRoot(root string) error {
return nil
}
+func copyFile(dst, src string) error {
+ in, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer in.Close()
+
+ out, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer out.Close()
+
+ _, err = out.ReadFrom(in)
+ return err
+}
+
// setUpChroot creates an empty directory with runsc mounted at /runsc and proc
// mounted at /proc.
func setUpChroot(pidns bool) error {
@@ -78,6 +95,14 @@ func setUpChroot(pidns bool) error {
return fmt.Errorf("error mounting tmpfs in choot: %v", err)
}
+ if err := os.Mkdir(filepath.Join(chroot, "etc"), 0755); err != nil {
+ return fmt.Errorf("error creating /etc in chroot: %v", err)
+ }
+
+ if err := copyFile(filepath.Join(chroot, "etc/localtime"), "/etc/localtime"); err != nil {
+ log.Warningf("Failed to copy /etc/localtime: %v. UTC timezone will be used.", err)
+ }
+
if pidns {
flags := uint32(unix.MS_NOSUID | unix.MS_NODEV | unix.MS_NOEXEC | unix.MS_RDONLY)
if err := mountInChroot(chroot, "proc", "/proc", "proc", flags); err != nil {
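
copyFile relies on *os.File.ReadFrom, which behaves like io.Copy(out, in); either way the copy of /etc/localtime into the chroot is best-effort, and a failure only logs a warning and leaves the sandbox on UTC. A sketch of the same behavior with io.Copy spelled out (the destination path and log message here are illustrative):

package main

import (
	"io"
	"log"
	"os"
)

func copyFile(dst, src string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, in)
	return err
}

func main() {
	if err := copyFile("/tmp/localtime-copy", "/etc/localtime"); err != nil {
		// Mirror the change above: failure is non-fatal and UTC is used.
		log.Printf("Failed to copy /etc/localtime: %v. UTC timezone will be used.", err)
	}
}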
diff --git a/runsc/cmd/gofer.go b/runsc/cmd/gofer.go
index 20e05f141..2193e9040 100644
--- a/runsc/cmd/gofer.go
+++ b/runsc/cmd/gofer.go
@@ -285,16 +285,22 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {
// Prepare tree structure for pivot_root(2).
if err := os.Mkdir("/proc/proc", 0755); err != nil {
- Fatalf("%v", err)
+ Fatalf("error creating /proc/proc: %v", err)
}
if err := os.Mkdir("/proc/root", 0755); err != nil {
- Fatalf("%v", err)
+ Fatalf("error creating /proc/root: %v", err)
+ }
+ if err := os.Mkdir("/proc/etc", 0755); err != nil {
+ Fatalf("error creating /proc/etc: %v", err)
}
// This cannot use SafeMount because there's no available procfs. But we
// know that /proc is an empty tmpfs mount, so this is safe.
if err := unix.Mount("runsc-proc", "/proc/proc", "proc", flags|unix.MS_RDONLY, ""); err != nil {
Fatalf("error mounting proc: %v", err)
}
+ if err := copyFile("/proc/etc/localtime", "/etc/localtime"); err != nil {
+ log.Warningf("Failed to copy /etc/localtime: %v. UTC timezone will be used.", err)
+ }
root = "/proc/root"
procPath = "/proc/proc"
}
@@ -409,7 +415,7 @@ func resolveMounts(conf *config.Config, mounts []specs.Mount, root string) ([]sp
panic(fmt.Sprintf("%q could not be made relative to %q: %v", dst, root, err))
}
- opts, err := adjustMountOptions(filepath.Join(root, relDst), m.Options)
+ opts, err := adjustMountOptions(conf, filepath.Join(root, relDst), m.Options)
if err != nil {
return nil, err
}
@@ -475,7 +481,7 @@ func resolveSymlinksImpl(root, base, rel string, followCount uint) (string, erro
}
// adjustMountOptions adds 'overlayfs_stale_read' if mounting over overlayfs.
-func adjustMountOptions(path string, opts []string) ([]string, error) {
+func adjustMountOptions(conf *config.Config, path string, opts []string) ([]string, error) {
rv := make([]string, len(opts))
copy(rv, opts)
diff --git a/test/perf/BUILD b/test/perf/BUILD
index 75b5003e2..8ac63f11b 100644
--- a/test/perf/BUILD
+++ b/test/perf/BUILD
@@ -139,3 +139,17 @@ syscall_test(
debug = False,
test = "//test/perf/linux:write_benchmark",
)
+
+syscall_test(
+ size = "large",
+ debug = False,
+ test = "//test/perf/linux:verity_open_benchmark",
+ vfs1 = False,
+)
+
+syscall_test(
+ size = "large",
+ debug = False,
+ test = "//test/perf/linux:verity_read_benchmark",
+ vfs1 = False,
+)
diff --git a/test/perf/linux/BUILD b/test/perf/linux/BUILD
index e76e359ff..50dd8e808 100644
--- a/test/perf/linux/BUILD
+++ b/test/perf/linux/BUILD
@@ -370,3 +370,41 @@ cc_binary(
"//test/util:test_main",
],
)
+
+cc_binary(
+ name = "verity_open_benchmark",
+ testonly = 1,
+ srcs = [
+ "verity_open_benchmark.cc",
+ ],
+ deps = [
+ gbenchmark,
+ gtest,
+ "//test/util:capability_util",
+ "//test/util:fs_util",
+ "//test/util:logging",
+ "//test/util:temp_path",
+ "//test/util:test_main",
+ "//test/util:test_util",
+ "//test/util:verity_util",
+ ],
+)
+
+cc_binary(
+ name = "verity_read_benchmark",
+ testonly = 1,
+ srcs = [
+ "verity_read_benchmark.cc",
+ ],
+ deps = [
+ gbenchmark,
+ gtest,
+ "//test/util:capability_util",
+ "//test/util:fs_util",
+ "//test/util:logging",
+ "//test/util:temp_path",
+ "//test/util:test_main",
+ "//test/util:test_util",
+ "//test/util:verity_util",
+ ],
+)
diff --git a/test/perf/linux/send_recv_benchmark.cc b/test/perf/linux/send_recv_benchmark.cc
index 41509e211..2d443f54f 100644
--- a/test/perf/linux/send_recv_benchmark.cc
+++ b/test/perf/linux/send_recv_benchmark.cc
@@ -80,6 +80,9 @@ void BM_Recvmsg(benchmark::State& state) {
int64_t bytes_received = 0;
for (auto ignored : state) {
int n = recvmsg(recv_socket.get(), recv_msg.header(), 0);
+ if (n == -1 && errno == EINTR) {
+ continue;
+ }
TEST_CHECK(n > 0);
bytes_received += n;
}
@@ -108,6 +111,9 @@ void BM_Sendmsg(benchmark::State& state) {
int64_t bytes_sent = 0;
for (auto ignored : state) {
int n = sendmsg(send_socket.get(), send_msg.header(), 0);
+ if (n == -1 && errno == EINTR) {
+ continue;
+ }
TEST_CHECK(n > 0);
bytes_sent += n;
}
@@ -137,6 +143,9 @@ void BM_Recvfrom(benchmark::State& state) {
for (auto ignored : state) {
int n = recvfrom(recv_socket.get(), recv_buffer, kMessageSize, 0, nullptr,
nullptr);
+ if (n == -1 && errno == EINTR) {
+ continue;
+ }
TEST_CHECK(n > 0);
bytes_received += n;
}
@@ -166,6 +175,9 @@ void BM_Sendto(benchmark::State& state) {
int64_t bytes_sent = 0;
for (auto ignored : state) {
int n = sendto(send_socket.get(), send_buffer, kMessageSize, 0, nullptr, 0);
+ if (n == -1 && errno == EINTR) {
+ continue;
+ }
TEST_CHECK(n > 0);
bytes_sent += n;
}
@@ -247,6 +259,9 @@ void BM_RecvmsgWithControlBuf(benchmark::State& state) {
int64_t bytes_received = 0;
for (auto ignored : state) {
int n = recvmsg(recv_socket.get(), recv_msg.header(), 0);
+ if (n == -1 && errno == EINTR) {
+ continue;
+ }
TEST_CHECK(n > 0);
bytes_received += n;
}
@@ -316,7 +331,11 @@ void BM_SendmsgTCP(benchmark::State& state) {
ScopedThread t([&recv_msg, &recv_socket, &notification] {
while (!notification.HasBeenNotified()) {
- TEST_CHECK(recvmsg(recv_socket.get(), recv_msg.header(), 0) >= 0);
+ int rc = recvmsg(recv_socket.get(), recv_msg.header(), 0);
+ if (rc == -1 && errno == EINTR) {
+ continue;
+ }
+ TEST_CHECK(rc >= 0);
}
});
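
Each benchmark loop now tolerates EINTR, since the signal used to drive save/restore can interrupt a blocking send or receive. The same retry-on-EINTR idea, sketched in Go with golang.org/x/sys/unix on a raw socketpair (illustrative only; the actual fix is the C++ above):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// readRetryingEINTR reads from fd, retrying when the call is interrupted by a
// signal before any data is transferred.
func readRetryingEINTR(fd int, buf []byte) (int, error) {
	for {
		n, err := unix.Read(fd, buf)
		if err == unix.EINTR {
			continue
		}
		return n, err
	}
}

func main() {
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	if _, err := unix.Write(fds[1], []byte("ping")); err != nil {
		panic(err)
	}
	buf := make([]byte, 4)
	n, err := readRetryingEINTR(fds[0], buf)
	fmt.Println(n, err, string(buf[:n]))
}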
diff --git a/test/perf/linux/verity_open_benchmark.cc b/test/perf/linux/verity_open_benchmark.cc
new file mode 100644
index 000000000..026b6f101
--- /dev/null
+++ b/test/perf/linux/verity_open_benchmark.cc
@@ -0,0 +1,71 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/mount.h>
+#include <unistd.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "benchmark/benchmark.h"
+#include "test/util/capability_util.h"
+#include "test/util/fs_util.h"
+#include "test/util/logging.h"
+#include "test/util/temp_path.h"
+#include "test/util/test_util.h"
+#include "test/util/verity_util.h"
+
+namespace gvisor {
+namespace testing {
+
+namespace {
+
+void BM_Open(benchmark::State& state) {
+ const int size = state.range(0);
+ std::vector<TempPath> cache;
+ std::vector<EnableTarget> targets;
+
+ // Mount a tmpfs file system to be wrapped by a verity fs.
+ TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
+ TEST_CHECK(mount("", dir.path().c_str(), "tmpfs", 0, "") == 0);
+
+ for (int i = 0; i < size; i++) {
+ auto path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));
+ targets.emplace_back(
+ EnableTarget(std::string(Basename(path.path())), O_RDONLY));
+ cache.emplace_back(std::move(path));
+ }
+
+ std::string verity_dir =
+ TEST_CHECK_NO_ERRNO_AND_VALUE(MountVerity(dir.path(), targets));
+
+ unsigned int seed = 1;
+ for (auto _ : state) {
+ const int chosen = rand_r(&seed) % size;
+ int fd = open(JoinPath(verity_dir, targets[chosen].path).c_str(), O_RDONLY);
+ TEST_CHECK(fd != -1);
+ close(fd);
+ }
+}
+
+BENCHMARK(BM_Open)->Range(1, 128)->UseRealTime();
+
+} // namespace
+
+} // namespace testing
+} // namespace gvisor
diff --git a/test/perf/linux/verity_read_benchmark.cc b/test/perf/linux/verity_read_benchmark.cc
new file mode 100644
index 000000000..738b5ba45
--- /dev/null
+++ b/test/perf/linux/verity_read_benchmark.cc
@@ -0,0 +1,69 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/mount.h>
+#include <unistd.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "benchmark/benchmark.h"
+#include "test/util/capability_util.h"
+#include "test/util/fs_util.h"
+#include "test/util/logging.h"
+#include "test/util/temp_path.h"
+#include "test/util/test_util.h"
+#include "test/util/verity_util.h"
+
+namespace gvisor {
+namespace testing {
+
+namespace {
+
+void BM_VerityRead(benchmark::State& state) {
+ const int size = state.range(0);
+ const std::string contents(size, 0);
+
+ // Mount a tmpfs file system to be wrapped by a verity fs.
+ TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
+ TEST_CHECK(mount("", dir.path().c_str(), "tmpfs", 0, "") == 0);
+
+ auto path = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(
+ dir.path(), contents, TempPath::kDefaultFileMode));
+ std::string filename = std::string(Basename(path.path()));
+
+ std::string verity_dir = TEST_CHECK_NO_ERRNO_AND_VALUE(
+ MountVerity(dir.path(), {EnableTarget(filename, O_RDONLY)}));
+
+ FileDescriptor fd =
+ ASSERT_NO_ERRNO_AND_VALUE(Open(JoinPath(verity_dir, filename), O_RDONLY));
+ std::vector<char> buf(size);
+ for (auto _ : state) {
+ TEST_CHECK(PreadFd(fd.get(), buf.data(), buf.size(), 0) == size);
+ }
+
+ state.SetBytesProcessed(static_cast<int64_t>(size) *
+ static_cast<int64_t>(state.iterations()));
+}
+
+BENCHMARK(BM_VerityRead)->Range(1, 1 << 26)->UseRealTime();
+
+} // namespace
+
+} // namespace testing
+} // namespace gvisor
diff --git a/test/root/chroot_test.go b/test/root/chroot_test.go
index 58fcd6f08..5114a9602 100644
--- a/test/root/chroot_test.go
+++ b/test/root/chroot_test.go
@@ -68,13 +68,15 @@ func TestChroot(t *testing.T) {
if err != nil {
t.Fatalf("error listing %q: %v", chroot, err)
}
- if want, got := 1, len(fi); want != got {
+ if want, got := 2, len(fi); want != got {
t.Fatalf("chroot dir got %d entries, want %d", got, want)
}
- // chroot dir is prepared by runsc and should contains only /proc.
- if fi[0].Name() != "proc" {
- t.Errorf("chroot got children %v, want %v", fi[0].Name(), "proc")
+ // chroot dir is prepared by runsc and should contains only /etc and /proc.
+ for i, want := range []string{"etc", "proc"} {
+ if got := fi[i].Name(); got != want {
+ t.Errorf("chroot got child %v, want %v", got, want)
+ }
}
d.CleanUp(ctx)
diff --git a/test/runner/defs.bzl b/test/runner/defs.bzl
index 405e03832..05c75b130 100644
--- a/test/runner/defs.bzl
+++ b/test/runner/defs.bzl
@@ -135,6 +135,7 @@ def syscall_test(
add_overlay = False,
add_uds_tree = False,
add_hostinet = False,
+ vfs1 = True,
vfs2 = True,
fuse = False,
debug = True,
@@ -148,6 +149,7 @@ def syscall_test(
add_overlay: add an overlay test.
add_uds_tree: add a UDS test.
add_hostinet: add a hostinet test.
+ vfs1: enable VFS1 tests. Could be false only if vfs2 is true.
vfs2: enable VFS2 support.
fuse: enable FUSE support.
debug: enable debug output.
@@ -157,7 +159,7 @@ def syscall_test(
if not tags:
tags = []
- if vfs2 and not fuse:
+ if vfs2 and vfs1 and not fuse:
# Generate a vfs1 plain test. Most testing will now be
# biased towards vfs2, with only a single vfs1 case.
_syscall_test(
@@ -171,7 +173,7 @@ def syscall_test(
**kwargs
)
- if not fuse:
+ if vfs1 and not fuse:
# Generate a native test if fuse is not required.
_syscall_test(
test = test,
diff --git a/test/syscalls/linux/itimer.cc b/test/syscalls/linux/itimer.cc
index ac113e6da..9fb04eae6 100644
--- a/test/syscalls/linux/itimer.cc
+++ b/test/syscalls/linux/itimer.cc
@@ -197,9 +197,9 @@ int TestSIGALRMToMainThread() {
// (but don't guarantee it), so we expect to see most samples on the main
// thread.
//
- // The number of SIGALRMs delivered to a worker should not exceed 20%
+ // The number of SIGALRMs delivered to a worker should not exceed 40%
// of the number of total signals expected (this is somewhat arbitrary).
- const int worker_threshold = result.expected_total / 5;
+ const int worker_threshold = result.expected_total / 5 * 2;
//
// Linux only guarantees timers will never expire before the requested time.
@@ -230,7 +230,8 @@ TEST(ItimerTest, DeliversSIGALRMToMainThread) {
// Not required anymore.
kill.Release();
- EXPECT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) == 0) << status;
+ EXPECT_EQ(WIFEXITED(status) && WEXITSTATUS(status), 0)
+ << WIFEXITED(status) << " " << WEXITSTATUS(status);
}
// Signals are delivered to threads fairly.
diff --git a/test/syscalls/linux/semaphore.cc b/test/syscalls/linux/semaphore.cc
index f72957f89..87b66aa98 100644
--- a/test/syscalls/linux/semaphore.cc
+++ b/test/syscalls/linux/semaphore.cc
@@ -1019,6 +1019,17 @@ TEST(SemaphoreTest, SemInfo) {
EXPECT_EQ(info.semvmx, kSemVmx);
}
+TEST(SempahoreTest, RemoveNonExistentSemaphore) {
+ EXPECT_THAT(semctl(-1, 0, IPC_RMID), SyscallFailsWithErrno(EINVAL));
+}
+
+TEST(SempahoreTest, RemoveDeletedSemaphore) {
+ int id;
+ EXPECT_THAT(id = semget(IPC_PRIVATE, 1, 0), SyscallSucceeds());
+ EXPECT_THAT(semctl(id, 0, IPC_RMID), SyscallSucceeds());
+ EXPECT_THAT(semctl(id, 0, IPC_RMID), SyscallFailsWithErrno(EINVAL));
+}
+
} // namespace
} // namespace testing
} // namespace gvisor
diff --git a/test/syscalls/linux/socket_inet_loopback.cc b/test/syscalls/linux/socket_inet_loopback.cc
index 5f0b40ecf..13a83a1b3 100644
--- a/test/syscalls/linux/socket_inet_loopback.cc
+++ b/test/syscalls/linux/socket_inet_loopback.cc
@@ -308,7 +308,7 @@ TEST_P(SocketInetLoopbackTest, TCPListenShutdownListen) {
sockaddr_storage conn_addr = connector.addr;
ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- // TODO(b/157236388): Remove Disable save after bug is fixed. S/R test can
+ // TODO(b/153489135): Remove Disable save after bug is fixed. S/R test can
// fail because the last socket may not be delivered to the accept queue
// by the time connect returns.
DisableSave ds;
@@ -751,7 +751,7 @@ TEST_P(SocketInetLoopbackTest, TCPNonBlockingConnectClose) {
}
}
-// TODO(b/157236388): Remove once bug is fixed. Test fails w/
+// TODO(b/153489135): Remove once bug is fixed. Test fails w/
// random save as established connections which can't be delivered to the accept
// queue because the queue is full are not correctly delivered after restore
// causing the last accept to timeout on the restore.
@@ -801,7 +801,7 @@ TEST_P(SocketInetLoopbackTest, TCPAcceptBacklogSizes) {
}
}
-// TODO(b/157236388): Remove once bug is fixed. Test fails w/
+// TODO(b/153489135): Remove once bug is fixed. Test fails w/
// random save as established connections which can't be delivered to the accept
// queue because the queue is full are not correctly delivered after restore
// causing the last accept to timeout on the restore.
@@ -892,7 +892,7 @@ TEST_P(SocketInetLoopbackTest, TCPBacklog) {
ASSERT_GE(client_conns, accepted_conns);
}
-// TODO(b/157236388): Remove once bug is fixed. Test fails w/
+// TODO(b/153489135): Remove once bug is fixed. Test fails w/
// random save as established connections which can't be delivered to the accept
// queue because the queue is full are not correctly delivered after restore
// causing the last accept to timeout on the restore.
@@ -1136,7 +1136,7 @@ TEST_P(SocketInetLoopbackTest, TCPAcceptAfterReset) {
sockaddr_storage conn_addr = connector.addr;
ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port));
- // TODO(b/157236388): Reenable Cooperative S/R once bug is fixed.
+ // TODO(b/153489135): Reenable Cooperative S/R once bug is fixed.
DisableSave ds;
ASSERT_THAT(RetryEINTR(connect)(conn_fd.get(), AsSockAddr(&conn_addr),
connector.addr_len),
diff --git a/test/syscalls/linux/verity_getdents.cc b/test/syscalls/linux/verity_getdents.cc
index 2eafc3dd3..822a75254 100644
--- a/test/syscalls/linux/verity_getdents.cc
+++ b/test/syscalls/linux/verity_getdents.cc
@@ -59,7 +59,7 @@ class GetDentsTest : public ::testing::Test {
TEST_F(GetDentsTest, GetDents) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
std::vector<std::string> expect = {".", "..", filename_};
EXPECT_NO_ERRNO(DirContains(verity_dir, expect, /*exclude=*/{}));
@@ -67,7 +67,7 @@ TEST_F(GetDentsTest, GetDents) {
TEST_F(GetDentsTest, Deleted) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
EXPECT_THAT(unlink(JoinPath(tmpfs_dir_.path(), filename_).c_str()),
SyscallSucceeds());
@@ -78,7 +78,7 @@ TEST_F(GetDentsTest, Deleted) {
TEST_F(GetDentsTest, Renamed) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
std::string new_file_name = "renamed-" + filename_;
EXPECT_THAT(rename(JoinPath(tmpfs_dir_.path(), filename_).c_str(),
diff --git a/test/syscalls/linux/verity_ioctl.cc b/test/syscalls/linux/verity_ioctl.cc
index e7e4fa64b..45650809c 100644
--- a/test/syscalls/linux/verity_ioctl.cc
+++ b/test/syscalls/linux/verity_ioctl.cc
@@ -106,7 +106,7 @@ TEST_F(IoctlTest, Measure) {
TEST_F(IoctlTest, Mount) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
// Make sure the file can be open and read in the mounted verity fs.
auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(
@@ -118,7 +118,7 @@ TEST_F(IoctlTest, Mount) {
TEST_F(IoctlTest, NonExistingFile) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
// Confirm that opening a non-existing file in the verity-enabled directory
// triggers the expected error instead of verification failure.
@@ -129,7 +129,7 @@ TEST_F(IoctlTest, NonExistingFile) {
TEST_F(IoctlTest, ModifiedFile) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
// Modify the file and check verification failure upon reading from it.
auto const fd = ASSERT_NO_ERRNO_AND_VALUE(
@@ -144,7 +144,7 @@ TEST_F(IoctlTest, ModifiedFile) {
TEST_F(IoctlTest, ModifiedMerkle) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
// Modify the Merkle file and check verification failure upon opening the
// corresponding file.
@@ -159,7 +159,7 @@ TEST_F(IoctlTest, ModifiedMerkle) {
TEST_F(IoctlTest, ModifiedDirMerkle) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
// Modify the Merkle file for the parent directory and check verification
// failure upon opening the corresponding file.
@@ -174,7 +174,7 @@ TEST_F(IoctlTest, ModifiedDirMerkle) {
TEST_F(IoctlTest, Stat) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
struct stat st;
EXPECT_THAT(stat(JoinPath(verity_dir, filename_).c_str(), &st),
@@ -183,7 +183,7 @@ TEST_F(IoctlTest, Stat) {
TEST_F(IoctlTest, ModifiedStat) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
EXPECT_THAT(chmod(JoinPath(tmpfs_dir_.path(), filename_).c_str(), 0644),
SyscallSucceeds());
@@ -194,7 +194,7 @@ TEST_F(IoctlTest, ModifiedStat) {
TEST_F(IoctlTest, DeleteFile) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
EXPECT_THAT(unlink(JoinPath(tmpfs_dir_.path(), filename_).c_str()),
SyscallSucceeds());
@@ -204,7 +204,7 @@ TEST_F(IoctlTest, DeleteFile) {
TEST_F(IoctlTest, DeleteMerkle) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
EXPECT_THAT(
unlink(MerklePath(JoinPath(tmpfs_dir_.path(), filename_)).c_str()),
@@ -215,7 +215,7 @@ TEST_F(IoctlTest, DeleteMerkle) {
TEST_F(IoctlTest, RenameFile) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
std::string new_file_name = "renamed-" + filename_;
EXPECT_THAT(rename(JoinPath(tmpfs_dir_.path(), filename_).c_str(),
@@ -227,7 +227,7 @@ TEST_F(IoctlTest, RenameFile) {
TEST_F(IoctlTest, RenameMerkle) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
std::string new_file_name = "renamed-" + filename_;
EXPECT_THAT(
diff --git a/test/syscalls/linux/verity_mmap.cc b/test/syscalls/linux/verity_mmap.cc
index 09ced6eb3..2bfd43b16 100644
--- a/test/syscalls/linux/verity_mmap.cc
+++ b/test/syscalls/linux/verity_mmap.cc
@@ -58,7 +58,7 @@ class MmapTest : public ::testing::Test {
TEST_F(MmapTest, MmapRead) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
// Make sure the file can be open and mmapped in the mounted verity fs.
auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(
@@ -72,7 +72,7 @@ TEST_F(MmapTest, MmapRead) {
TEST_F(MmapTest, ModifiedBeforeMmap) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
// Modify the file and check verification failure upon mmapping.
auto const fd = ASSERT_NO_ERRNO_AND_VALUE(
@@ -91,7 +91,7 @@ TEST_F(MmapTest, ModifiedBeforeMmap) {
TEST_F(MmapTest, ModifiedAfterMmap) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(
Open(JoinPath(verity_dir, filename_), O_RDONLY, 0777));
@@ -127,7 +127,7 @@ INSTANTIATE_TEST_SUITE_P(
TEST_P(MmapParamTest, Mmap) {
std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_, /*targets=*/{}));
+ MountVerity(tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY)}));
// Make sure the file can be open and mmapped in the mounted verity fs.
auto const verity_fd = ASSERT_NO_ERRNO_AND_VALUE(
diff --git a/test/syscalls/linux/verity_symlink.cc b/test/syscalls/linux/verity_symlink.cc
index bbf5375cb..c6fce8ead 100644
--- a/test/syscalls/linux/verity_symlink.cc
+++ b/test/syscalls/linux/verity_symlink.cc
@@ -62,9 +62,9 @@ class SymlinkTest : public ::testing::Test {
};
TEST_F(SymlinkTest, Success) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_,
- {EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));
+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(MountVerity(
+ tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY),
+ EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));
char buf[256];
EXPECT_THAT(
@@ -77,9 +77,9 @@ TEST_F(SymlinkTest, Success) {
}
TEST_F(SymlinkTest, DeleteLink) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_,
- {EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));
+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(MountVerity(
+ tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY),
+ EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));
ASSERT_THAT(unlink(JoinPath(tmpfs_dir_.path(), kSymlink).c_str()),
SyscallSucceeds());
@@ -92,9 +92,9 @@ TEST_F(SymlinkTest, DeleteLink) {
}
TEST_F(SymlinkTest, ModifyLink) {
- std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(
- MountVerity(tmpfs_dir_.path(), filename_,
- {EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));
+ std::string verity_dir = ASSERT_NO_ERRNO_AND_VALUE(MountVerity(
+ tmpfs_dir_.path(), {EnableTarget(filename_, O_RDONLY),
+ EnableTarget(kSymlink, O_RDONLY | O_NOFOLLOW)}));
ASSERT_THAT(unlink(JoinPath(tmpfs_dir_.path(), kSymlink).c_str()),
SyscallSucceeds());
diff --git a/test/util/verity_util.cc b/test/util/verity_util.cc
index 501d7c2cf..b7d1cb212 100644
--- a/test/util/verity_util.cc
+++ b/test/util/verity_util.cc
@@ -54,20 +54,14 @@ PosixError FlipRandomBit(int fd, int size) {
return NoError();
}
-PosixErrorOr<std::string> MountVerity(std::string tmpfs_dir,
- std::string filename,
+PosixErrorOr<std::string> MountVerity(std::string lower_dir,
std::vector<EnableTarget> targets) {
- // Mount a verity fs on the existing tmpfs mount.
- std::string mount_opts = "lower_path=" + tmpfs_dir;
+ // Mount a verity fs on the existing mount.
+ std::string mount_opts = "lower_path=" + lower_dir;
ASSIGN_OR_RETURN_ERRNO(TempPath verity_dir, TempPath::CreateDir());
RETURN_ERROR_IF_SYSCALL_FAIL(
mount("", verity_dir.path().c_str(), "verity", 0, mount_opts.c_str()));
- // Enable the file, symlink(if provided) and the directory.
- ASSIGN_OR_RETURN_ERRNO(
- auto fd, Open(JoinPath(verity_dir.path(), filename), O_RDONLY, 0777));
- RETURN_ERROR_IF_SYSCALL_FAIL(ioctl(fd.get(), FS_IOC_ENABLE_VERITY));
-
for (const EnableTarget& target : targets) {
ASSIGN_OR_RETURN_ERRNO(
auto target_fd,
@@ -92,6 +86,7 @@ PosixErrorOr<std::string> MountVerity(std::string tmpfs_dir,
ASSIGN_OR_RETURN_ERRNO(TempPath verity_with_hash_dir, TempPath::CreateDir());
RETURN_ERROR_IF_SYSCALL_FAIL(mount("", verity_with_hash_dir.path().c_str(),
"verity", 0, mount_opts.c_str()));
+
// Verity directories should not be deleted. Release the TempPath objects to
// prevent those directories from being deleted by the destructor.
verity_dir.release();
diff --git a/test/util/verity_util.h b/test/util/verity_util.h
index 44863f322..ebb78b4bb 100644
--- a/test/util/verity_util.h
+++ b/test/util/verity_util.h
@@ -76,7 +76,6 @@ PosixError FlipRandomBit(int fd, int size);
// Mount a verity on the tmpfs and enable both the file and the direcotry. Then
// mount a new verity with measured root hash.
PosixErrorOr<std::string> MountVerity(std::string tmpfs_dir,
- std::string filename,
std::vector<EnableTarget> targets);
} // namespace testing
diff --git a/tools/constraintutil/BUILD b/tools/constraintutil/BUILD
new file mode 100644
index 000000000..004b708c4
--- /dev/null
+++ b/tools/constraintutil/BUILD
@@ -0,0 +1,18 @@
+load("//tools:defs.bzl", "go_library", "go_test")
+
+package(licenses = ["notice"])
+
+go_library(
+ name = "constraintutil",
+ srcs = ["constraintutil.go"],
+ marshal = False,
+ stateify = False,
+ visibility = ["//tools:__subpackages__"],
+)
+
+go_test(
+ name = "constraintutil_test",
+ size = "small",
+ srcs = ["constraintutil_test.go"],
+ library = ":constraintutil",
+)
diff --git a/tools/constraintutil/constraintutil.go b/tools/constraintutil/constraintutil.go
new file mode 100644
index 000000000..fb3fbe5c2
--- /dev/null
+++ b/tools/constraintutil/constraintutil.go
@@ -0,0 +1,169 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package constraintutil provides utilities for working with Go build
+// constraints.
+package constraintutil
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/build/constraint"
+ "io"
+ "os"
+ "strings"
+)
+
+// FromReader extracts the build constraint from the Go source or assembly file
+// whose contents are read by r.
+func FromReader(r io.Reader) (constraint.Expr, error) {
+ // See go/build.parseFileHeader() for the "official" logic that this is
+ // derived from.
+ const (
+ slashStar = "/*"
+ starSlash = "*/"
+ gobuildPrefix = "//go:build"
+ )
+ s := bufio.NewScanner(r)
+ var (
+ inSlashStar = false // between /* and */
+ haveGobuild = false
+ e constraint.Expr
+ )
+Lines:
+ for s.Scan() {
+ line := bytes.TrimSpace(s.Bytes())
+ if !inSlashStar && constraint.IsGoBuild(string(line)) {
+ if haveGobuild {
+ return nil, fmt.Errorf("multiple go:build directives")
+ }
+ haveGobuild = true
+ var err error
+ e, err = constraint.Parse(string(line))
+ if err != nil {
+ return nil, err
+ }
+ }
+ ThisLine:
+ for len(line) > 0 {
+ if inSlashStar {
+ if i := bytes.Index(line, []byte(starSlash)); i >= 0 {
+ inSlashStar = false
+ line = bytes.TrimSpace(line[i+len(starSlash):])
+ continue ThisLine
+ }
+ continue Lines
+ }
+ if bytes.HasPrefix(line, []byte("//")) {
+ continue Lines
+ }
+ // Note that if /* appears in the line, but not at the beginning,
+ // then the line is still non-empty, so skipping this and
+ // terminating below is correct.
+ if bytes.HasPrefix(line, []byte(slashStar)) {
+ inSlashStar = true
+ line = bytes.TrimSpace(line[len(slashStar):])
+ continue ThisLine
+ }
+ // A non-empty non-comment line terminates scanning for go:build.
+ break Lines
+ }
+ }
+ return e, s.Err()
+}
+
+// FromString extracts the build constraint from the Go source or assembly file
+// containing the given data. If no build constraint applies to the file, it
+// returns nil.
+func FromString(str string) (constraint.Expr, error) {
+ return FromReader(strings.NewReader(str))
+}
+
+// FromFile extracts the build constraint from the Go source or assembly file
+// at the given path. If no build constraint applies to the file, it returns
+// nil.
+func FromFile(path string) (constraint.Expr, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return FromReader(f)
+}
+
+// Combine returns a constraint.Expr that evaluates to true iff all expressions
+// in es evaluate to true. If es is empty, Combine returns nil.
+//
+// Preconditions: All constraint.Exprs in es are non-nil.
+func Combine(es []constraint.Expr) constraint.Expr {
+ switch len(es) {
+ case 0:
+ return nil
+ case 1:
+ return es[0]
+ default:
+ a := &constraint.AndExpr{es[0], es[1]}
+ for i := 2; i < len(es); i++ {
+ a = &constraint.AndExpr{a, es[i]}
+ }
+ return a
+ }
+}
+
+// CombineFromFiles returns a build constraint expression that evaluates to
+// true iff the build constraints from all of the given Go source or assembly
+// files evaluate to true. If no build constraints apply to any of the given
+// files, it returns nil.
+func CombineFromFiles(paths []string) (constraint.Expr, error) {
+ var es []constraint.Expr
+ for _, path := range paths {
+ e, err := FromFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read build constraints from %q: %v", path, err)
+ }
+ if e != nil {
+ es = append(es, e)
+ }
+ }
+ return Combine(es), nil
+}
+
+// Lines returns a string containing build constraint directives for the given
+// constraint.Expr, including two trailing newlines, as appropriate for a Go
+// source or assembly file. At least a go:build directive will be emitted; if
+// the constraint is expressible using +build directives as well, then +build
+// directives will also be emitted.
+//
+// If e is nil, Lines returns the empty string.
+func Lines(e constraint.Expr) string {
+ if e == nil {
+ return ""
+ }
+
+ var b strings.Builder
+ b.WriteString("//go:build ")
+ b.WriteString(e.String())
+ b.WriteByte('\n')
+
+ if pblines, err := constraint.PlusBuildLines(e); err == nil {
+ for _, line := range pblines {
+ b.WriteString(line)
+ b.WriteByte('\n')
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.String()
+}
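
The new package centralizes //go:build handling for the code generators converted later in this change (go_merge, gomarshal, stateify). A short usage sketch driven by the API added above; the file contents passed to FromString are made up for illustration:

package main

import (
	"fmt"
	"go/build/constraint"

	"gvisor.dev/gvisor/tools/constraintutil"
)

func main() {
	e1, err := constraintutil.FromString("//go:build amd64\n\npackage a")
	if err != nil {
		panic(err)
	}
	e2, err := constraintutil.FromString("//go:build linux && !race\n\npackage b")
	if err != nil {
		panic(err)
	}
	combined := constraintutil.Combine([]constraint.Expr{e1, e2})
	// Prints a //go:build line (plus +build lines when expressible),
	// followed by a blank line, ready to prepend to a generated file.
	fmt.Print(constraintutil.Lines(combined))
}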
diff --git a/tools/constraintutil/constraintutil_test.go b/tools/constraintutil/constraintutil_test.go
new file mode 100644
index 000000000..eeabd8dcf
--- /dev/null
+++ b/tools/constraintutil/constraintutil_test.go
@@ -0,0 +1,138 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package constraintutil
+
+import (
+ "go/build/constraint"
+ "testing"
+)
+
+func TestFileParsing(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ data string
+ expr string
+ }{
+ {
+ name: "Empty",
+ },
+ {
+ name: "NoConstraint",
+ data: "// copyright header\n\npackage main",
+ },
+ {
+ name: "ConstraintOnFirstLine",
+ data: "//go:build amd64\n#include \"textflag.h\"",
+ expr: "amd64",
+ },
+ {
+ name: "ConstraintAfterSlashSlashComment",
+ data: "// copyright header\n\n//go:build linux\n\npackage newlib",
+ expr: "linux",
+ },
+ {
+ name: "ConstraintAfterSlashStarComment",
+ data: "/*\ncopyright header\n*/\n\n//go:build !race\n\npackage oldlib",
+ expr: "!race",
+ },
+ {
+ name: "ConstraintInSlashSlashComment",
+ data: "// blah blah //go:build windows",
+ },
+ {
+ name: "ConstraintInSlashStarComment",
+ data: "/*\n//go:build windows\n*/",
+ },
+ {
+ name: "ConstraintAfterPackageClause",
+ data: "package oops\n//go:build race",
+ },
+ {
+ name: "ConstraintAfterCppInclude",
+ data: "#include \"textflag.h\"\n//go:build arm64",
+ },
+ } {
+ t.Run(test.name, func(t *testing.T) {
+ e, err := FromString(test.data)
+ if err != nil {
+ t.Fatalf("FromString(%q) failed: %v", test.data, err)
+ }
+ if e == nil {
+ if len(test.expr) != 0 {
+ t.Errorf("FromString(%q): got no constraint, wanted %q", test.data, test.expr)
+ }
+ } else {
+ got := e.String()
+ if len(test.expr) == 0 {
+ t.Errorf("FromString(%q): got %q, wanted no constraint", test.data, got)
+ } else if got != test.expr {
+ t.Errorf("FromString(%q): got %q, wanted %q", test.data, got, test.expr)
+ }
+ }
+ })
+ }
+}
+
+func TestCombine(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ in []string
+ out string
+ }{
+ {
+ name: "0",
+ },
+ {
+ name: "1",
+ in: []string{"amd64 || arm64"},
+ out: "amd64 || arm64",
+ },
+ {
+ name: "2",
+ in: []string{"amd64", "amd64 && linux"},
+ out: "amd64 && amd64 && linux",
+ },
+ {
+ name: "3",
+ in: []string{"amd64", "amd64 || arm64", "amd64 || riscv64"},
+ out: "amd64 && (amd64 || arm64) && (amd64 || riscv64)",
+ },
+ } {
+ t.Run(test.name, func(t *testing.T) {
+ inexprs := make([]constraint.Expr, 0, len(test.in))
+ for _, estr := range test.in {
+ line := "//go:build " + estr
+ e, err := constraint.Parse(line)
+ if err != nil {
+ t.Fatalf("constraint.Parse(%q) failed: %v", line, err)
+ }
+ inexprs = append(inexprs, e)
+ }
+ outexpr := Combine(inexprs)
+ if outexpr == nil {
+ if len(test.out) != 0 {
+ t.Errorf("Combine(%v): got no constraint, wanted %q", test.in, test.out)
+ }
+ } else {
+ got := outexpr.String()
+ if len(test.out) == 0 {
+ t.Errorf("Combine(%v): got %q, wanted no constraint", test.in, got)
+ } else if got != test.out {
+ t.Errorf("Combine(%v): got %q, wanted %q", test.in, got, test.out)
+ }
+ }
+ })
+ }
+}
diff --git a/tools/go_generics/go_merge/BUILD b/tools/go_generics/go_merge/BUILD
index 5e0487e93..211e6b3ed 100644
--- a/tools/go_generics/go_merge/BUILD
+++ b/tools/go_generics/go_merge/BUILD
@@ -7,6 +7,6 @@ go_binary(
srcs = ["main.go"],
visibility = ["//:sandbox"],
deps = [
- "//tools/tags",
+ "//tools/constraintutil",
],
)
diff --git a/tools/go_generics/go_merge/main.go b/tools/go_generics/go_merge/main.go
index 801f2354f..81394ddce 100644
--- a/tools/go_generics/go_merge/main.go
+++ b/tools/go_generics/go_merge/main.go
@@ -25,9 +25,8 @@ import (
"os"
"path/filepath"
"strconv"
- "strings"
- "gvisor.dev/gvisor/tools/tags"
+ "gvisor.dev/gvisor/tools/constraintutil"
)
var (
@@ -131,6 +130,12 @@ func main() {
}
f.Decls = newDecls
+ // Infer build constraints for the output file.
+ bcexpr, err := constraintutil.CombineFromFiles(flag.Args())
+ if err != nil {
+ fatalf("Failed to read build constraints: %v\n", err)
+ }
+
// Write the output file.
var buf bytes.Buffer
if err := format.Node(&buf, fset, f); err != nil {
@@ -141,9 +146,7 @@ func main() {
fatalf("opening output: %v\n", err)
}
defer outf.Close()
- if t := tags.Aggregate(flag.Args()); len(t) > 0 {
- fmt.Fprintf(outf, "%s\n\n", strings.Join(t.Lines(), "\n"))
- }
+ outf.WriteString(constraintutil.Lines(bcexpr))
if _, err := outf.Write(buf.Bytes()); err != nil {
fatalf("write: %v\n", err)
}
diff --git a/tools/go_marshal/gomarshal/BUILD b/tools/go_marshal/gomarshal/BUILD
index c2747d94c..aaa203115 100644
--- a/tools/go_marshal/gomarshal/BUILD
+++ b/tools/go_marshal/gomarshal/BUILD
@@ -18,5 +18,5 @@ go_library(
visibility = [
"//:sandbox",
],
- deps = ["//tools/tags"],
+ deps = ["//tools/constraintutil"],
)
diff --git a/tools/go_marshal/gomarshal/generator.go b/tools/go_marshal/gomarshal/generator.go
index 00961c90d..4c23637c0 100644
--- a/tools/go_marshal/gomarshal/generator.go
+++ b/tools/go_marshal/gomarshal/generator.go
@@ -25,7 +25,7 @@ import (
"sort"
"strings"
- "gvisor.dev/gvisor/tools/tags"
+ "gvisor.dev/gvisor/tools/constraintutil"
)
// List of identifiers we use in generated code that may conflict with a
@@ -123,16 +123,18 @@ func (g *Generator) writeHeader() error {
var b sourceBuffer
b.emit("// Automatically generated marshal implementation. See tools/go_marshal.\n\n")
- // Emit build tags.
- b.emit("// If there are issues with build tag aggregation, see\n")
- b.emit("// tools/go_marshal/gomarshal/generator.go:writeHeader(). The build tags here\n")
- b.emit("// come from the input set of files used to generate this file. This input set\n")
- b.emit("// is filtered based on pre-defined file suffixes related to build tags, see \n")
- b.emit("// tools/defs.bzl:calculate_sets().\n\n")
-
- if t := tags.Aggregate(g.inputs); len(t) > 0 {
- b.emit(strings.Join(t.Lines(), "\n"))
- b.emit("\n\n")
+ bcexpr, err := constraintutil.CombineFromFiles(g.inputs)
+ if err != nil {
+ return err
+ }
+ if bcexpr != nil {
+ // Emit build constraints.
+ b.emit("// If there are issues with build constraint aggregation, see\n")
+ b.emit("// tools/go_marshal/gomarshal/generator.go:writeHeader(). The constraints here\n")
+ b.emit("// come from the input set of files used to generate this file. This input set\n")
+ b.emit("// is filtered based on pre-defined file suffixes related to build constraints,\n")
+ b.emit("// see tools/defs.bzl:calculate_sets().\n\n")
+ b.emit(constraintutil.Lines(bcexpr))
}
// Package header.
@@ -553,11 +555,12 @@ func (g *Generator) writeTests(ts []*testGenerator) error {
b.reset()
b.emit("// Automatically generated marshal tests. See tools/go_marshal.\n\n")
- // Emit build tags.
- if t := tags.Aggregate(g.inputs); len(t) > 0 {
- b.emit(strings.Join(t.Lines(), "\n"))
- b.emit("\n\n")
+ // Emit build constraints.
+ bcexpr, err := constraintutil.CombineFromFiles(g.inputs)
+ if err != nil {
+ return err
}
+ b.emit(constraintutil.Lines(bcexpr))
b.emit("package %s\n\n", g.pkg)
if err := b.write(g.outputTest); err != nil {
diff --git a/tools/go_stateify/BUILD b/tools/go_stateify/BUILD
index 913558b4e..ad66981c7 100644
--- a/tools/go_stateify/BUILD
+++ b/tools/go_stateify/BUILD
@@ -6,7 +6,7 @@ go_binary(
name = "stateify",
srcs = ["main.go"],
visibility = ["//:sandbox"],
- deps = ["//tools/tags"],
+ deps = ["//tools/constraintutil"],
)
bzl_library(
diff --git a/tools/go_stateify/main.go b/tools/go_stateify/main.go
index 93022f504..7216388a0 100644
--- a/tools/go_stateify/main.go
+++ b/tools/go_stateify/main.go
@@ -28,7 +28,7 @@ import (
"strings"
"sync"
- "gvisor.dev/gvisor/tools/tags"
+ "gvisor.dev/gvisor/tools/constraintutil"
)
var (
@@ -214,10 +214,13 @@ func main() {
// Automated warning.
fmt.Fprint(outputFile, "// automatically generated by stateify.\n\n")
- // Emit build tags.
- if t := tags.Aggregate(flag.Args()); len(t) > 0 {
- fmt.Fprintf(outputFile, "%s\n\n", strings.Join(t.Lines(), "\n"))
+ // Emit build constraints.
+ bcexpr, err := constraintutil.CombineFromFiles(flag.Args())
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to infer build constraints: %v", err)
+ os.Exit(1)
}
+ outputFile.WriteString(constraintutil.Lines(bcexpr))
// Emit the package name.
_, pkg := filepath.Split(*fullPkg)
diff --git a/tools/nogo/BUILD b/tools/nogo/BUILD
index a7e280b32..27fe48680 100644
--- a/tools/nogo/BUILD
+++ b/tools/nogo/BUILD
@@ -1,4 +1,4 @@
-load("//tools:defs.bzl", "bzl_library", "go_library", "select_goarch", "select_goos")
+load("//tools:defs.bzl", "bzl_library", "go_library", "go_test", "select_goarch", "select_goos")
load("//tools/nogo:defs.bzl", "nogo_objdump_tool", "nogo_stdlib", "nogo_target")
package(licenses = ["notice"])
@@ -73,6 +73,12 @@ go_library(
],
)
+go_test(
+ name = "nogo_test",
+ srcs = ["config_test.go"],
+ library = ":nogo",
+)
+
bzl_library(
name = "defs_bzl",
srcs = ["defs.bzl"],
diff --git a/tools/nogo/config.go b/tools/nogo/config.go
index 6436f9d34..ee2533610 100644
--- a/tools/nogo/config.go
+++ b/tools/nogo/config.go
@@ -186,16 +186,19 @@ func (a AnalyzerConfig) merge(other AnalyzerConfig) {
}
}
-func (a AnalyzerConfig) shouldReport(groupConfig *Group, fullPos, msg string) bool {
+// shouldReport returns whether the finding should be reported or suppressed.
+// It returns !ok if there is no configuration sufficient to decide one way or
+// another.
+func (a AnalyzerConfig) shouldReport(groupConfig *Group, fullPos, msg string) (report, ok bool) {
gc, ok := a[groupConfig.Name]
if !ok {
- return groupConfig.Default
+ return false, false
}
// Note that if a section appears for a particular group
// for a particular analyzer, then it will now be enabled,
// and the group default no longer applies.
- return gc.shouldReport(fullPos, msg)
+ return gc.shouldReport(fullPos, msg), true
}
// Config is a nogo configuration.
@@ -298,7 +301,8 @@ func (c *Config) ShouldReport(finding Finding) bool {
}
// Suppress via global rule?
- if !c.Global.shouldReport(groupConfig, fullPos, finding.Message) {
+ report, ok := c.Global.shouldReport(groupConfig, fullPos, finding.Message)
+ if ok && !report {
return false
}
@@ -307,5 +311,9 @@ func (c *Config) ShouldReport(finding Finding) bool {
if !ok {
return groupConfig.Default
}
- return ac.shouldReport(groupConfig, fullPos, finding.Message)
+ report, ok = ac.shouldReport(groupConfig, fullPos, finding.Message)
+ if !ok {
+ return groupConfig.Default
+ }
+ return report
}
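
The refactor makes shouldReport return a (report, ok) pair so that a missing entry means "no opinion" rather than an implicit decision: only an explicit global rule can suppress a finding, and otherwise resolution falls through to the per-analyzer config and finally the group default. The decision order, sketched with hypothetical names:

package main

import "fmt"

type ruleSet map[string]bool

// decide returns (verdict, ok); ok is false when the set has no opinion.
func decide(rules ruleSet, key string) (bool, bool) {
	v, ok := rules[key]
	return v, ok
}

func shouldReport(global, perAnalyzer ruleSet, key string, groupDefault bool) bool {
	if report, ok := decide(global, key); ok && !report {
		return false // suppressed by an explicit global rule
	}
	if report, ok := decide(perAnalyzer, key); ok {
		return report
	}
	return groupDefault // nobody had an opinion; fall back to the group
}

func main() {
	global := ruleSet{}               // no global entry for "x"
	perAnalyzer := ruleSet{"x": true} // analyzer enables "x"
	fmt.Println(shouldReport(global, perAnalyzer, "x", false)) // true
}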
diff --git a/tools/nogo/config_test.go b/tools/nogo/config_test.go
new file mode 100644
index 000000000..685cffbec
--- /dev/null
+++ b/tools/nogo/config_test.go
@@ -0,0 +1,301 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.package nogo
+package nogo
+
+import (
+ "go/token"
+ "testing"
+)
+
+// TestShouldReport validates the suppression behavior of Config.ShouldReport.
+func TestShouldReport(t *testing.T) {
+ config := &Config{
+ Groups: []Group{
+ {
+ Name: "default-enabled",
+ Regex: "^default-enabled/",
+ Default: true,
+ },
+ {
+ Name: "default-disabled",
+ Regex: "^default-disabled/",
+ Default: false,
+ },
+ {
+ Name: "default-disabled-omitted-from-global",
+ Regex: "^default-disabled-omitted-from-global/",
+ Default: false,
+ },
+ },
+ Global: AnalyzerConfig{
+ "default-enabled": &ItemConfig{
+ Exclude: []string{"excluded.go"},
+ Suppress: []string{"suppressed"},
+ },
+ "default-disabled": &ItemConfig{
+ Exclude: []string{"excluded.go"},
+ Suppress: []string{"suppressed"},
+ },
+ // Omitting default-disabled-omitted-from-global here
+ // has no effect on configuration below.
+ },
+ Analyzers: map[AnalyzerName]AnalyzerConfig{
+ "analyzer-suppressions": AnalyzerConfig{
+ // Suppress some.
+ "default-enabled": &ItemConfig{
+ Exclude: []string{"limited-exclude.go"},
+ Suppress: []string{"limited suppress"},
+ },
+ // Enable all.
+ "default-disabled": nil,
+ },
+ "enabled-for-default-disabled": AnalyzerConfig{
+ "default-disabled": nil,
+ "default-disabled-omitted-from-global": nil,
+ },
+ },
+ }
+
+ if err := config.Compile(); err != nil {
+ t.Fatalf("Compile(%+v) = %v, want nil", config, err)
+ }
+
+ cases := []struct {
+ name string
+ finding Finding
+ want bool
+ }{
+ {
+ name: "enabled",
+ finding: Finding{
+ Category: "foo",
+ Position: token.Position{
+ Filename: "default-enabled/file.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message",
+ },
+ want: true,
+ },
+ {
+ name: "ungrouped",
+ finding: Finding{
+ Category: "foo",
+ Position: token.Position{
+ Filename: "ungrouped/file.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message",
+ },
+ want: true,
+ },
+ {
+ name: "suppressed",
+ finding: Finding{
+ Category: "foo",
+ Position: token.Position{
+ Filename: "default-enabled/file.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message suppressed",
+ },
+ want: false,
+ },
+ {
+ name: "excluded",
+ finding: Finding{
+ Category: "foo",
+ Position: token.Position{
+ Filename: "default-enabled/excluded.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message",
+ },
+ want: false,
+ },
+ {
+ name: "disabled",
+ finding: Finding{
+ Category: "foo",
+ Position: token.Position{
+ Filename: "default-disabled/file.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message",
+ },
+ want: false,
+ },
+ {
+ name: "analyzer suppressed",
+ finding: Finding{
+ Category: "analyzer-suppressions",
+ Position: token.Position{
+ Filename: "default-enabled/file.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message limited suppress",
+ },
+ want: false,
+ },
+ {
+ name: "analyzer suppressed not global",
+ finding: Finding{
+ // Doesn't apply outside of analyzer-suppressions.
+ Category: "foo",
+ Position: token.Position{
+ Filename: "default-enabled/file.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message limited suppress",
+ },
+ want: true,
+ },
+ {
+ name: "analyzer suppressed grouped",
+ finding: Finding{
+ Category: "analyzer-suppressions",
+ Position: token.Position{
+ // Doesn't apply outside of default-enabled.
+ Filename: "default-disabled/file.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message limited suppress",
+ },
+ want: true,
+ },
+ {
+ name: "analyzer excluded",
+ finding: Finding{
+ Category: "analyzer-suppressions",
+ Position: token.Position{
+ Filename: "default-enabled/limited-exclude.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message",
+ },
+ want: false,
+ },
+ {
+ name: "analyzer excluded not global",
+ finding: Finding{
+ // Doesn't apply outside of analyzer-suppressions.
+ Category: "foo",
+ Position: token.Position{
+ Filename: "default-enabled/limited-exclude.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message",
+ },
+ want: true,
+ },
+ {
+ name: "analyzer excluded grouped",
+ finding: Finding{
+ Category: "analyzer-suppressions",
+ Position: token.Position{
+ // Doesn't apply outside of default-enabled.
+ Filename: "default-disabled/limited-exclude.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message",
+ },
+ want: true,
+ },
+ {
+ name: "disabled-omitted",
+ finding: Finding{
+ Category: "foo",
+ Position: token.Position{
+ Filename: "default-disabled-omitted-from-global/file.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message",
+ },
+ want: false,
+ },
+ {
+ name: "default enabled applies to customized analyzer",
+ finding: Finding{
+ Category: "enabled-for-default-disabled",
+ Position: token.Position{
+ Filename: "default-enabled/file.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message",
+ },
+ want: true,
+ },
+ {
+ name: "default overridden in customized analyzer",
+ finding: Finding{
+ Category: "enabled-for-default-disabled",
+ Position: token.Position{
+ Filename: "default-disabled/file.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message",
+ },
+ want: true,
+ },
+ {
+ name: "default overridden in customized analyzer even when omitted from global",
+ finding: Finding{
+ Category: "enabled-for-default-disabled",
+ Position: token.Position{
+ Filename: "default-disabled-omitted-from-global/file.go",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ },
+ Message: "message",
+ },
+ want: true,
+ },
+ }
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ if got := config.ShouldReport(tc.finding); got != tc.want {
+ t.Errorf("ShouldReport(%+v) = %v, want %v", tc.finding, got, tc.want)
+ }
+ })
+ }
+}
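
For context, a typical consumer of this configuration filters a set of findings through Config.ShouldReport. A minimal sketch inside package nogo, assuming FindingSet behaves as a slice of Finding (the type definition is not shown in this diff):

// filterFindings keeps only the findings that the configuration reports.
func filterFindings(config *Config, findings FindingSet) FindingSet {
	kept := make(FindingSet, 0, len(findings))
	for _, f := range findings {
		if config.ShouldReport(f) {
			kept = append(kept, f)
		}
	}
	return kept
}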
diff --git a/tools/nogo/findings.go b/tools/nogo/findings.go
index 329a7062e..a73bf1a09 100644
--- a/tools/nogo/findings.go
+++ b/tools/nogo/findings.go
@@ -109,7 +109,7 @@ func ExtractFindingsFromFile(filename string, asJSON bool) (FindingSet, error) {
return ExtractFindingsFrom(r, asJSON)
}
-// ExtractFindingsFromBytes loads findings from bytes.
+// ExtractFindingsFrom loads findings from an io.Reader.
func ExtractFindingsFrom(r io.Reader, asJSON bool) (findings FindingSet, err error) {
if asJSON {
dec := json.NewDecoder(r)
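
With the corrected doc comment, ExtractFindingsFrom reads findings from any io.Reader. A minimal usage sketch inside package nogo (with "strings" added to the imports), assuming the JSON form is a plain array of Finding objects encoded under their Go field names, which this diff does not show:

// exampleExtract decodes findings from an in-memory JSON document.
func exampleExtract() (FindingSet, error) {
	r := strings.NewReader(`[{"Category":"foo","Message":"message"}]`)
	return ExtractFindingsFrom(r, true /* asJSON */)
}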
diff --git a/tools/tags/BUILD b/tools/tags/BUILD
deleted file mode 100644
index 1c02e2c89..000000000
--- a/tools/tags/BUILD
+++ /dev/null
@@ -1,11 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "tags",
- srcs = ["tags.go"],
- marshal = False,
- stateify = False,
- visibility = ["//tools:__subpackages__"],
-)
diff --git a/tools/tags/tags.go b/tools/tags/tags.go
deleted file mode 100644
index f35904e0a..000000000
--- a/tools/tags/tags.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package tags is a utility for parsing build tags.
-package tags
-
-import (
- "fmt"
- "io/ioutil"
- "strings"
-)
-
-// OrSet is a set of tags on a single line.
-//
-// Note that tags may include ",", and we don't distinguish this case in the
-// logic below. Ideally, this constraints can be split into separate top-level
-// build tags in order to resolve any issues.
-type OrSet []string
-
-// Line returns the line for this or.
-func (or OrSet) Line() string {
- return fmt.Sprintf("// +build %s", strings.Join([]string(or), " "))
-}
-
-// AndSet is the set of all OrSets.
-type AndSet []OrSet
-
-// Lines returns the lines to be printed.
-func (and AndSet) Lines() (ls []string) {
- for _, or := range and {
- ls = append(ls, or.Line())
- }
- return
-}
-
-// Join joins this AndSet with another.
-func (and AndSet) Join(other AndSet) AndSet {
- return append(and, other...)
-}
-
-// Tags returns the unique set of +build tags.
-//
-// Derived form the runtime's canBuild.
-func Tags(file string) (tags AndSet) {
- data, err := ioutil.ReadFile(file)
- if err != nil {
- return nil
- }
- // Check file contents for // +build lines.
- for _, p := range strings.Split(string(data), "\n") {
- p = strings.TrimSpace(p)
- if p == "" {
- continue
- }
- if !strings.HasPrefix(p, "//") {
- break
- }
- if !strings.Contains(p, "+build") {
- continue
- }
- fields := strings.Fields(p[2:])
- if len(fields) < 1 || fields[0] != "+build" {
- continue
- }
- tags = append(tags, OrSet(fields[1:]))
- }
- return tags
-}
-
-// Aggregate aggregates all tags from a set of files.
-//
-// Note that these may be in conflict, in which case the build will fail.
-func Aggregate(files []string) (tags AndSet) {
- for _, file := range files {
- tags = tags.Join(Tags(file))
- }
- return tags
-}
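
The deleted package collected raw "// +build" lines from file headers. Equivalent parsing is available in the standard library's go/build/constraint package (Go 1.16+); the following is a minimal sketch of that kind of parsing, independent of whatever replaces this utility in the tree:

package main

import (
	"fmt"
	"go/build/constraint"
)

func main() {
	// In "+build" syntax, space-separated terms are ORed and comma-separated
	// terms are ANDed, so this line means (linux && amd64) || darwin.
	line := "// +build linux,amd64 darwin"
	if constraint.IsPlusBuild(line) {
		expr, err := constraint.Parse(line)
		if err != nil {
			panic(err)
		}
		// Evaluate the constraint against a hypothetical tag set.
		enabled := expr.Eval(func(tag string) bool { return tag == "linux" || tag == "amd64" })
		fmt.Println(enabled) // true
	}
}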