author    Rahat Mahmood <rahat@google.com>   2021-04-05 19:44:12 -0700
committer gVisor bot <gvisor-bot@google.com> 2021-04-05 19:46:26 -0700
commit    7a7fcf2dbaa7bdcdb9b523358de91c71d5cb05d8
tree      df242191725f8b27e8c4fe40f9f601e544fff8b3
parent    63340e61388621d41a5abb08d8902a1565d02a96
Report task CPU usage through the cpuacct cgroup controller.
PiperOrigin-RevId: 366923274
Diffstat (limited to 'pkg')
 -rw-r--r--  pkg/sentry/fsimpl/cgroupfs/base.go     |  8
 -rw-r--r--  pkg/sentry/fsimpl/cgroupfs/cgroupfs.go |  2
 -rw-r--r--  pkg/sentry/fsimpl/cgroupfs/cpuacct.go  | 79
 3 files changed, 82 insertions(+), 7 deletions(-)
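This change adds four read-only control files to the cpuacct controller: cpuacct.stat (user and system time in clock ticks), cpuacct.usage (total CPU time in nanoseconds), cpuacct.usage_user and cpuacct.usage_sys. A minimal sketch of how a workload inside the sandbox might read them, assuming the cpuacct hierarchy is mounted at /sys/fs/cgroup/cpuacct; the mount point and the parsing below are illustrative, not part of this change:

// readcpuacct.go: sketch of reading the files added by this change.
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	const root = "/sys/fs/cgroup/cpuacct"

	// cpuacct.stat reports "user <ticks>" and "system <ticks>" in USER_HZ units.
	stat, err := os.ReadFile(root + "/cpuacct.stat")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for _, line := range strings.Split(strings.TrimSpace(string(stat)), "\n") {
		fields := strings.Fields(line)
		if len(fields) != 2 {
			continue
		}
		ticks, _ := strconv.ParseUint(fields[1], 10, 64)
		fmt.Printf("%s time: %d ticks\n", fields[0], ticks)
	}

	// cpuacct.usage reports total (user + system) CPU time in nanoseconds.
	usageBytes, err := os.ReadFile(root + "/cpuacct.usage")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	ns, _ := strconv.ParseUint(strings.TrimSpace(string(usageBytes)), 10, 64)
	fmt.Printf("total usage: %d ns\n", ns)
}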
diff --git a/pkg/sentry/fsimpl/cgroupfs/base.go b/pkg/sentry/fsimpl/cgroupfs/base.go
index 360bbb17d..39c1013e1 100644
--- a/pkg/sentry/fsimpl/cgroupfs/base.go
+++ b/pkg/sentry/fsimpl/cgroupfs/base.go
@@ -167,8 +167,8 @@ func (d *cgroupProcsData) Generate(ctx context.Context, buf *bytes.Buffer) error
pgids := make(map[kernel.ThreadID]struct{})
- d.fs.tasksMu.Lock()
- defer d.fs.tasksMu.Unlock()
+ d.fs.tasksMu.RLock()
+ defer d.fs.tasksMu.RUnlock()
for task := range d.ts {
// Map dedups pgid, since iterating over all tasks produces multiple
@@ -209,8 +209,8 @@ func (d *tasksData) Generate(ctx context.Context, buf *bytes.Buffer) error {
var pids []kernel.ThreadID
- d.fs.tasksMu.Lock()
- defer d.fs.tasksMu.Unlock()
+ d.fs.tasksMu.RLock()
+ defer d.fs.tasksMu.RUnlock()
for task := range d.ts {
if pid := currPidns.IDOfTask(task); pid != 0 {
diff --git a/pkg/sentry/fsimpl/cgroupfs/cgroupfs.go b/pkg/sentry/fsimpl/cgroupfs/cgroupfs.go
index 6061bace2..ca8caee5f 100644
--- a/pkg/sentry/fsimpl/cgroupfs/cgroupfs.go
+++ b/pkg/sentry/fsimpl/cgroupfs/cgroupfs.go
@@ -129,7 +129,7 @@ type filesystem struct {
// tasksMu serializes task membership changes across all cgroups within a
// filesystem.
- tasksMu sync.Mutex `state:"nosave"`
+ tasksMu sync.RWMutex `state:"nosave"`
}
// Name implements vfs.FilesystemType.Name.
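The base.go and cgroupfs.go hunks above relax tasksMu from a plain mutex to an RWMutex: the cgroup.procs and tasks generators, and the new cpuacct stat collection below, only read cgroup membership, so they can now run concurrently while membership changes still take the exclusive lock. A simplified standalone sketch of that pattern; the types and names here are stand-ins, not the sentry's own:

// rwmutex_sketch.go: illustration of the tasksMu locking pattern.
package main

import (
	"fmt"
	"sync"
)

type task struct{ id int }

type filesystem struct {
	tasksMu sync.RWMutex // serializes task membership changes
	tasks   map[*task]struct{}
}

// addTask changes membership, so it takes the exclusive (write) lock.
func (fs *filesystem) addTask(t *task) {
	fs.tasksMu.Lock()
	defer fs.tasksMu.Unlock()
	fs.tasks[t] = struct{}{}
}

// listTaskIDs only reads membership, so multiple readers (e.g. concurrent
// reads of cgroup.procs, tasks and cpuacct.* files) may hold the read lock
// at the same time.
func (fs *filesystem) listTaskIDs() []int {
	fs.tasksMu.RLock()
	defer fs.tasksMu.RUnlock()
	var ids []int
	for t := range fs.tasks {
		ids = append(ids, t.id)
	}
	return ids
}

func main() {
	fs := &filesystem{tasks: make(map[*task]struct{})}
	fs.addTask(&task{id: 1})
	fs.addTask(&task{id: 2})
	fmt.Println(fs.listTaskIDs())
}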
diff --git a/pkg/sentry/fsimpl/cgroupfs/cpuacct.go b/pkg/sentry/fsimpl/cgroupfs/cpuacct.go
index 0bb7f5c76..d4104a00e 100644
--- a/pkg/sentry/fsimpl/cgroupfs/cpuacct.go
+++ b/pkg/sentry/fsimpl/cgroupfs/cpuacct.go
@@ -15,9 +15,14 @@
package cgroupfs
import (
+ "bytes"
+ "fmt"
+
+ "gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
+ "gvisor.dev/gvisor/pkg/sentry/usage"
)
// +stateify savable
@@ -34,6 +39,76 @@ func newCPUAcctController(fs *filesystem) *cpuacctController {
}
// AddControlFiles implements controller.AddControlFiles.
-func (c *cpuacctController) AddControlFiles(ctx context.Context, creds *auth.Credentials, _ *cgroupInode, contents map[string]kernfs.Inode) {
- // This controller is currently intentionally empty.
+func (c *cpuacctController) AddControlFiles(ctx context.Context, creds *auth.Credentials, cg *cgroupInode, contents map[string]kernfs.Inode) {
+ cpuacctCG := &cpuacctCgroup{cg}
+ contents["cpuacct.stat"] = c.fs.newControllerFile(ctx, creds, &cpuacctStatData{cpuacctCG})
+ contents["cpuacct.usage"] = c.fs.newControllerFile(ctx, creds, &cpuacctUsageData{cpuacctCG})
+ contents["cpuacct.usage_user"] = c.fs.newControllerFile(ctx, creds, &cpuacctUsageUserData{cpuacctCG})
+ contents["cpuacct.usage_sys"] = c.fs.newControllerFile(ctx, creds, &cpuacctUsageSysData{cpuacctCG})
+}
+
+// +stateify savable
+type cpuacctCgroup struct {
+ *cgroupInode
+}
+
+func (c *cpuacctCgroup) collectCPUStats() usage.CPUStats {
+ var cs usage.CPUStats
+ c.fs.tasksMu.RLock()
+ // Note: This isn't very accurate, since the tasks are potentially
+ // still running as we accumulate their stats.
+ for t := range c.ts {
+ cs.Accumulate(t.CPUStats())
+ }
+ c.fs.tasksMu.RUnlock()
+ return cs
+}
+
+// +stateify savable
+type cpuacctStatData struct {
+ *cpuacctCgroup
+}
+
+// Generate implements vfs.DynamicBytesSource.Generate.
+func (d *cpuacctStatData) Generate(ctx context.Context, buf *bytes.Buffer) error {
+ cs := d.collectCPUStats()
+ fmt.Fprintf(buf, "user %d\n", linux.ClockTFromDuration(cs.UserTime))
+ fmt.Fprintf(buf, "system %d\n", linux.ClockTFromDuration(cs.SysTime))
+ return nil
+}
+
+// +stateify savable
+type cpuacctUsageData struct {
+ *cpuacctCgroup
+}
+
+// Generate implements vfs.DynamicBytesSource.Generate.
+func (d *cpuacctUsageData) Generate(ctx context.Context, buf *bytes.Buffer) error {
+ cs := d.collectCPUStats()
+ fmt.Fprintf(buf, "%d\n", cs.UserTime.Nanoseconds()+cs.SysTime.Nanoseconds())
+ return nil
+}
+
+// +stateify savable
+type cpuacctUsageUserData struct {
+ *cpuacctCgroup
+}
+
+// Generate implements vfs.DynamicBytesSource.Generate.
+func (d *cpuacctUsageUserData) Generate(ctx context.Context, buf *bytes.Buffer) error {
+ cs := d.collectCPUStats()
+ fmt.Fprintf(buf, "%d\n", cs.UserTime.Nanoseconds())
+ return nil
+}
+
+// +stateify savable
+type cpuacctUsageSysData struct {
+ *cpuacctCgroup
+}
+
+// Generate implements vfs.DynamicBytesSource.Generate.
+func (d *cpuacctUsageSysData) Generate(ctx context.Context, buf *bytes.Buffer) error {
+ cs := d.collectCPUStats()
+ fmt.Fprintf(buf, "%d\n", cs.SysTime.Nanoseconds())
+ return nil
}
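A note on units in the generators above: cpuacct.stat reports user and system time in clock ticks (via linux.ClockTFromDuration), while cpuacct.usage, cpuacct.usage_user and cpuacct.usage_sys report nanoseconds. A small standalone sketch of that relationship, assuming USER_HZ = 100; clockTFromDuration below is a stand-in for illustration, not the sentry's helper:

// units_sketch.go: relationship between cpuacct.stat ticks and
// cpuacct.usage* nanoseconds, assuming USER_HZ = 100.
package main

import (
	"fmt"
	"time"
)

const userHZ = 100

// clockTFromDuration converts a duration to clock ticks, mirroring what
// cpuacct.stat reports.
func clockTFromDuration(d time.Duration) int64 {
	return int64(d / (time.Second / userHZ))
}

func main() {
	// Example accumulated CPU stats for all tasks in a cgroup.
	userTime := 1500 * time.Millisecond
	sysTime := 500 * time.Millisecond

	// cpuacct.stat: clock ticks.
	fmt.Printf("user %d\n", clockTFromDuration(userTime))  // user 150
	fmt.Printf("system %d\n", clockTFromDuration(sysTime)) // system 50

	// cpuacct.usage, cpuacct.usage_user, cpuacct.usage_sys: nanoseconds.
	fmt.Printf("%d\n", userTime.Nanoseconds()+sysTime.Nanoseconds()) // 2000000000
	fmt.Printf("%d\n", userTime.Nanoseconds())                       // 1500000000
	fmt.Printf("%d\n", sysTime.Nanoseconds())                        // 500000000
}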