Diffstat (limited to 'pkg')
-rw-r--r--  pkg/sentry/fsimpl/cgroupfs/base.go                       8
-rw-r--r--  pkg/sentry/fsimpl/cgroupfs/cgroupfs.go                   2
-rw-r--r--  pkg/sentry/fsimpl/cgroupfs/cgroupfs_state_autogen.go   130
-rw-r--r--  pkg/sentry/fsimpl/cgroupfs/cpuacct.go                    79
4 files changed, 212 insertions(+), 7 deletions(-)
diff --git a/pkg/sentry/fsimpl/cgroupfs/base.go b/pkg/sentry/fsimpl/cgroupfs/base.go
index 360bbb17d..39c1013e1 100644
--- a/pkg/sentry/fsimpl/cgroupfs/base.go
+++ b/pkg/sentry/fsimpl/cgroupfs/base.go
@@ -167,8 +167,8 @@ func (d *cgroupProcsData) Generate(ctx context.Context, buf *bytes.Buffer) error
pgids := make(map[kernel.ThreadID]struct{})
- d.fs.tasksMu.Lock()
- defer d.fs.tasksMu.Unlock()
+ d.fs.tasksMu.RLock()
+ defer d.fs.tasksMu.RUnlock()
for task := range d.ts {
// Map dedups pgid, since iterating over all tasks produces multiple
@@ -209,8 +209,8 @@ func (d *tasksData) Generate(ctx context.Context, buf *bytes.Buffer) error {
var pids []kernel.ThreadID
- d.fs.tasksMu.Lock()
- defer d.fs.tasksMu.Unlock()
+ d.fs.tasksMu.RLock()
+ defer d.fs.tasksMu.RUnlock()
for task := range d.ts {
if pid := currPidns.IDOfTask(task); pid != 0 {
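
Both hunks above only read the task set while generating cgroup.procs and tasks, so the exclusive lock can be safely downgraded to a read lock. A minimal standalone sketch of that pattern (hypothetical types, not gVisor code): concurrent readers may hold an RWMutex simultaneously, while a writer performing a membership change still excludes everyone.

package main

import (
	"fmt"
	"sync"
)

type taskSet struct {
	mu sync.RWMutex // guards ts, like tasksMu above
	ts map[int]struct{}
}

// snapshotPIDs mirrors the Generate methods: it only reads ts, so an
// RLock suffices and several readers can proceed in parallel.
func (s *taskSet) snapshotPIDs() []int {
	s.mu.RLock()
	defer s.mu.RUnlock()
	pids := make([]int, 0, len(s.ts))
	for pid := range s.ts {
		pids = append(pids, pid)
	}
	return pids
}

// addTask mirrors a task membership change: it mutates ts, so it takes
// the write lock, excluding all readers and other writers.
func (s *taskSet) addTask(pid int) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.ts[pid] = struct{}{}
}

func main() {
	s := &taskSet{ts: make(map[int]struct{})}
	s.addTask(42)
	fmt.Println(s.snapshotPIDs()) // [42]
}
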
diff --git a/pkg/sentry/fsimpl/cgroupfs/cgroupfs.go b/pkg/sentry/fsimpl/cgroupfs/cgroupfs.go
index 6061bace2..ca8caee5f 100644
--- a/pkg/sentry/fsimpl/cgroupfs/cgroupfs.go
+++ b/pkg/sentry/fsimpl/cgroupfs/cgroupfs.go
@@ -129,7 +129,7 @@ type filesystem struct {
// tasksMu serializes task membership changes across all cgroups within a
// filesystem.
- tasksMu sync.Mutex `state:"nosave"`
+ tasksMu sync.RWMutex `state:"nosave"`
}
// Name implements vfs.FilesystemType.Name.
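
Note that tasksMu keeps its `state:"nosave"` tag across the type change: a sync primitive has no meaningful serialized form, so the stateify code generator skips it on save/restore. A hypothetical illustration of how a tag-driven generator sees such a field (illustration only, not the gVisor tooling):

package main

import (
	"fmt"
	"reflect"
	"sync"
)

// example mirrors the filesystem struct patched above: the RWMutex
// carries a state:"nosave" tag, so it is omitted from saved state.
type example struct {
	Data map[string]int
	Mu   sync.RWMutex `state:"nosave"`
}

func main() {
	// Inspect the struct tag the way a tag-driven generator would.
	t := reflect.TypeOf((*example)(nil)).Elem()
	f, _ := t.FieldByName("Mu")
	fmt.Println(f.Tag.Get("state")) // prints: nosave
}
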
diff --git a/pkg/sentry/fsimpl/cgroupfs/cgroupfs_state_autogen.go b/pkg/sentry/fsimpl/cgroupfs/cgroupfs_state_autogen.go
index 02afef1e4..d6f9f6cb4 100644
--- a/pkg/sentry/fsimpl/cgroupfs/cgroupfs_state_autogen.go
+++ b/pkg/sentry/fsimpl/cgroupfs/cgroupfs_state_autogen.go
@@ -383,6 +383,131 @@ func (c *cpuacctController) StateLoad(stateSourceObject state.Source) {
stateSourceObject.Load(0, &c.controllerCommon)
}
+func (c *cpuacctCgroup) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cpuacctCgroup"
+}
+
+func (c *cpuacctCgroup) StateFields() []string {
+ return []string{
+ "cgroupInode",
+ }
+}
+
+func (c *cpuacctCgroup) beforeSave() {}
+
+// +checklocksignore
+func (c *cpuacctCgroup) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.cgroupInode)
+}
+
+func (c *cpuacctCgroup) afterLoad() {}
+
+// +checklocksignore
+func (c *cpuacctCgroup) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.cgroupInode)
+}
+
+func (d *cpuacctStatData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cpuacctStatData"
+}
+
+func (d *cpuacctStatData) StateFields() []string {
+ return []string{
+ "cpuacctCgroup",
+ }
+}
+
+func (d *cpuacctStatData) beforeSave() {}
+
+// +checklocksignore
+func (d *cpuacctStatData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctStatData) afterLoad() {}
+
+// +checklocksignore
+func (d *cpuacctStatData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctUsageData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cpuacctUsageData"
+}
+
+func (d *cpuacctUsageData) StateFields() []string {
+ return []string{
+ "cpuacctCgroup",
+ }
+}
+
+func (d *cpuacctUsageData) beforeSave() {}
+
+// +checklocksignore
+func (d *cpuacctUsageData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctUsageData) afterLoad() {}
+
+// +checklocksignore
+func (d *cpuacctUsageData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctUsageUserData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cpuacctUsageUserData"
+}
+
+func (d *cpuacctUsageUserData) StateFields() []string {
+ return []string{
+ "cpuacctCgroup",
+ }
+}
+
+func (d *cpuacctUsageUserData) beforeSave() {}
+
+// +checklocksignore
+func (d *cpuacctUsageUserData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctUsageUserData) afterLoad() {}
+
+// +checklocksignore
+func (d *cpuacctUsageUserData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctUsageSysData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/cgroupfs.cpuacctUsageSysData"
+}
+
+func (d *cpuacctUsageSysData) StateFields() []string {
+ return []string{
+ "cpuacctCgroup",
+ }
+}
+
+func (d *cpuacctUsageSysData) beforeSave() {}
+
+// +checklocksignore
+func (d *cpuacctUsageSysData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.cpuacctCgroup)
+}
+
+func (d *cpuacctUsageSysData) afterLoad() {}
+
+// +checklocksignore
+func (d *cpuacctUsageSysData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.cpuacctCgroup)
+}
+
func (c *cpusetController) StateTypeName() string {
return "pkg/sentry/fsimpl/cgroupfs.cpusetController"
}
@@ -495,6 +620,11 @@ func init() {
state.Register((*staticControllerFile)(nil))
state.Register((*cpuController)(nil))
state.Register((*cpuacctController)(nil))
+ state.Register((*cpuacctCgroup)(nil))
+ state.Register((*cpuacctStatData)(nil))
+ state.Register((*cpuacctUsageData)(nil))
+ state.Register((*cpuacctUsageUserData)(nil))
+ state.Register((*cpuacctUsageSysData)(nil))
state.Register((*cpusetController)(nil))
state.Register((*dirRefs)(nil))
state.Register((*memoryController)(nil))
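
The five new state.Register calls make the cpuacct types recoverable by name when a saved sentry image is restored. A hypothetical minimal sketch of the same name-to-type registry idea (not the gVisor state API, just an illustration of why registration is needed):

package main

import "fmt"

// registry maps a stable type name to a factory, mirroring the idea
// behind state.Register above: restore code only has a type name in
// the serialized stream and must recover a concrete Go type from it.
var registry = map[string]func() interface{}{}

func register(name string, factory func() interface{}) {
	registry[name] = factory
}

type cpuacctStat struct{ user, sys int64 }

func init() {
	register("cgroupfs.cpuacctStat", func() interface{} { return &cpuacctStat{} })
}

func main() {
	obj := registry["cgroupfs.cpuacctStat"]()
	fmt.Printf("%T\n", obj) // *main.cpuacctStat
}
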
diff --git a/pkg/sentry/fsimpl/cgroupfs/cpuacct.go b/pkg/sentry/fsimpl/cgroupfs/cpuacct.go
index 0bb7f5c76..d4104a00e 100644
--- a/pkg/sentry/fsimpl/cgroupfs/cpuacct.go
+++ b/pkg/sentry/fsimpl/cgroupfs/cpuacct.go
@@ -15,9 +15,14 @@
package cgroupfs
import (
+ "bytes"
+ "fmt"
+
+ "gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
+ "gvisor.dev/gvisor/pkg/sentry/usage"
)
// +stateify savable
@@ -34,6 +39,76 @@ func newCPUAcctController(fs *filesystem) *cpuacctController {
}
// AddControlFiles implements controller.AddControlFiles.
-func (c *cpuacctController) AddControlFiles(ctx context.Context, creds *auth.Credentials, _ *cgroupInode, contents map[string]kernfs.Inode) {
- // This controller is currently intentionally empty.
+func (c *cpuacctController) AddControlFiles(ctx context.Context, creds *auth.Credentials, cg *cgroupInode, contents map[string]kernfs.Inode) {
+ cpuacctCG := &cpuacctCgroup{cg}
+ contents["cpuacct.stat"] = c.fs.newControllerFile(ctx, creds, &cpuacctStatData{cpuacctCG})
+ contents["cpuacct.usage"] = c.fs.newControllerFile(ctx, creds, &cpuacctUsageData{cpuacctCG})
+ contents["cpuacct.usage_user"] = c.fs.newControllerFile(ctx, creds, &cpuacctUsageUserData{cpuacctCG})
+ contents["cpuacct.usage_sys"] = c.fs.newControllerFile(ctx, creds, &cpuacctUsageSysData{cpuacctCG})
+}
+
+// +stateify savable
+type cpuacctCgroup struct {
+ *cgroupInode
+}
+
+func (c *cpuacctCgroup) collectCPUStats() usage.CPUStats {
+ var cs usage.CPUStats
+ c.fs.tasksMu.RLock()
+ // Note: This isn't very accurate, since the tasks are potentially
+ // still running as we accumulate their stats.
+ for t := range c.ts {
+ cs.Accumulate(t.CPUStats())
+ }
+ c.fs.tasksMu.RUnlock()
+ return cs
+}
+
+// +stateify savable
+type cpuacctStatData struct {
+ *cpuacctCgroup
+}
+
+// Generate implements vfs.DynamicBytesSource.Generate.
+func (d *cpuacctStatData) Generate(ctx context.Context, buf *bytes.Buffer) error {
+ cs := d.collectCPUStats()
+ fmt.Fprintf(buf, "user %d\n", linux.ClockTFromDuration(cs.UserTime))
+ fmt.Fprintf(buf, "system %d\n", linux.ClockTFromDuration(cs.SysTime))
+ return nil
+}
+
+// +stateify savable
+type cpuacctUsageData struct {
+ *cpuacctCgroup
+}
+
+// Generate implements vfs.DynamicBytesSource.Generate.
+func (d *cpuacctUsageData) Generate(ctx context.Context, buf *bytes.Buffer) error {
+ cs := d.collectCPUStats()
+ fmt.Fprintf(buf, "%d\n", cs.UserTime.Nanoseconds()+cs.SysTime.Nanoseconds())
+ return nil
+}
+
+// +stateify savable
+type cpuacctUsageUserData struct {
+ *cpuacctCgroup
+}
+
+// Generate implements vfs.DynamicBytesSource.Generate.
+func (d *cpuacctUsageUserData) Generate(ctx context.Context, buf *bytes.Buffer) error {
+ cs := d.collectCPUStats()
+ fmt.Fprintf(buf, "%d\n", cs.UserTime.Nanoseconds())
+ return nil
+}
+
+// +stateify savable
+type cpuacctUsageSysData struct {
+ *cpuacctCgroup
+}
+
+// Generate implements vfs.DynamicBytesSource.Generate.
+func (d *cpuacctUsageSysData) Generate(ctx context.Context, buf *bytes.Buffer) error {
+ cs := d.collectCPUStats()
+ fmt.Fprintf(buf, "%d\n", cs.SysTime.Nanoseconds())
+ return nil
}
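
Worth noting when reading these Generate methods: cpuacct.stat reports times in USER_HZ clock ticks (via linux.ClockTFromDuration), while the cpuacct.usage* files report raw nanoseconds, matching Linux behavior. A standalone sketch of the unit difference, assuming the conventional USER_HZ of 100 (hypothetical helper, not the gVisor implementation):

package main

import (
	"fmt"
	"time"
)

const userHZ = 100 // conventional Linux USER_HZ exposed to userspace

// clockTFromDuration converts a duration to USER_HZ ticks, the unit
// used by cpuacct.stat above.
func clockTFromDuration(d time.Duration) int64 {
	return int64(d / (time.Second / userHZ))
}

func main() {
	user := 1500 * time.Millisecond
	sys := 250 * time.Millisecond
	// cpuacct.stat: tick-granularity values, as written by
	// cpuacctStatData.Generate.
	fmt.Printf("user %d\nsystem %d\n", clockTFromDuration(user), clockTFromDuration(sys))
	// cpuacct.usage: the total in nanoseconds, as written by
	// cpuacctUsageData.Generate.
	fmt.Printf("%d\n", user.Nanoseconds()+sys.Nanoseconds())
}
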