author    gVisor bot <gvisor-bot@google.com>  2021-11-03 19:09:18 +0000
committer gVisor bot <gvisor-bot@google.com>  2021-11-03 19:09:18 +0000
commit    9665aa4c45e89e93acdc68084c3eb403661a1c8c (patch)
tree      490de7e556eb9c9bb8ce7d951581abedc78881bf /runsc
parent    9d6c9e4461c3594bf727d58bbbeb91a91049acd6 (diff)
parent    5185548e157be1ec4c8c161d15ca8ee045a31a36 (diff)
Merge release-20211026.0-31-g5185548e1 (automated)
Diffstat (limited to 'runsc')
-rw-r--r--  runsc/cgroup/cgroup.go        76
-rw-r--r--  runsc/container/container.go  26
-rw-r--r--  runsc/sandbox/sandbox.go      19
3 files changed, 82 insertions(+), 39 deletions(-)
diff --git a/runsc/cgroup/cgroup.go b/runsc/cgroup/cgroup.go
index fdcaed4ea..0eb5821a9 100644
--- a/runsc/cgroup/cgroup.go
+++ b/runsc/cgroup/cgroup.go
@@ -19,6 +19,7 @@ package cgroup
import (
"bufio"
"context"
+ "encoding/json"
"fmt"
"io"
"io/ioutil"
@@ -286,7 +287,19 @@ func loadPathsHelper(cgroup, mountinfo io.Reader) (map[string]string, error) {
return paths, nil
}
-// Cgroup represents a group inside all controllers. For example:
+// Cgroup represents a cgroup configuration.
+type Cgroup interface {
+ Install(res *specs.LinuxResources) error
+ Uninstall() error
+ Join() (func(), error)
+ CPUQuota() (float64, error)
+ CPUUsage() (uint64, error)
+ NumCPU() (int, error)
+ MemoryLimit() (uint64, error)
+ MakePath(controllerName string) string
+}
+
+// cgroupV1 represents a group inside all controllers. For example:
// Name='/foo/bar' maps to /sys/fs/cgroup/<controller>/foo/bar on
// all controllers.
//
@@ -294,7 +307,7 @@ func loadPathsHelper(cgroup, mountinfo io.Reader) (map[string]string, error) {
// location. For example:
// Name='foo/bar' and Parent[ctrl]="/user.slice", then it will map to
// /sys/fs/cgroup/<ctrl>/user.slice/foo/bar
-type Cgroup struct {
+type cgroupV1 struct {
Name string `json:"name"`
Parents map[string]string `json:"parents"`
Own map[string]bool `json:"own"`
@@ -302,7 +315,7 @@ type Cgroup struct {
// NewFromSpec creates a new Cgroup instance if the spec includes a cgroup path.
// Returns nil otherwise. Cgroup paths are loaded based on the current process.
-func NewFromSpec(spec *specs.Spec) (*Cgroup, error) {
+func NewFromSpec(spec *specs.Spec) (Cgroup, error) {
if spec.Linux == nil || spec.Linux.CgroupsPath == "" {
return nil, nil
}
@@ -311,16 +324,16 @@ func NewFromSpec(spec *specs.Spec) (*Cgroup, error) {
// NewFromPath creates a new Cgroup instance from the specified relative path.
// Cgroup paths are loaded based on the current process.
-func NewFromPath(cgroupsPath string) (*Cgroup, error) {
+func NewFromPath(cgroupsPath string) (Cgroup, error) {
return new("self", cgroupsPath)
}
// NewFromPid loads cgroup for the given process.
-func NewFromPid(pid int) (*Cgroup, error) {
+func NewFromPid(pid int) (Cgroup, error) {
return new(strconv.Itoa(pid), "")
}
-func new(pid, cgroupsPath string) (*Cgroup, error) {
+func new(pid, cgroupsPath string) (Cgroup, error) {
var parents map[string]string
// If path is relative, load cgroup paths for the process to build the
@@ -332,7 +345,7 @@ func new(pid, cgroupsPath string) (*Cgroup, error) {
return nil, fmt.Errorf("finding current cgroups: %w", err)
}
}
- cg := &Cgroup{
+ cg := &cgroupV1{
Name: cgroupsPath,
Parents: parents,
Own: make(map[string]bool),
@@ -341,10 +354,39 @@ func new(pid, cgroupsPath string) (*Cgroup, error) {
return cg, nil
}
+// CgroupJSON is a wrapper for Cgroup that can be encoded to JSON.
+type CgroupJSON struct {
+ Cgroup Cgroup `json:"cgroup"`
+}
+
+type cgroupJSONv1 struct {
+ Cgroup *cgroupV1 `json:"cgroup"`
+}
+
+// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON
+func (c *CgroupJSON) UnmarshalJSON(data []byte) error {
+ v1 := cgroupJSONv1{}
+ err := json.Unmarshal(data, &v1)
+ if v1.Cgroup != nil {
+ c.Cgroup = v1.Cgroup
+ }
+ return err
+}
+
+// MarshalJSON implements json.Marshaler.MarshalJSON
+func (c *CgroupJSON) MarshalJSON() ([]byte, error) {
+ if c.Cgroup == nil {
+ v1 := cgroupJSONv1{}
+ return json.Marshal(&v1)
+ }
+ v1 := cgroupJSONv1{Cgroup: c.Cgroup.(*cgroupV1)}
+ return json.Marshal(&v1)
+}
+
// Install creates and configures cgroups according to 'res'. If cgroup path
// already exists, it means that the caller has already provided a
// pre-configured cgroups, and 'res' is ignored.
-func (c *Cgroup) Install(res *specs.LinuxResources) error {
+func (c *cgroupV1) Install(res *specs.LinuxResources) error {
log.Debugf("Installing cgroup path %q", c.Name)
// Clean up partially created cgroups on error. Errors during cleanup itself
@@ -369,7 +411,7 @@ func (c *Cgroup) Install(res *specs.LinuxResources) error {
for _, key := range missing {
ctrlr := controllers[key]
- if skip, err := c.createController(key); skip && ctrlr.optional() {
+ if skip, err := createController(c, key); skip && ctrlr.optional() {
if err := ctrlr.skip(res); err != nil {
return err
}
@@ -394,7 +436,7 @@ func (c *Cgroup) Install(res *specs.LinuxResources) error {
// controller is enabled in the system. It returns a boolean indicating whether
// the controller should be skipped (e.g. controller is disabled). In case it
// should be skipped, it also returns the error it got.
-func (c *Cgroup) createController(name string) (bool, error) {
+func createController(c Cgroup, name string) (bool, error) {
ctrlrPath := filepath.Join(cgroupRoot, name)
if _, err := os.Stat(ctrlrPath); err != nil {
return os.IsNotExist(err), err
@@ -410,7 +452,7 @@ func (c *Cgroup) createController(name string) (bool, error) {
// Uninstall removes the settings done in Install(). If cgroup path already
// existed when Install() was called, Uninstall is a noop.
-func (c *Cgroup) Uninstall() error {
+func (c *cgroupV1) Uninstall() error {
log.Debugf("Deleting cgroup %q", c.Name)
g, ctx := errgroup.WithContext(context.Background())
for key := range controllers {
@@ -447,7 +489,7 @@ func (c *Cgroup) Uninstall() error {
// Join adds the current process to all controllers. Returns a function that
// restores cgroup to the original state.
-func (c *Cgroup) Join() (func(), error) {
+func (c *cgroupV1) Join() (func(), error) {
// First save the current state so it can be restored.
paths, err := loadPaths("self")
if err != nil {
@@ -492,7 +534,7 @@ func (c *Cgroup) Join() (func(), error) {
}
// CPUQuota returns the CFS CPU quota.
-func (c *Cgroup) CPUQuota() (float64, error) {
+func (c *cgroupV1) CPUQuota() (float64, error) {
path := c.MakePath("cpu")
quota, err := getInt(path, "cpu.cfs_quota_us")
if err != nil {
@@ -509,7 +551,7 @@ func (c *Cgroup) CPUQuota() (float64, error) {
}
// CPUUsage returns the total CPU usage of the cgroup.
-func (c *Cgroup) CPUUsage() (uint64, error) {
+func (c *cgroupV1) CPUUsage() (uint64, error) {
path := c.MakePath("cpuacct")
usage, err := getValue(path, "cpuacct.usage")
if err != nil {
@@ -519,7 +561,7 @@ func (c *Cgroup) CPUUsage() (uint64, error) {
}
// NumCPU returns the number of CPUs configured in 'cpuset/cpuset.cpus'.
-func (c *Cgroup) NumCPU() (int, error) {
+func (c *cgroupV1) NumCPU() (int, error) {
path := c.MakePath("cpuset")
cpuset, err := getValue(path, "cpuset.cpus")
if err != nil {
@@ -529,7 +571,7 @@ func (c *Cgroup) NumCPU() (int, error) {
}
// MemoryLimit returns the memory limit.
-func (c *Cgroup) MemoryLimit() (uint64, error) {
+func (c *cgroupV1) MemoryLimit() (uint64, error) {
path := c.MakePath("memory")
limStr, err := getValue(path, "memory.limit_in_bytes")
if err != nil {
@@ -539,7 +581,7 @@ func (c *Cgroup) MemoryLimit() (uint64, error) {
}
// MakePath builds a path to the given controller.
-func (c *Cgroup) MakePath(controllerName string) string {
+func (c *cgroupV1) MakePath(controllerName string) string {
path := c.Name
if parent, ok := c.Parents[controllerName]; ok {
path = filepath.Join(parent, c.Name)
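
The net effect of the cgroup.go changes above: callers now program against the exported Cgroup interface, the concrete v1 implementation (cgroupV1) stays unexported, and CgroupJSON restores the ability to persist a Cgroup inside JSON state files. The following caller is a minimal sketch, not part of this change; the import paths assume the usual gvisor.dev/gvisor module layout, and the spec value is made up for illustration.

package main

import (
	"encoding/json"
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
	"gvisor.dev/gvisor/runsc/cgroup"
)

func main() {
	// A spec with a cgroups path set. NewFromSpec returns (nil, nil) when
	// Linux or CgroupsPath is missing, so callers keep their nil checks.
	spec := &specs.Spec{
		Linux: &specs.Linux{CgroupsPath: "/mysandbox"},
	}

	cg, err := cgroup.NewFromSpec(spec) // now returns the Cgroup interface
	if err != nil || cg == nil {
		fmt.Println("no cgroup configured:", err)
		return
	}

	// Persist and restore through the CgroupJSON wrapper, which marshals
	// the unexported v1 implementation hidden behind the interface.
	buf, err := json.Marshal(&cgroup.CgroupJSON{Cgroup: cg})
	if err != nil {
		fmt.Println("marshal:", err)
		return
	}
	var restored cgroup.CgroupJSON
	if err := json.Unmarshal(buf, &restored); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}
	fmt.Println("cpu controller path:", restored.Cgroup.MakePath("cpu"))
}

Because NewFromSpec returns a literal nil interface when no cgroups path is configured, the `cg == nil` check above behaves the same as it did with the old *Cgroup pointer.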
diff --git a/runsc/container/container.go b/runsc/container/container.go
index 77a0f7eba..bee37e1b3 100644
--- a/runsc/container/container.go
+++ b/runsc/container/container.go
@@ -123,7 +123,7 @@ type Container struct {
// Note that CompatCgroup is created only for compatibility with tools
// that expect container cgroups to exist. Setting limits here makes no change
// to the container in question.
- CompatCgroup *cgroup.Cgroup `json:"compatCgroup"`
+ CompatCgroup cgroup.CgroupJSON `json:"compatCgroup"`
// Saver handles load from/save to the state file safely from multiple
// processes.
@@ -249,7 +249,7 @@ func New(conf *config.Config, args Args) (*Container, error) {
if err != nil {
return nil, err
}
- c.CompatCgroup = subCgroup
+ c.CompatCgroup = cgroup.CgroupJSON{Cgroup: subCgroup}
if err := runInCgroup(parentCgroup, func() error {
ioFiles, specFile, err := c.createGoferProcess(args.Spec, conf, args.BundleDir, args.Attached)
if err != nil {
@@ -297,7 +297,7 @@ func New(conf *config.Config, args Args) (*Container, error) {
if err != nil {
return nil, err
}
- c.CompatCgroup = subCgroup
+ c.CompatCgroup = cgroup.CgroupJSON{Cgroup: subCgroup}
// If the console control socket file is provided, then create a new
// pty master/slave pair and send the TTY to the sandbox process.
@@ -365,7 +365,7 @@ func (c *Container) Start(conf *config.Config) error {
} else {
// Join cgroup to start gofer process to ensure it's part of the cgroup from
// the start (and all their children processes).
- if err := runInCgroup(c.Sandbox.Cgroup, func() error {
+ if err := runInCgroup(c.Sandbox.CgroupJSON.Cgroup, func() error {
// Create the gofer process.
goferFiles, mountsFile, err := c.createGoferProcess(c.Spec, conf, c.BundleDir, false)
if err != nil {
@@ -784,7 +784,7 @@ func (c *Container) saveLocked() error {
// root containers), and waits for the container or sandbox and the gofer
// to stop. If any of them doesn't stop before timeout, an error is returned.
func (c *Container) stop() error {
- var parentCgroup *cgroup.Cgroup
+ var parentCgroup cgroup.Cgroup
if c.Sandbox != nil {
log.Debugf("Destroying container, cid: %s", c.ID)
@@ -793,7 +793,7 @@ func (c *Container) stop() error {
}
// Only uninstall parentCgroup for sandbox stop.
if c.Sandbox.IsRootContainer(c.ID) {
- parentCgroup = c.Sandbox.Cgroup
+ parentCgroup = c.Sandbox.CgroupJSON.Cgroup
}
// Only set sandbox to nil after it has been told to destroy the container.
c.Sandbox = nil
@@ -813,8 +813,8 @@ func (c *Container) stop() error {
}
// Delete container cgroup if any.
- if c.CompatCgroup != nil {
- if err := c.CompatCgroup.Uninstall(); err != nil {
+ if c.CompatCgroup.Cgroup != nil {
+ if err := c.CompatCgroup.Cgroup.Uninstall(); err != nil {
return err
}
}
@@ -1059,7 +1059,7 @@ func isRoot(spec *specs.Spec) bool {
// runInCgroup executes fn inside the specified cgroup. If cg is nil, execute
// it in the current context.
-func runInCgroup(cg *cgroup.Cgroup, fn func() error) error {
+func runInCgroup(cg cgroup.Cgroup, fn func() error) error {
if cg == nil {
return fn()
}
@@ -1222,8 +1222,8 @@ func (c *Container) populateStats(event *boot.EventOut) {
// setupCgroupForRoot configures and returns cgroup for the sandbox and the
// root container. If `cgroupParentAnnotation` is set, use that path as the
// sandbox cgroup and use Spec.Linux.CgroupsPath as the root container cgroup.
-func (c *Container) setupCgroupForRoot(conf *config.Config, spec *specs.Spec) (*cgroup.Cgroup, *cgroup.Cgroup, error) {
- var parentCgroup *cgroup.Cgroup
+func (c *Container) setupCgroupForRoot(conf *config.Config, spec *specs.Spec) (cgroup.Cgroup, cgroup.Cgroup, error) {
+ var parentCgroup cgroup.Cgroup
if parentPath, ok := spec.Annotations[cgroupParentAnnotation]; ok {
var err error
parentCgroup, err = cgroup.NewFromPath(parentPath)
@@ -1256,7 +1256,7 @@ func (c *Container) setupCgroupForRoot(conf *config.Config, spec *specs.Spec) (*
// subcontainers run exclusively inside the sandbox, subcontainer cgroups on the
// host have no effect on them. However, some tools (e.g. cAdvisor) use cgroup
// paths to discover new containers and report stats for them.
-func (c *Container) setupCgroupForSubcontainer(conf *config.Config, spec *specs.Spec) (*cgroup.Cgroup, error) {
+func (c *Container) setupCgroupForSubcontainer(conf *config.Config, spec *specs.Spec) (cgroup.Cgroup, error) {
if isRoot(spec) {
if _, ok := spec.Annotations[cgroupParentAnnotation]; !ok {
return nil, nil
@@ -1276,7 +1276,7 @@ func (c *Container) setupCgroupForSubcontainer(conf *config.Config, spec *specs.
// For rootless, it's possible that cgroup operations fail; in this case the
// error is suppressed and a nil cgroups instance is returned to indicate that
// no cgroups was configured.
-func cgroupInstall(conf *config.Config, cg *cgroup.Cgroup, res *specs.LinuxResources) (*cgroup.Cgroup, error) {
+func cgroupInstall(conf *config.Config, cg cgroup.Cgroup, res *specs.LinuxResources) (cgroup.Cgroup, error) {
// TODO(gvisor.dev/issue/3481): Remove when cgroups v2 is supported.
if cgroup.IsOnlyV2() {
if conf.Rootless {
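
container.go now passes the cgroup.Cgroup interface around (runInCgroup, setupCgroupForRoot, cgroupInstall) while keeping its `cg == nil` and `CompatCgroup.Cgroup != nil` checks. With an interface, those checks only stay correct because the constructors return a literal nil interface rather than a nil concrete pointer. The standalone sketch below illustrates that typed-nil distinction; the joiner and fakeCgroup types are invented stand-ins, not the real package.

package main

import "fmt"

// joiner stands in for the cgroup.Cgroup interface and fakeCgroup for an
// unexported implementation; both are invented for this illustration.
type joiner interface{ Join() (func(), error) }

type fakeCgroup struct{}

func (*fakeCgroup) Join() (func(), error) { return func() {}, nil }

// runInCgroup mirrors the nil check in container.go: only a truly nil
// interface value skips joining the cgroup.
func runInCgroup(cg joiner, fn func() error) error {
	if cg == nil {
		return fn()
	}
	restore, err := cg.Join()
	if err != nil {
		return err
	}
	defer restore()
	return fn()
}

func main() {
	var untyped joiner         // nil interface value
	var typed *fakeCgroup      // nil pointer ...
	var wrapped joiner = typed // ... wrapped in a non-nil interface

	fmt.Println(untyped == nil) // true:  fn would run outside any cgroup
	fmt.Println(wrapped == nil) // false: Join would run on a nil receiver

	_ = runInCgroup(untyped, func() error { return nil })
}

This is why NewFromSpec and setupCgroupForSubcontainer return a plain `nil, nil` when no cgroup is configured, instead of a nil *cgroupV1 wrapped in the interface.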
diff --git a/runsc/sandbox/sandbox.go b/runsc/sandbox/sandbox.go
index 714919139..8f7022fa1 100644
--- a/runsc/sandbox/sandbox.go
+++ b/runsc/sandbox/sandbox.go
@@ -72,8 +72,9 @@ type Sandbox struct {
// GID is the group ID in the parent namespace that the sandbox is running as.
GID int `json:"gid"`
- // Cgroup has the cgroup configuration for the sandbox.
- Cgroup *cgroup.Cgroup `json:"cgroup"`
+ // CgroupJSON contains the cgroup configuration that the sandbox is part of
+ // and allows serialization of the configuration into JSON.
+ CgroupJSON cgroup.CgroupJSON `json:"cgroup"`
// OriginalOOMScoreAdj stores the value of oom_score_adj when the sandbox
// started, before it may be modified.
@@ -124,7 +125,7 @@ type Args struct {
MountsFile *os.File
// Cgroup is the cgroup that the sandbox is part of.
- Cgroup *cgroup.Cgroup
+ Cgroup cgroup.Cgroup
// Attached indicates that the sandbox lifecycle is attached with the caller.
// If the caller exits, the sandbox should exit too.
@@ -134,7 +135,7 @@ type Args struct {
// New creates the sandbox process. The caller must call Destroy() on the
// sandbox.
func New(conf *config.Config, args *Args) (*Sandbox, error) {
- s := &Sandbox{ID: args.ID, Cgroup: args.Cgroup}
+ s := &Sandbox{ID: args.ID, CgroupJSON: cgroup.CgroupJSON{Cgroup: args.Cgroup}}
// The Cleanup object cleans up partially created sandboxes when an error
// occurs. Any errors occurring during cleanup itself are ignored.
c := cleanup.Make(func() {
@@ -328,7 +329,7 @@ func (s *Sandbox) Processes(cid string) ([]*control.Process, error) {
}
// NewCGroup returns the sandbox's Cgroup, or an error if it does not have one.
-func (s *Sandbox) NewCGroup() (*cgroup.Cgroup, error) {
+func (s *Sandbox) NewCGroup() (cgroup.Cgroup, error) {
return cgroup.NewFromPid(s.Pid)
}
@@ -763,8 +764,8 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
return err
}
- if s.Cgroup != nil {
- cpuNum, err := s.Cgroup.NumCPU()
+ if s.CgroupJSON.Cgroup != nil {
+ cpuNum, err := s.CgroupJSON.Cgroup.NumCPU()
if err != nil {
return fmt.Errorf("getting cpu count from cgroups: %v", err)
}
@@ -774,7 +775,7 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
// leaving two cores as reasonable default.
const minCPUs = 2
- quota, err := s.Cgroup.CPUQuota()
+ quota, err := s.CgroupJSON.Cgroup.CPUQuota()
if err != nil {
return fmt.Errorf("getting cpu qouta from cgroups: %v", err)
}
@@ -790,7 +791,7 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
}
cmd.Args = append(cmd.Args, "--cpu-num", strconv.Itoa(cpuNum))
- memLimit, err := s.Cgroup.MemoryLimit()
+ memLimit, err := s.CgroupJSON.Cgroup.MemoryLimit()
if err != nil {
return fmt.Errorf("getting memory limit from cgroups: %v", err)
}
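
The sandbox.go hunks above read NumCPU, CPUQuota, and MemoryLimit through s.CgroupJSON.Cgroup to size the sandbox process (for example the --cpu-num flag), but the clamping logic itself is truncated in this view. The helper below is only a guess at how a CFS quota could bound the advertised CPU count while keeping the two-core floor named in the comment; it is an assumption for illustration, not the code in this commit.

package main

import (
	"fmt"
	"math"
)

// cpusFromQuota is a hypothetical helper, NOT taken from sandbox.go. It
// shows one way a CFS quota (in CPUs, as returned by Cgroup.CPUQuota)
// could bound the CPU count passed to the sandbox, keeping the two-core
// floor mentioned in the surrounding comment.
func cpusFromQuota(hostCPUs int, quota float64) int {
	const minCPUs = 2 // "leaving two cores as reasonable default"

	if quota <= 0 {
		return hostCPUs // no quota configured
	}
	n := int(math.Ceil(quota))
	if n < minCPUs {
		n = minCPUs
	}
	if n > hostCPUs {
		n = hostCPUs
	}
	return n
}

func main() {
	fmt.Println(cpusFromQuota(8, 1.5)) // 2: rounded up, then floored at minCPUs
	fmt.Println(cpusFromQuota(8, 3.2)) // 4
	fmt.Println(cpusFromQuota(8, 0))   // 8: unlimited quota
}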