From ae2250b1dd07fcdb6a2212022b75d63d26966499 Mon Sep 17 00:00:00 2001 From: Ian Lewis Date: Tue, 4 Dec 2018 04:55:39 -0500 Subject: Initial commit --- pkg/shim/platform.go | 112 ++++++++++ pkg/shim/service.go | 595 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 707 insertions(+) create mode 100644 pkg/shim/platform.go create mode 100644 pkg/shim/service.go (limited to 'pkg/shim') diff --git a/pkg/shim/platform.go b/pkg/shim/platform.go new file mode 100644 index 000000000..10c54958f --- /dev/null +++ b/pkg/shim/platform.go @@ -0,0 +1,112 @@ +/* +Copyright The containerd Authors. +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package shim + +import ( + "context" + "io" + "sync" + "syscall" + + "github.com/containerd/console" + "github.com/containerd/fifo" + "github.com/pkg/errors" +) + +type linuxPlatform struct { + epoller *console.Epoller +} + +func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { + if p.epoller == nil { + return nil, errors.New("uninitialized epoller") + } + + epollConsole, err := p.epoller.Add(console) + if err != nil { + return nil, err + } + + if stdin != "" { + in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0) + if err != nil { + return nil, err + } + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + io.CopyBuffer(epollConsole, in, *p) + }() + } + + outw, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0) + if err != nil { + return nil, err + } + outr, err := fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY, 0) + if err != nil { + return nil, err + } + wg.Add(1) + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + io.CopyBuffer(outw, epollConsole, *p) + epollConsole.Close() + outr.Close() + outw.Close() + wg.Done() + }() + return epollConsole, nil +} + +func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error { + if p.epoller == nil { + return errors.New("uninitialized epoller") + } + epollConsole, ok := cons.(*console.EpollConsole) + if !ok { + return errors.Errorf("expected EpollConsole, got %#v", cons) + } + return epollConsole.Shutdown(p.epoller.CloseConsole) +} + +func (p *linuxPlatform) Close() error { + return p.epoller.Close() +} + +// initialize a single epoll fd to manage our consoles. `initPlatform` should +// only be called once. +func (s *Service) initPlatform() error { + if s.platform != nil { + return nil + } + epoller, err := console.NewEpoller() + if err != nil { + return errors.Wrap(err, "failed to initialize epoller") + } + s.platform = &linuxPlatform{ + epoller: epoller, + } + go epoller.Wait() + return nil +} diff --git a/pkg/shim/service.go b/pkg/shim/service.go new file mode 100644 index 000000000..3516d6da2 --- /dev/null +++ b/pkg/shim/service.go @@ -0,0 +1,595 @@ +/* +Copyright The containerd Authors. 
+Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package shim + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/containerd/console" + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/api/types/task" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/runtime" + "github.com/containerd/containerd/runtime/linux/runctypes" + rproc "github.com/containerd/containerd/runtime/proc" + shimapi "github.com/containerd/containerd/runtime/v1/shim/v1" + "github.com/containerd/cri/pkg/annotations" + "github.com/containerd/typeurl" + ptypes "github.com/gogo/protobuf/types" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" + "github.com/google/gvisor-containerd-shim/pkg/proc" +) + +var ( + empty = &ptypes.Empty{} + bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32<<10) + return &buffer + }, + } +) + +// Config contains shim specific configuration +type Config struct { + Path string + Namespace string + WorkDir string + RuntimeRoot string + RunscConfig map[string]string +} + +// NewService returns a new shim service that can be used via GRPC +func NewService(config Config, publisher events.Publisher) (*Service, error) { + if config.Namespace == "" { + return nil, fmt.Errorf("shim namespace cannot be empty") + } + ctx := namespaces.WithNamespace(context.Background(), config.Namespace) + ctx = log.WithLogger(ctx, logrus.WithFields(logrus.Fields{ + "namespace": config.Namespace, + "path": config.Path, + "pid": os.Getpid(), + })) + s := &Service{ + config: config, + context: ctx, + processes: make(map[string]rproc.Process), + events: make(chan interface{}, 128), + ec: proc.ExitCh, + } + go s.processExits() + if err := s.initPlatform(); err != nil { + return nil, errors.Wrap(err, "failed to initialized platform behavior") + } + go s.forward(publisher) + return s, nil +} + +// Service is the shim implementation of a remote shim over GRPC +type Service struct { + mu sync.Mutex + + config Config + context context.Context + processes map[string]rproc.Process + events chan interface{} + platform rproc.Platform + ec chan proc.Exit + + // Filled by Create() + id string + bundle string +} + +// Create a new initial process and container with the underlying OCI runtime +func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *shimapi.CreateTaskResponse, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + var mounts []proc.Mount + for _, m := range r.Rootfs { + mounts = append(mounts, proc.Mount{ + Type: m.Type, + Source: m.Source, + Target: m.Target, + 
Options: m.Options, + }) + } + + config := &proc.CreateConfig{ + ID: r.ID, + Bundle: r.Bundle, + Runtime: r.Runtime, + Rootfs: mounts, + Terminal: r.Terminal, + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Options: r.Options, + } + rootfs := filepath.Join(r.Bundle, "rootfs") + defer func() { + if err != nil { + if err2 := mount.UnmountAll(rootfs, 0); err2 != nil { + log.G(ctx).WithError(err2).Warn("Failed to cleanup rootfs mount") + } + } + }() + for _, rm := range mounts { + m := &mount.Mount{ + Type: rm.Type, + Source: rm.Source, + Options: rm.Options, + } + if err := m.Mount(rootfs); err != nil { + return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m) + } + } + process, err := newInit( + ctx, + s.config.Path, + s.config.WorkDir, + s.config.RuntimeRoot, + s.config.Namespace, + s.config.RunscConfig, + s.platform, + config, + ) + if err := process.Create(ctx, config); err != nil { + return nil, errdefs.ToGRPC(err) + } + // save the main task id and bundle to the shim for additional requests + s.id = r.ID + s.bundle = r.Bundle + pid := process.Pid() + s.processes[r.ID] = process + return &shimapi.CreateTaskResponse{ + Pid: uint32(pid), + }, nil +} + +// Start a process +func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi.StartResponse, error) { + p, err := s.getExecProcess(r.ID) + if err != nil { + return nil, err + } + if err := p.Start(ctx); err != nil { + return nil, err + } + return &shimapi.StartResponse{ + ID: p.ID(), + Pid: uint32(p.Pid()), + }, nil +} + +// Delete the initial process and container +func (s *Service) Delete(ctx context.Context, r *ptypes.Empty) (*shimapi.DeleteResponse, error) { + p, err := s.getInitProcess() + if err != nil { + return nil, err + } + if err := p.Delete(ctx); err != nil { + return nil, err + } + s.mu.Lock() + delete(s.processes, s.id) + s.mu.Unlock() + s.platform.Close() + return &shimapi.DeleteResponse{ + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + Pid: uint32(p.Pid()), + }, nil +} + +// DeleteProcess deletes an exec'd process +func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessRequest) (*shimapi.DeleteResponse, error) { + if r.ID == s.id { + return nil, status.Errorf(codes.InvalidArgument, "cannot delete init process with DeleteProcess") + } + p, err := s.getExecProcess(r.ID) + if err != nil { + return nil, err + } + if err := p.Delete(ctx); err != nil { + return nil, err + } + s.mu.Lock() + delete(s.processes, r.ID) + s.mu.Unlock() + return &shimapi.DeleteResponse{ + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + Pid: uint32(p.Pid()), + }, nil +} + +// Exec an additional process inside the container +func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*ptypes.Empty, error) { + s.mu.Lock() + + if p := s.processes[r.ID]; p != nil { + s.mu.Unlock() + return nil, errdefs.ToGRPCf(errdefs.ErrAlreadyExists, "id %s", r.ID) + } + + p := s.processes[s.id] + s.mu.Unlock() + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + + process, err := p.(*proc.Init).Exec(ctx, s.config.Path, &proc.ExecConfig{ + ID: r.ID, + Terminal: r.Terminal, + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Spec: r.Spec, + }) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + s.mu.Lock() + s.processes[r.ID] = process + s.mu.Unlock() + return empty, nil +} + +// ResizePty of a process +func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) 
(*ptypes.Empty, error) { + if r.ID == "" { + return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "id not provided") + } + ws := console.WinSize{ + Width: uint16(r.Width), + Height: uint16(r.Height), + } + p, err := s.getExecProcess(r.ID) + if err != nil { + return nil, err + } + if err := p.Resize(ws); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +// State returns runtime state information for a process +func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi.StateResponse, error) { + p, err := s.getExecProcess(r.ID) + if err != nil { + return nil, err + } + st, err := p.Status(ctx) + if err != nil { + return nil, err + } + status := task.StatusUnknown + switch st { + case "created": + status = task.StatusCreated + case "running": + status = task.StatusRunning + case "stopped": + status = task.StatusStopped + } + sio := p.Stdio() + return &shimapi.StateResponse{ + ID: p.ID(), + Bundle: s.bundle, + Pid: uint32(p.Pid()), + Status: status, + Stdin: sio.Stdin, + Stdout: sio.Stdout, + Stderr: sio.Stderr, + Terminal: sio.Terminal, + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + }, nil +} + +// Pause the container +func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { + return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) +} + +// Resume the container +func (s *Service) Resume(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { + return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) +} + +// Kill a process with the provided signal +func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*ptypes.Empty, error) { + if r.ID == "" { + p, err := s.getInitProcess() + if err != nil { + return nil, err + } + if err := p.Kill(ctx, r.Signal, r.All); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil + } + + p, err := s.getExecProcess(r.ID) + if err != nil { + return nil, err + } + if err := p.Kill(ctx, r.Signal, r.All); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +// ListPids returns all pids inside the container +func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*shimapi.ListPidsResponse, error) { + pids, err := s.getContainerPids(ctx, r.ID) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + var processes []*task.ProcessInfo + for _, pid := range pids { + pInfo := task.ProcessInfo{ + Pid: pid, + } + for _, p := range s.processes { + if p.Pid() == int(pid) { + d := &runctypes.ProcessDetails{ + ExecID: p.ID(), + } + a, err := typeurl.MarshalAny(d) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) + } + pInfo.Info = a + break + } + } + processes = append(processes, &pInfo) + } + return &shimapi.ListPidsResponse{ + Processes: processes, + }, nil +} + +// CloseIO of a process +func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*ptypes.Empty, error) { + p, err := s.getExecProcess(r.ID) + if err != nil { + return nil, err + } + if stdin := p.Stdin(); stdin != nil { + if err := stdin.Close(); err != nil { + return nil, errors.Wrap(err, "close stdin") + } + } + return empty, nil +} + +// Checkpoint the container +func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*ptypes.Empty, error) { + return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) +} + +// ShimInfo returns shim information such as the shim's pid +func (s *Service) ShimInfo(ctx context.Context, r *ptypes.Empty) 
(*shimapi.ShimInfoResponse, error) { + return &shimapi.ShimInfoResponse{ + ShimPid: uint32(os.Getpid()), + }, nil +} + +// Update a running container +func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*ptypes.Empty, error) { + return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) +} + +// Wait for a process to exit +func (s *Service) Wait(ctx context.Context, r *shimapi.WaitRequest) (*shimapi.WaitResponse, error) { + p, err := s.getExecProcess(r.ID) + if err != nil { + return nil, err + } + p.Wait() + + return &shimapi.WaitResponse{ + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + }, nil +} + +func (s *Service) processExits() { + for e := range s.ec { + s.checkProcesses(e) + } +} + +func (s *Service) allProcesses() []rproc.Process { + s.mu.Lock() + defer s.mu.Unlock() + + res := make([]rproc.Process, 0, len(s.processes)) + for _, p := range s.processes { + res = append(res, p) + } + return res +} + +func (s *Service) checkProcesses(e proc.Exit) { + for _, p := range s.allProcesses() { + if p.ID() == e.ID { + if ip, ok := p.(*proc.Init); ok { + // Ensure all children are killed + if err := ip.KillAll(s.context); err != nil { + log.G(s.context).WithError(err).WithField("id", ip.ID()). + Error("failed to kill init's children") + } + } + p.SetExited(e.Status) + s.events <- &eventstypes.TaskExit{ + ContainerID: s.id, + ID: p.ID(), + Pid: uint32(p.Pid()), + ExitStatus: uint32(e.Status), + ExitedAt: p.ExitedAt(), + } + return + } + } +} + +func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, error) { + p, err := s.getInitProcess() + if err != nil { + return nil, err + } + + ps, err := p.(*proc.Init).Runtime().Ps(ctx, id) + if err != nil { + return nil, err + } + pids := make([]uint32, 0, len(ps)) + for _, pid := range ps { + pids = append(pids, uint32(pid)) + } + return pids, nil +} + +func (s *Service) forward(publisher events.Publisher) { + for e := range s.events { + if err := publisher.Publish(s.context, getTopic(s.context, e), e); err != nil { + log.G(s.context).WithError(err).Error("post event") + } + } +} + +// getInitProcess returns initial process +func (s *Service) getInitProcess() (rproc.Process, error) { + s.mu.Lock() + defer s.mu.Unlock() + p := s.processes[s.id] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + return p, nil +} + +// getExecProcess returns exec process +func (s *Service) getExecProcess(id string) (rproc.Process, error) { + s.mu.Lock() + defer s.mu.Unlock() + p := s.processes[id] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process %s does not exist", id) + } + return p, nil +} + +func getTopic(ctx context.Context, e interface{}) string { + switch e.(type) { + case *eventstypes.TaskCreate: + return runtime.TaskCreateEventTopic + case *eventstypes.TaskStart: + return runtime.TaskStartEventTopic + case *eventstypes.TaskOOM: + return runtime.TaskOOMEventTopic + case *eventstypes.TaskExit: + return runtime.TaskExitEventTopic + case *eventstypes.TaskDelete: + return runtime.TaskDeleteEventTopic + case *eventstypes.TaskExecAdded: + return runtime.TaskExecAddedEventTopic + case *eventstypes.TaskExecStarted: + return runtime.TaskExecStartedEventTopic + default: + logrus.Warnf("no topic for type %#v", e) + } + return runtime.TaskUnknownTopic +} + +func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace string, config map[string]string, platform rproc.Platform, r *proc.CreateConfig) (*proc.Init, error) { + 
var options runctypes.CreateOptions + if r.Options != nil { + v, err := typeurl.UnmarshalAny(r.Options) + if err != nil { + return nil, err + } + options = *v.(*runctypes.CreateOptions) + } + + spec, err := readSpec(r.Bundle) + if err != nil { + return nil, errors.Wrap(err, "read oci spec") + } + userLog := runsc.FormatLogPath(r.ID, config) + rootfs := filepath.Join(path, "rootfs") + runtime := proc.NewRunsc(runtimeRoot, path, namespace, r.Runtime, config) + p := proc.New(r.ID, runtime, rproc.Stdio{ + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Terminal: r.Terminal, + }) + p.Bundle = r.Bundle + p.Platform = platform + p.Rootfs = rootfs + p.WorkDir = workDir + p.IoUID = int(options.IoUid) + p.IoGID = int(options.IoGid) + p.Sandbox = isSandbox(spec) + p.UserLog = userLog + return p, nil +} + +func readSpec(bundle string) (*specs.Spec, error) { + f, err := os.Open(filepath.Join(bundle, "config.json")) + if err != nil { + return nil, err + } + b, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + var spec specs.Spec + if err := json.Unmarshal(b, &spec); err != nil { + return nil, err + } + return &spec, nil +} + +func isSandbox(spec *specs.Spec) bool { + t, ok := spec.Annotations[annotations.ContainerType] + return !ok || t == annotations.ContainerTypeSandbox +} -- cgit v1.2.3 From 35db607dfccd3c03b69f1a42c58cf58ec8259e37 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Tue, 29 Jan 2019 18:51:18 -0800 Subject: Add containerd shim v2 support. (#13) * Update vendors Signed-off-by: Lantao Liu * Add containerd shim v2 support. Signed-off-by: Lantao Liu * Add test and doc for containerd-shim-runsc-v1. Signed-off-by: Lantao Liu * Address comments. --- .travis.yml | 1 + Makefile | 6 + README.md | 1 + cmd/containerd-shim-runsc-v1/main.go | 24 + cmd/gvisor-containerd-shim/main.go | 2 +- docs/README.md | 1 + docs/runtime-handler-quickstart.md | 20 +- docs/runtime-handler-shim-v2-quickstart.md | 187 + docs/untrusted-workload-quickstart.md | 20 +- pkg/proc/deleted_state.go | 53 - pkg/proc/exec.go | 254 - pkg/proc/exec_state.go | 187 - pkg/proc/init.go | 416 -- pkg/proc/init_state.go | 224 - pkg/proc/io.go | 146 - pkg/proc/process.go | 39 - pkg/proc/types.go | 62 - pkg/proc/utils.go | 94 - pkg/shim/platform.go | 112 - pkg/shim/service.go | 595 -- pkg/v1/proc/deleted_state.go | 53 + pkg/v1/proc/exec.go | 249 + pkg/v1/proc/exec_state.go | 187 + pkg/v1/proc/init.go | 413 ++ pkg/v1/proc/init_state.go | 224 + pkg/v1/proc/io.go | 146 + pkg/v1/proc/process.go | 39 + pkg/v1/proc/types.go | 72 + pkg/v1/proc/utils.go | 94 + pkg/v1/shim/platform.go | 112 + pkg/v1/shim/service.go | 573 ++ pkg/v1/utils/utils.go | 50 + pkg/v2/options/options.go | 34 + pkg/v2/service.go | 718 +++ pkg/v2/service_linux.go | 111 + test/e2e/runtime-handler-shim-v2/install.sh | 21 + test/e2e/runtime-handler-shim-v2/test.sh | 34 + test/e2e/runtime-handler-shim-v2/validate.sh | 7 + test/e2e/runtime-handler/usage.sh | 4 +- test/e2e/shim-install.sh | 16 +- vendor.conf | 8 +- vendor/github.com/BurntSushi/toml/COPYING | 27 +- vendor/github.com/BurntSushi/toml/lex.go | 2 +- vendor/github.com/containerd/cgroups/LICENSE | 201 + vendor/github.com/containerd/cgroups/README.md | 112 + vendor/github.com/containerd/cgroups/blkio.go | 326 ++ vendor/github.com/containerd/cgroups/cgroup.go | 445 ++ vendor/github.com/containerd/cgroups/control.go | 77 + vendor/github.com/containerd/cgroups/cpu.go | 129 + vendor/github.com/containerd/cgroups/cpuacct.go | 121 + vendor/github.com/containerd/cgroups/cpuset.go | 159 + 
vendor/github.com/containerd/cgroups/devices.go | 90 + vendor/github.com/containerd/cgroups/errors.go | 47 + vendor/github.com/containerd/cgroups/freezer.go | 82 + vendor/github.com/containerd/cgroups/hierarchy.go | 20 + vendor/github.com/containerd/cgroups/hugetlb.go | 109 + vendor/github.com/containerd/cgroups/memory.go | 325 ++ vendor/github.com/containerd/cgroups/metrics.pb.go | 4288 +++++++++++++++ vendor/github.com/containerd/cgroups/metrics.proto | 123 + vendor/github.com/containerd/cgroups/named.go | 39 + vendor/github.com/containerd/cgroups/net_cls.go | 58 + vendor/github.com/containerd/cgroups/net_prio.go | 66 + vendor/github.com/containerd/cgroups/paths.go | 104 + vendor/github.com/containerd/cgroups/perf_event.go | 37 + vendor/github.com/containerd/cgroups/pids.go | 85 + vendor/github.com/containerd/cgroups/rdma.go | 153 + vendor/github.com/containerd/cgroups/state.go | 28 + vendor/github.com/containerd/cgroups/subsystem.go | 112 + vendor/github.com/containerd/cgroups/systemd.go | 131 + vendor/github.com/containerd/cgroups/ticks.go | 26 + vendor/github.com/containerd/cgroups/utils.go | 297 + vendor/github.com/containerd/cgroups/v1.go | 81 + .../containerd/containerd/runtime/v2/README.md | 173 + .../containerd/runtime/v2/shim/reaper_unix.go | 110 + .../containerd/containerd/runtime/v2/shim/shim.go | 269 + .../containerd/runtime/v2/shim/shim_darwin.go | 29 + .../containerd/runtime/v2/shim/shim_linux.go | 30 + .../containerd/runtime/v2/shim/shim_unix.go | 117 + .../containerd/runtime/v2/shim/shim_windows.go | 307 ++ .../containerd/containerd/runtime/v2/shim/util.go | 128 + .../containerd/runtime/v2/shim/util_unix.go | 70 + .../containerd/runtime/v2/shim/util_windows.go | 90 + .../containerd/containerd/runtime/v2/task/doc.go | 17 + .../containerd/runtime/v2/task/shim.pb.go | 5693 ++++++++++++++++++++ .../containerd/runtime/v2/task/shim.proto | 184 + vendor/github.com/containerd/cri/README.md | 6 +- .../cri/pkg/api/runtimeoptions/v1/api.pb.go | 394 ++ .../cri/pkg/api/runtimeoptions/v1/api.proto | 22 + vendor/github.com/containerd/cri/vendor.conf | 30 +- vendor/github.com/coreos/go-systemd/LICENSE | 191 + vendor/github.com/coreos/go-systemd/README.md | 54 + vendor/github.com/coreos/go-systemd/dbus/dbus.go | 213 + .../github.com/coreos/go-systemd/dbus/methods.go | 565 ++ .../coreos/go-systemd/dbus/properties.go | 237 + vendor/github.com/coreos/go-systemd/dbus/set.go | 47 + .../coreos/go-systemd/dbus/subscription.go | 250 + .../coreos/go-systemd/dbus/subscription_set.go | 57 + vendor/github.com/docker/go-units/LICENSE | 191 + vendor/github.com/docker/go-units/README.md | 16 + vendor/github.com/docker/go-units/duration.go | 33 + vendor/github.com/docker/go-units/size.go | 96 + vendor/github.com/docker/go-units/ulimit.go | 118 + vendor/github.com/godbus/dbus/LICENSE | 25 + vendor/github.com/godbus/dbus/README.markdown | 41 + vendor/github.com/godbus/dbus/auth.go | 253 + vendor/github.com/godbus/dbus/auth_external.go | 26 + vendor/github.com/godbus/dbus/auth_sha1.go | 102 + vendor/github.com/godbus/dbus/call.go | 36 + vendor/github.com/godbus/dbus/conn.go | 625 +++ vendor/github.com/godbus/dbus/conn_darwin.go | 21 + vendor/github.com/godbus/dbus/conn_other.go | 27 + vendor/github.com/godbus/dbus/dbus.go | 258 + vendor/github.com/godbus/dbus/decoder.go | 228 + vendor/github.com/godbus/dbus/doc.go | 63 + vendor/github.com/godbus/dbus/encoder.go | 208 + vendor/github.com/godbus/dbus/export.go | 411 ++ vendor/github.com/godbus/dbus/homedir.go | 28 + 
vendor/github.com/godbus/dbus/homedir_dynamic.go | 15 + vendor/github.com/godbus/dbus/homedir_static.go | 45 + vendor/github.com/godbus/dbus/message.go | 346 ++ vendor/github.com/godbus/dbus/object.go | 126 + vendor/github.com/godbus/dbus/sig.go | 257 + vendor/github.com/godbus/dbus/transport_darwin.go | 6 + vendor/github.com/godbus/dbus/transport_generic.go | 35 + vendor/github.com/godbus/dbus/transport_unix.go | 196 + .../godbus/dbus/transport_unixcred_dragonfly.go | 95 + .../godbus/dbus/transport_unixcred_linux.go | 25 + vendor/github.com/godbus/dbus/variant.go | 139 + vendor/github.com/godbus/dbus/variant_lexer.go | 284 + vendor/github.com/godbus/dbus/variant_parser.go | 817 +++ vendor/github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../github.com/gogo/protobuf/gogoproto/gogo.pb.go | 804 +++ .../github.com/gogo/protobuf/gogoproto/gogo.proto | 133 + .../github.com/gogo/protobuf/gogoproto/helper.go | 357 ++ .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../protoc-gen-gogo/descriptor/descriptor.pb.go | 2280 ++++++++ .../descriptor/descriptor_gostring.gen.go | 772 +++ .../protobuf/protoc-gen-gogo/descriptor/helper.go | 390 ++ 138 files changed, 30748 insertions(+), 2259 deletions(-) create mode 100644 cmd/containerd-shim-runsc-v1/main.go create mode 100644 docs/runtime-handler-shim-v2-quickstart.md delete mode 100644 pkg/proc/deleted_state.go delete mode 100644 pkg/proc/exec.go delete mode 100644 pkg/proc/exec_state.go delete mode 100644 pkg/proc/init.go delete mode 100644 pkg/proc/init_state.go delete mode 100644 pkg/proc/io.go delete mode 100644 pkg/proc/process.go delete mode 100644 pkg/proc/types.go delete mode 100644 pkg/proc/utils.go delete mode 100644 pkg/shim/platform.go delete mode 100644 pkg/shim/service.go create mode 100644 pkg/v1/proc/deleted_state.go create mode 100644 pkg/v1/proc/exec.go create mode 100644 pkg/v1/proc/exec_state.go create mode 100644 pkg/v1/proc/init.go create mode 100644 pkg/v1/proc/init_state.go create mode 100644 pkg/v1/proc/io.go create mode 100644 pkg/v1/proc/process.go create mode 100644 pkg/v1/proc/types.go create mode 100644 pkg/v1/proc/utils.go create mode 100644 pkg/v1/shim/platform.go create mode 100644 pkg/v1/shim/service.go create mode 100644 pkg/v1/utils/utils.go create mode 100644 pkg/v2/options/options.go create mode 100644 pkg/v2/service.go create mode 100644 pkg/v2/service_linux.go create mode 100755 test/e2e/runtime-handler-shim-v2/install.sh create mode 100755 test/e2e/runtime-handler-shim-v2/test.sh create mode 100755 test/e2e/runtime-handler-shim-v2/validate.sh create mode 100644 vendor/github.com/containerd/cgroups/LICENSE create mode 100644 vendor/github.com/containerd/cgroups/README.md create mode 100644 vendor/github.com/containerd/cgroups/blkio.go create mode 100644 vendor/github.com/containerd/cgroups/cgroup.go create mode 100644 vendor/github.com/containerd/cgroups/control.go create mode 100644 vendor/github.com/containerd/cgroups/cpu.go create mode 100644 vendor/github.com/containerd/cgroups/cpuacct.go create mode 100644 vendor/github.com/containerd/cgroups/cpuset.go create mode 100644 vendor/github.com/containerd/cgroups/devices.go create mode 100644 vendor/github.com/containerd/cgroups/errors.go create mode 100644 vendor/github.com/containerd/cgroups/freezer.go create mode 100644 vendor/github.com/containerd/cgroups/hierarchy.go create mode 100644 vendor/github.com/containerd/cgroups/hugetlb.go create mode 100644 vendor/github.com/containerd/cgroups/memory.go create mode 100644 
vendor/github.com/containerd/cgroups/metrics.pb.go create mode 100644 vendor/github.com/containerd/cgroups/metrics.proto create mode 100644 vendor/github.com/containerd/cgroups/named.go create mode 100644 vendor/github.com/containerd/cgroups/net_cls.go create mode 100644 vendor/github.com/containerd/cgroups/net_prio.go create mode 100644 vendor/github.com/containerd/cgroups/paths.go create mode 100644 vendor/github.com/containerd/cgroups/perf_event.go create mode 100644 vendor/github.com/containerd/cgroups/pids.go create mode 100644 vendor/github.com/containerd/cgroups/rdma.go create mode 100644 vendor/github.com/containerd/cgroups/state.go create mode 100644 vendor/github.com/containerd/cgroups/subsystem.go create mode 100644 vendor/github.com/containerd/cgroups/systemd.go create mode 100644 vendor/github.com/containerd/cgroups/ticks.go create mode 100644 vendor/github.com/containerd/cgroups/utils.go create mode 100644 vendor/github.com/containerd/cgroups/v1.go create mode 100644 vendor/github.com/containerd/containerd/runtime/v2/README.md create mode 100644 vendor/github.com/containerd/containerd/runtime/v2/shim/reaper_unix.go create mode 100644 vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go create mode 100644 vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go create mode 100644 vendor/github.com/containerd/containerd/runtime/v2/shim/shim_linux.go create mode 100644 vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go create mode 100644 vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go create mode 100644 vendor/github.com/containerd/containerd/runtime/v2/shim/util.go create mode 100644 vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go create mode 100644 vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go create mode 100644 vendor/github.com/containerd/containerd/runtime/v2/task/doc.go create mode 100644 vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go create mode 100644 vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto create mode 100644 vendor/github.com/containerd/cri/pkg/api/runtimeoptions/v1/api.pb.go create mode 100644 vendor/github.com/containerd/cri/pkg/api/runtimeoptions/v1/api.proto create mode 100644 vendor/github.com/coreos/go-systemd/LICENSE create mode 100644 vendor/github.com/coreos/go-systemd/README.md create mode 100644 vendor/github.com/coreos/go-systemd/dbus/dbus.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/methods.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/properties.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/set.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/subscription.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/subscription_set.go create mode 100644 vendor/github.com/docker/go-units/LICENSE create mode 100644 vendor/github.com/docker/go-units/README.md create mode 100644 vendor/github.com/docker/go-units/duration.go create mode 100644 vendor/github.com/docker/go-units/size.go create mode 100644 vendor/github.com/docker/go-units/ulimit.go create mode 100644 vendor/github.com/godbus/dbus/LICENSE create mode 100644 vendor/github.com/godbus/dbus/README.markdown create mode 100644 vendor/github.com/godbus/dbus/auth.go create mode 100644 vendor/github.com/godbus/dbus/auth_external.go create mode 100644 vendor/github.com/godbus/dbus/auth_sha1.go create mode 100644 vendor/github.com/godbus/dbus/call.go create mode 100644 
vendor/github.com/godbus/dbus/conn.go create mode 100644 vendor/github.com/godbus/dbus/conn_darwin.go create mode 100644 vendor/github.com/godbus/dbus/conn_other.go create mode 100644 vendor/github.com/godbus/dbus/dbus.go create mode 100644 vendor/github.com/godbus/dbus/decoder.go create mode 100644 vendor/github.com/godbus/dbus/doc.go create mode 100644 vendor/github.com/godbus/dbus/encoder.go create mode 100644 vendor/github.com/godbus/dbus/export.go create mode 100644 vendor/github.com/godbus/dbus/homedir.go create mode 100644 vendor/github.com/godbus/dbus/homedir_dynamic.go create mode 100644 vendor/github.com/godbus/dbus/homedir_static.go create mode 100644 vendor/github.com/godbus/dbus/message.go create mode 100644 vendor/github.com/godbus/dbus/object.go create mode 100644 vendor/github.com/godbus/dbus/sig.go create mode 100644 vendor/github.com/godbus/dbus/transport_darwin.go create mode 100644 vendor/github.com/godbus/dbus/transport_generic.go create mode 100644 vendor/github.com/godbus/dbus/transport_unix.go create mode 100644 vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go create mode 100644 vendor/github.com/godbus/dbus/transport_unixcred_linux.go create mode 100644 vendor/github.com/godbus/dbus/variant.go create mode 100644 vendor/github.com/godbus/dbus/variant_lexer.go create mode 100644 vendor/github.com/godbus/dbus/variant_parser.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go (limited to 'pkg/shim') diff --git a/.travis.yml b/.travis.yml index 4aaccf523..57bb27901 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,6 +7,7 @@ env: - CONTAINERD_VERSION=1.1.5 RUNSC_VERSION=2018-12-07 TEST=untrusted-workload - CONTAINERD_VERSION=1.2.1 RUNSC_VERSION=2018-12-07 TEST=untrusted-workload - CONTAINERD_VERSION=1.2.1 RUNSC_VERSION=2018-12-07 TEST=runtime-handler + - CONTAINERD_VERSION=1.2.1 RUNSC_VERSION=2018-12-07 TEST=runtime-handler-shim-v2 go_import_path: github.com/google/gvisor-containerd-shim diff --git a/Makefile b/Makefile index 89e705ddf..0404fc0ee 100644 --- a/Makefile +++ b/Makefile @@ -7,16 +7,22 @@ SOURCES=$(shell find cmd/ pkg/ vendor/ -name '*.go') DEPLOY_PATH=cri-containerd-staging/gvisor-containerd-shim VERSION=$(shell git rev-parse HEAD) +all: bin/gvisor-containerd-shim bin/containerd-shim-runsc-v1 + bin/gvisor-containerd-shim: $(SOURCES) CGO_ENABLED=0 go build ${GO_BUILD_FLAGS} -o bin/gvisor-containerd-shim ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/gvisor-containerd-shim +bin/containerd-shim-runsc-v1: $(SOURCES) + CGO_ENABLED=0 go build ${GO_BUILD_FLAGS} -o bin/containerd-shim-runsc-v1 ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runsc-v1 install: bin/gvisor-containerd-shim mkdir -p $(DESTDIR)/bin install bin/gvisor-containerd-shim $(DESTDIR)/bin + install bin/containerd-shim-runsc-v1 $(DESTDIR)/bin uninstall: rm -f $(DESTDIR)/bin/gvisor-containerd-shim + rm -f $(DESTDIR)/bin/containerd-shim-runsc-v1 clean: rm -rf bin/* diff --git a/README.md 
b/README.md index cabd715d7..26efdbc2a 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,7 @@ gvisor-containerd-shim is a containerd shim for [gVisor](https://github.com/goog - [Untrusted Workload Quick Start (containerd >=1.1)](docs/untrusted-workload-quickstart.md) - [Runtime Handler Quick Start (containerd >=1.2)](docs/runtime-handler-quickstart.md) +- [Runtime Handler Quick Start (shim v2) (containerd >=1.2)](docs/runtime-handler-shim-v2-quickstart.md) # Contributing diff --git a/cmd/containerd-shim-runsc-v1/main.go b/cmd/containerd-shim-runsc-v1/main.go new file mode 100644 index 000000000..ec73edbd2 --- /dev/null +++ b/cmd/containerd-shim-runsc-v1/main.go @@ -0,0 +1,24 @@ +/* + Copyright The containerd Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package main + +import ( + "github.com/containerd/containerd/runtime/v2/shim" + + runsc "github.com/google/gvisor-containerd-shim/pkg/v2" +) + +func main() { + shim.Run("io.containerd.runsc.v1", runsc.New) +} diff --git a/cmd/gvisor-containerd-shim/main.go b/cmd/gvisor-containerd-shim/main.go index cdea2d7b1..ea26aa105 100644 --- a/cmd/gvisor-containerd-shim/main.go +++ b/cmd/gvisor-containerd-shim/main.go @@ -48,7 +48,7 @@ import ( "golang.org/x/sys/unix" runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" - "github.com/google/gvisor-containerd-shim/pkg/shim" + "github.com/google/gvisor-containerd-shim/pkg/v1/shim" ) var ( diff --git a/docs/README.md b/docs/README.md index a0915e32c..eca857423 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,3 +4,4 @@ Everything you need to know about gvisor-containerd-shim - [Untrusted Workload Quick Start (containerd >=1.1)](untrusted-workload-quickstart.md) - [Runtime Handler Quick Start (containerd >=1.2)](runtime-handler-quickstart.md) +- [Runtime Handler Quick Start (shim v2) (containerd >=1.2)](runtime-handler-shim-v2-quickstart.md) diff --git a/docs/runtime-handler-quickstart.md b/docs/runtime-handler-quickstart.md index d97b99034..d8fea65fd 100644 --- a/docs/runtime-handler-quickstart.md +++ b/docs/runtime-handler-quickstart.md @@ -16,30 +16,22 @@ later. 1. Download the latest release of the `gvisor-containerd-shim`. See the [releases page](https://github.com/google/gvisor-containerd-shim/releases) -[embedmd]:# (../test/e2e/shim-install.sh shell /{ # Step 1/ /^}/) +[embedmd]:# (../test/e2e/shim-install.sh shell /{ # Step 1\(release\)/ /^}/) ```shell -{ # Step 1: Download gvisor-containerd-shim +{ # Step 1(release): Install gvisor-containerd-shim LATEST_RELEASE=$(wget -qO - https://api.github.com/repos/google/gvisor-containerd-shim/releases | grep -oP '(?<="browser_download_url": ")https://[^"]*' | head -1) wget -O gvisor-containerd-shim chmod +x gvisor-containerd-shim +sudo mv gvisor-containerd-shim /usr/local/bin/gvisor-containerd-shim } ``` -2. 
Copy the binary to the desired directory: - -[embedmd]:# (../test/e2e/shim-install.sh shell /{ # Step 2/ /^}/) -```shell -{ # Step 2: Copy the binary to the desired directory -sudo mv gvisor-containerd-shim-* /usr/local/bin/gvisor-containerd-shim -} -``` - -3. Create the configuration for the gvisor shim in +2. Create the configuration for the gvisor shim in `/etc/containerd/gvisor-containerd-shim.yaml`: -[embedmd]:# (../test/e2e/shim-install.sh shell /{ # Step 3/ /^}/) +[embedmd]:# (../test/e2e/shim-install.sh shell /{ # Step 2/ /^}/) ```shell -{ # Step 3: Create the gvisor-containerd-shim.yaml +{ # Step 2: Create the gvisor-containerd-shim.yaml cat < +[embedmd]:# (../test/e2e/shim-install.sh shell /{ # Step 1\(dev\)/ /^}/) +```shell +{ # Step 1(dev): Build and install gvisor-containerd-shim and containerd-shim-runsc-v1 + make + sudo make install +} +``` + +### Configure containerd + +1. Update `/etc/containerd/config.toml`. Make sure `containerd-shim-runsc-v1` is + in `${PATH}`. + +[embedmd]:# (../test/e2e/runtime-handler-shim-v2/install.sh shell /{ # Step 1/ /^}/) +```shell +{ # Step 1: Create containerd config.toml +cat < /tmp/containerd-cri.log & +} diff --git a/test/e2e/runtime-handler-shim-v2/test.sh b/test/e2e/runtime-handler-shim-v2/test.sh new file mode 100755 index 000000000..e33655ec1 --- /dev/null +++ b/test/e2e/runtime-handler-shim-v2/test.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# Runs end-to-end tests for gvisor-containerd-shim to test the use of runtime +# handler. This should work on containerd 1.2+ + +# This is meant to be run in a VM as it makes a fairly invasive install of +# containerd. + +set -ex + +# Install containerd +. ./test/e2e/containerd-install.sh + +# Install gVisor +. ./test/e2e/runsc-install.sh + +# Install gvisor-containerd-shim +. ./test/e2e/shim-install.sh + +# Test installation/configuration +. ./test/e2e/runtime-handler-shim-v2/install.sh + +# Install crictl +. ./test/e2e/crictl-install.sh + +# Test usage (the same with runtime-handler) +. ./test/e2e/runtime-handler/usage.sh + +# Run a container in the sandbox +. ./test/e2e/run-container.sh + +# Validate the pod and container +. ./test/e2e/validate.sh +. ./test/e2e/runtime-handler-shim-v2/validate.sh diff --git a/test/e2e/runtime-handler-shim-v2/validate.sh b/test/e2e/runtime-handler-shim-v2/validate.sh new file mode 100755 index 000000000..b74a059ef --- /dev/null +++ b/test/e2e/runtime-handler-shim-v2/validate.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# A sample script to validating the running containerd-shim-runsc-v1. + +set -ex + +ps aux | grep [c]ontainerd-shim-runsc-v1 diff --git a/test/e2e/runtime-handler/usage.sh b/test/e2e/runtime-handler/usage.sh index 1f8a09757..350c720c2 100755 --- a/test/e2e/runtime-handler/usage.sh +++ b/test/e2e/runtime-handler/usage.sh @@ -1,7 +1,7 @@ #!/bin/bash -# A sample script for testing the gvisor-containerd-shim # using untrusted -# workload extension. +# A sample script for testing the gvisor-containerd-shim +# using runtime handler. 
set -ex diff --git a/test/e2e/shim-install.sh b/test/e2e/shim-install.sh index 93587ea50..c0252bdd4 100755 --- a/test/e2e/shim-install.sh +++ b/test/e2e/shim-install.sh @@ -5,23 +5,21 @@ set -ex # Build gvisor-containerd-shim -if [ "${INSTALL_LATEST}" === "1" ]; then -{ # Step 1: Download gvisor-containerd-shim +if [ "${INSTALL_LATEST}" == "1" ]; then +{ # Step 1(release): Install gvisor-containerd-shim LATEST_RELEASE=$(wget -qO - https://api.github.com/repos/google/gvisor-containerd-shim/releases | grep -oP '(?<="browser_download_url": ")https://[^"]*' | head -1) wget -O gvisor-containerd-shim chmod +x gvisor-containerd-shim +sudo mv gvisor-containerd-shim /usr/local/bin/gvisor-containerd-shim } else +{ # Step 1(dev): Build and install gvisor-containerd-shim and containerd-shim-runsc-v1 make - mv bin/gvisor-containerd-shim gvisor-containerd-shim-dev -fi - -{ # Step 2: Copy the binary to the desired directory -sudo mv gvisor-containerd-shim-* /usr/local/bin/gvisor-containerd-shim + sudo make install } +fi - -{ # Step 3: Create the gvisor-containerd-shim.yaml +{ # Step 2: Create the gvisor-containerd-shim.yaml cat < -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index e0a742a88..6dee7fc79 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -775,7 +775,7 @@ func lexDatetime(lx *lexer) stateFn { return lexDatetime } switch r { - case '-', 'T', ':', '.', 'Z', '+': + case '-', 'T', ':', '.', 'Z': return lexDatetime } diff --git a/vendor/github.com/containerd/cgroups/LICENSE b/vendor/github.com/containerd/cgroups/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/containerd/cgroups/README.md b/vendor/github.com/containerd/cgroups/README.md new file mode 100644 index 000000000..69e932a9f --- /dev/null +++ b/vendor/github.com/containerd/cgroups/README.md @@ -0,0 +1,112 @@ +# cgroups + +[![Build Status](https://travis-ci.org/containerd/cgroups.svg?branch=master)](https://travis-ci.org/containerd/cgroups) + +[![codecov](https://codecov.io/gh/containerd/cgroups/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/cgroups) + +Go package for creating, managing, inspecting, and destroying cgroups. +The resources format for settings on the cgroup uses the OCI runtime-spec found +[here](https://github.com/opencontainers/runtime-spec). + +## Examples + +### Create a new cgroup + +This creates a new cgroup using a static path for all subsystems under `/test`. + +* /sys/fs/cgroup/cpu/test +* /sys/fs/cgroup/memory/test +* etc.... + +It uses a single hierarchy and specifies cpu shares as a resource constraint and +uses the v1 implementation of cgroups. + + +```go +shares := uint64(100) +control, err := cgroups.New(cgroups.V1, cgroups.StaticPath("/test"), &specs.LinuxResources{ + CPU: &specs.CPU{ + Shares: &shares, + }, +}) +defer control.Delete() +``` + +### Create with systemd slice support + + +```go +control, err := cgroups.New(cgroups.Systemd, cgroups.Slice("system.slice", "runc-test"), &specs.LinuxResources{ + CPU: &specs.CPU{ + Shares: &shares, + }, +}) + +``` + +### Load an existing cgroup + +```go +control, err = cgroups.Load(cgroups.V1, cgroups.StaticPath("/test")) +``` + +### Add a process to the cgroup + +```go +if err := control.Add(cgroups.Process{Pid:1234}); err != nil { +} +``` + +### Update the cgroup + +To update the resources applied in the cgroup + +```go +shares = uint64(200) +if err := control.Update(&specs.LinuxResources{ + CPU: &specs.CPU{ + Shares: &shares, + }, +}); err != nil { +} +``` + +### Freeze and Thaw the cgroup + +```go +if err := control.Freeze(); err != nil { +} +if err := control.Thaw(); err != nil { +} +``` + +### List all processes in the cgroup or recursively + +```go +processes, err := control.Processes(cgroups.Devices, recursive) +``` + +### Get Stats on the cgroup + +```go +stats, err := control.Stat() +``` + +By adding `cgroups.IgnoreNotExist` all non-existent files will be ignored, e.g. swap memory stats without swap enabled +```go +stats, err := control.Stat(cgroups.IgnoreNotExist) +``` + +### Move process across cgroups + +This allows you to take processes from one cgroup and move them to another. + +```go +err := control.MoveTo(destination) +``` + +### Create subcgroup + +```go +subCgroup, err := control.New("child", resources) +``` diff --git a/vendor/github.com/containerd/cgroups/blkio.go b/vendor/github.com/containerd/cgroups/blkio.go new file mode 100644 index 000000000..fc1e689cb --- /dev/null +++ b/vendor/github.com/containerd/cgroups/blkio.go @@ -0,0 +1,326 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cgroups + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewBlkio(root string) *blkioController { + return &blkioController{ + root: filepath.Join(root, string(Blkio)), + } +} + +type blkioController struct { + root string +} + +func (b *blkioController) Name() Name { + return Blkio +} + +func (b *blkioController) Path(path string) string { + return filepath.Join(b.root, path) +} + +func (b *blkioController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(b.Path(path), defaultDirPerm); err != nil { + return err + } + if resources.BlockIO == nil { + return nil + } + for _, t := range createBlkioSettings(resources.BlockIO) { + if t.value != nil { + if err := ioutil.WriteFile( + filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", t.name)), + t.format(t.value), + defaultFilePerm, + ); err != nil { + return err + } + } + } + return nil +} + +func (b *blkioController) Update(path string, resources *specs.LinuxResources) error { + return b.Create(path, resources) +} + +func (b *blkioController) Stat(path string, stats *Metrics) error { + stats.Blkio = &BlkIOStat{} + settings := []blkioStatSettings{ + { + name: "throttle.io_serviced", + entry: &stats.Blkio.IoServicedRecursive, + }, + { + name: "throttle.io_service_bytes", + entry: &stats.Blkio.IoServiceBytesRecursive, + }, + } + // Try to read CFQ stats available on all CFQ enabled kernels first + if _, err := os.Lstat(filepath.Join(b.Path(path), fmt.Sprintf("blkio.io_serviced_recursive"))); err == nil { + settings = append(settings, + blkioStatSettings{ + name: "sectors_recursive", + entry: &stats.Blkio.SectorsRecursive, + }, + blkioStatSettings{ + name: "io_service_bytes_recursive", + entry: &stats.Blkio.IoServiceBytesRecursive, + }, + blkioStatSettings{ + name: "io_serviced_recursive", + entry: &stats.Blkio.IoServicedRecursive, + }, + blkioStatSettings{ + name: "io_queued_recursive", + entry: &stats.Blkio.IoQueuedRecursive, + }, + blkioStatSettings{ + name: "io_service_time_recursive", + entry: &stats.Blkio.IoServiceTimeRecursive, + }, + blkioStatSettings{ + name: "io_wait_time_recursive", + entry: &stats.Blkio.IoWaitTimeRecursive, + }, + blkioStatSettings{ + name: "io_merged_recursive", + entry: &stats.Blkio.IoMergedRecursive, + }, + blkioStatSettings{ + name: "time_recursive", + entry: &stats.Blkio.IoTimeRecursive, + }, + ) + } + f, err := os.Open("/proc/diskstats") + if err != nil { + return err + } + defer f.Close() + + devices, err := getDevices(f) + if err != nil { + return err + } + + for _, t := range settings { + if err := b.readEntry(devices, path, t.name, t.entry); err != nil { + return err + } + } + return nil +} + +func (b *blkioController) readEntry(devices map[deviceKey]string, path, name string, entry *[]*BlkIOEntry) error { + f, err := os.Open(filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", name))) + if err != nil { + return err + } + defer f.Close() + sc := bufio.NewScanner(f) + for sc.Scan() { + if err := sc.Err(); err != nil { + return err + } + // format: dev type amount + fields := strings.FieldsFunc(sc.Text(), splitBlkIOStatLine) + if len(fields) < 3 { + if len(fields) == 2 && fields[0] == "Total" { + // skip total line + continue + } else { + return fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text()) + } + } + major, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return err + } + minor, err := 
strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return err + } + op := "" + valueField := 2 + if len(fields) == 4 { + op = fields[2] + valueField = 3 + } + v, err := strconv.ParseUint(fields[valueField], 10, 64) + if err != nil { + return err + } + *entry = append(*entry, &BlkIOEntry{ + Device: devices[deviceKey{major, minor}], + Major: major, + Minor: minor, + Op: op, + Value: v, + }) + } + return nil +} + +func createBlkioSettings(blkio *specs.LinuxBlockIO) []blkioSettings { + settings := []blkioSettings{ + { + name: "weight", + value: blkio.Weight, + format: uintf, + }, + { + name: "leaf_weight", + value: blkio.LeafWeight, + format: uintf, + }, + } + for _, wd := range blkio.WeightDevice { + settings = append(settings, + blkioSettings{ + name: "weight_device", + value: wd, + format: weightdev, + }, + blkioSettings{ + name: "leaf_weight_device", + value: wd, + format: weightleafdev, + }) + } + for _, t := range []struct { + name string + list []specs.LinuxThrottleDevice + }{ + { + name: "throttle.read_bps_device", + list: blkio.ThrottleReadBpsDevice, + }, + { + name: "throttle.read_iops_device", + list: blkio.ThrottleReadIOPSDevice, + }, + { + name: "throttle.write_bps_device", + list: blkio.ThrottleWriteBpsDevice, + }, + { + name: "throttle.write_iops_device", + list: blkio.ThrottleWriteIOPSDevice, + }, + } { + for _, td := range t.list { + settings = append(settings, blkioSettings{ + name: t.name, + value: td, + format: throttleddev, + }) + } + } + return settings +} + +type blkioSettings struct { + name string + value interface{} + format func(v interface{}) []byte +} + +type blkioStatSettings struct { + name string + entry *[]*BlkIOEntry +} + +func uintf(v interface{}) []byte { + return []byte(strconv.FormatUint(uint64(*v.(*uint16)), 10)) +} + +func weightdev(v interface{}) []byte { + wd := v.(specs.LinuxWeightDevice) + return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight)) +} + +func weightleafdev(v interface{}) []byte { + wd := v.(specs.LinuxWeightDevice) + return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight)) +} + +func throttleddev(v interface{}) []byte { + td := v.(specs.LinuxThrottleDevice) + return []byte(fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)) +} + +func splitBlkIOStatLine(r rune) bool { + return r == ' ' || r == ':' +} + +type deviceKey struct { + major, minor uint64 +} + +// getDevices makes a best effort attempt to read all the devices into a map +// keyed by major and minor number. Since devices may be mapped multiple times, +// we err on taking the first occurrence. 
+func getDevices(r io.Reader) (map[deviceKey]string, error) { + + var ( + s = bufio.NewScanner(r) + devices = make(map[deviceKey]string) + ) + for s.Scan() { + fields := strings.Fields(s.Text()) + major, err := strconv.Atoi(fields[0]) + if err != nil { + return nil, err + } + minor, err := strconv.Atoi(fields[1]) + if err != nil { + return nil, err + } + key := deviceKey{ + major: uint64(major), + minor: uint64(minor), + } + if _, ok := devices[key]; ok { + continue + } + devices[key] = filepath.Join("/dev", fields[2]) + } + return devices, s.Err() +} + +func major(devNumber uint64) uint64 { + return (devNumber >> 8) & 0xfff +} + +func minor(devNumber uint64) uint64 { + return (devNumber & 0xff) | ((devNumber >> 12) & 0xfff00) +} diff --git a/vendor/github.com/containerd/cgroups/cgroup.go b/vendor/github.com/containerd/cgroups/cgroup.go new file mode 100644 index 000000000..7959feb49 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/cgroup.go @@ -0,0 +1,445 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// New returns a new control via the cgroup cgroups interface +func New(hierarchy Hierarchy, path Path, resources *specs.LinuxResources) (Cgroup, error) { + subsystems, err := hierarchy() + if err != nil { + return nil, err + } + for _, s := range subsystems { + if err := initializeSubsystem(s, path, resources); err != nil { + return nil, err + } + } + return &cgroup{ + path: path, + subsystems: subsystems, + }, nil +} + +// Load will load an existing cgroup and allow it to be controlled +func Load(hierarchy Hierarchy, path Path) (Cgroup, error) { + subsystems, err := hierarchy() + if err != nil { + return nil, err + } + // check that the subsystems still exist + for _, s := range pathers(subsystems) { + p, err := path(s.Name()) + if err != nil { + if os.IsNotExist(errors.Cause(err)) { + return nil, ErrCgroupDeleted + } + return nil, err + } + if _, err := os.Lstat(s.Path(p)); err != nil { + if os.IsNotExist(err) { + return nil, ErrCgroupDeleted + } + return nil, err + } + } + return &cgroup{ + path: path, + subsystems: subsystems, + }, nil +} + +type cgroup struct { + path Path + + subsystems []Subsystem + mu sync.Mutex + err error +} + +// New returns a new sub cgroup +func (c *cgroup) New(name string, resources *specs.LinuxResources) (Cgroup, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return nil, c.err + } + path := subPath(c.path, name) + for _, s := range c.subsystems { + if err := initializeSubsystem(s, path, resources); err != nil { + return nil, err + } + } + return &cgroup{ + path: path, + subsystems: c.subsystems, + }, nil +} + +// Subsystems returns all the subsystems that are currently being +// consumed by the group +func (c *cgroup) Subsystems() []Subsystem { + return c.subsystems +} + +// Add moves the provided
process into the new cgroup +func (c *cgroup) Add(process Process) error { + if process.Pid <= 0 { + return ErrInvalidPid + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return c.err + } + return c.add(process) +} + +func (c *cgroup) add(process Process) error { + for _, s := range pathers(c.subsystems) { + p, err := c.path(s.Name()) + if err != nil { + return err + } + if err := ioutil.WriteFile( + filepath.Join(s.Path(p), cgroupProcs), + []byte(strconv.Itoa(process.Pid)), + defaultFilePerm, + ); err != nil { + return err + } + } + return nil +} + +// AddTask moves the provided tasks (threads) into the new cgroup +func (c *cgroup) AddTask(process Process) error { + if process.Pid <= 0 { + return ErrInvalidPid + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return c.err + } + return c.addTask(process) +} + +func (c *cgroup) addTask(process Process) error { + for _, s := range pathers(c.subsystems) { + p, err := c.path(s.Name()) + if err != nil { + return err + } + if err := ioutil.WriteFile( + filepath.Join(s.Path(p), cgroupTasks), + []byte(strconv.Itoa(process.Pid)), + defaultFilePerm, + ); err != nil { + return err + } + } + return nil +} + +// Delete will remove the control group from each of the subsystems registered +func (c *cgroup) Delete() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return c.err + } + var errors []string + for _, s := range c.subsystems { + if d, ok := s.(deleter); ok { + sp, err := c.path(s.Name()) + if err != nil { + return err + } + if err := d.Delete(sp); err != nil { + errors = append(errors, string(s.Name())) + } + continue + } + if p, ok := s.(pather); ok { + sp, err := c.path(s.Name()) + if err != nil { + return err + } + path := p.Path(sp) + if err := remove(path); err != nil { + errors = append(errors, path) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("cgroups: unable to remove paths %s", strings.Join(errors, ", ")) + } + c.err = ErrCgroupDeleted + return nil +} + +// Stat returns the current metrics for the cgroup +func (c *cgroup) Stat(handlers ...ErrorHandler) (*Metrics, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return nil, c.err + } + if len(handlers) == 0 { + handlers = append(handlers, errPassthrough) + } + var ( + stats = &Metrics{ + CPU: &CPUStat{ + Throttling: &Throttle{}, + Usage: &CPUUsage{}, + }, + } + wg = &sync.WaitGroup{} + errs = make(chan error, len(c.subsystems)) + ) + for _, s := range c.subsystems { + if ss, ok := s.(stater); ok { + sp, err := c.path(s.Name()) + if err != nil { + return nil, err + } + wg.Add(1) + go func() { + defer wg.Done() + if err := ss.Stat(sp, stats); err != nil { + for _, eh := range handlers { + if herr := eh(err); herr != nil { + errs <- herr + } + } + } + }() + } + } + wg.Wait() + close(errs) + for err := range errs { + return nil, err + } + return stats, nil +} + +// Update updates the cgroup with the new resource values provided +// +// Be prepared to handle EBUSY when trying to update a cgroup with +// live processes and other operations like Stats being performed at the +// same time +func (c *cgroup) Update(resources *specs.LinuxResources) error { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return c.err + } + for _, s := range c.subsystems { + if u, ok := s.(updater); ok { + sp, err := c.path(s.Name()) + if err != nil { + return err + } + if err := u.Update(sp, resources); err != nil { + return err + } + } + } + return nil +} + +// Processes returns the processes running inside the cgroup along +// with 
the subsystem used, pid, and path +func (c *cgroup) Processes(subsystem Name, recursive bool) ([]Process, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return nil, c.err + } + return c.processes(subsystem, recursive) +} + +func (c *cgroup) processes(subsystem Name, recursive bool) ([]Process, error) { + s := c.getSubsystem(subsystem) + sp, err := c.path(subsystem) + if err != nil { + return nil, err + } + path := s.(pather).Path(sp) + var processes []Process + err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !recursive && info.IsDir() { + if p == path { + return nil + } + return filepath.SkipDir + } + dir, name := filepath.Split(p) + if name != cgroupProcs { + return nil + } + procs, err := readPids(dir, subsystem) + if err != nil { + return err + } + processes = append(processes, procs...) + return nil + }) + return processes, err +} + +// Freeze freezes the entire cgroup and all the processes inside it +func (c *cgroup) Freeze() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return c.err + } + s := c.getSubsystem(Freezer) + if s == nil { + return ErrFreezerNotSupported + } + sp, err := c.path(Freezer) + if err != nil { + return err + } + return s.(*freezerController).Freeze(sp) +} + +// Thaw thaws out the cgroup and all the processes inside it +func (c *cgroup) Thaw() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return c.err + } + s := c.getSubsystem(Freezer) + if s == nil { + return ErrFreezerNotSupported + } + sp, err := c.path(Freezer) + if err != nil { + return err + } + return s.(*freezerController).Thaw(sp) +} + +// OOMEventFD returns the memory cgroup's out of memory event fd that triggers +// when processes inside the cgroup receive an oom event. Returns +// ErrMemoryNotSupported if memory cgroups is not supported. 
+func (c *cgroup) OOMEventFD() (uintptr, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return 0, c.err + } + s := c.getSubsystem(Memory) + if s == nil { + return 0, ErrMemoryNotSupported + } + sp, err := c.path(Memory) + if err != nil { + return 0, err + } + return s.(*memoryController).OOMEventFD(sp) +} + +// State returns the state of the cgroup and its processes +func (c *cgroup) State() State { + c.mu.Lock() + defer c.mu.Unlock() + c.checkExists() + if c.err != nil && c.err == ErrCgroupDeleted { + return Deleted + } + s := c.getSubsystem(Freezer) + if s == nil { + return Thawed + } + sp, err := c.path(Freezer) + if err != nil { + return Unknown + } + state, err := s.(*freezerController).state(sp) + if err != nil { + return Unknown + } + return state +} + +// MoveTo does a recursive move subsystem by subsystem of all the processes +// inside the group +func (c *cgroup) MoveTo(destination Cgroup) error { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return c.err + } + for _, s := range c.subsystems { + processes, err := c.processes(s.Name(), true) + if err != nil { + return err + } + for _, p := range processes { + if err := destination.Add(p); err != nil { + return err + } + } + } + return nil +} + +func (c *cgroup) getSubsystem(n Name) Subsystem { + for _, s := range c.subsystems { + if s.Name() == n { + return s + } + } + return nil +} + +func (c *cgroup) checkExists() { + for _, s := range pathers(c.subsystems) { + p, err := c.path(s.Name()) + if err != nil { + return + } + if _, err := os.Lstat(s.Path(p)); err != nil { + if os.IsNotExist(err) { + c.err = ErrCgroupDeleted + return + } + } + } +} diff --git a/vendor/github.com/containerd/cgroups/control.go b/vendor/github.com/containerd/cgroups/control.go new file mode 100644 index 000000000..63e2df93d --- /dev/null +++ b/vendor/github.com/containerd/cgroups/control.go @@ -0,0 +1,77 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "os" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + cgroupProcs = "cgroup.procs" + cgroupTasks = "tasks" + defaultDirPerm = 0755 +) + +// defaultFilePerm is a var so that the test framework can change the filemode +// of all files created when the tests are running. The difference between the +// tests and real world use is that files like "cgroup.procs" will exist when writing +// to a read cgroup filesystem and do not exist prior when running in the tests. 
+// this is set to a non-zero value in the test code +var defaultFilePerm = os.FileMode(0) + +type Process struct { + // Subsystem is the name of the subsystem that the process is in + Subsystem Name + // Pid is the process id of the process + Pid int + // Path is the full path of the subsystem and location that the process is in + Path string +} + +// Cgroup handles interactions with the individual groups to perform +// actions on them as the main interface to this cgroup package +type Cgroup interface { + // New creates a new cgroup under the calling cgroup + New(string, *specs.LinuxResources) (Cgroup, error) + // Add adds a process to the cgroup (cgroup.procs) + Add(Process) error + // AddTask adds a process to the cgroup (tasks) + AddTask(Process) error + // Delete removes the cgroup as a whole + Delete() error + // MoveTo moves all the processes under the calling cgroup to the provided one + // subsystems are moved one at a time + MoveTo(Cgroup) error + // Stat returns the stats for all subsystems in the cgroup + Stat(...ErrorHandler) (*Metrics, error) + // Update updates all the subsystems with the provided resource changes + Update(resources *specs.LinuxResources) error + // Processes returns all the processes in a select subsystem for the cgroup + Processes(Name, bool) ([]Process, error) + // Freeze freezes or pauses all processes inside the cgroup + Freeze() error + // Thaw thaws or resumes all processes inside the cgroup + Thaw() error + // OOMEventFD returns the memory subsystem's event fd for OOM events + OOMEventFD() (uintptr, error) + // State returns the cgroup's current state + State() State + // Subsystems returns all the subsystems in the cgroup + Subsystems() []Subsystem +} diff --git a/vendor/github.com/containerd/cgroups/cpu.go b/vendor/github.com/containerd/cgroups/cpu.go new file mode 100644 index 000000000..431cd3e51 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/cpu.go @@ -0,0 +1,129 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ + +package cgroups + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewCpu(root string) *cpuController { + return &cpuController{ + root: filepath.Join(root, string(Cpu)), + } +} + +type cpuController struct { + root string +} + +func (c *cpuController) Name() Name { + return Cpu +} + +func (c *cpuController) Path(path string) string { + return filepath.Join(c.root, path) +} + +func (c *cpuController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil { + return err + } + if cpu := resources.CPU; cpu != nil { + for _, t := range []struct { + name string + ivalue *int64 + uvalue *uint64 + }{ + { + name: "rt_period_us", + uvalue: cpu.RealtimePeriod, + }, + { + name: "rt_runtime_us", + ivalue: cpu.RealtimeRuntime, + }, + { + name: "shares", + uvalue: cpu.Shares, + }, + { + name: "cfs_period_us", + uvalue: cpu.Period, + }, + { + name: "cfs_quota_us", + ivalue: cpu.Quota, + }, + } { + var value []byte + if t.uvalue != nil { + value = []byte(strconv.FormatUint(*t.uvalue, 10)) + } else if t.ivalue != nil { + value = []byte(strconv.FormatInt(*t.ivalue, 10)) + } + if value != nil { + if err := ioutil.WriteFile( + filepath.Join(c.Path(path), fmt.Sprintf("cpu.%s", t.name)), + value, + defaultFilePerm, + ); err != nil { + return err + } + } + } + } + return nil +} + +func (c *cpuController) Update(path string, resources *specs.LinuxResources) error { + return c.Create(path, resources) +} + +func (c *cpuController) Stat(path string, stats *Metrics) error { + f, err := os.Open(filepath.Join(c.Path(path), "cpu.stat")) + if err != nil { + return err + } + defer f.Close() + // get or create the cpu field because cpuacct can also set values on this struct + sc := bufio.NewScanner(f) + for sc.Scan() { + if err := sc.Err(); err != nil { + return err + } + key, v, err := parseKV(sc.Text()) + if err != nil { + return err + } + switch key { + case "nr_periods": + stats.CPU.Throttling.Periods = v + case "nr_throttled": + stats.CPU.Throttling.ThrottledPeriods = v + case "throttled_time": + stats.CPU.Throttling.ThrottledTime = v + } + } + return nil +} diff --git a/vendor/github.com/containerd/cgroups/cpuacct.go b/vendor/github.com/containerd/cgroups/cpuacct.go new file mode 100644 index 000000000..42a490a87 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/cpuacct.go @@ -0,0 +1,121 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cgroups + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strconv" + "strings" +) + +const nanosecondsInSecond = 1000000000 + +var clockTicks = getClockTicks() + +func NewCpuacct(root string) *cpuacctController { + return &cpuacctController{ + root: filepath.Join(root, string(Cpuacct)), + } +} + +type cpuacctController struct { + root string +} + +func (c *cpuacctController) Name() Name { + return Cpuacct +} + +func (c *cpuacctController) Path(path string) string { + return filepath.Join(c.root, path) +} + +func (c *cpuacctController) Stat(path string, stats *Metrics) error { + user, kernel, err := c.getUsage(path) + if err != nil { + return err + } + total, err := readUint(filepath.Join(c.Path(path), "cpuacct.usage")) + if err != nil { + return err + } + percpu, err := c.percpuUsage(path) + if err != nil { + return err + } + stats.CPU.Usage.Total = total + stats.CPU.Usage.User = user + stats.CPU.Usage.Kernel = kernel + stats.CPU.Usage.PerCPU = percpu + return nil +} + +func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) { + var usage []uint64 + data, err := ioutil.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu")) + if err != nil { + return nil, err + } + for _, v := range strings.Fields(string(data)) { + u, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return nil, err + } + usage = append(usage, u) + } + return usage, nil +} + +func (c *cpuacctController) getUsage(path string) (user uint64, kernel uint64, err error) { + statPath := filepath.Join(c.Path(path), "cpuacct.stat") + data, err := ioutil.ReadFile(statPath) + if err != nil { + return 0, 0, err + } + fields := strings.Fields(string(data)) + if len(fields) != 4 { + return 0, 0, fmt.Errorf("%q is expected to have 4 fields", statPath) + } + for _, t := range []struct { + index int + name string + value *uint64 + }{ + { + index: 0, + name: "user", + value: &user, + }, + { + index: 2, + name: "system", + value: &kernel, + }, + } { + if fields[t.index] != t.name { + return 0, 0, fmt.Errorf("expected field %q but found %q in %q", t.name, fields[t.index], statPath) + } + v, err := strconv.ParseUint(fields[t.index+1], 10, 64) + if err != nil { + return 0, 0, err + } + *t.value = v + } + return (user * nanosecondsInSecond) / clockTicks, (kernel * nanosecondsInSecond) / clockTicks, nil +} diff --git a/vendor/github.com/containerd/cgroups/cpuset.go b/vendor/github.com/containerd/cgroups/cpuset.go new file mode 100644 index 000000000..f182aa68c --- /dev/null +++ b/vendor/github.com/containerd/cgroups/cpuset.go @@ -0,0 +1,159 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cgroups + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewCputset(root string) *cpusetController { + return &cpusetController{ + root: filepath.Join(root, string(Cpuset)), + } +} + +type cpusetController struct { + root string +} + +func (c *cpusetController) Name() Name { + return Cpuset +} + +func (c *cpusetController) Path(path string) string { + return filepath.Join(c.root, path) +} + +func (c *cpusetController) Create(path string, resources *specs.LinuxResources) error { + if err := c.ensureParent(c.Path(path), c.root); err != nil { + return err + } + if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil { + return err + } + if err := c.copyIfNeeded(c.Path(path), filepath.Dir(c.Path(path))); err != nil { + return err + } + if resources.CPU != nil { + for _, t := range []struct { + name string + value *string + }{ + { + name: "cpus", + value: &resources.CPU.Cpus, + }, + { + name: "mems", + value: &resources.CPU.Mems, + }, + } { + if t.value != nil { + if err := ioutil.WriteFile( + filepath.Join(c.Path(path), fmt.Sprintf("cpuset.%s", t.name)), + []byte(*t.value), + defaultFilePerm, + ); err != nil { + return err + } + } + } + } + return nil +} + +func (c *cpusetController) Update(path string, resources *specs.LinuxResources) error { + return c.Create(path, resources) +} + +func (c *cpusetController) getValues(path string) (cpus []byte, mems []byte, err error) { + if cpus, err = ioutil.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) { + return + } + if mems, err = ioutil.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) { + return + } + return cpus, mems, nil +} + +// ensureParent makes sure that the parent directory of current is created +// and populated with the proper cpus and mems files copied from +// its parent. +func (c *cpusetController) ensureParent(current, root string) error { + parent := filepath.Dir(current) + if _, err := filepath.Rel(root, parent); err != nil { + return nil + } + // Avoid infinite recursion.
+ if parent == current { + return fmt.Errorf("cpuset: cgroup parent path outside cgroup root") + } + if cleanPath(parent) != root { + if err := c.ensureParent(parent, root); err != nil { + return err + } + } + if err := os.MkdirAll(current, defaultDirPerm); err != nil { + return err + } + return c.copyIfNeeded(current, parent) +} + +// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent +// directory to the current directory if the file's contents are 0 +func (c *cpusetController) copyIfNeeded(current, parent string) error { + var ( + err error + currentCpus, currentMems []byte + parentCpus, parentMems []byte + ) + if currentCpus, currentMems, err = c.getValues(current); err != nil { + return err + } + if parentCpus, parentMems, err = c.getValues(parent); err != nil { + return err + } + if isEmpty(currentCpus) { + if err := ioutil.WriteFile( + filepath.Join(current, "cpuset.cpus"), + parentCpus, + defaultFilePerm, + ); err != nil { + return err + } + } + if isEmpty(currentMems) { + if err := ioutil.WriteFile( + filepath.Join(current, "cpuset.mems"), + parentMems, + defaultFilePerm, + ); err != nil { + return err + } + } + return nil +} + +func isEmpty(b []byte) bool { + return len(bytes.Trim(b, "\n")) == 0 +} diff --git a/vendor/github.com/containerd/cgroups/devices.go b/vendor/github.com/containerd/cgroups/devices.go new file mode 100644 index 000000000..f9a118b22 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/devices.go @@ -0,0 +1,90 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cgroups + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + allowDeviceFile = "devices.allow" + denyDeviceFile = "devices.deny" + wildcard = -1 +) + +func NewDevices(root string) *devicesController { + return &devicesController{ + root: filepath.Join(root, string(Devices)), + } +} + +type devicesController struct { + root string +} + +func (d *devicesController) Name() Name { + return Devices +} + +func (d *devicesController) Path(path string) string { + return filepath.Join(d.root, path) +} + +func (d *devicesController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(d.Path(path), defaultDirPerm); err != nil { + return err + } + for _, device := range resources.Devices { + file := denyDeviceFile + if device.Allow { + file = allowDeviceFile + } + if err := ioutil.WriteFile( + filepath.Join(d.Path(path), file), + []byte(deviceString(device)), + defaultFilePerm, + ); err != nil { + return err + } + } + return nil +} + +func (d *devicesController) Update(path string, resources *specs.LinuxResources) error { + return d.Create(path, resources) +} + +func deviceString(device specs.LinuxDeviceCgroup) string { + return fmt.Sprintf("%s %s:%s %s", + device.Type, + deviceNumber(device.Major), + deviceNumber(device.Minor), + device.Access, + ) +} + +func deviceNumber(number *int64) string { + if number == nil || *number == wildcard { + return "*" + } + return fmt.Sprint(*number) +} diff --git a/vendor/github.com/containerd/cgroups/errors.go b/vendor/github.com/containerd/cgroups/errors.go new file mode 100644 index 000000000..f1ad8315c --- /dev/null +++ b/vendor/github.com/containerd/cgroups/errors.go @@ -0,0 +1,47 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cgroups + +import ( + "errors" + "os" +) + +var ( + ErrInvalidPid = errors.New("cgroups: pid must be greater than 0") + ErrMountPointNotExist = errors.New("cgroups: cgroup mountpoint does not exist") + ErrInvalidFormat = errors.New("cgroups: parsing file with invalid format failed") + ErrFreezerNotSupported = errors.New("cgroups: freezer cgroup not supported on this system") + ErrMemoryNotSupported = errors.New("cgroups: memory cgroup not supported on this system") + ErrCgroupDeleted = errors.New("cgroups: cgroup deleted") + ErrNoCgroupMountDestination = errors.New("cgroups: cannot find cgroup mount destination") +) + +// ErrorHandler is a function that handles and acts on errors +type ErrorHandler func(err error) error + +// IgnoreNotExist ignores any errors that are for not existing files +func IgnoreNotExist(err error) error { + if os.IsNotExist(err) { + return nil + } + return err +} + +func errPassthrough(err error) error { + return err +} diff --git a/vendor/github.com/containerd/cgroups/freezer.go b/vendor/github.com/containerd/cgroups/freezer.go new file mode 100644 index 000000000..5e668408a --- /dev/null +++ b/vendor/github.com/containerd/cgroups/freezer.go @@ -0,0 +1,82 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "io/ioutil" + "path/filepath" + "strings" + "time" +) + +func NewFreezer(root string) *freezerController { + return &freezerController{ + root: filepath.Join(root, string(Freezer)), + } +} + +type freezerController struct { + root string +} + +func (f *freezerController) Name() Name { + return Freezer +} + +func (f *freezerController) Path(path string) string { + return filepath.Join(f.root, path) +} + +func (f *freezerController) Freeze(path string) error { + return f.waitState(path, Frozen) +} + +func (f *freezerController) Thaw(path string) error { + return f.waitState(path, Thawed) +} + +func (f *freezerController) changeState(path string, state State) error { + return ioutil.WriteFile( + filepath.Join(f.root, path, "freezer.state"), + []byte(strings.ToUpper(string(state))), + defaultFilePerm, + ) +} + +func (f *freezerController) state(path string) (State, error) { + current, err := ioutil.ReadFile(filepath.Join(f.root, path, "freezer.state")) + if err != nil { + return "", err + } + return State(strings.ToLower(strings.TrimSpace(string(current)))), nil +} + +func (f *freezerController) waitState(path string, state State) error { + for { + if err := f.changeState(path, state); err != nil { + return err + } + current, err := f.state(path) + if err != nil { + return err + } + if current == state { + return nil + } + time.Sleep(1 * time.Millisecond) + } +} diff --git a/vendor/github.com/containerd/cgroups/hierarchy.go b/vendor/github.com/containerd/cgroups/hierarchy.go new file mode 100644 index 000000000..9221bf3f1 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/hierarchy.go @@ -0,0 +1,20 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +// Hierarchy enableds both unified and split hierarchy for cgroups +type Hierarchy func() ([]Subsystem, error) diff --git a/vendor/github.com/containerd/cgroups/hugetlb.go b/vendor/github.com/containerd/cgroups/hugetlb.go new file mode 100644 index 000000000..3718706d7 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/hugetlb.go @@ -0,0 +1,109 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewHugetlb(root string) (*hugetlbController, error) { + sizes, err := hugePageSizes() + if err != nil { + return nil, err + } + + return &hugetlbController{ + root: filepath.Join(root, string(Hugetlb)), + sizes: sizes, + }, nil +} + +type hugetlbController struct { + root string + sizes []string +} + +func (h *hugetlbController) Name() Name { + return Hugetlb +} + +func (h *hugetlbController) Path(path string) string { + return filepath.Join(h.root, path) +} + +func (h *hugetlbController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(h.Path(path), defaultDirPerm); err != nil { + return err + } + for _, limit := range resources.HugepageLimits { + if err := ioutil.WriteFile( + filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", limit.Pagesize, "limit_in_bytes"}, ".")), + []byte(strconv.FormatUint(limit.Limit, 10)), + defaultFilePerm, + ); err != nil { + return err + } + } + return nil +} + +func (h *hugetlbController) Stat(path string, stats *Metrics) error { + for _, size := range h.sizes { + s, err := h.readSizeStat(path, size) + if err != nil { + return err + } + stats.Hugetlb = append(stats.Hugetlb, s) + } + return nil +} + +func (h *hugetlbController) readSizeStat(path, size string) (*HugetlbStat, error) { + s := HugetlbStat{ + Pagesize: size, + } + for _, t := range []struct { + name string + value *uint64 + }{ + { + name: "usage_in_bytes", + value: &s.Usage, + }, + { + name: "max_usage_in_bytes", + value: &s.Max, + }, + { + name: "failcnt", + value: &s.Failcnt, + }, + } { + v, err := readUint(filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", size, t.name}, "."))) + if err != nil { + return nil, err + } + *t.value = v + } + return &s, nil +} diff --git a/vendor/github.com/containerd/cgroups/memory.go b/vendor/github.com/containerd/cgroups/memory.go new file mode 100644 index 
000000000..ce15ca2b9 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/memory.go @@ -0,0 +1,325 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + + "golang.org/x/sys/unix" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewMemory(root string) *memoryController { + return &memoryController{ + root: filepath.Join(root, string(Memory)), + } +} + +type memoryController struct { + root string +} + +func (m *memoryController) Name() Name { + return Memory +} + +func (m *memoryController) Path(path string) string { + return filepath.Join(m.root, path) +} + +func (m *memoryController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(m.Path(path), defaultDirPerm); err != nil { + return err + } + if resources.Memory == nil { + return nil + } + if resources.Memory.Kernel != nil { + // Check if kernel memory is enabled + // We have to limit the kernel memory here as it won't be accounted at all + // until a limit is set on the cgroup and limit cannot be set once the + // cgroup has children, or if there are already tasks in the cgroup. + for _, i := range []int64{1, -1} { + if err := ioutil.WriteFile( + filepath.Join(m.Path(path), "memory.kmem.limit_in_bytes"), + []byte(strconv.FormatInt(i, 10)), + defaultFilePerm, + ); err != nil { + return checkEBUSY(err) + } + } + } + return m.set(path, getMemorySettings(resources)) +} + +func (m *memoryController) Update(path string, resources *specs.LinuxResources) error { + if resources.Memory == nil { + return nil + } + g := func(v *int64) bool { + return v != nil && *v > 0 + } + settings := getMemorySettings(resources) + if g(resources.Memory.Limit) && g(resources.Memory.Swap) { + // if the updated swap value is larger than the current memory limit set the swap changes first + // then set the memory limit as swap must always be larger than the current limit + current, err := readUint(filepath.Join(m.Path(path), "memory.limit_in_bytes")) + if err != nil { + return err + } + if current < uint64(*resources.Memory.Swap) { + settings[0], settings[1] = settings[1], settings[0] + } + } + return m.set(path, settings) +} + +func (m *memoryController) Stat(path string, stats *Metrics) error { + f, err := os.Open(filepath.Join(m.Path(path), "memory.stat")) + if err != nil { + return err + } + defer f.Close() + stats.Memory = &MemoryStat{ + Usage: &MemoryEntry{}, + Swap: &MemoryEntry{}, + Kernel: &MemoryEntry{}, + KernelTCP: &MemoryEntry{}, + } + if err := m.parseStats(f, stats.Memory); err != nil { + return err + } + for _, t := range []struct { + module string + entry *MemoryEntry + }{ + { + module: "", + entry: stats.Memory.Usage, + }, + { + module: "memsw", + entry: stats.Memory.Swap, + }, + { + module: "kmem", + entry: stats.Memory.Kernel, + }, + { + module: "kmem.tcp", + entry: stats.Memory.KernelTCP, + }, + } { + for _, tt := range []struct 
{ + name string + value *uint64 + }{ + { + name: "usage_in_bytes", + value: &t.entry.Usage, + }, + { + name: "max_usage_in_bytes", + value: &t.entry.Max, + }, + { + name: "failcnt", + value: &t.entry.Failcnt, + }, + { + name: "limit_in_bytes", + value: &t.entry.Limit, + }, + } { + parts := []string{"memory"} + if t.module != "" { + parts = append(parts, t.module) + } + parts = append(parts, tt.name) + v, err := readUint(filepath.Join(m.Path(path), strings.Join(parts, "."))) + if err != nil { + return err + } + *tt.value = v + } + } + return nil +} + +func (m *memoryController) OOMEventFD(path string) (uintptr, error) { + root := m.Path(path) + f, err := os.Open(filepath.Join(root, "memory.oom_control")) + if err != nil { + return 0, err + } + defer f.Close() + fd, _, serr := unix.RawSyscall(unix.SYS_EVENTFD2, 0, unix.EFD_CLOEXEC, 0) + if serr != 0 { + return 0, serr + } + if err := writeEventFD(root, f.Fd(), fd); err != nil { + unix.Close(int(fd)) + return 0, err + } + return fd, nil +} + +func writeEventFD(root string, cfd, efd uintptr) error { + f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0) + if err != nil { + return err + } + _, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd)) + f.Close() + return err +} + +func (m *memoryController) parseStats(r io.Reader, stat *MemoryStat) error { + var ( + raw = make(map[string]uint64) + sc = bufio.NewScanner(r) + line int + ) + for sc.Scan() { + if err := sc.Err(); err != nil { + return err + } + key, v, err := parseKV(sc.Text()) + if err != nil { + return fmt.Errorf("%d: %v", line, err) + } + raw[key] = v + line++ + } + stat.Cache = raw["cache"] + stat.RSS = raw["rss"] + stat.RSSHuge = raw["rss_huge"] + stat.MappedFile = raw["mapped_file"] + stat.Dirty = raw["dirty"] + stat.Writeback = raw["writeback"] + stat.PgPgIn = raw["pgpgin"] + stat.PgPgOut = raw["pgpgout"] + stat.PgFault = raw["pgfault"] + stat.PgMajFault = raw["pgmajfault"] + stat.InactiveAnon = raw["inactive_anon"] + stat.ActiveAnon = raw["active_anon"] + stat.InactiveFile = raw["inactive_file"] + stat.ActiveFile = raw["active_file"] + stat.Unevictable = raw["unevictable"] + stat.HierarchicalMemoryLimit = raw["hierarchical_memory_limit"] + stat.HierarchicalSwapLimit = raw["hierarchical_memsw_limit"] + stat.TotalCache = raw["total_cache"] + stat.TotalRSS = raw["total_rss"] + stat.TotalRSSHuge = raw["total_rss_huge"] + stat.TotalMappedFile = raw["total_mapped_file"] + stat.TotalDirty = raw["total_dirty"] + stat.TotalWriteback = raw["total_writeback"] + stat.TotalPgPgIn = raw["total_pgpgin"] + stat.TotalPgPgOut = raw["total_pgpgout"] + stat.TotalPgFault = raw["total_pgfault"] + stat.TotalPgMajFault = raw["total_pgmajfault"] + stat.TotalInactiveAnon = raw["total_inactive_anon"] + stat.TotalActiveAnon = raw["total_active_anon"] + stat.TotalInactiveFile = raw["total_inactive_file"] + stat.TotalActiveFile = raw["total_active_file"] + stat.TotalUnevictable = raw["total_unevictable"] + return nil +} + +func (m *memoryController) set(path string, settings []memorySettings) error { + for _, t := range settings { + if t.value != nil { + if err := ioutil.WriteFile( + filepath.Join(m.Path(path), fmt.Sprintf("memory.%s", t.name)), + []byte(strconv.FormatInt(*t.value, 10)), + defaultFilePerm, + ); err != nil { + return err + } + } + } + return nil +} + +type memorySettings struct { + name string + value *int64 +} + +func getMemorySettings(resources *specs.LinuxResources) []memorySettings { + mem := resources.Memory + var swappiness *int64 + if mem.Swappiness != nil 
{ + v := int64(*mem.Swappiness) + swappiness = &v + } + return []memorySettings{ + { + name: "limit_in_bytes", + value: mem.Limit, + }, + { + name: "memsw.limit_in_bytes", + value: mem.Swap, + }, + { + name: "kmem.limit_in_bytes", + value: mem.Kernel, + }, + { + name: "kmem.tcp.limit_in_bytes", + value: mem.KernelTCP, + }, + { + name: "oom_control", + value: getOomControlValue(mem), + }, + { + name: "swappiness", + value: swappiness, + }, + } +} + +func checkEBUSY(err error) error { + if pathErr, ok := err.(*os.PathError); ok { + if errNo, ok := pathErr.Err.(syscall.Errno); ok { + if errNo == unix.EBUSY { + return fmt.Errorf( + "failed to set memory.kmem.limit_in_bytes, because either tasks have already joined this cgroup or it has children") + } + } + } + return err +} + +func getOomControlValue(mem *specs.LinuxMemory) *int64 { + if mem.DisableOOMKiller != nil && *mem.DisableOOMKiller { + i := int64(1) + return &i + } + return nil +} diff --git a/vendor/github.com/containerd/cgroups/metrics.pb.go b/vendor/github.com/containerd/cgroups/metrics.pb.go new file mode 100644 index 000000000..6043a8f7d --- /dev/null +++ b/vendor/github.com/containerd/cgroups/metrics.pb.go @@ -0,0 +1,4288 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/containerd/cgroups/metrics.proto +// DO NOT EDIT! + +/* + Package cgroups is a generated protocol buffer package. + + It is generated from these files: + github.com/containerd/cgroups/metrics.proto + + It has these top-level messages: + Metrics + HugetlbStat + PidsStat + CPUStat + CPUUsage + Throttle + MemoryStat + MemoryEntry + BlkIOStat + BlkIOEntry + RdmaStat + RdmaEntry +*/ +package cgroups + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Metrics struct { + Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb" json:"hugetlb,omitempty"` + Pids *PidsStat `protobuf:"bytes,2,opt,name=pids" json:"pids,omitempty"` + CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu" json:"cpu,omitempty"` + Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory" json:"memory,omitempty"` + Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio" json:"blkio,omitempty"` + Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma" json:"rdma,omitempty"` +} + +func (m *Metrics) Reset() { *m = Metrics{} } +func (*Metrics) ProtoMessage() {} +func (*Metrics) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{0} } + +type HugetlbStat struct { + Usage uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"` + Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"` +} + +func (m *HugetlbStat) Reset() { *m = HugetlbStat{} } +func (*HugetlbStat) ProtoMessage() {} +func (*HugetlbStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{1} } + +type PidsStat struct { + Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"` + Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (m *PidsStat) Reset() { *m = PidsStat{} } +func (*PidsStat) ProtoMessage() {} +func (*PidsStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{2} } + +type CPUStat struct { + Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage" json:"usage,omitempty"` + Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling" json:"throttling,omitempty"` +} + +func (m *CPUStat) Reset() { *m = CPUStat{} } +func (*CPUStat) ProtoMessage() {} +func (*CPUStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{3} } + +type CPUUsage struct { + // values in nanoseconds + Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"` + User uint64 `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"` + PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu" json:"per_cpu,omitempty"` +} + +func (m *CPUUsage) Reset() { *m = CPUUsage{} } +func (*CPUUsage) ProtoMessage() {} +func (*CPUUsage) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{4} } + +type Throttle struct { + Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"` +} + +func (m *Throttle) Reset() { *m = Throttle{} } +func (*Throttle) ProtoMessage() {} +func (*Throttle) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{5} } + +type MemoryStat struct { + Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"` + RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"` + RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"` + MappedFile uint64 
`protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"` + Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"` + Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"` + PgPgIn uint64 `protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"` + PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"` + PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"` + PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"` + InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"` + ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"` + InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"` + ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"` + Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"` + HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"` + HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"` + TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"` + TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"` + TotalRSSHuge uint64 `protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"` + TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"` + TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"` + TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"` + TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"` + TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"` + TotalPgFault uint64 `protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"` + TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"` + TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"` + TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"` + TotalInactiveFile uint64 `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"` + TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"` + TotalUnevictable uint64 `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" 
json:"total_unevictable,omitempty"` + Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage" json:"usage,omitempty"` + Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap" json:"swap,omitempty"` + Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel" json:"kernel,omitempty"` + KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp" json:"kernel_tcp,omitempty"` +} + +func (m *MemoryStat) Reset() { *m = MemoryStat{} } +func (*MemoryStat) ProtoMessage() {} +func (*MemoryStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{6} } + +type MemoryEntry struct { + Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` + Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"` +} + +func (m *MemoryEntry) Reset() { *m = MemoryEntry{} } +func (*MemoryEntry) ProtoMessage() {} +func (*MemoryEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{7} } + +type BlkIOStat struct { + IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive" json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive" json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive" json:"io_queued_recursive,omitempty"` + IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive" json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive" json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive" json:"io_merged_recursive,omitempty"` + IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive" json:"io_time_recursive,omitempty"` + SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive" json:"sectors_recursive,omitempty"` +} + +func (m *BlkIOStat) Reset() { *m = BlkIOStat{} } +func (*BlkIOStat) ProtoMessage() {} +func (*BlkIOStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{8} } + +type BlkIOEntry struct { + Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"` + Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"` + Major uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"` + Minor uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"` + Value uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *BlkIOEntry) Reset() { *m = BlkIOEntry{} } +func (*BlkIOEntry) ProtoMessage() {} +func (*BlkIOEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{9} } + +type RdmaStat struct { + Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current" json:"current,omitempty"` + Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit" json:"limit,omitempty"` +} + +func (m *RdmaStat) Reset() { *m = RdmaStat{} } +func (*RdmaStat) ProtoMessage() {} +func (*RdmaStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{10} } + +type RdmaEntry struct { 
+ Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` + HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"` + HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"` +} + +func (m *RdmaEntry) Reset() { *m = RdmaEntry{} } +func (*RdmaEntry) ProtoMessage() {} +func (*RdmaEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{11} } + +func init() { + proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics") + proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat") + proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v1.PidsStat") + proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v1.CPUStat") + proto.RegisterType((*CPUUsage)(nil), "io.containerd.cgroups.v1.CPUUsage") + proto.RegisterType((*Throttle)(nil), "io.containerd.cgroups.v1.Throttle") + proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v1.MemoryStat") + proto.RegisterType((*MemoryEntry)(nil), "io.containerd.cgroups.v1.MemoryEntry") + proto.RegisterType((*BlkIOStat)(nil), "io.containerd.cgroups.v1.BlkIOStat") + proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry") + proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat") + proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry") +} +func (m *Metrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hugetlb) > 0 { + for _, msg := range m.Hugetlb { + dAtA[i] = 0xa + i++ + i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Pids != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Pids.Size())) + n1, err := m.Pids.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.CPU != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.CPU.Size())) + n2, err := m.CPU.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.Memory != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Memory.Size())) + n3, err := m.Memory.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Blkio != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Blkio.Size())) + n4, err := m.Blkio.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Rdma != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Rdma.Size())) + n5, err := m.Rdma.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *HugetlbStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HugetlbStat) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Usage != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + } + if m.Max != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) + } + if m.Failcnt != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintMetrics(dAtA, i, 
uint64(m.Failcnt)) + } + if len(m.Pagesize) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize))) + i += copy(dAtA[i:], m.Pagesize) + } + return i, nil +} + +func (m *PidsStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Current != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Current)) + } + if m.Limit != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) + } + return i, nil +} + +func (m *CPUStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Usage != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size())) + n5, err := m.Usage.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.Throttling != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Throttling.Size())) + n6, err := m.Throttling.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *CPUUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Total != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Total)) + } + if m.Kernel != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel)) + } + if m.User != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.User)) + } + if len(m.PerCPU) > 0 { + dAtA8 := make([]byte, len(m.PerCPU)*10) + var j7 int + for _, num := range m.PerCPU { + for num >= 1<<7 { + dAtA8[j7] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j7++ + } + dAtA8[j7] = uint8(num) + j7++ + } + dAtA[i] = 0x22 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(j7)) + i += copy(dAtA[i:], dAtA8[:j7]) + } + return i, nil +} + +func (m *Throttle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Throttle) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Periods != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Periods)) + } + if m.ThrottledPeriods != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods)) + } + if m.ThrottledTime != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledTime)) + } + return i, nil +} + +func (m *MemoryStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Cache != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Cache)) + } + if m.RSS != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.RSS)) 
+ } + if m.RSSHuge != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge)) + } + if m.MappedFile != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile)) + } + if m.Dirty != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty)) + } + if m.Writeback != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback)) + } + if m.PgPgIn != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn)) + } + if m.PgPgOut != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut)) + } + if m.PgFault != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault)) + } + if m.PgMajFault != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault)) + } + if m.InactiveAnon != 0 { + dAtA[i] = 0x58 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon)) + } + if m.ActiveAnon != 0 { + dAtA[i] = 0x60 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon)) + } + if m.InactiveFile != 0 { + dAtA[i] = 0x68 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile)) + } + if m.ActiveFile != 0 { + dAtA[i] = 0x70 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile)) + } + if m.Unevictable != 0 { + dAtA[i] = 0x78 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable)) + } + if m.HierarchicalMemoryLimit != 0 { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit)) + } + if m.HierarchicalSwapLimit != 0 { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit)) + } + if m.TotalCache != 0 { + dAtA[i] = 0x90 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache)) + } + if m.TotalRSS != 0 { + dAtA[i] = 0x98 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS)) + } + if m.TotalRSSHuge != 0 { + dAtA[i] = 0xa0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge)) + } + if m.TotalMappedFile != 0 { + dAtA[i] = 0xa8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalMappedFile)) + } + if m.TotalDirty != 0 { + dAtA[i] = 0xb0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty)) + } + if m.TotalWriteback != 0 { + dAtA[i] = 0xb8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback)) + } + if m.TotalPgPgIn != 0 { + dAtA[i] = 0xc0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgIn)) + } + if m.TotalPgPgOut != 0 { + dAtA[i] = 0xc8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut)) + } + if m.TotalPgFault != 0 { + dAtA[i] = 0xd0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault)) + } + if m.TotalPgMajFault != 0 { + dAtA[i] = 0xd8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault)) + } + if m.TotalInactiveAnon != 0 { + dAtA[i] = 0xe0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveAnon)) + } + if m.TotalActiveAnon != 0 { + dAtA[i] = 0xe8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon)) + } + if m.TotalInactiveFile != 0 { + dAtA[i] = 0xf0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile)) + } + if m.TotalActiveFile != 0 { + dAtA[i] = 0xf8 + i++ + dAtA[i] = 0x1 + 
i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile)) + } + if m.TotalUnevictable != 0 { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable)) + } + if m.Usage != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size())) + n9, err := m.Usage.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.Swap != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Swap.Size())) + n10, err := m.Swap.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.Kernel != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel.Size())) + n11, err := m.Kernel.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.KernelTCP != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.KernelTCP.Size())) + n12, err := m.KernelTCP.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *MemoryEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + + if m.Limit != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) + } + if m.Usage != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + } + if m.Max != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) + } + if m.Failcnt != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) + } + return i, nil +} + +func (m *BlkIOStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlkIOStat) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.IoServiceBytesRecursive) > 0 { + for _, msg := range m.IoServiceBytesRecursive { + dAtA[i] = 0xa + i++ + i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.IoServicedRecursive) > 0 { + for _, msg := range m.IoServicedRecursive { + dAtA[i] = 0x12 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.IoQueuedRecursive) > 0 { + for _, msg := range m.IoQueuedRecursive { + dAtA[i] = 0x1a + i++ + i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.IoServiceTimeRecursive) > 0 { + for _, msg := range m.IoServiceTimeRecursive { + dAtA[i] = 0x22 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.IoWaitTimeRecursive) > 0 { + for _, msg := range m.IoWaitTimeRecursive { + dAtA[i] = 0x2a + i++ + i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.IoMergedRecursive) > 0 { + for _, msg := range m.IoMergedRecursive { + dAtA[i] = 0x32 + i++ + i = 
encodeVarintMetrics(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.IoTimeRecursive) > 0 { + for _, msg := range m.IoTimeRecursive { + dAtA[i] = 0x3a + i++ + i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.SectorsRecursive) > 0 { + for _, msg := range m.SectorsRecursive { + dAtA[i] = 0x42 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlkIOEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Op) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op))) + i += copy(dAtA[i:], m.Op) + } + if len(m.Device) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) + i += copy(dAtA[i:], m.Device) + } + if m.Major != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Major)) + } + if m.Minor != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Minor)) + } + if m.Value != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *RdmaStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RdmaStat) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Current) > 0 { + for _, msg := range m.Current { + dAtA[i] = 0xa + i++ + i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Limit) > 0 { + for _, msg := range m.Limit { + dAtA[i] = 0x12 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RdmaEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Device) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) + i += copy(dAtA[i:], m.Device) + } + if m.HcaHandles != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.HcaHandles)) + } + if m.HcaObjects != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.HcaObjects)) + } + return i, nil +} + +func encodeFixed64Metrics(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Metrics(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + 
return offset + 4 +} +func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Metrics) Size() (n int) { + var l int + _ = l + if len(m.Hugetlb) > 0 { + for _, e := range m.Hugetlb { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Pids != nil { + l = m.Pids.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.CPU != nil { + l = m.CPU.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Memory != nil { + l = m.Memory.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Blkio != nil { + l = m.Blkio.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Rdma != nil { + l = m.Rdma.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} + +func (m *HugetlbStat) Size() (n int) { + var l int + _ = l + if m.Usage != 0 { + n += 1 + sovMetrics(uint64(m.Usage)) + } + if m.Max != 0 { + n += 1 + sovMetrics(uint64(m.Max)) + } + if m.Failcnt != 0 { + n += 1 + sovMetrics(uint64(m.Failcnt)) + } + l = len(m.Pagesize) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} + +func (m *PidsStat) Size() (n int) { + var l int + _ = l + if m.Current != 0 { + n += 1 + sovMetrics(uint64(m.Current)) + } + if m.Limit != 0 { + n += 1 + sovMetrics(uint64(m.Limit)) + } + return n +} + +func (m *CPUStat) Size() (n int) { + var l int + _ = l + if m.Usage != nil { + l = m.Usage.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Throttling != nil { + l = m.Throttling.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} + +func (m *CPUUsage) Size() (n int) { + var l int + _ = l + if m.Total != 0 { + n += 1 + sovMetrics(uint64(m.Total)) + } + if m.Kernel != 0 { + n += 1 + sovMetrics(uint64(m.Kernel)) + } + if m.User != 0 { + n += 1 + sovMetrics(uint64(m.User)) + } + if len(m.PerCPU) > 0 { + l = 0 + for _, e := range m.PerCPU { + l += sovMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + return n +} + +func (m *Throttle) Size() (n int) { + var l int + _ = l + if m.Periods != 0 { + n += 1 + sovMetrics(uint64(m.Periods)) + } + if m.ThrottledPeriods != 0 { + n += 1 + sovMetrics(uint64(m.ThrottledPeriods)) + } + if m.ThrottledTime != 0 { + n += 1 + sovMetrics(uint64(m.ThrottledTime)) + } + return n +} + +func (m *MemoryStat) Size() (n int) { + var l int + _ = l + if m.Cache != 0 { + n += 1 + sovMetrics(uint64(m.Cache)) + } + if m.RSS != 0 { + n += 1 + sovMetrics(uint64(m.RSS)) + } + if m.RSSHuge != 0 { + n += 1 + sovMetrics(uint64(m.RSSHuge)) + } + if m.MappedFile != 0 { + n += 1 + sovMetrics(uint64(m.MappedFile)) + } + if m.Dirty != 0 { + n += 1 + sovMetrics(uint64(m.Dirty)) + } + if m.Writeback != 0 { + n += 1 + sovMetrics(uint64(m.Writeback)) + } + if m.PgPgIn != 0 { + n += 1 + sovMetrics(uint64(m.PgPgIn)) + } + if m.PgPgOut != 0 { + n += 1 + sovMetrics(uint64(m.PgPgOut)) + } + if m.PgFault != 0 { + n += 1 + sovMetrics(uint64(m.PgFault)) + } + if m.PgMajFault != 0 { + n += 1 + sovMetrics(uint64(m.PgMajFault)) + } + if m.InactiveAnon != 0 { + n += 1 + sovMetrics(uint64(m.InactiveAnon)) + } + if m.ActiveAnon != 0 { + n += 1 + sovMetrics(uint64(m.ActiveAnon)) + } + if m.InactiveFile != 0 { + n += 1 + sovMetrics(uint64(m.InactiveFile)) + } + if m.ActiveFile != 0 { + n += 1 + sovMetrics(uint64(m.ActiveFile)) + } + if m.Unevictable != 0 { + n += 1 + sovMetrics(uint64(m.Unevictable)) + } + if m.HierarchicalMemoryLimit != 0 { + n += 2 + sovMetrics(uint64(m.HierarchicalMemoryLimit)) + } + if m.HierarchicalSwapLimit != 0 
{ + n += 2 + sovMetrics(uint64(m.HierarchicalSwapLimit)) + } + if m.TotalCache != 0 { + n += 2 + sovMetrics(uint64(m.TotalCache)) + } + if m.TotalRSS != 0 { + n += 2 + sovMetrics(uint64(m.TotalRSS)) + } + if m.TotalRSSHuge != 0 { + n += 2 + sovMetrics(uint64(m.TotalRSSHuge)) + } + if m.TotalMappedFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalMappedFile)) + } + if m.TotalDirty != 0 { + n += 2 + sovMetrics(uint64(m.TotalDirty)) + } + if m.TotalWriteback != 0 { + n += 2 + sovMetrics(uint64(m.TotalWriteback)) + } + if m.TotalPgPgIn != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgPgIn)) + } + if m.TotalPgPgOut != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgPgOut)) + } + if m.TotalPgFault != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgFault)) + } + if m.TotalPgMajFault != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgMajFault)) + } + if m.TotalInactiveAnon != 0 { + n += 2 + sovMetrics(uint64(m.TotalInactiveAnon)) + } + if m.TotalActiveAnon != 0 { + n += 2 + sovMetrics(uint64(m.TotalActiveAnon)) + } + if m.TotalInactiveFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalInactiveFile)) + } + if m.TotalActiveFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalActiveFile)) + } + if m.TotalUnevictable != 0 { + n += 2 + sovMetrics(uint64(m.TotalUnevictable)) + } + if m.Usage != nil { + l = m.Usage.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.Swap != nil { + l = m.Swap.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.Kernel != nil { + l = m.Kernel.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.KernelTCP != nil { + l = m.KernelTCP.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + return n +} + +func (m *MemoryEntry) Size() (n int) { + var l int + _ = l + if m.Limit != 0 { + n += 1 + sovMetrics(uint64(m.Limit)) + } + if m.Usage != 0 { + n += 1 + sovMetrics(uint64(m.Usage)) + } + if m.Max != 0 { + n += 1 + sovMetrics(uint64(m.Max)) + } + if m.Failcnt != 0 { + n += 1 + sovMetrics(uint64(m.Failcnt)) + } + return n +} + +func (m *BlkIOStat) Size() (n int) { + var l int + _ = l + if len(m.IoServiceBytesRecursive) > 0 { + for _, e := range m.IoServiceBytesRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoServicedRecursive) > 0 { + for _, e := range m.IoServicedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoQueuedRecursive) > 0 { + for _, e := range m.IoQueuedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoServiceTimeRecursive) > 0 { + for _, e := range m.IoServiceTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoWaitTimeRecursive) > 0 { + for _, e := range m.IoWaitTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoMergedRecursive) > 0 { + for _, e := range m.IoMergedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoTimeRecursive) > 0 { + for _, e := range m.IoTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.SectorsRecursive) > 0 { + for _, e := range m.SectorsRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *BlkIOEntry) Size() (n int) { + var l int + _ = l + l = len(m.Op) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Device) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Major != 0 { + n += 1 + sovMetrics(uint64(m.Major)) + } + if m.Minor != 0 { + n += 1 + sovMetrics(uint64(m.Minor)) + } + if m.Value != 0 { + n += 1 + sovMetrics(uint64(m.Value)) + } + return n +} + 
+func (m *RdmaStat) Size() (n int) { + var l int + _ = l + if len(m.Current) > 0 { + for _, e := range m.Current { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.Limit) > 0 { + for _, e := range m.Limit { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *RdmaEntry) Size() (n int) { + var l int + _ = l + l = len(m.Device) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.HcaHandles != 0 { + n += 1 + sovMetrics(uint64(m.HcaHandles)) + } + if m.HcaObjects != 0 { + n += 1 + sovMetrics(uint64(m.HcaObjects)) + } + return n +} + +func sovMetrics(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozMetrics(x uint64) (n int) { + return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Metrics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Metrics{`, + `Hugetlb:` + strings.Replace(fmt.Sprintf("%v", this.Hugetlb), "HugetlbStat", "HugetlbStat", 1) + `,`, + `Pids:` + strings.Replace(fmt.Sprintf("%v", this.Pids), "PidsStat", "PidsStat", 1) + `,`, + `CPU:` + strings.Replace(fmt.Sprintf("%v", this.CPU), "CPUStat", "CPUStat", 1) + `,`, + `Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "MemoryStat", "MemoryStat", 1) + `,`, + `Blkio:` + strings.Replace(fmt.Sprintf("%v", this.Blkio), "BlkIOStat", "BlkIOStat", 1) + `,`, + `Rdma:` + strings.Replace(fmt.Sprintf("%v", this.Rdma), "RdmaStat", "RdmaStat", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HugetlbStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HugetlbStat{`, + `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, + `Pagesize:` + fmt.Sprintf("%v", this.Pagesize) + `,`, + `}`, + }, "") + return s +} +func (this *PidsStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PidsStat{`, + `Current:` + fmt.Sprintf("%v", this.Current) + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `}`, + }, "") + return s +} +func (this *CPUStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CPUStat{`, + `Usage:` + strings.Replace(fmt.Sprintf("%v", this.Usage), "CPUUsage", "CPUUsage", 1) + `,`, + `Throttling:` + strings.Replace(fmt.Sprintf("%v", this.Throttling), "Throttle", "Throttle", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CPUUsage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CPUUsage{`, + `Total:` + fmt.Sprintf("%v", this.Total) + `,`, + `Kernel:` + fmt.Sprintf("%v", this.Kernel) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `PerCPU:` + fmt.Sprintf("%v", this.PerCPU) + `,`, + `}`, + }, "") + return s +} +func (this *Throttle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Throttle{`, + `Periods:` + fmt.Sprintf("%v", this.Periods) + `,`, + `ThrottledPeriods:` + fmt.Sprintf("%v", this.ThrottledPeriods) + `,`, + `ThrottledTime:` + fmt.Sprintf("%v", this.ThrottledTime) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryStat{`, + `Cache:` + fmt.Sprintf("%v", this.Cache) + `,`, + `RSS:` + fmt.Sprintf("%v", this.RSS) + `,`, + `RSSHuge:` + fmt.Sprintf("%v", this.RSSHuge) + `,`, + `MappedFile:` + fmt.Sprintf("%v", this.MappedFile) + `,`, + `Dirty:` + 
fmt.Sprintf("%v", this.Dirty) + `,`, + `Writeback:` + fmt.Sprintf("%v", this.Writeback) + `,`, + `PgPgIn:` + fmt.Sprintf("%v", this.PgPgIn) + `,`, + `PgPgOut:` + fmt.Sprintf("%v", this.PgPgOut) + `,`, + `PgFault:` + fmt.Sprintf("%v", this.PgFault) + `,`, + `PgMajFault:` + fmt.Sprintf("%v", this.PgMajFault) + `,`, + `InactiveAnon:` + fmt.Sprintf("%v", this.InactiveAnon) + `,`, + `ActiveAnon:` + fmt.Sprintf("%v", this.ActiveAnon) + `,`, + `InactiveFile:` + fmt.Sprintf("%v", this.InactiveFile) + `,`, + `ActiveFile:` + fmt.Sprintf("%v", this.ActiveFile) + `,`, + `Unevictable:` + fmt.Sprintf("%v", this.Unevictable) + `,`, + `HierarchicalMemoryLimit:` + fmt.Sprintf("%v", this.HierarchicalMemoryLimit) + `,`, + `HierarchicalSwapLimit:` + fmt.Sprintf("%v", this.HierarchicalSwapLimit) + `,`, + `TotalCache:` + fmt.Sprintf("%v", this.TotalCache) + `,`, + `TotalRSS:` + fmt.Sprintf("%v", this.TotalRSS) + `,`, + `TotalRSSHuge:` + fmt.Sprintf("%v", this.TotalRSSHuge) + `,`, + `TotalMappedFile:` + fmt.Sprintf("%v", this.TotalMappedFile) + `,`, + `TotalDirty:` + fmt.Sprintf("%v", this.TotalDirty) + `,`, + `TotalWriteback:` + fmt.Sprintf("%v", this.TotalWriteback) + `,`, + `TotalPgPgIn:` + fmt.Sprintf("%v", this.TotalPgPgIn) + `,`, + `TotalPgPgOut:` + fmt.Sprintf("%v", this.TotalPgPgOut) + `,`, + `TotalPgFault:` + fmt.Sprintf("%v", this.TotalPgFault) + `,`, + `TotalPgMajFault:` + fmt.Sprintf("%v", this.TotalPgMajFault) + `,`, + `TotalInactiveAnon:` + fmt.Sprintf("%v", this.TotalInactiveAnon) + `,`, + `TotalActiveAnon:` + fmt.Sprintf("%v", this.TotalActiveAnon) + `,`, + `TotalInactiveFile:` + fmt.Sprintf("%v", this.TotalInactiveFile) + `,`, + `TotalActiveFile:` + fmt.Sprintf("%v", this.TotalActiveFile) + `,`, + `TotalUnevictable:` + fmt.Sprintf("%v", this.TotalUnevictable) + `,`, + `Usage:` + strings.Replace(fmt.Sprintf("%v", this.Usage), "MemoryEntry", "MemoryEntry", 1) + `,`, + `Swap:` + strings.Replace(fmt.Sprintf("%v", this.Swap), "MemoryEntry", "MemoryEntry", 1) + `,`, + `Kernel:` + strings.Replace(fmt.Sprintf("%v", this.Kernel), "MemoryEntry", "MemoryEntry", 1) + `,`, + `KernelTCP:` + strings.Replace(fmt.Sprintf("%v", this.KernelTCP), "MemoryEntry", "MemoryEntry", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryEntry{`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, + `}`, + }, "") + return s +} +func (this *BlkIOStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BlkIOStat{`, + `IoServiceBytesRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServiceBytesRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, + `IoServicedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServicedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, + `IoQueuedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoQueuedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, + `IoServiceTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServiceTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, + `IoWaitTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoWaitTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, + `IoMergedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoMergedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, + `IoTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoTimeRecursive), 
"BlkIOEntry", "BlkIOEntry", 1) + `,`, + `SectorsRecursive:` + strings.Replace(fmt.Sprintf("%v", this.SectorsRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BlkIOEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BlkIOEntry{`, + `Op:` + fmt.Sprintf("%v", this.Op) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `Major:` + fmt.Sprintf("%v", this.Major) + `,`, + `Minor:` + fmt.Sprintf("%v", this.Minor) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *RdmaStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RdmaStat{`, + `Current:` + strings.Replace(fmt.Sprintf("%v", this.Current), "RdmaEntry", "RdmaEntry", 1) + `,`, + `Limit:` + strings.Replace(fmt.Sprintf("%v", this.Limit), "RdmaEntry", "RdmaEntry", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RdmaEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RdmaEntry{`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `HcaHandles:` + fmt.Sprintf("%v", this.HcaHandles) + `,`, + `HcaObjects:` + fmt.Sprintf("%v", this.HcaObjects) + `,`, + `}`, + }, "") + return s +} +func valueToStringMetrics(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Metrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hugetlb = append(m.Hugetlb, &HugetlbStat{}) + if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pids == nil { + m.Pids = &PidsStat{} + } + if err := m.Pids.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
CPU", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CPU == nil { + m.CPU = &CPUStat{} + } + if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory = &MemoryStat{} + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blkio", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Blkio == nil { + m.Blkio = &BlkIOStat{} + } + if err := m.Blkio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rdma", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Rdma == nil { + m.Rdma = &RdmaStat{} + } + if err := m.Rdma.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HugetlbStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HugetlbStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HugetlbStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { 
+ case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + m.Usage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Usage |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) + } + m.Failcnt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failcnt |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pagesize = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PidsStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PidsStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PidsStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + m.Current = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Current |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CPUStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPUStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPUStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = &CPUUsage{} + } + if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Throttling == nil { + m.Throttling = &Throttle{} + } + if err := m.Throttling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CPUUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPUUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPUUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.Total |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) + } + m.Kernel = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kernel |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + m.User = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.User |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PerCPU = append(m.PerCPU, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PerCPU = append(m.PerCPU, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PerCPU", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Throttle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Throttle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Throttle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Periods", wireType) + } + m.Periods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Periods |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledPeriods", wireType) + } + m.ThrottledPeriods = 0 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ThrottledPeriods |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledTime", wireType) + } + m.ThrottledTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ThrottledTime |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) + } + m.Cache = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cache |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RSS", wireType) + } + m.RSS = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RSS |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RSSHuge", wireType) + } + m.RSSHuge = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RSSHuge |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MappedFile", wireType) + } + m.MappedFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MappedFile |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Dirty", wireType) + } + m.Dirty = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Dirty |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = 
%d for field Writeback", wireType) + } + m.Writeback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Writeback |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgPgIn", wireType) + } + m.PgPgIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgPgIn |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgPgOut", wireType) + } + m.PgPgOut = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgPgOut |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgFault", wireType) + } + m.PgFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgFault |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgMajFault", wireType) + } + m.PgMajFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgMajFault |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactiveAnon", wireType) + } + m.InactiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactiveAnon |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveAnon", wireType) + } + m.ActiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActiveAnon |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactiveFile", wireType) + } + m.InactiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactiveFile |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveFile", wireType) + } + m.ActiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActiveFile |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unevictable", wireType) + } + m.Unevictable = 0 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Unevictable |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalMemoryLimit", wireType) + } + m.HierarchicalMemoryLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HierarchicalMemoryLimit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalSwapLimit", wireType) + } + m.HierarchicalSwapLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HierarchicalSwapLimit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalCache", wireType) + } + m.TotalCache = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalCache |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalRSS", wireType) + } + m.TotalRSS = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalRSS |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalRSSHuge", wireType) + } + m.TotalRSSHuge = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalRSSHuge |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalMappedFile", wireType) + } + m.TotalMappedFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalMappedFile |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalDirty", wireType) + } + m.TotalDirty = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalDirty |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 23: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalWriteback", wireType) + } + m.TotalWriteback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalWriteback |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 24: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgIn", wireType) + } + m.TotalPgPgIn = 0 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgPgIn |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgOut", wireType) + } + m.TotalPgPgOut = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgPgOut |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 26: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgFault", wireType) + } + m.TotalPgFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgFault |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgMajFault", wireType) + } + m.TotalPgMajFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgMajFault |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 28: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveAnon", wireType) + } + m.TotalInactiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalInactiveAnon |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 29: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveAnon", wireType) + } + m.TotalActiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalActiveAnon |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveFile", wireType) + } + m.TotalInactiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalInactiveFile |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 31: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveFile", wireType) + } + m.TotalActiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalActiveFile |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 32: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalUnevictable", wireType) + } + m.TotalUnevictable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalUnevictable |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = &MemoryEntry{} + } + if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Swap == nil { + m.Swap = &MemoryEntry{} + } + if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kernel == nil { + m.Kernel = &MemoryEntry{} + } + if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KernelTCP == nil { + m.KernelTCP = &MemoryEntry{} + } + if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + m.Usage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Usage |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) + } + m.Failcnt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failcnt |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlkIOStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlkIOStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlkIOStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServiceBytesRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServiceBytesRecursive = append(m.IoServiceBytesRecursive, &BlkIOEntry{}) + if err := m.IoServiceBytesRecursive[len(m.IoServiceBytesRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServicedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServicedRecursive = append(m.IoServicedRecursive, &BlkIOEntry{}) + if err := m.IoServicedRecursive[len(m.IoServicedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoQueuedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoQueuedRecursive = append(m.IoQueuedRecursive, &BlkIOEntry{}) + if err := m.IoQueuedRecursive[len(m.IoQueuedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServiceTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServiceTimeRecursive = append(m.IoServiceTimeRecursive, &BlkIOEntry{}) + if err := m.IoServiceTimeRecursive[len(m.IoServiceTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoWaitTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoWaitTimeRecursive = append(m.IoWaitTimeRecursive, &BlkIOEntry{}) + if err := m.IoWaitTimeRecursive[len(m.IoWaitTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoMergedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoMergedRecursive = append(m.IoMergedRecursive, &BlkIOEntry{}) + if err := m.IoMergedRecursive[len(m.IoMergedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoTimeRecursive", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoTimeRecursive = append(m.IoTimeRecursive, &BlkIOEntry{}) + if err := m.IoTimeRecursive[len(m.IoTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SectorsRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SectorsRecursive = append(m.SectorsRecursive, &BlkIOEntry{}) + if err := m.SectorsRecursive[len(m.SectorsRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlkIOEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlkIOEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlkIOEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Op = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType) + } + m.Major = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Major |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType) + } + m.Minor = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Minor |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RdmaStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RdmaStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RdmaStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Current = append(m.Current, &RdmaEntry{}) + if err := m.Current[len(m.Current)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Limit = append(m.Limit, &RdmaEntry{}) + if err := m.Limit[len(m.Limit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RdmaEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RdmaEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RdmaEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HcaHandles", wireType) + } + m.HcaHandles = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HcaHandles |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HcaObjects", wireType) + } + m.HcaObjects = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HcaObjects |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skipMetrics(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthMetrics + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMetrics(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/containerd/cgroups/metrics.proto", fileDescriptorMetrics) } + +var fileDescriptorMetrics = []byte{ + // 1325 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0x4d, 0x6f, 0x1b, 0xb7, + 0x16, 0x8d, 0xac, 0xb1, 0x3e, 0xae, 0x6c, 0xc7, 0xa6, 0x13, 0x67, 0xec, 0x97, 0x27, 0x29, 0xb2, + 0xfd, 0x9e, 0x5b, 0x03, 0x32, 0x9a, 0x02, 0x41, 0x93, 0xa6, 0x28, 0x22, 0xb7, 0x41, 0x83, 0xd6, + 0x88, 0x32, 0xb2, 0x91, 0x76, 0x35, 0x18, 0x8d, 0x98, 0x31, 0xe3, 0xd1, 0x70, 0xc2, 0xe1, 0xc8, + 0x71, 0x57, 0xdd, 0xf5, 0x37, 0xf5, 0x1f, 0x64, 0xd9, 0x4d, 0x81, 0x76, 0x63, 0x34, 0xfa, 0x25, + 0x05, 0x2f, 0xe7, 0x4b, 0x49, 0xdc, 0x40, 0xbb, 0xb9, 0xbc, 0xe7, 0x1c, 0x5e, 0x5e, 0x1e, 0x8a, + 0x14, 0xec, 0x7b, 0x4c, 0x9e, 0xc6, 0xc3, 0xae, 0xcb, 0xc7, 0x07, 0x2e, 0x0f, 0xa4, 0xc3, 0x02, + 0x2a, 0x46, 0x07, 0xae, 0x27, 0x78, 0x1c, 0x46, 0x07, 0x63, 0x2a, 0x05, 0x73, 0xa3, 0x6e, 0x28, + 0xb8, 0xe4, 0xc4, 0x64, 0xbc, 0x9b, 0x83, 0xba, 0x09, 0xa8, 0x3b, 0xf9, 0x6c, 0xeb, 0x86, 0xc7, + 0x3d, 0x8e, 0xa0, 0x03, 0xf5, 0xa5, 0xf1, 0x9d, 0xdf, 0x16, 0xa0, 0x7a, 0xa4, 0x15, 0xc8, 0xd7, + 0x50, 0x3d, 0x8d, 0x3d, 0x2a, 0xfd, 0xa1, 0x59, 0x6a, 0x97, 0xf7, 0x1a, 0x77, 0x77, 0xbb, 0x57, + 0xa9, 0x75, 0xbf, 0xd3, 0xc0, 0x81, 0x74, 0xa4, 0x95, 0xb2, 0xc8, 0x3d, 0x30, 0x42, 0x36, 0x8a, + 0xcc, 0x85, 0x76, 0x69, 0xaf, 0x71, 0xb7, 0x73, 0x35, 0xbb, 0xcf, 0x46, 0x11, 0x52, 0x11, 0x4f, + 0x1e, 0x42, 0xd9, 0x0d, 0x63, 0xb3, 0x8c, 0xb4, 0x3b, 0x57, 0xd3, 0x0e, 0xfb, 0x27, 0x8a, 0xd5, + 0xab, 0x4e, 0x2f, 0x5b, 0xe5, 0xc3, 0xfe, 0x89, 0xa5, 0x68, 0xe4, 0x21, 0x54, 0xc6, 0x74, 0xcc, + 0xc5, 0x85, 0x69, 0xa0, 0xc0, 0xce, 0xd5, 0x02, 0x47, 0x88, 0xc3, 0x99, 0x13, 0x0e, 0xb9, 0x0f, + 0x8b, 0x43, 0xff, 0x8c, 0x71, 0x73, 0x11, 0xc9, 0xdb, 0x57, 0x93, 0x7b, 0xfe, 0xd9, 0x93, 0xa7, + 0xc8, 0xd5, 0x8c, 0xce, 0x19, 0x34, 0x0a, 0x6d, 0x20, 0x37, 0x60, 0x31, 0x8e, 0x1c, 0x8f, 0x9a, + 0xa5, 0x76, 0x69, 0xcf, 0xb0, 0x74, 0x40, 0x56, 0xa1, 0x3c, 0x76, 0x5e, 0x63, 0x4b, 0x0c, 0x4b, + 0x7d, 0x12, 0x13, 0xaa, 0x2f, 0x1c, 0xe6, 0xbb, 0x81, 0xc4, 0x15, 0x1b, 0x56, 0x1a, 0x92, 0x2d, + 0xa8, 0x85, 0x8e, 0x47, 0x23, 0xf6, 0x33, 0xc5, 0xb5, 0xd4, 0xad, 0x2c, 0xee, 0x3c, 0x80, 0x5a, + 0xda, 0x35, 0xa5, 0xe0, 0xc6, 0x42, 0xd0, 0x40, 0x26, 0x73, 0xa5, 0xa1, 0xaa, 0xc1, 0x67, 0x63, + 0x26, 0x93, 0xf9, 0x74, 0xd0, 0xf9, 0xb5, 0x04, 0xd5, 0xa4, 0x77, 0xe4, 0x8b, 0x62, 0x95, 0xff, + 0xba, 0x49, 0x87, 0xfd, 0x93, 0x13, 0x85, 
0x4c, 0x57, 0xd2, 0x03, 0x90, 0xa7, 0x82, 0x4b, 0xe9, + 0xb3, 0xc0, 0xfb, 0xf8, 0x1e, 0x1f, 0x6b, 0x2c, 0xb5, 0x0a, 0xac, 0xce, 0x2b, 0xa8, 0xa5, 0xb2, + 0xaa, 0x56, 0xc9, 0xa5, 0xe3, 0xa7, 0xfd, 0xc2, 0x80, 0x6c, 0x40, 0xe5, 0x8c, 0x8a, 0x80, 0xfa, + 0xc9, 0x12, 0x92, 0x88, 0x10, 0x30, 0xe2, 0x88, 0x8a, 0xa4, 0x65, 0xf8, 0x4d, 0xb6, 0xa1, 0x1a, + 0x52, 0x61, 0x2b, 0xef, 0x18, 0xed, 0xf2, 0x9e, 0xd1, 0x83, 0xe9, 0x65, 0xab, 0xd2, 0xa7, 0x42, + 0x79, 0xa3, 0x12, 0x52, 0x71, 0x18, 0xc6, 0x9d, 0xd7, 0x50, 0x4b, 0x4b, 0x51, 0x8d, 0x0b, 0xa9, + 0x60, 0x7c, 0x14, 0xa5, 0x8d, 0x4b, 0x42, 0xb2, 0x0f, 0x6b, 0x49, 0x99, 0x74, 0x64, 0xa7, 0x18, + 0x5d, 0xc1, 0x6a, 0x96, 0xe8, 0x27, 0xe0, 0x5d, 0x58, 0xc9, 0xc1, 0x92, 0x8d, 0x69, 0x52, 0xd5, + 0x72, 0x36, 0x7a, 0xcc, 0xc6, 0xb4, 0xf3, 0x57, 0x03, 0x20, 0x77, 0x9c, 0x5a, 0xaf, 0xeb, 0xb8, + 0xa7, 0x99, 0x3f, 0x30, 0x20, 0x9b, 0x50, 0x16, 0x51, 0x32, 0x95, 0x36, 0xb6, 0x35, 0x18, 0x58, + 0x6a, 0x8c, 0xfc, 0x0f, 0x6a, 0x22, 0x8a, 0x6c, 0x75, 0xba, 0xf4, 0x04, 0xbd, 0xc6, 0xf4, 0xb2, + 0x55, 0xb5, 0x06, 0x03, 0x65, 0x3b, 0xab, 0x2a, 0xa2, 0x48, 0x7d, 0x90, 0x16, 0x34, 0xc6, 0x4e, + 0x18, 0xd2, 0x91, 0xfd, 0x82, 0xf9, 0xda, 0x39, 0x86, 0x05, 0x7a, 0xe8, 0x31, 0xf3, 0xb1, 0xd3, + 0x23, 0x26, 0xe4, 0x05, 0x7a, 0xdc, 0xb0, 0x74, 0x40, 0x6e, 0x43, 0xfd, 0x5c, 0x30, 0x49, 0x87, + 0x8e, 0x7b, 0x66, 0x56, 0x30, 0x93, 0x0f, 0x10, 0x13, 0x6a, 0xa1, 0x67, 0x87, 0x9e, 0xcd, 0x02, + 0xb3, 0xaa, 0x77, 0x22, 0xf4, 0xfa, 0xde, 0x93, 0x80, 0x6c, 0x41, 0x5d, 0x67, 0x78, 0x2c, 0xcd, + 0x5a, 0xd2, 0x46, 0xaf, 0xef, 0x3d, 0x8d, 0x25, 0xd9, 0x44, 0xd6, 0x0b, 0x27, 0xf6, 0xa5, 0x59, + 0x4f, 0x53, 0x8f, 0x55, 0x48, 0xda, 0xb0, 0x14, 0x7a, 0xf6, 0xd8, 0x79, 0x99, 0xa4, 0x41, 0x97, + 0x19, 0x7a, 0x47, 0xce, 0x4b, 0x8d, 0xd8, 0x86, 0x65, 0x16, 0x38, 0xae, 0x64, 0x13, 0x6a, 0x3b, + 0x01, 0x0f, 0xcc, 0x06, 0x42, 0x96, 0xd2, 0xc1, 0x47, 0x01, 0x0f, 0xd4, 0x62, 0x8b, 0x90, 0x25, + 0xad, 0x52, 0x00, 0x14, 0x55, 0xb0, 0x1f, 0xcb, 0xb3, 0x2a, 0xd8, 0x91, 0x5c, 0x05, 0x21, 0x2b, + 0x45, 0x15, 0x04, 0xb4, 0xa1, 0x11, 0x07, 0x74, 0xc2, 0x5c, 0xe9, 0x0c, 0x7d, 0x6a, 0x5e, 0x47, + 0x40, 0x71, 0x88, 0x3c, 0x80, 0xcd, 0x53, 0x46, 0x85, 0x23, 0xdc, 0x53, 0xe6, 0x3a, 0xbe, 0xad, + 0x7f, 0x4f, 0x6c, 0x7d, 0xfc, 0x56, 0x11, 0x7f, 0xab, 0x08, 0xd0, 0x4e, 0xf8, 0x41, 0xa5, 0xc9, + 0x3d, 0x98, 0x49, 0xd9, 0xd1, 0xb9, 0x13, 0x26, 0xcc, 0x35, 0x64, 0xde, 0x2c, 0xa6, 0x07, 0xe7, + 0x4e, 0xa8, 0x79, 0x2d, 0x68, 0xe0, 0x29, 0xb1, 0xb5, 0x91, 0x88, 0x2e, 0x1b, 0x87, 0x0e, 0xd1, + 0x4d, 0x9f, 0x40, 0x5d, 0x03, 0x94, 0xa7, 0xd6, 0xd1, 0x33, 0x4b, 0xd3, 0xcb, 0x56, 0xed, 0x58, + 0x0d, 0x2a, 0x63, 0xd5, 0x30, 0x6d, 0x45, 0x11, 0xb9, 0x07, 0x2b, 0x19, 0x54, 0x7b, 0xec, 0x06, + 0xe2, 0x57, 0xa7, 0x97, 0xad, 0xa5, 0x14, 0x8f, 0x46, 0x5b, 0x4a, 0x39, 0xe8, 0xb6, 0x4f, 0x61, + 0x4d, 0xf3, 0x8a, 0x9e, 0xbb, 0x89, 0x95, 0x5c, 0xc7, 0xc4, 0x51, 0x6e, 0xbc, 0xac, 0x5e, 0x6d, + 0xbf, 0x8d, 0x42, 0xbd, 0xdf, 0xa0, 0x07, 0xff, 0x0f, 0x9a, 0x63, 0xe7, 0x4e, 0xbc, 0x85, 0x20, + 0x5d, 0xdb, 0xf3, 0xcc, 0x8e, 0xdb, 0x69, 0xb5, 0x99, 0x29, 0x4d, 0xbd, 0x25, 0x38, 0xda, 0xd7, + 0xce, 0xdc, 0x4d, 0xd5, 0x72, 0x7f, 0x6e, 0xea, 0xcd, 0xcf, 0x50, 0xca, 0xa4, 0x3b, 0x05, 0x2d, + 0xed, 0xc5, 0xad, 0x19, 0x94, 0x76, 0xe3, 0x3e, 0x90, 0x0c, 0x95, 0xbb, 0xf6, 0x3f, 0x85, 0x85, + 0xf6, 0x73, 0xeb, 0x76, 0x61, 0x5d, 0x83, 0x67, 0x0d, 0x7c, 0x1b, 0xd1, 0xba, 0x5f, 0x4f, 0x8a, + 0x2e, 0xce, 0x9a, 0x58, 0x44, 0xff, 0xb7, 0xa0, 0xfd, 0x28, 0xc7, 0xbe, 0xaf, 0x8d, 0x2d, 0x6f, + 0x7e, 0x40, 0x1b, 0x9b, 0xfe, 0xae, 0x36, 0xa2, 0x5b, 0xef, 0x69, 
0x23, 0x76, 0x3f, 0xc5, 0x16, + 0xcd, 0xde, 0x4e, 0x7e, 0xf6, 0x54, 0xe2, 0xa4, 0xe0, 0xf8, 0x2f, 0xd3, 0xab, 0xe3, 0x0e, 0xfe, + 0xf6, 0xef, 0x7e, 0xec, 0x9e, 0xfd, 0x36, 0x90, 0xe2, 0x22, 0xbd, 0x3d, 0xee, 0x83, 0xa1, 0x5c, + 0x6e, 0x76, 0xe6, 0xe1, 0x22, 0x85, 0x7c, 0x95, 0x5d, 0x09, 0xdb, 0xf3, 0x90, 0xd3, 0x9b, 0x63, + 0x00, 0xa0, 0xbf, 0x6c, 0xe9, 0x86, 0xe6, 0xce, 0x1c, 0x12, 0xbd, 0xe5, 0xe9, 0x65, 0xab, 0xfe, + 0x3d, 0x92, 0x8f, 0x0f, 0xfb, 0x56, 0x5d, 0xeb, 0x1c, 0xbb, 0x61, 0x87, 0x42, 0xa3, 0x00, 0xcc, + 0xef, 0xdd, 0x52, 0xe1, 0xde, 0xcd, 0x5f, 0x04, 0x0b, 0x1f, 0x78, 0x11, 0x94, 0x3f, 0xf8, 0x22, + 0x30, 0x66, 0x5e, 0x04, 0x9d, 0x3f, 0x16, 0xa1, 0x9e, 0xbd, 0x3b, 0x88, 0x03, 0x5b, 0x8c, 0xdb, + 0x11, 0x15, 0x13, 0xe6, 0x52, 0x7b, 0x78, 0x21, 0x69, 0x64, 0x0b, 0xea, 0xc6, 0x22, 0x62, 0x13, + 0x9a, 0xbc, 0xd9, 0x76, 0x3e, 0xf2, 0x80, 0xd1, 0xbd, 0xb9, 0xc5, 0xf8, 0x40, 0xcb, 0xf4, 0x94, + 0x8a, 0x95, 0x8a, 0x90, 0x1f, 0xe1, 0x66, 0x3e, 0xc5, 0xa8, 0xa0, 0xbe, 0x30, 0x87, 0xfa, 0x7a, + 0xa6, 0x3e, 0xca, 0x95, 0x8f, 0x61, 0x9d, 0x71, 0xfb, 0x55, 0x4c, 0xe3, 0x19, 0xdd, 0xf2, 0x1c, + 0xba, 0x6b, 0x8c, 0x3f, 0x43, 0x7e, 0xae, 0x6a, 0xc3, 0x66, 0xa1, 0x25, 0xea, 0x2e, 0x2e, 0x68, + 0x1b, 0x73, 0x68, 0x6f, 0x64, 0x35, 0xab, 0xbb, 0x3b, 0x9f, 0xe0, 0x27, 0xd8, 0x60, 0xdc, 0x3e, + 0x77, 0x98, 0x7c, 0x57, 0x7d, 0x71, 0xbe, 0x8e, 0x3c, 0x77, 0x98, 0x9c, 0x95, 0xd6, 0x1d, 0x19, + 0x53, 0xe1, 0xcd, 0x74, 0xa4, 0x32, 0x5f, 0x47, 0x8e, 0x90, 0x9f, 0xab, 0xf6, 0x61, 0x8d, 0xf1, + 0x77, 0x6b, 0xad, 0xce, 0xa1, 0x79, 0x9d, 0xf1, 0xd9, 0x3a, 0x9f, 0xc1, 0x5a, 0x44, 0x5d, 0xc9, + 0x45, 0xd1, 0x6d, 0xb5, 0x39, 0x14, 0x57, 0x13, 0x7a, 0x26, 0xd9, 0x99, 0x00, 0xe4, 0x79, 0xb2, + 0x02, 0x0b, 0x3c, 0xc4, 0xa3, 0x53, 0xb7, 0x16, 0x78, 0xa8, 0xde, 0x80, 0x23, 0xf5, 0xb3, 0xa3, + 0x0f, 0x4e, 0xdd, 0x4a, 0x22, 0x75, 0x9e, 0xc6, 0xce, 0x4b, 0x9e, 0x3e, 0x02, 0x75, 0x80, 0xa3, + 0x2c, 0xe0, 0x22, 0x39, 0x3b, 0x3a, 0x50, 0xa3, 0x13, 0xc7, 0x8f, 0x69, 0xfa, 0xe6, 0xc1, 0xa0, + 0x67, 0xbe, 0x79, 0xdb, 0xbc, 0xf6, 0xe7, 0xdb, 0xe6, 0xb5, 0x5f, 0xa6, 0xcd, 0xd2, 0x9b, 0x69, + 0xb3, 0xf4, 0xfb, 0xb4, 0x59, 0xfa, 0x7b, 0xda, 0x2c, 0x0d, 0x2b, 0xf8, 0x7f, 0xe8, 0xf3, 0x7f, + 0x02, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x21, 0x0b, 0xcd, 0x6e, 0x0d, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/cgroups/metrics.proto b/vendor/github.com/containerd/cgroups/metrics.proto new file mode 100644 index 000000000..642623fce --- /dev/null +++ b/vendor/github.com/containerd/cgroups/metrics.proto @@ -0,0 +1,123 @@ +syntax = "proto3"; + +package io.containerd.cgroups.v1; + +import "gogoproto/gogo.proto"; + +message Metrics { + repeated HugetlbStat hugetlb = 1; + PidsStat pids = 2; + CPUStat cpu = 3 [(gogoproto.customname) = "CPU"]; + MemoryStat memory = 4; + BlkIOStat blkio = 5; + RdmaStat rdma = 6; +} + +message HugetlbStat { + uint64 usage = 1; + uint64 max = 2; + uint64 failcnt = 3; + string pagesize = 4; +} + +message PidsStat { + uint64 current = 1; + uint64 limit = 2; +} + +message CPUStat { + CPUUsage usage = 1; + Throttle throttling = 2; +} + +message CPUUsage { + // values in nanoseconds + uint64 total = 1; + uint64 kernel = 2; + uint64 user = 3; + repeated uint64 per_cpu = 4 [(gogoproto.customname) = "PerCPU"]; + +} + +message Throttle { + uint64 periods = 1; + uint64 throttled_periods = 2; + uint64 throttled_time = 3; +} + +message MemoryStat { + uint64 cache = 1; + uint64 rss = 2 [(gogoproto.customname) = "RSS"]; + uint64 rss_huge = 3 [(gogoproto.customname) = "RSSHuge"]; + uint64 mapped_file = 4; + uint64 dirty = 
5; + uint64 writeback = 6; + uint64 pg_pg_in = 7; + uint64 pg_pg_out = 8; + uint64 pg_fault = 9; + uint64 pg_maj_fault = 10; + uint64 inactive_anon = 11; + uint64 active_anon = 12; + uint64 inactive_file = 13; + uint64 active_file = 14; + uint64 unevictable = 15; + uint64 hierarchical_memory_limit = 16; + uint64 hierarchical_swap_limit = 17; + uint64 total_cache = 18; + uint64 total_rss = 19 [(gogoproto.customname) = "TotalRSS"]; + uint64 total_rss_huge = 20 [(gogoproto.customname) = "TotalRSSHuge"]; + uint64 total_mapped_file = 21; + uint64 total_dirty = 22; + uint64 total_writeback = 23; + uint64 total_pg_pg_in = 24; + uint64 total_pg_pg_out = 25; + uint64 total_pg_fault = 26; + uint64 total_pg_maj_fault = 27; + uint64 total_inactive_anon = 28; + uint64 total_active_anon = 29; + uint64 total_inactive_file = 30; + uint64 total_active_file = 31; + uint64 total_unevictable = 32; + MemoryEntry usage = 33; + MemoryEntry swap = 34; + MemoryEntry kernel = 35; + MemoryEntry kernel_tcp = 36 [(gogoproto.customname) = "KernelTCP"]; + +} + +message MemoryEntry { + uint64 limit = 1; + uint64 usage = 2; + uint64 max = 3; + uint64 failcnt = 4; +} + +message BlkIOStat { + repeated BlkIOEntry io_service_bytes_recursive = 1; + repeated BlkIOEntry io_serviced_recursive = 2; + repeated BlkIOEntry io_queued_recursive = 3; + repeated BlkIOEntry io_service_time_recursive = 4; + repeated BlkIOEntry io_wait_time_recursive = 5; + repeated BlkIOEntry io_merged_recursive = 6; + repeated BlkIOEntry io_time_recursive = 7; + repeated BlkIOEntry sectors_recursive = 8; +} + +message BlkIOEntry { + string op = 1; + string device = 2; + uint64 major = 3; + uint64 minor = 4; + uint64 value = 5; +} + +message RdmaStat { + repeated RdmaEntry current = 1; + repeated RdmaEntry limit = 2; +} + +message RdmaEntry { + string device = 1; + uint32 hca_handles = 2; + uint32 hca_objects = 3; +} diff --git a/vendor/github.com/containerd/cgroups/named.go b/vendor/github.com/containerd/cgroups/named.go new file mode 100644 index 000000000..06b16c3b1 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/named.go @@ -0,0 +1,39 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import "path/filepath" + +func NewNamed(root string, name Name) *namedController { + return &namedController{ + root: root, + name: name, + } +} + +type namedController struct { + root string + name Name +} + +func (n *namedController) Name() Name { + return n.name +} + +func (n *namedController) Path(path string) string { + return filepath.Join(n.root, string(n.name), path) +} diff --git a/vendor/github.com/containerd/cgroups/net_cls.go b/vendor/github.com/containerd/cgroups/net_cls.go new file mode 100644 index 000000000..8f1a2651f --- /dev/null +++ b/vendor/github.com/containerd/cgroups/net_cls.go @@ -0,0 +1,58 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewNetCls(root string) *netclsController { + return &netclsController{ + root: filepath.Join(root, string(NetCLS)), + } +} + +type netclsController struct { + root string +} + +func (n *netclsController) Name() Name { + return NetCLS +} + +func (n *netclsController) Path(path string) string { + return filepath.Join(n.root, path) +} + +func (n *netclsController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil { + return err + } + if resources.Network != nil && resources.Network.ClassID != nil && *resources.Network.ClassID > 0 { + return ioutil.WriteFile( + filepath.Join(n.Path(path), "net_cls.classid"), + []byte(strconv.FormatUint(uint64(*resources.Network.ClassID), 10)), + defaultFilePerm, + ) + } + return nil +} diff --git a/vendor/github.com/containerd/cgroups/net_prio.go b/vendor/github.com/containerd/cgroups/net_prio.go new file mode 100644 index 000000000..c77169215 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/net_prio.go @@ -0,0 +1,66 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewNetPrio(root string) *netprioController { + return &netprioController{ + root: filepath.Join(root, string(NetPrio)), + } +} + +type netprioController struct { + root string +} + +func (n *netprioController) Name() Name { + return NetPrio +} + +func (n *netprioController) Path(path string) string { + return filepath.Join(n.root, path) +} + +func (n *netprioController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil { + return err + } + if resources.Network != nil { + for _, prio := range resources.Network.Priorities { + if err := ioutil.WriteFile( + filepath.Join(n.Path(path), "net_prio_ifpriomap"), + formatPrio(prio.Name, prio.Priority), + defaultFilePerm, + ); err != nil { + return err + } + } + } + return nil +} + +func formatPrio(name string, prio uint32) []byte { + return []byte(fmt.Sprintf("%s %d", name, prio)) +} diff --git a/vendor/github.com/containerd/cgroups/paths.go b/vendor/github.com/containerd/cgroups/paths.go new file mode 100644 index 000000000..455ce857f --- /dev/null +++ b/vendor/github.com/containerd/cgroups/paths.go @@ -0,0 +1,104 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "fmt" + "path/filepath" + + "github.com/pkg/errors" +) + +type Path func(subsystem Name) (string, error) + +func RootPath(subsysem Name) (string, error) { + return "/", nil +} + +// StaticPath returns a static path to use for all cgroups +func StaticPath(path string) Path { + return func(_ Name) (string, error) { + return path, nil + } +} + +// NestedPath will nest the cgroups based on the calling processes cgroup +// placing its child processes inside its own path +func NestedPath(suffix string) Path { + paths, err := parseCgroupFile("/proc/self/cgroup") + if err != nil { + return errorPath(err) + } + return existingPath(paths, suffix) +} + +// PidPath will return the correct cgroup paths for an existing process running inside a cgroup +// This is commonly used for the Load function to restore an existing container +func PidPath(pid int) Path { + p := fmt.Sprintf("/proc/%d/cgroup", pid) + paths, err := parseCgroupFile(p) + if err != nil { + return errorPath(errors.Wrapf(err, "parse cgroup file %s", p)) + } + return existingPath(paths, "") +} + +func existingPath(paths map[string]string, suffix string) Path { + // localize the paths based on the root mount dest for nested cgroups + for n, p := range paths { + dest, err := getCgroupDestination(string(n)) + if err != nil { + return errorPath(err) + } + rel, err := filepath.Rel(dest, p) + if err != nil { + return errorPath(err) + } + if rel == "." { + rel = dest + } + paths[n] = filepath.Join("/", rel) + } + return func(name Name) (string, error) { + root, ok := paths[string(name)] + if !ok { + if root, ok = paths[fmt.Sprintf("name=%s", name)]; !ok { + return "", fmt.Errorf("unable to find %q in controller set", name) + } + } + if suffix != "" { + return filepath.Join(root, suffix), nil + } + return root, nil + } +} + +func subPath(path Path, subName string) Path { + return func(name Name) (string, error) { + p, err := path(name) + if err != nil { + return "", err + } + return filepath.Join(p, subName), nil + } +} + +func errorPath(err error) Path { + return func(_ Name) (string, error) { + return "", err + } +} diff --git a/vendor/github.com/containerd/cgroups/perf_event.go b/vendor/github.com/containerd/cgroups/perf_event.go new file mode 100644 index 000000000..648786db6 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/perf_event.go @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cgroups + +import "path/filepath" + +func NewPerfEvent(root string) *PerfEventController { + return &PerfEventController{ + root: filepath.Join(root, string(PerfEvent)), + } +} + +type PerfEventController struct { + root string +} + +func (p *PerfEventController) Name() Name { + return PerfEvent +} + +func (p *PerfEventController) Path(path string) string { + return filepath.Join(p.root, path) +} diff --git a/vendor/github.com/containerd/cgroups/pids.go b/vendor/github.com/containerd/cgroups/pids.go new file mode 100644 index 000000000..a1cfcb88d --- /dev/null +++ b/vendor/github.com/containerd/cgroups/pids.go @@ -0,0 +1,85 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewPids(root string) *pidsController { + return &pidsController{ + root: filepath.Join(root, string(Pids)), + } +} + +type pidsController struct { + root string +} + +func (p *pidsController) Name() Name { + return Pids +} + +func (p *pidsController) Path(path string) string { + return filepath.Join(p.root, path) +} + +func (p *pidsController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil { + return err + } + if resources.Pids != nil && resources.Pids.Limit > 0 { + return ioutil.WriteFile( + filepath.Join(p.Path(path), "pids.max"), + []byte(strconv.FormatInt(resources.Pids.Limit, 10)), + defaultFilePerm, + ) + } + return nil +} + +func (p *pidsController) Update(path string, resources *specs.LinuxResources) error { + return p.Create(path, resources) +} + +func (p *pidsController) Stat(path string, stats *Metrics) error { + current, err := readUint(filepath.Join(p.Path(path), "pids.current")) + if err != nil { + return err + } + var max uint64 + maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "pids.max")) + if err != nil { + return err + } + if maxS := strings.TrimSpace(string(maxData)); maxS != "max" { + if max, err = parseUint(maxS, 10, 64); err != nil { + return err + } + } + stats.Pids = &PidsStat{ + Current: current, + Limit: max, + } + return nil +} diff --git a/vendor/github.com/containerd/cgroups/rdma.go b/vendor/github.com/containerd/cgroups/rdma.go new file mode 100644 index 000000000..4f423d33a --- /dev/null +++ b/vendor/github.com/containerd/cgroups/rdma.go @@ -0,0 +1,153 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "io/ioutil" + "math" + "os" + "path/filepath" + "strconv" + "strings" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +type rdmaController struct { + root string +} + +func (p *rdmaController) Name() Name { + return Rdma +} + +func (p *rdmaController) Path(path string) string { + return filepath.Join(p.root, path) +} + +func NewRdma(root string) *rdmaController { + return &rdmaController{ + root: filepath.Join(root, string(Rdma)), + } +} + +func createCmdString(device string, limits *specs.LinuxRdma) string { + var cmdString string + + cmdString = device + if limits.HcaHandles != nil { + cmdString = cmdString + " " + "hca_handle=" + strconv.FormatUint(uint64(*limits.HcaHandles), 10) + } + + if limits.HcaObjects != nil { + cmdString = cmdString + " " + "hca_object=" + strconv.FormatUint(uint64(*limits.HcaObjects), 10) + } + return cmdString +} + +func (p *rdmaController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil { + return err + } + + for device, limit := range resources.Rdma { + if device != "" && (limit.HcaHandles != nil || limit.HcaObjects != nil) { + return ioutil.WriteFile( + filepath.Join(p.Path(path), "rdma.max"), + []byte(createCmdString(device, &limit)), + defaultFilePerm, + ) + } + } + return nil +} + +func (p *rdmaController) Update(path string, resources *specs.LinuxResources) error { + return p.Create(path, resources) +} + +func parseRdmaKV(raw string, entry *RdmaEntry) { + var value uint64 + var err error + + parts := strings.Split(raw, "=") + switch len(parts) { + case 2: + if parts[1] == "max" { + value = math.MaxUint32 + } else { + value, err = parseUint(parts[1], 10, 32) + if err != nil { + return + } + } + if parts[0] == "hca_handle" { + entry.HcaHandles = uint32(value) + } else if parts[0] == "hca_object" { + entry.HcaObjects = uint32(value) + } + } +} + +func toRdmaEntry(strEntries []string) []*RdmaEntry { + var rdmaEntries []*RdmaEntry + for i := range strEntries { + parts := strings.Fields(strEntries[i]) + switch len(parts) { + case 3: + entry := new(RdmaEntry) + entry.Device = parts[0] + parseRdmaKV(parts[1], entry) + parseRdmaKV(parts[2], entry) + + rdmaEntries = append(rdmaEntries, entry) + default: + continue + } + } + return rdmaEntries +} + +func (p *rdmaController) Stat(path string, stats *Metrics) error { + + currentData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.current")) + if err != nil { + return err + } + currentPerDevices := strings.Split(string(currentData), "\n") + + maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.max")) + if err != nil { + return err + } + maxPerDevices := strings.Split(string(maxData), "\n") + + // If device got removed between reading two files, ignore returning + // stats. + if len(currentPerDevices) != len(maxPerDevices) { + return nil + } + + currentEntries := toRdmaEntry(currentPerDevices) + maxEntries := toRdmaEntry(maxPerDevices) + + stats.Rdma = &RdmaStat{ + Current: currentEntries, + Limit: maxEntries, + } + return nil +} diff --git a/vendor/github.com/containerd/cgroups/state.go b/vendor/github.com/containerd/cgroups/state.go new file mode 100644 index 000000000..cfeabbbc6 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/state.go @@ -0,0 +1,28 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +// State is a type that represents the state of the current cgroup +type State string + +const ( + Unknown State = "" + Thawed State = "thawed" + Frozen State = "frozen" + Freezing State = "freezing" + Deleted State = "deleted" +) diff --git a/vendor/github.com/containerd/cgroups/subsystem.go b/vendor/github.com/containerd/cgroups/subsystem.go new file mode 100644 index 000000000..933a6c38d --- /dev/null +++ b/vendor/github.com/containerd/cgroups/subsystem.go @@ -0,0 +1,112 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "fmt" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Name is a typed name for a cgroup subsystem +type Name string + +const ( + Devices Name = "devices" + Hugetlb Name = "hugetlb" + Freezer Name = "freezer" + Pids Name = "pids" + NetCLS Name = "net_cls" + NetPrio Name = "net_prio" + PerfEvent Name = "perf_event" + Cpuset Name = "cpuset" + Cpu Name = "cpu" + Cpuacct Name = "cpuacct" + Memory Name = "memory" + Blkio Name = "blkio" + Rdma Name = "rdma" +) + +// Subsystems returns a complete list of the default cgroups +// avaliable on most linux systems +func Subsystems() []Name { + n := []Name{ + Hugetlb, + Freezer, + Pids, + NetCLS, + NetPrio, + PerfEvent, + Cpuset, + Cpu, + Cpuacct, + Memory, + Blkio, + Rdma, + } + if !isUserNS { + n = append(n, Devices) + } + return n +} + +type Subsystem interface { + Name() Name +} + +type pather interface { + Subsystem + Path(path string) string +} + +type creator interface { + Subsystem + Create(path string, resources *specs.LinuxResources) error +} + +type deleter interface { + Subsystem + Delete(path string) error +} + +type stater interface { + Subsystem + Stat(path string, stats *Metrics) error +} + +type updater interface { + Subsystem + Update(path string, resources *specs.LinuxResources) error +} + +// SingleSubsystem returns a single cgroup subsystem within the base Hierarchy +func SingleSubsystem(baseHierarchy Hierarchy, subsystem Name) Hierarchy { + return func() ([]Subsystem, error) { + subsystems, err := baseHierarchy() + if err != nil { + return nil, err + } + for _, s := range subsystems { + if s.Name() == subsystem { + return []Subsystem{ + s, + }, nil + } + } + return nil, fmt.Errorf("unable to find subsystem %s", subsystem) + } +} diff --git a/vendor/github.com/containerd/cgroups/systemd.go b/vendor/github.com/containerd/cgroups/systemd.go new file mode 100644 index 
000000000..8153d744c --- /dev/null +++ b/vendor/github.com/containerd/cgroups/systemd.go @@ -0,0 +1,131 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "fmt" + "path/filepath" + "strings" + "sync" + + systemdDbus "github.com/coreos/go-systemd/dbus" + "github.com/godbus/dbus" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + SystemdDbus Name = "systemd" + defaultSlice = "system.slice" +) + +func Systemd() ([]Subsystem, error) { + root, err := v1MountPoint() + if err != nil { + return nil, err + } + defaultSubsystems, err := defaults(root) + if err != nil { + return nil, err + } + s, err := NewSystemd(root) + if err != nil { + return nil, err + } + // make sure the systemd controller is added first + return append([]Subsystem{s}, defaultSubsystems...), nil +} + +func Slice(slice, name string) Path { + if slice == "" { + slice = defaultSlice + } + return func(subsystem Name) (string, error) { + return filepath.Join(slice, unitName(name)), nil + } +} + +func NewSystemd(root string) (*SystemdController, error) { + return &SystemdController{ + root: root, + }, nil +} + +type SystemdController struct { + mu sync.Mutex + root string +} + +func (s *SystemdController) Name() Name { + return SystemdDbus +} + +func (s *SystemdController) Create(path string, resources *specs.LinuxResources) error { + conn, err := systemdDbus.New() + if err != nil { + return err + } + defer conn.Close() + slice, name := splitName(path) + properties := []systemdDbus.Property{ + systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)), + systemdDbus.PropWants(slice), + newProperty("DefaultDependencies", false), + newProperty("Delegate", true), + newProperty("MemoryAccounting", true), + newProperty("CPUAccounting", true), + newProperty("BlockIOAccounting", true), + } + ch := make(chan string) + _, err = conn.StartTransientUnit(name, "replace", properties, ch) + if err != nil { + return err + } + <-ch + return nil +} + +func (s *SystemdController) Delete(path string) error { + conn, err := systemdDbus.New() + if err != nil { + return err + } + defer conn.Close() + _, name := splitName(path) + ch := make(chan string) + _, err = conn.StopUnit(name, "replace", ch) + if err != nil { + return err + } + <-ch + return nil +} + +func newProperty(name string, units interface{}) systemdDbus.Property { + return systemdDbus.Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + +func unitName(name string) string { + return fmt.Sprintf("%s.slice", name) +} + +func splitName(path string) (slice string, unit string) { + slice, unit = filepath.Split(path) + return strings.TrimSuffix(slice, "/"), unit +} diff --git a/vendor/github.com/containerd/cgroups/ticks.go b/vendor/github.com/containerd/cgroups/ticks.go new file mode 100644 index 000000000..84dc38d0c --- /dev/null +++ b/vendor/github.com/containerd/cgroups/ticks.go @@ -0,0 +1,26 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +func getClockTicks() uint64 { + // The value comes from `C.sysconf(C._SC_CLK_TCK)`, and + // on Linux it's a constant which is safe to be hard coded, + // so we can avoid using cgo here. + // See https://github.com/containerd/cgroups/pull/12 for + // more details. + return 100 +} diff --git a/vendor/github.com/containerd/cgroups/utils.go b/vendor/github.com/containerd/cgroups/utils.go new file mode 100644 index 000000000..345be4e46 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/utils.go @@ -0,0 +1,297 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cgroups + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + units "github.com/docker/go-units" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +var isUserNS = runningInUserNS() + +// runningInUserNS detects whether we are currently running in a user namespace. +// Copied from github.com/lxc/lxd/shared/util.go +func runningInUserNS() bool { + file, err := os.Open("/proc/self/uid_map") + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return false + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { + return false + } + + line := string(l) + var a, b, c int64 + fmt.Sscanf(line, "%d %d %d", &a, &b, &c) + /* + * We assume we are in the initial user namespace if we have a full + * range - 4294967295 uids starting at uid 0. 
+ */ + if a == 0 && b == 0 && c == 4294967295 { + return false + } + return true +} + +// defaults returns all known groups +func defaults(root string) ([]Subsystem, error) { + h, err := NewHugetlb(root) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + s := []Subsystem{ + NewNamed(root, "systemd"), + NewFreezer(root), + NewPids(root), + NewNetCls(root), + NewNetPrio(root), + NewPerfEvent(root), + NewCputset(root), + NewCpu(root), + NewCpuacct(root), + NewMemory(root), + NewBlkio(root), + NewRdma(root), + } + // only add the devices cgroup if we are not in a user namespace + // because modifications are not allowed + if !isUserNS { + s = append(s, NewDevices(root)) + } + // add the hugetlb cgroup if error wasn't due to missing hugetlb + // cgroup support on the host + if err == nil { + s = append(s, h) + } + return s, nil +} + +// remove will remove a cgroup path handling EAGAIN and EBUSY errors and +// retrying the remove after a exp timeout +func remove(path string) error { + delay := 10 * time.Millisecond + for i := 0; i < 5; i++ { + if i != 0 { + time.Sleep(delay) + delay *= 2 + } + if err := os.RemoveAll(path); err == nil { + return nil + } + } + return fmt.Errorf("cgroups: unable to remove path %q", path) +} + +// readPids will read all the pids in a cgroup by the provided path +func readPids(path string, subsystem Name) ([]Process, error) { + f, err := os.Open(filepath.Join(path, cgroupProcs)) + if err != nil { + return nil, err + } + defer f.Close() + var ( + out []Process + s = bufio.NewScanner(f) + ) + for s.Scan() { + if t := s.Text(); t != "" { + pid, err := strconv.Atoi(t) + if err != nil { + return nil, err + } + out = append(out, Process{ + Pid: pid, + Subsystem: subsystem, + Path: path, + }) + } + } + return out, nil +} + +func hugePageSizes() ([]string, error) { + var ( + pageSizes []string + sizeList = []string{"B", "kB", "MB", "GB", "TB", "PB"} + ) + files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages") + if err != nil { + return nil, err + } + for _, st := range files { + nameArray := strings.Split(st.Name(), "-") + pageSize, err := units.RAMInBytes(nameArray[1]) + if err != nil { + return nil, err + } + pageSizes = append(pageSizes, units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList)) + } + return pageSizes, nil +} + +func readUint(path string) (uint64, error) { + v, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return parseUint(strings.TrimSpace(string(v)), 10, 64) +} + +func parseUint(s string, base, bitSize int) (uint64, error) { + v, err := strconv.ParseUint(s, base, bitSize) + if err != nil { + intValue, intErr := strconv.ParseInt(s, base, bitSize) + // 1. Handle negative values greater than MinInt64 (and) + // 2. 
Handle negative values lesser than MinInt64 + if intErr == nil && intValue < 0 { + return 0, nil + } else if intErr != nil && + intErr.(*strconv.NumError).Err == strconv.ErrRange && + intValue < 0 { + return 0, nil + } + return 0, err + } + return v, nil +} + +func parseKV(raw string) (string, uint64, error) { + parts := strings.Fields(raw) + switch len(parts) { + case 2: + v, err := parseUint(parts[1], 10, 64) + if err != nil { + return "", 0, err + } + return parts[0], v, nil + default: + return "", 0, ErrInvalidFormat + } +} + +func parseCgroupFile(path string) (map[string]string, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + return parseCgroupFromReader(f) +} + +func parseCgroupFromReader(r io.Reader) (map[string]string, error) { + var ( + cgroups = make(map[string]string) + s = bufio.NewScanner(r) + ) + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + var ( + text = s.Text() + parts = strings.SplitN(text, ":", 3) + ) + if len(parts) < 3 { + return nil, fmt.Errorf("invalid cgroup entry: %q", text) + } + for _, subs := range strings.Split(parts[1], ",") { + if subs != "" { + cgroups[subs] = parts[2] + } + } + } + return cgroups, nil +} + +func getCgroupDestination(subsystem string) (string, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "", err + } + defer f.Close() + s := bufio.NewScanner(f) + for s.Scan() { + if err := s.Err(); err != nil { + return "", err + } + fields := strings.Fields(s.Text()) + for _, opt := range strings.Split(fields[len(fields)-1], ",") { + if opt == subsystem { + return fields[3], nil + } + } + } + return "", ErrNoCgroupMountDestination +} + +func pathers(subystems []Subsystem) []pather { + var out []pather + for _, s := range subystems { + if p, ok := s.(pather); ok { + out = append(out, p) + } + } + return out +} + +func initializeSubsystem(s Subsystem, path Path, resources *specs.LinuxResources) error { + if c, ok := s.(creator); ok { + p, err := path(s.Name()) + if err != nil { + return err + } + if err := c.Create(p, resources); err != nil { + return err + } + } else if c, ok := s.(pather); ok { + p, err := path(s.Name()) + if err != nil { + return err + } + // do the default create if the group does not have a custom one + if err := os.MkdirAll(c.Path(p), defaultDirPerm); err != nil { + return err + } + } + return nil +} + +func cleanPath(path string) string { + if path == "" { + return "" + } + path = filepath.Clean(path) + if !filepath.IsAbs(path) { + path, _ = filepath.Rel(string(os.PathSeparator), filepath.Clean(string(os.PathSeparator)+path)) + } + return filepath.Clean(path) +} diff --git a/vendor/github.com/containerd/cgroups/v1.go b/vendor/github.com/containerd/cgroups/v1.go new file mode 100644 index 000000000..a076d4692 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/v1.go @@ -0,0 +1,81 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cgroups + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strings" +) + +// V1 returns all the groups in the default cgroups mountpoint in a single hierarchy +func V1() ([]Subsystem, error) { + root, err := v1MountPoint() + if err != nil { + return nil, err + } + subsystems, err := defaults(root) + if err != nil { + return nil, err + } + var enabled []Subsystem + for _, s := range pathers(subsystems) { + // check and remove the default groups that do not exist + if _, err := os.Lstat(s.Path("/")); err == nil { + enabled = append(enabled, s) + } + } + return enabled, nil +} + +// v1MountPoint returns the mount point where the cgroup +// mountpoints are mounted in a single hiearchy +func v1MountPoint() (string, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "", err + } + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + if err := scanner.Err(); err != nil { + return "", err + } + var ( + text = scanner.Text() + fields = strings.Split(text, " ") + // safe as mountinfo encodes mountpoints with spaces as \040. + index = strings.Index(text, " - ") + postSeparatorFields = strings.Fields(text[index+3:]) + numPostFields = len(postSeparatorFields) + ) + // this is an error as we can't detect if the mount is for "cgroup" + if numPostFields == 0 { + return "", fmt.Errorf("Found no fields post '-' in %q", text) + } + if postSeparatorFields[0] == "cgroup" { + // check that the mount is properly formated. + if numPostFields < 3 { + return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + return filepath.Dir(fields[4]), nil + } + } + return "", ErrMountPointNotExist +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/README.md b/vendor/github.com/containerd/containerd/runtime/v2/README.md new file mode 100644 index 000000000..481861643 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/README.md @@ -0,0 +1,173 @@ +# Runtime v2 + +Runtime v2 introduces a first class shim API for runtime authors to integrate with containerd. +The shim API is minimal and scoped to the execution lifecycle of a container. + +## Binary Naming + +Users specify the runtime they wish to use when creating a container. +The runtime can also be changed via a container update. + +```bash +> ctr run --runtime io.containerd.runc.v1 +``` + +When a user specifies a runtime name, `io.containerd.runc.v1`, they will specify the name and version of the runtime. +This will be translated by containerd into a binary name for the shim. + +`io.containerd.runc.v1` -> `containerd-shim-runc-v1` + +containerd keeps the `containerd-shim-*` prefix so that users can `ps aux | grep containerd-shim` to see running shims on their system. + +## Shim Authoring + +This section is dedicated to runtime authors wishing to build a shim. +It will detail how the API works and different considerations when building shim. + +### Commands + +Container information is provided to a shim in two ways. +The OCI Runtime Bundle and on the `Create` rpc request. + +#### `start` + +Each shim MUST implement a `start` subcommand. +This command will launch new shims. +The start command MUST accept the following flags: + +* `-namespace` the namespace for the container +* `-id` the id of the container +* `-address` the address of the containerd's main socket +* `-publish-binary` the binary path to publish events back to containerd + +The start command, as well as all binary calls to the shim, has the bundle for the container set as the `cwd`. 
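
To make that flag contract concrete, here is a minimal, hypothetical sketch of how a shim entry point might parse the required flags and dispatch on the `start` and `delete` subcommands. The `startShim` helper and the address format it returns are placeholders for illustration only, not part of any containerd API; the paragraphs that follow state what `start` must actually return.

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	var (
		namespace = flag.String("namespace", "", "namespace that owns the shim")
		id        = flag.String("id", "", "id of the container")
		address   = flag.String("address", "", "address of containerd's main socket")
		publish   = flag.String("publish-binary", "containerd", "binary used to publish events back to containerd")
	)
	flag.Parse()

	switch flag.Arg(0) {
	case "start":
		// All binary calls run with the container's bundle as the cwd.
		bundle, err := os.Getwd()
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		// startShim stands in for shim-specific startup logic; the address it
		// returns is what containerd will use for task API requests.
		fmt.Fprint(os.Stdout, startShim(*namespace, *id, *address, *publish, bundle))
	case "delete":
		// Cleanup of any resources the shim created for this bundle would go here.
	}
}

// startShim is a placeholder: a real shim would start (or locate) its server
// and return the address it serves the task API on.
func startShim(ns, id, containerdAddress, publishBinary, bundle string) string {
	return fmt.Sprintf("unix:///containerd-shim/%s/%s/shim.sock", ns, id)
}
```

Real shims typically also re-exec or daemonize themselves in `start` so that the returned address outlives the invoking process, but that detail is omitted from this sketch.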
+
+The start command MUST return an address to a shim for containerd to issue API requests for container operations.
+
+The start command can either start a new shim or return an address to an existing shim based on the shim's logic.
+
+#### `delete`
+
+Each shim MUST implement a `delete` subcommand.
+This command allows containerd to delete any container resources created, mounted, and/or run by a shim when containerd can no longer communicate over rpc.
+This happens if a shim is SIGKILL'd with a running container.
+These resources will need to be cleaned up when containerd loses the connection to a shim.
+This is also used when containerd boots and reconnects to shims.
+If a bundle is still on disk but containerd cannot connect to a shim, the delete command is invoked.
+
+The delete command MUST accept the following flags:
+
+* `-namespace` the namespace for the container
+* `-id` the id of the container
+* `-address` the address of containerd's main socket
+* `-publish-binary` the binary path to publish events back to containerd
+
+The delete command will be executed in the container's bundle as its `cwd`.
+
+### Host Level Shim Configuration
+
+containerd does not provide any host level configuration for shims via the API.
+If a shim needs configuration from the user with host level information across all instances, a shim-specific configuration file can be set up.
+
+### Container Level Shim Configuration
+
+On the create request, there is a generic `*protobuf.Any` that allows a user to specify container level configuration for the shim.
+
+```proto
+message CreateTaskRequest {
+    string id = 1;
+    ...
+    google.protobuf.Any options = 10;
+}
+```
+
+A shim author can create their own protobuf message for configuration and clients can import and provide this information as needed.
+
+### I/O
+
+I/O for a container is provided by the client to the shim via fifo on Linux, named pipes on Windows, or log files on disk.
+The paths to these files are provided on the `Create` rpc for the initial creation and on the `Exec` rpc for additional processes.
+
+```proto
+message CreateTaskRequest {
+    string id = 1;
+    bool terminal = 4;
+    string stdin = 5;
+    string stdout = 6;
+    string stderr = 7;
+}
+```
+
+```proto
+message ExecProcessRequest {
+    string id = 1;
+    string exec_id = 2;
+    bool terminal = 3;
+    string stdin = 4;
+    string stdout = 5;
+    string stderr = 6;
+}
+```
+
+Containers that are to be launched with an interactive terminal will have the `terminal` field set to `true`; data is still copied over the files (fifos, pipes) in the same way as for non-interactive containers.
+
+### Root Filesystems
+
+The root filesystem for the containers is provided on the `Create` rpc.
+Shims are responsible for managing the lifecycle of the filesystem mount during the lifecycle of a container.
+
+```proto
+message CreateTaskRequest {
+    string id = 1;
+    string bundle = 2;
+    repeated containerd.types.Mount rootfs = 3;
+    ...
+}
+```
+
+The mount protobuf message is:
+
+```proto
+message Mount {
+    // Type defines the nature of the mount.
+    string type = 1;
+    // Source specifies the name of the mount. Depending on mount type, this
+    // may be a volume name or a host path, or even ignored.
+    string source = 2;
+    // Target path in container
+    string target = 3;
+    // Options specifies zero or more fstab style mount options.
+    repeated string options = 4;
+}
+```
+
+Shims are responsible for mounting the filesystem into the `rootfs/` directory of the bundle.
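
As a rough sketch that is not taken from this patch, the translation from these `Mount` messages to host mounts could be done with containerd's `mount` package. The package and function names below are illustrative only; the sketch assumes the v2 task API types and `mount.All`/`mount.UnmountAll`, and it covers both the mount step described above and the unmount step the `delete` path described next must perform.

```go
package shimexample

import (
	"os"
	"path/filepath"

	"github.com/containerd/containerd/api/types"
	"github.com/containerd/containerd/mount"
	"golang.org/x/sys/unix"
)

// mountRootfs applies the Mount messages from CreateTaskRequest.Rootfs under
// the bundle's rootfs/ directory and returns the target path.
func mountRootfs(bundle string, rootfs []*types.Mount) (string, error) {
	target := filepath.Join(bundle, "rootfs")
	if err := os.MkdirAll(target, 0711); err != nil {
		return "", err
	}
	var mounts []mount.Mount
	for _, m := range rootfs {
		mounts = append(mounts, mount.Mount{
			Type:    m.Type,
			Source:  m.Source,
			Options: m.Options,
		})
	}
	// mount.All performs the mounts in order onto the target directory.
	return target, mount.All(mounts, target)
}

// unmountRootfs is what the delete/cleanup path would call so that no mounts
// are leaked when the container is removed.
func unmountRootfs(bundle string) error {
	return mount.UnmountAll(filepath.Join(bundle, "rootfs"), unix.MNT_DETACH)
}
```

Using `MNT_DETACH` for the unmount is a common cleanup choice: the mount is detached lazily, so delete can proceed even if something still holds the mount point busy.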
+Shims are also responsible for unmounting of the filesystem. +During a `delete` binary call, the shim MUST ensure that filesystem is also unmounted. +Filesystems are provided by the containerd snapshotters. + +### Events + +The shim MUST publish a `runtime.TaskExitEventTopic` when the container exits. +If the shim collects Out of Memory events, it SHOULD also publish a `runtime.TaskOOMEventTopic`. + +### Other + +#### Unsupported rpcs + +If a shim does not or cannot implement an rpc call, it MUST return a `github.com/containerd/containerd/errdefs.ErrNotImplemented` error. + +#### Debugging and Shim Logs + +A fifo on unix or named pipe on Windows will be provided to the shim. +It can be located inside the `cwd` of the shim named "log". +The shims can use the existing `github.com/containerd/containerd/log` package to log debug messages. +Messages will automatically be output in the containerd's daemon logs with the correct fields and runtime set. + +#### ttrpc + +[ttrpc](https://github.com/containerd/ttrpc) is the only currently supported protocol for shims. +It works with standard protobufs and GRPC services as well as generating clients. +The only difference between grpc and ttrpc is the wire protocol. +ttrpc removes the http stack in order to save memory and binary size to keep shims small. +It is recommended to use ttrpc in your shim but grpc support is also in development. diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/reaper_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/reaper_unix.go new file mode 100644 index 000000000..2937f1a9e --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/reaper_unix.go @@ -0,0 +1,110 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package shim + +import ( + "os/exec" + "sync" + "time" + + "github.com/containerd/containerd/sys" + runc "github.com/containerd/go-runc" + "github.com/pkg/errors" +) + +// ErrNoSuchProcess is returned when the process no longer exists +var ErrNoSuchProcess = errors.New("no such process") + +const bufferSize = 32 + +// Reap should be called when the process receives an SIGCHLD. 
Reap will reap +// all exited processes and close their wait channels +func Reap() error { + now := time.Now() + exits, err := sys.Reap(false) + Default.Lock() + for c := range Default.subscribers { + for _, e := range exits { + c <- runc.Exit{ + Timestamp: now, + Pid: e.Pid, + Status: e.Status, + } + } + + } + Default.Unlock() + return err +} + +// Default is the default monitor initialized for the package +var Default = &Monitor{ + subscribers: make(map[chan runc.Exit]struct{}), +} + +// Monitor monitors the underlying system for process status changes +type Monitor struct { + sync.Mutex + + subscribers map[chan runc.Exit]struct{} +} + +// Start starts the command a registers the process with the reaper +func (m *Monitor) Start(c *exec.Cmd) (chan runc.Exit, error) { + ec := m.Subscribe() + if err := c.Start(); err != nil { + m.Unsubscribe(ec) + return nil, err + } + return ec, nil +} + +// Wait blocks until a process is signal as dead. +// User should rely on the value of the exit status to determine if the +// command was successful or not. +func (m *Monitor) Wait(c *exec.Cmd, ec chan runc.Exit) (int, error) { + for e := range ec { + if e.Pid == c.Process.Pid { + // make sure we flush all IO + c.Wait() + m.Unsubscribe(ec) + return e.Status, nil + } + } + // return no such process if the ec channel is closed and no more exit + // events will be sent + return -1, ErrNoSuchProcess +} + +// Subscribe to process exit changes +func (m *Monitor) Subscribe() chan runc.Exit { + c := make(chan runc.Exit, bufferSize) + m.Lock() + m.subscribers[c] = struct{}{} + m.Unlock() + return c +} + +// Unsubscribe to process exit changes +func (m *Monitor) Unsubscribe(c chan runc.Exit) { + m.Lock() + delete(m.subscribers, c) + close(c) + m.Unlock() +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go new file mode 100644 index 000000000..39484c191 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go @@ -0,0 +1,269 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package shim + +import ( + "context" + "flag" + "fmt" + "os" + "runtime" + "runtime/debug" + "strings" + "time" + + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + shimapi "github.com/containerd/containerd/runtime/v2/task" + "github.com/containerd/ttrpc" + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Client for a shim server +type Client struct { + service shimapi.TaskService + context context.Context + signals chan os.Signal +} + +// Init func for the creation of a shim server +type Init func(context.Context, string, events.Publisher) (Shim, error) + +// Shim server interface +type Shim interface { + shimapi.TaskService + Cleanup(ctx context.Context) (*shimapi.DeleteResponse, error) + StartShim(ctx context.Context, id, containerdBinary, containerdAddress string) (string, error) +} + +// OptsKey is the context key for the Opts value. +type OptsKey struct{} + +// Opts are context options associated with the shim invocation. +type Opts struct { + BundlePath string + Debug bool +} + +var ( + debugFlag bool + idFlag string + namespaceFlag string + socketFlag string + bundlePath string + addressFlag string + containerdBinaryFlag string + action string +) + +func parseFlags() { + flag.BoolVar(&debugFlag, "debug", false, "enable debug output in logs") + flag.StringVar(&namespaceFlag, "namespace", "", "namespace that owns the shim") + flag.StringVar(&idFlag, "id", "", "id of the task") + flag.StringVar(&socketFlag, "socket", "", "abstract socket path to serve") + flag.StringVar(&bundlePath, "bundle", "", "path to the bundle if not workdir") + + flag.StringVar(&addressFlag, "address", "", "grpc address back to main containerd") + flag.StringVar(&containerdBinaryFlag, "publish-binary", "containerd", "path to publish binary (used for publishing events)") + + flag.Parse() + action = flag.Arg(0) +} + +func setRuntime() { + debug.SetGCPercent(40) + go func() { + for range time.Tick(30 * time.Second) { + debug.FreeOSMemory() + } + }() + if os.Getenv("GOMAXPROCS") == "" { + // If GOMAXPROCS hasn't been set, we default to a value of 2 to reduce + // the number of Go stacks present in the shim. 
+ runtime.GOMAXPROCS(2) + } +} + +func setLogger(ctx context.Context, id string) error { + logrus.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: log.RFC3339NanoFixed, + FullTimestamp: true, + }) + if debugFlag { + logrus.SetLevel(logrus.DebugLevel) + } + f, err := openLog(ctx, id) + if err != nil { + return err + } + logrus.SetOutput(f) + return nil +} + +// Run initializes and runs a shim server +func Run(id string, initFunc Init) { + if err := run(id, initFunc); err != nil { + fmt.Fprintf(os.Stderr, "%s: %s\n", id, err) + os.Exit(1) + } +} + +func run(id string, initFunc Init) error { + parseFlags() + setRuntime() + + signals, err := setupSignals() + if err != nil { + return err + } + if err := subreaper(); err != nil { + return err + } + publisher := &remoteEventsPublisher{ + address: addressFlag, + containerdBinaryPath: containerdBinaryFlag, + } + if namespaceFlag == "" { + return fmt.Errorf("shim namespace cannot be empty") + } + ctx := namespaces.WithNamespace(context.Background(), namespaceFlag) + ctx = context.WithValue(ctx, OptsKey{}, Opts{BundlePath: bundlePath, Debug: debugFlag}) + ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", id)) + + service, err := initFunc(ctx, idFlag, publisher) + if err != nil { + return err + } + switch action { + case "delete": + logger := logrus.WithFields(logrus.Fields{ + "pid": os.Getpid(), + "namespace": namespaceFlag, + }) + go handleSignals(logger, signals) + response, err := service.Cleanup(ctx) + if err != nil { + return err + } + data, err := proto.Marshal(response) + if err != nil { + return err + } + if _, err := os.Stdout.Write(data); err != nil { + return err + } + return nil + case "start": + address, err := service.StartShim(ctx, idFlag, containerdBinaryFlag, addressFlag) + if err != nil { + return err + } + if _, err := os.Stdout.WriteString(address); err != nil { + return err + } + return nil + default: + if err := setLogger(ctx, idFlag); err != nil { + return err + } + client := NewShimClient(ctx, service, signals) + return client.Serve() + } +} + +// NewShimClient creates a new shim server client +func NewShimClient(ctx context.Context, svc shimapi.TaskService, signals chan os.Signal) *Client { + s := &Client{ + service: svc, + context: ctx, + signals: signals, + } + return s +} + +// Serve the shim server +func (s *Client) Serve() error { + dump := make(chan os.Signal, 32) + setupDumpStacks(dump) + + path, err := os.Getwd() + if err != nil { + return err + } + server, err := newServer() + if err != nil { + return errors.Wrap(err, "failed creating server") + } + + logrus.Debug("registering ttrpc server") + shimapi.RegisterTaskService(server, s.service) + + if err := serve(s.context, server, socketFlag); err != nil { + return err + } + logger := logrus.WithFields(logrus.Fields{ + "pid": os.Getpid(), + "path": path, + "namespace": namespaceFlag, + }) + go func() { + for range dump { + dumpStacks(logger) + } + }() + return handleSignals(logger, s.signals) +} + +// serve serves the ttrpc API over a unix socket at the provided path +// this function does not block +func serve(ctx context.Context, server *ttrpc.Server, path string) error { + l, err := serveListener(path) + if err != nil { + return err + } + go func() { + defer l.Close() + if err := server.Serve(ctx, l); err != nil && + !strings.Contains(err.Error(), "use of closed network connection") { + logrus.WithError(err).Fatal("containerd-shim: ttrpc server failure") + } + }() + return nil +} + +func dumpStacks(logger *logrus.Entry) { + var ( + buf []byte + 
stackSize int + ) + bufferLen := 16384 + for stackSize == len(buf) { + buf = make([]byte, bufferLen) + stackSize = runtime.Stack(buf, true) + bufferLen *= 2 + } + buf = buf[:stackSize] + logger.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) +} + +type remoteEventsPublisher struct { + address string + containerdBinaryPath string +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go new file mode 100644 index 000000000..314b45cb5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go @@ -0,0 +1,29 @@ +// +build darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package shim + +import "github.com/containerd/ttrpc" + +func newServer() (*ttrpc.Server, error) { + return ttrpc.NewServer() +} + +func subreaper() error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_linux.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_linux.go new file mode 100644 index 000000000..7ad2a7262 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_linux.go @@ -0,0 +1,30 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package shim + +import ( + "github.com/containerd/containerd/sys" + "github.com/containerd/ttrpc" +) + +func newServer() (*ttrpc.Server, error) { + return ttrpc.NewServer(ttrpc.WithServerHandshaker(ttrpc.UnixSocketRequireSameUser())) +} + +func subreaper() error { + return sys.SetSubreaper(1) +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go new file mode 100644 index 000000000..6f1ef6e28 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go @@ -0,0 +1,117 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package shim + +import ( + "bytes" + "context" + "io" + "net" + "os" + "os/exec" + "os/signal" + "syscall" + + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/fifo" + "github.com/containerd/typeurl" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// setupSignals creates a new signal handler for all signals and sets the shim as a +// sub-reaper so that the container processes are reparented +func setupSignals() (chan os.Signal, error) { + signals := make(chan os.Signal, 32) + signal.Notify(signals, unix.SIGTERM, unix.SIGINT, unix.SIGCHLD, unix.SIGPIPE) + return signals, nil +} + +func setupDumpStacks(dump chan<- os.Signal) { + signal.Notify(dump, syscall.SIGUSR1) +} + +func serveListener(path string) (net.Listener, error) { + var ( + l net.Listener + err error + ) + if path == "" { + l, err = net.FileListener(os.NewFile(3, "socket")) + path = "[inherited from parent]" + } else { + if len(path) > 106 { + return nil, errors.Errorf("%q: unix socket path too long (> 106)", path) + } + l, err = net.Listen("unix", "\x00"+path) + } + if err != nil { + return nil, err + } + logrus.WithField("socket", path).Debug("serving api on abstract socket") + return l, nil +} + +func handleSignals(logger *logrus.Entry, signals chan os.Signal) error { + logger.Info("starting signal loop") + for { + select { + case s := <-signals: + switch s { + case unix.SIGCHLD: + if err := Reap(); err != nil { + logger.WithError(err).Error("reap exit status") + } + case unix.SIGPIPE: + } + } + } +} + +func openLog(ctx context.Context, _ string) (io.Writer, error) { + return fifo.OpenFifo(context.Background(), "log", unix.O_WRONLY, 0700) +} + +func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error { + ns, _ := namespaces.Namespace(ctx) + encoded, err := typeurl.MarshalAny(event) + if err != nil { + return err + } + data, err := encoded.Marshal() + if err != nil { + return err + } + cmd := exec.CommandContext(ctx, l.containerdBinaryPath, "--address", l.address, "publish", "--topic", topic, "--namespace", ns) + cmd.Stdin = bytes.NewReader(data) + c, err := Default.Start(cmd) + if err != nil { + return err + } + status, err := Default.Wait(cmd, c) + if err != nil { + return err + } + if status != 0 { + return errors.New("failed to publish event") + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go new file mode 100644 index 000000000..6e5642620 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go @@ -0,0 +1,307 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package shim + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "os" + "os/exec" + "sync" + "syscall" + "unsafe" + + winio "github.com/Microsoft/go-winio" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/ttrpc" + "github.com/containerd/typeurl" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +const ( + errorConnectionAborted syscall.Errno = 1236 +) + +// setupSignals creates a new signal handler for all signals +func setupSignals() (chan os.Signal, error) { + signals := make(chan os.Signal, 32) + return signals, nil +} + +func newServer() (*ttrpc.Server, error) { + return ttrpc.NewServer() +} + +func subreaper() error { + return nil +} + +type fakeSignal struct { +} + +func (fs *fakeSignal) String() string { + return "" +} + +func (fs *fakeSignal) Signal() { +} + +func setupDumpStacks(dump chan<- os.Signal) { + // Windows does not support signals like *nix systems. So instead of + // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be + // signaled. ACL'd to builtin administrators and local system + event := "Global\\containerd-shim-runhcs-v1-" + fmt.Sprint(os.Getpid()) + ev, _ := windows.UTF16PtrFromString(event) + sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;BA)(A;;GA;;;SY)") + if err != nil { + logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", event, err.Error()) + return + } + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + h, err := windows.CreateEvent(&sa, 0, 0, ev) + if h == 0 || err != nil { + logrus.Errorf("failed to create debug stackdump event %s: %s", event, err.Error()) + return + } + go func() { + logrus.Debugf("Stackdump - waiting signal at %s", event) + for { + windows.WaitForSingleObject(h, windows.INFINITE) + dump <- new(fakeSignal) + } + }() +} + +// serve serves the ttrpc API over a unix socket at the provided path +// this function does not block +func serveListener(path string) (net.Listener, error) { + if path == "" { + return nil, errors.New("'socket' must be npipe path") + } + l, err := winio.ListenPipe(path, nil) + if err != nil { + return nil, err + } + logrus.WithField("socket", path).Debug("serving api on npipe socket") + return l, nil +} + +func handleSignals(logger *logrus.Entry, signals chan os.Signal) error { + logger.Info("starting signal loop") + for { + select { + case s := <-signals: + switch s { + case os.Interrupt: + break + } + } + } +} + +var _ = (io.WriterTo)(&blockingBuffer{}) +var _ = (io.Writer)(&blockingBuffer{}) + +// blockingBuffer implements the `io.Writer` and `io.WriterTo` interfaces. Once +// `capacity` is reached the calls to `Write` will block until a successful call +// to `WriterTo` frees up the buffer space. +// +// Note: This has the same threadding semantics as bytes.Buffer with no +// additional locking so multithreading is not supported. 
+type blockingBuffer struct { + c *sync.Cond + + capacity int + + buffer bytes.Buffer +} + +func newBlockingBuffer(capacity int) *blockingBuffer { + return &blockingBuffer{ + c: sync.NewCond(&sync.Mutex{}), + capacity: capacity, + } +} + +func (bb *blockingBuffer) Len() int { + bb.c.L.Lock() + defer bb.c.L.Unlock() + return bb.buffer.Len() +} + +func (bb *blockingBuffer) Write(p []byte) (int, error) { + if len(p) > bb.capacity { + return 0, errors.Errorf("len(p) (%d) too large for capacity (%d)", len(p), bb.capacity) + } + + bb.c.L.Lock() + for bb.buffer.Len()+len(p) > bb.capacity { + bb.c.Wait() + } + defer bb.c.L.Unlock() + return bb.buffer.Write(p) +} + +func (bb *blockingBuffer) WriteTo(w io.Writer) (int64, error) { + bb.c.L.Lock() + defer bb.c.L.Unlock() + defer bb.c.Signal() + return bb.buffer.WriteTo(w) +} + +// deferredShimWriteLogger exists to solve the upstream loggin issue presented +// by using Windows Named Pipes for logging. When containerd restarts it tries +// to reconnect to any shims. This means that the connection to the logger will +// be severed but when containerd starts up it should reconnect and start +// logging again. We abstract all of this logic behind what looks like a simple +// `io.Writer` that can reconnect in the lifetime and buffers logs while +// disconnected. +type deferredShimWriteLogger struct { + mu sync.Mutex + + ctx context.Context + + connected bool + aborted bool + + buffer *blockingBuffer + + l net.Listener + c net.Conn + conerr error +} + +// beginAccept issues an accept to wait for a connection. Once a connection +// occurs drains any outstanding buffer. While draining the buffer any writes +// are blocked. If the buffer fails to fully drain due to a connection drop a +// call to `beginAccept` is re-issued waiting for another connection from +// containerd. +func (dswl *deferredShimWriteLogger) beginAccept() { + dswl.mu.Lock() + if dswl.connected { + return + } + dswl.mu.Unlock() + + c, err := dswl.l.Accept() + if err == errorConnectionAborted { + dswl.mu.Lock() + dswl.aborted = true + dswl.l.Close() + dswl.conerr = errors.New("connection closed") + dswl.mu.Unlock() + return + } + dswl.mu.Lock() + dswl.connected = true + dswl.c = c + + // Drain the buffer + if dswl.buffer.Len() > 0 { + _, err := dswl.buffer.WriteTo(dswl.c) + if err != nil { + // We lost our connection draining the buffer. + dswl.connected = false + dswl.c.Close() + go dswl.beginAccept() + } + } + dswl.mu.Unlock() +} + +func (dswl *deferredShimWriteLogger) Write(p []byte) (int, error) { + dswl.mu.Lock() + defer dswl.mu.Unlock() + + if dswl.aborted { + return 0, dswl.conerr + } + + if dswl.connected { + // We have a connection. beginAccept would have drained the buffer so we just write our data to + // the connection directly. + written, err := dswl.c.Write(p) + if err != nil { + // We lost the connection. + dswl.connected = false + dswl.c.Close() + go dswl.beginAccept() + + // We weren't able to write the full `p` bytes. Buffer the rest + if written != len(p) { + w, err := dswl.buffer.Write(p[written:]) + if err != nil { + // We failed to buffer. Return this error + return written + w, err + } + written += w + } + } + + return written, nil + } + + // We are disconnected. Buffer the contents. + return dswl.buffer.Write(p) +} + +// openLog on Windows acts as the server of the log pipe. This allows the +// containerd daemon to independently restart and reconnect to the logs. 
+func openLog(ctx context.Context, id string) (io.Writer, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + dswl := &deferredShimWriteLogger{ + ctx: ctx, + buffer: newBlockingBuffer(64 * 1024), // 64KB, + } + l, err := winio.ListenPipe(fmt.Sprintf("\\\\.\\pipe\\containerd-shim-%s-%s-log", ns, id), nil) + if err != nil { + return nil, err + } + dswl.l = l + go dswl.beginAccept() + return dswl, nil +} + +func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error { + ns, _ := namespaces.Namespace(ctx) + encoded, err := typeurl.MarshalAny(event) + if err != nil { + return err + } + data, err := encoded.Marshal() + if err != nil { + return err + } + cmd := exec.CommandContext(ctx, l.containerdBinaryPath, "--address", l.address, "publish", "--topic", topic, "--namespace", ns) + cmd.Stdin = bytes.NewReader(data) + return cmd.Run() +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go new file mode 100644 index 000000000..b7034ce50 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go @@ -0,0 +1,128 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package shim + +import ( + "context" + "fmt" + "net" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/containerd/containerd/namespaces" + "github.com/pkg/errors" +) + +const shimBinaryFormat = "containerd-shim-%s-%s" + +// Command returns the shim command with the provided args and configuration +func Command(ctx context.Context, runtime, containerdAddress, path string, cmdArgs ...string) (*exec.Cmd, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + self, err := os.Executable() + if err != nil { + return nil, err + } + args := []string{ + "-namespace", ns, + "-address", containerdAddress, + "-publish-binary", self, + } + args = append(args, cmdArgs...) + name := BinaryName(runtime) + if name == "" { + return nil, fmt.Errorf("invalid runtime name %s, correct runtime name should format like io.containerd.runc.v1", runtime) + } + var cmdPath string + var lerr error + if cmdPath, lerr = exec.LookPath(name); lerr != nil { + if eerr, ok := lerr.(*exec.Error); ok { + if eerr.Err == exec.ErrNotFound { + return nil, errors.Wrapf(os.ErrNotExist, "runtime %q binary not installed %q", runtime, name) + } + } + } + cmdPath, err = filepath.Abs(cmdPath) + if err != nil { + return nil, err + } + cmd := exec.Command(cmdPath, args...) 
+ cmd.Dir = path + cmd.Env = append(os.Environ(), "GOMAXPROCS=2") + cmd.SysProcAttr = getSysProcAttr() + return cmd, nil +} + +// BinaryName returns the shim binary name from the runtime name, +// empty string returns means runtime name is invalid +func BinaryName(runtime string) string { + // runtime name should format like $prefix.name.version + parts := strings.Split(runtime, ".") + if len(parts) < 2 { + return "" + } + + return fmt.Sprintf(shimBinaryFormat, parts[len(parts)-2], parts[len(parts)-1]) +} + +// Connect to the provided address +func Connect(address string, d func(string, time.Duration) (net.Conn, error)) (net.Conn, error) { + return d(address, 100*time.Second) +} + +// WritePidFile writes a pid file atomically +func WritePidFile(path string, pid int) error { + path, err := filepath.Abs(path) + if err != nil { + return err + } + tempPath := filepath.Join(filepath.Dir(path), fmt.Sprintf(".%s", filepath.Base(path))) + f, err := os.OpenFile(tempPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666) + if err != nil { + return err + } + _, err = fmt.Fprintf(f, "%d", pid) + f.Close() + if err != nil { + return err + } + return os.Rename(tempPath, path) +} + +// WriteAddress writes a address file atomically +func WriteAddress(path, address string) error { + path, err := filepath.Abs(path) + if err != nil { + return err + } + tempPath := filepath.Join(filepath.Dir(path), fmt.Sprintf(".%s", filepath.Base(path))) + f, err := os.OpenFile(tempPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666) + if err != nil { + return err + } + _, err = f.WriteString(address) + f.Close() + if err != nil { + return err + } + return os.Rename(tempPath, path) +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go new file mode 100644 index 000000000..262fe2b36 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go @@ -0,0 +1,70 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package shim + +import ( + "context" + "net" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/sys" + "github.com/pkg/errors" +) + +func getSysProcAttr() *syscall.SysProcAttr { + return &syscall.SysProcAttr{ + Setpgid: true, + } +} + +// SetScore sets the oom score for a process +func SetScore(pid int) error { + return sys.SetOOMScore(pid, sys.OOMScoreMaxKillable) +} + +// SocketAddress returns an abstract socket address +func SocketAddress(ctx context.Context, id string) (string, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return "", err + } + return filepath.Join(string(filepath.Separator), "containerd-shim", ns, id, "shim.sock"), nil +} + +// AnonDialer returns a dialer for an abstract socket +func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { + address = strings.TrimPrefix(address, "unix://") + return net.DialTimeout("unix", "\x00"+address, timeout) +} + +// NewSocket returns a new socket +func NewSocket(address string) (*net.UnixListener, error) { + if len(address) > 106 { + return nil, errors.Errorf("%q: unix socket path too long (> 106)", address) + } + l, err := net.Listen("unix", "\x00"+address) + if err != nil { + return nil, errors.Wrapf(err, "failed to listen to abstract unix socket %q", address) + } + return l.(*net.UnixListener), nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go new file mode 100644 index 000000000..986fc754b --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go @@ -0,0 +1,90 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package shim + +import ( + "context" + "fmt" + "net" + "os" + "syscall" + "time" + + winio "github.com/Microsoft/go-winio" + "github.com/containerd/containerd/namespaces" + "github.com/pkg/errors" +) + +func getSysProcAttr() *syscall.SysProcAttr { + return nil +} + +// SetScore sets the oom score for a process +func SetScore(pid int) error { + return nil +} + +// SocketAddress returns a npipe address +func SocketAddress(ctx context.Context, id string) (string, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return "", err + } + return fmt.Sprintf("\\\\.\\pipe\\containerd-shim-%s-%s-pipe", ns, id), nil +} + +// AnonDialer returns a dialer for a npipe +func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { + var c net.Conn + var lastError error + timedOutError := errors.Errorf("timed out waiting for npipe %s", address) + start := time.Now() + for { + remaining := timeout - time.Now().Sub(start) + if remaining <= 0 { + lastError = timedOutError + break + } + c, lastError = winio.DialPipe(address, &remaining) + if lastError == nil { + break + } + if !os.IsNotExist(lastError) { + break + } + // There is nobody serving the pipe. 
We limit the timeout for this case + // to 5 seconds because any shim that would serve this endpoint should + // serve it within 5 seconds. We use the passed in timeout for the + // `DialPipe` timeout if the pipe exists however to give the pipe time + // to `Accept` the connection. + if time.Now().Sub(start) >= 5*time.Second { + lastError = timedOutError + break + } + time.Sleep(10 * time.Millisecond) + } + return c, lastError +} + +// NewSocket returns a new npipe listener +func NewSocket(address string) (net.Listener, error) { + l, err := winio.ListenPipe(address, nil) + if err != nil { + return nil, errors.Wrapf(err, "failed to listen to npipe %s", address) + } + return l, nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/task/doc.go b/vendor/github.com/containerd/containerd/runtime/v2/task/doc.go new file mode 100644 index 000000000..f933dd8d4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/task/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package task diff --git a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go b/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go new file mode 100644 index 000000000..9caefd758 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go @@ -0,0 +1,5693 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/runtime/v2/task/shim.proto + +/* + Package task is a generated protocol buffer package. + + It is generated from these files: + github.com/containerd/containerd/runtime/v2/task/shim.proto + + It has these top-level messages: + CreateTaskRequest + CreateTaskResponse + DeleteRequest + DeleteResponse + ExecProcessRequest + ExecProcessResponse + ResizePtyRequest + StateRequest + StateResponse + KillRequest + CloseIORequest + PidsRequest + PidsResponse + CheckpointTaskRequest + UpdateTaskRequest + StartRequest + StartResponse + WaitRequest + WaitResponse + StatsRequest + StatsResponse + ConnectRequest + ConnectResponse + ShutdownRequest + PauseRequest + ResumeRequest +*/ +package task + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/types" +import google_protobuf1 "github.com/gogo/protobuf/types" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +import _ "github.com/gogo/protobuf/types" +import containerd_types "github.com/containerd/containerd/api/types" +import containerd_v1_types "github.com/containerd/containerd/api/types/task" + +import time "time" + +import types "github.com/gogo/protobuf/types" + +import strings "strings" +import reflect "reflect" + +import context "context" +import ttrpc "github.com/containerd/ttrpc" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type CreateTaskRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` + Rootfs []*containerd_types.Mount `protobuf:"bytes,3,rep,name=rootfs" json:"rootfs,omitempty"` + Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` + Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` + Checkpoint string `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` + ParentCheckpoint string `protobuf:"bytes,9,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"` + Options *google_protobuf.Any `protobuf:"bytes,10,opt,name=options" json:"options,omitempty"` +} + +func (m *CreateTaskRequest) Reset() { *m = CreateTaskRequest{} } +func (*CreateTaskRequest) ProtoMessage() {} +func (*CreateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{0} } + +type CreateTaskResponse struct { + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (m *CreateTaskResponse) Reset() { *m = CreateTaskResponse{} } +func (*CreateTaskResponse) ProtoMessage() {} +func (*CreateTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{1} } + +type DeleteRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{2} } + +type DeleteResponse struct { + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,2,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,3,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (*DeleteResponse) ProtoMessage() {} +func (*DeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{3} } + +type ExecProcessRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Terminal bool `protobuf:"varint,3,opt,name=terminal,proto3" json:"terminal,omitempty"` + Stdin string `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"` + Spec *google_protobuf.Any `protobuf:"bytes,7,opt,name=spec" json:"spec,omitempty"` +} + +func (m *ExecProcessRequest) Reset() { *m = ExecProcessRequest{} } +func 
(*ExecProcessRequest) ProtoMessage() {} +func (*ExecProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{4} } + +type ExecProcessResponse struct { +} + +func (m *ExecProcessResponse) Reset() { *m = ExecProcessResponse{} } +func (*ExecProcessResponse) ProtoMessage() {} +func (*ExecProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{5} } + +type ResizePtyRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Width uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"` + Height uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *ResizePtyRequest) Reset() { *m = ResizePtyRequest{} } +func (*ResizePtyRequest) ProtoMessage() {} +func (*ResizePtyRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{6} } + +type StateRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (m *StateRequest) Reset() { *m = StateRequest{} } +func (*StateRequest) ProtoMessage() {} +func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{7} } + +type StateResponse struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Status containerd_v1_types.Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` + Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` + ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` +} + +func (m *StateResponse) Reset() { *m = StateResponse{} } +func (*StateResponse) ProtoMessage() {} +func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{8} } + +type KillRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Signal uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` + All bool `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"` +} + +func (m *KillRequest) Reset() { *m = KillRequest{} } +func (*KillRequest) ProtoMessage() {} +func (*KillRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{9} } + +type CloseIORequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Stdin bool `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"` +} + +func (m *CloseIORequest) Reset() { *m = CloseIORequest{} } +func (*CloseIORequest) ProtoMessage() {} +func (*CloseIORequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{10} } + +type 
PidsRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *PidsRequest) Reset() { *m = PidsRequest{} } +func (*PidsRequest) ProtoMessage() {} +func (*PidsRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{11} } + +type PidsResponse struct { + Processes []*containerd_v1_types.ProcessInfo `protobuf:"bytes,1,rep,name=processes" json:"processes,omitempty"` +} + +func (m *PidsResponse) Reset() { *m = PidsResponse{} } +func (*PidsResponse) ProtoMessage() {} +func (*PidsResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{12} } + +type CheckpointTaskRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Options *google_protobuf.Any `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (m *CheckpointTaskRequest) Reset() { *m = CheckpointTaskRequest{} } +func (*CheckpointTaskRequest) ProtoMessage() {} +func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{13} } + +type UpdateTaskRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Resources *google_protobuf.Any `protobuf:"bytes,2,opt,name=resources" json:"resources,omitempty"` +} + +func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{} } +func (*UpdateTaskRequest) ProtoMessage() {} +func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{14} } + +type StartRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (m *StartRequest) Reset() { *m = StartRequest{} } +func (*StartRequest) ProtoMessage() {} +func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{15} } + +type StartResponse struct { + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (m *StartResponse) Reset() { *m = StartResponse{} } +func (*StartResponse) ProtoMessage() {} +func (*StartResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{16} } + +type WaitRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (m *WaitRequest) Reset() { *m = WaitRequest{} } +func (*WaitRequest) ProtoMessage() {} +func (*WaitRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{17} } + +type WaitResponse struct { + ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` +} + +func (m *WaitResponse) Reset() { *m = WaitResponse{} } +func (*WaitResponse) ProtoMessage() {} +func (*WaitResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{18} } + +type StatsRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *StatsRequest) Reset() { *m = StatsRequest{} } +func (*StatsRequest) ProtoMessage() {} +func (*StatsRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{19} } + +type StatsResponse struct { + Stats *google_protobuf.Any `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` +} + +func (m *StatsResponse) Reset() { *m = StatsResponse{} } 
+func (*StatsResponse) ProtoMessage() {} +func (*StatsResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{20} } + +type ConnectRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } +func (*ConnectRequest) ProtoMessage() {} +func (*ConnectRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{21} } + +type ConnectResponse struct { + ShimPid uint32 `protobuf:"varint,1,opt,name=shim_pid,json=shimPid,proto3" json:"shim_pid,omitempty"` + TaskPid uint32 `protobuf:"varint,2,opt,name=task_pid,json=taskPid,proto3" json:"task_pid,omitempty"` + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (m *ConnectResponse) Reset() { *m = ConnectResponse{} } +func (*ConnectResponse) ProtoMessage() {} +func (*ConnectResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{22} } + +type ShutdownRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Now bool `protobuf:"varint,2,opt,name=now,proto3" json:"now,omitempty"` +} + +func (m *ShutdownRequest) Reset() { *m = ShutdownRequest{} } +func (*ShutdownRequest) ProtoMessage() {} +func (*ShutdownRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{23} } + +type PauseRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *PauseRequest) Reset() { *m = PauseRequest{} } +func (*PauseRequest) ProtoMessage() {} +func (*PauseRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{24} } + +type ResumeRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *ResumeRequest) Reset() { *m = ResumeRequest{} } +func (*ResumeRequest) ProtoMessage() {} +func (*ResumeRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{25} } + +func init() { + proto.RegisterType((*CreateTaskRequest)(nil), "containerd.task.v2.CreateTaskRequest") + proto.RegisterType((*CreateTaskResponse)(nil), "containerd.task.v2.CreateTaskResponse") + proto.RegisterType((*DeleteRequest)(nil), "containerd.task.v2.DeleteRequest") + proto.RegisterType((*DeleteResponse)(nil), "containerd.task.v2.DeleteResponse") + proto.RegisterType((*ExecProcessRequest)(nil), "containerd.task.v2.ExecProcessRequest") + proto.RegisterType((*ExecProcessResponse)(nil), "containerd.task.v2.ExecProcessResponse") + proto.RegisterType((*ResizePtyRequest)(nil), "containerd.task.v2.ResizePtyRequest") + proto.RegisterType((*StateRequest)(nil), "containerd.task.v2.StateRequest") + proto.RegisterType((*StateResponse)(nil), "containerd.task.v2.StateResponse") + proto.RegisterType((*KillRequest)(nil), "containerd.task.v2.KillRequest") + proto.RegisterType((*CloseIORequest)(nil), "containerd.task.v2.CloseIORequest") + proto.RegisterType((*PidsRequest)(nil), "containerd.task.v2.PidsRequest") + proto.RegisterType((*PidsResponse)(nil), "containerd.task.v2.PidsResponse") + proto.RegisterType((*CheckpointTaskRequest)(nil), "containerd.task.v2.CheckpointTaskRequest") + proto.RegisterType((*UpdateTaskRequest)(nil), "containerd.task.v2.UpdateTaskRequest") + proto.RegisterType((*StartRequest)(nil), "containerd.task.v2.StartRequest") + proto.RegisterType((*StartResponse)(nil), "containerd.task.v2.StartResponse") + proto.RegisterType((*WaitRequest)(nil), "containerd.task.v2.WaitRequest") + proto.RegisterType((*WaitResponse)(nil), "containerd.task.v2.WaitResponse") + 
proto.RegisterType((*StatsRequest)(nil), "containerd.task.v2.StatsRequest") + proto.RegisterType((*StatsResponse)(nil), "containerd.task.v2.StatsResponse") + proto.RegisterType((*ConnectRequest)(nil), "containerd.task.v2.ConnectRequest") + proto.RegisterType((*ConnectResponse)(nil), "containerd.task.v2.ConnectResponse") + proto.RegisterType((*ShutdownRequest)(nil), "containerd.task.v2.ShutdownRequest") + proto.RegisterType((*PauseRequest)(nil), "containerd.task.v2.PauseRequest") + proto.RegisterType((*ResumeRequest)(nil), "containerd.task.v2.ResumeRequest") +} +func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.Bundle) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle))) + i += copy(dAtA[i:], m.Bundle) + } + if len(m.Rootfs) > 0 { + for _, msg := range m.Rootfs { + dAtA[i] = 0x1a + i++ + i = encodeVarintShim(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Terminal { + dAtA[i] = 0x20 + i++ + if m.Terminal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Stdin) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin))) + i += copy(dAtA[i:], m.Stdin) + } + if len(m.Stdout) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout))) + i += copy(dAtA[i:], m.Stdout) + } + if len(m.Stderr) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr))) + i += copy(dAtA[i:], m.Stderr) + } + if len(m.Checkpoint) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Checkpoint))) + i += copy(dAtA[i:], m.Checkpoint) + } + if len(m.ParentCheckpoint) > 0 { + dAtA[i] = 0x4a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ParentCheckpoint))) + i += copy(dAtA[i:], m.ParentCheckpoint) + } + if m.Options != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Options.Size())) + n1, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Pid != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Pid)) + } + return i, nil +} + +func (m *DeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + return i, nil +} + +func (m *DeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Pid != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Pid)) + } + if m.ExitStatus != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus)) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintShim(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt))) + n2, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + if m.Terminal { + dAtA[i] = 0x18 + i++ + if m.Terminal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Stdin) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin))) + i += copy(dAtA[i:], m.Stdin) + } + if len(m.Stdout) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout))) + i += copy(dAtA[i:], m.Stdout) + } + if len(m.Stderr) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr))) + i += copy(dAtA[i:], m.Stderr) + } + if m.Spec != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Spec.Size())) + n3, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResizePtyRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + if m.Width != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Width)) + } + if m.Height != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Height)) + } + return i, nil +} + +func (m *StateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if 
len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + return i, nil +} + +func (m *StateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.Bundle) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle))) + i += copy(dAtA[i:], m.Bundle) + } + if m.Pid != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Pid)) + } + if m.Status != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Status)) + } + if len(m.Stdin) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin))) + i += copy(dAtA[i:], m.Stdin) + } + if len(m.Stdout) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout))) + i += copy(dAtA[i:], m.Stdout) + } + if len(m.Stderr) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr))) + i += copy(dAtA[i:], m.Stderr) + } + if m.Terminal { + dAtA[i] = 0x40 + i++ + if m.Terminal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ExitStatus != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus)) + } + dAtA[i] = 0x52 + i++ + i = encodeVarintShim(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt))) + n4, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *KillRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KillRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + if m.Signal != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Signal)) + } + if m.All { + dAtA[i] = 0x20 + i++ + if m.All { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *CloseIORequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CloseIORequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + if m.Stdin { + dAtA[i] = 0x18 + i++ + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PidsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PidsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = 
i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func (m *PidsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PidsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Processes) > 0 { + for _, msg := range m.Processes { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckpointTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.Path) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + } + if m.Options != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Options.Size())) + n5, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Resources != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Resources.Size())) + n6, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *StartRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + return i, nil +} + +func (m *StartResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Pid != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Pid)) + } + return i, nil +} + +func (m *WaitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WaitRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + 
dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + return i, nil +} + +func (m *WaitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ExitStatus != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus)) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintShim(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt))) + n7, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *StatsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func (m *StatsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Stats != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(m.Stats.Size())) + n8, err := m.Stats.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *ConnectRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func (m *ConnectResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ShimPid != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.ShimPid)) + } + if m.TaskPid != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintShim(dAtA, i, uint64(m.TaskPid)) + } + if len(m.Version) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + } + return i, nil +} + +func (m *ShutdownRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShutdownRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Now { + dAtA[i] = 0x10 + i++ + if m.Now { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } 
+ return i, nil +} + +func (m *PauseRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PauseRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func (m *ResumeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResumeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func encodeVarintShim(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *CreateTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Bundle) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if len(m.Rootfs) > 0 { + for _, e := range m.Rootfs { + l = e.Size() + n += 1 + l + sovShim(uint64(l)) + } + } + if m.Terminal { + n += 2 + } + l = len(m.Stdin) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Checkpoint) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.ParentCheckpoint) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *CreateTaskResponse) Size() (n int) { + var l int + _ = l + if m.Pid != 0 { + n += 1 + sovShim(uint64(m.Pid)) + } + return n +} + +func (m *DeleteRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *DeleteResponse) Size() (n int) { + var l int + _ = l + if m.Pid != 0 { + n += 1 + sovShim(uint64(m.Pid)) + } + if m.ExitStatus != 0 { + n += 1 + sovShim(uint64(m.ExitStatus)) + } + l = types.SizeOfStdTime(m.ExitedAt) + n += 1 + l + sovShim(uint64(l)) + return n +} + +func (m *ExecProcessRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Terminal { + n += 2 + } + l = len(m.Stdin) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *ExecProcessResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ResizePtyRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Width != 0 { + n += 1 + sovShim(uint64(m.Width)) + } + if m.Height != 0 { + n += 1 + sovShim(uint64(m.Height)) + } + return n +} + +func (m 
*StateRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *StateResponse) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Bundle) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Pid != 0 { + n += 1 + sovShim(uint64(m.Pid)) + } + if m.Status != 0 { + n += 1 + sovShim(uint64(m.Status)) + } + l = len(m.Stdin) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Terminal { + n += 2 + } + if m.ExitStatus != 0 { + n += 1 + sovShim(uint64(m.ExitStatus)) + } + l = types.SizeOfStdTime(m.ExitedAt) + n += 1 + l + sovShim(uint64(l)) + return n +} + +func (m *KillRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Signal != 0 { + n += 1 + sovShim(uint64(m.Signal)) + } + if m.All { + n += 2 + } + return n +} + +func (m *CloseIORequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Stdin { + n += 2 + } + return n +} + +func (m *PidsRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *PidsResponse) Size() (n int) { + var l int + _ = l + if len(m.Processes) > 0 { + for _, e := range m.Processes { + l = e.Size() + n += 1 + l + sovShim(uint64(l)) + } + } + return n +} + +func (m *CheckpointTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *UpdateTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *StartRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *StartResponse) Size() (n int) { + var l int + _ = l + if m.Pid != 0 { + n += 1 + sovShim(uint64(m.Pid)) + } + return n +} + +func (m *WaitRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *WaitResponse) Size() (n int) { + var l int + _ = l + if m.ExitStatus != 0 { + n += 1 + sovShim(uint64(m.ExitStatus)) + } + l = types.SizeOfStdTime(m.ExitedAt) + n += 1 + l + sovShim(uint64(l)) + return n +} + +func (m *StatsRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *StatsResponse) Size() (n int) { + var l int + _ = l + if m.Stats != nil { + l = m.Stats.Size() + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *ConnectRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + 
sovShim(uint64(l)) + } + return n +} + +func (m *ConnectResponse) Size() (n int) { + var l int + _ = l + if m.ShimPid != 0 { + n += 1 + sovShim(uint64(m.ShimPid)) + } + if m.TaskPid != 0 { + n += 1 + sovShim(uint64(m.TaskPid)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *ShutdownRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + if m.Now { + n += 2 + } + return n +} + +func (m *PauseRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func (m *ResumeRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovShim(uint64(l)) + } + return n +} + +func sovShim(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozShim(x uint64) (n int) { + return sovShim(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CreateTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateTaskRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`, + `Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "containerd_types.Mount", 1) + `,`, + `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`, + `ParentCheckpoint:` + fmt.Sprintf("%v", this.ParentCheckpoint) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateTaskResponse{`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteResponse{`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExecProcessRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecProcessRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExecProcessResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecProcessResponse{`, + `}`, + }, "") + return s +} +func (this *ResizePtyRequest) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ResizePtyRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `Width:` + fmt.Sprintf("%v", this.Width) + `,`, + `Height:` + fmt.Sprintf("%v", this.Height) + `,`, + `}`, + }, "") + return s +} +func (this *StateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StateRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `}`, + }, "") + return s +} +func (this *StateResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StateResponse{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, + `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *KillRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KillRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, + `All:` + fmt.Sprintf("%v", this.All) + `,`, + `}`, + }, "") + return s +} +func (this *CloseIORequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CloseIORequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `}`, + }, "") + return s +} +func (this *PidsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PidsRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *PidsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PidsResponse{`, + `Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "containerd_v1_types.ProcessInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CheckpointTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckpointTaskRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "google_protobuf.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StartRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `}`, + }, "") + return s +} +func (this *StartResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartResponse{`, + `Pid:` + 
fmt.Sprintf("%v", this.Pid) + `,`, + `}`, + }, "") + return s +} +func (this *WaitRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WaitRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `}`, + }, "") + return s +} +func (this *WaitResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WaitResponse{`, + `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatsRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *StatsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatsResponse{`, + `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "Any", "google_protobuf.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConnectRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConnectRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *ConnectResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConnectResponse{`, + `ShimPid:` + fmt.Sprintf("%v", this.ShimPid) + `,`, + `TaskPid:` + fmt.Sprintf("%v", this.TaskPid) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *ShutdownRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ShutdownRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Now:` + fmt.Sprintf("%v", this.Now) + `,`, + `}`, + }, "") + return s +} +func (this *PauseRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PauseRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *ResumeRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResumeRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func valueToStringShim(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} + +type TaskService interface { + State(ctx context.Context, req *StateRequest) (*StateResponse, error) + Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) + Start(ctx context.Context, req *StartRequest) (*StartResponse, error) + Delete(ctx context.Context, req *DeleteRequest) (*DeleteResponse, error) + Pids(ctx context.Context, req *PidsRequest) (*PidsResponse, error) + Pause(ctx context.Context, req *PauseRequest) (*google_protobuf1.Empty, error) + Resume(ctx context.Context, req *ResumeRequest) (*google_protobuf1.Empty, error) + Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*google_protobuf1.Empty, error) + Kill(ctx context.Context, req *KillRequest) (*google_protobuf1.Empty, error) + Exec(ctx context.Context, req *ExecProcessRequest) (*google_protobuf1.Empty, error) + ResizePty(ctx context.Context, req *ResizePtyRequest) (*google_protobuf1.Empty, error) + CloseIO(ctx context.Context, req *CloseIORequest) (*google_protobuf1.Empty, error) + Update(ctx 
context.Context, req *UpdateTaskRequest) (*google_protobuf1.Empty, error) + Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) + Stats(ctx context.Context, req *StatsRequest) (*StatsResponse, error) + Connect(ctx context.Context, req *ConnectRequest) (*ConnectResponse, error) + Shutdown(ctx context.Context, req *ShutdownRequest) (*google_protobuf1.Empty, error) +} + +func RegisterTaskService(srv *ttrpc.Server, svc TaskService) { + srv.Register("containerd.task.v2.Task", map[string]ttrpc.Method{ + "State": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StateRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.State(ctx, &req) + }, + "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CreateTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Create(ctx, &req) + }, + "Start": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StartRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Start(ctx, &req) + }, + "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req DeleteRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Delete(ctx, &req) + }, + "Pids": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req PidsRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Pids(ctx, &req) + }, + "Pause": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req PauseRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Pause(ctx, &req) + }, + "Resume": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ResumeRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Resume(ctx, &req) + }, + "Checkpoint": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CheckpointTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Checkpoint(ctx, &req) + }, + "Kill": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req KillRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Kill(ctx, &req) + }, + "Exec": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ExecProcessRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Exec(ctx, &req) + }, + "ResizePty": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ResizePtyRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.ResizePty(ctx, &req) + }, + "CloseIO": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CloseIORequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.CloseIO(ctx, &req) + }, + "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req UpdateTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Update(ctx, &req) + }, + "Wait": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req WaitRequest + if err := unmarshal(&req); err != nil { + 
return nil, err + } + return svc.Wait(ctx, &req) + }, + "Stats": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StatsRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Stats(ctx, &req) + }, + "Connect": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ConnectRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Connect(ctx, &req) + }, + "Shutdown": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ShutdownRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Shutdown(ctx, &req) + }, + }) +} + +type taskClient struct { + client *ttrpc.Client +} + +func NewTaskClient(client *ttrpc.Client) TaskService { + return &taskClient{ + client: client, + } +} + +func (c *taskClient) State(ctx context.Context, req *StateRequest) (*StateResponse, error) { + var resp StateResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "State", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) { + var resp CreateTaskResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Create", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) { + var resp StartResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Start", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Delete(ctx context.Context, req *DeleteRequest) (*DeleteResponse, error) { + var resp DeleteResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Delete", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Pids(ctx context.Context, req *PidsRequest) (*PidsResponse, error) { + var resp PidsResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Pids", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Pause(ctx context.Context, req *PauseRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Pause", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Resume(ctx context.Context, req *ResumeRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Resume", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Checkpoint", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Kill(ctx context.Context, req *KillRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Kill", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Exec(ctx context.Context, req *ExecProcessRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Exec", req, &resp); err 
!= nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "ResizePty", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) CloseIO(ctx context.Context, req *CloseIORequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "CloseIO", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Update(ctx context.Context, req *UpdateTaskRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Update", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) { + var resp WaitResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Wait", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Stats(ctx context.Context, req *StatsRequest) (*StatsResponse, error) { + var resp StatsResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Stats", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Connect(ctx context.Context, req *ConnectRequest) (*ConnectResponse, error) { + var resp ConnectResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Connect", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Shutdown(ctx context.Context, req *ShutdownRequest) (*google_protobuf1.Empty, error) { + var resp google_protobuf1.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Shutdown", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} +func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bundle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rootfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rootfs = append(m.Rootfs, &containerd_types.Mount{}) + if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Terminal = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdout = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stderr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Checkpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentCheckpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ParentCheckpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &google_protobuf.Any{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateTaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + m.ExitStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitStatus |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecProcessRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Terminal = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) 
& 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdout = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stderr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &google_protobuf.Any{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecProcessResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecProcessResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecProcessResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ResizePtyRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResizePtyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResizePtyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Width", wireType) + } + m.Width = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Width |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: StateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bundle = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= (containerd_v1_types.Status(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdout = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stderr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Terminal = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + m.ExitStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitStatus |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) 
+ } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KillRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KillRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KillRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + } + m.Signal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Signal |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.All = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := 
skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloseIORequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloseIORequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloseIORequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PidsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PidsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PidsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PidsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Processes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Processes = append(m.Processes, &containerd_v1_types.ProcessInfo{}) + if err := m.Processes[len(m.Processes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckpointTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckpointTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &google_protobuf.Any{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &google_protobuf.Any{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartResponse: wiretype end group for non-group") + } 
+ if fieldNum <= 0 { + return fmt.Errorf("proto: StartResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WaitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WaitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WaitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WaitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WaitResponse: wiretype 
end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WaitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + m.ExitStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitStatus |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 
4 { + return fmt.Errorf("proto: StatsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stats == nil { + m.Stats = &google_protobuf.Any{} + } + if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ShimPid", wireType) + } + m.ShimPid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ShimPid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskPid", wireType) + } + m.TaskPid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TaskPid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShutdownRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShutdownRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShutdownRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Now", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Now = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PauseRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PauseRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PauseRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResumeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResumeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResumeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShim + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthShim + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShim(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthShim + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipShim(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShim + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShim + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShim + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthShim + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShim + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipShim(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthShim = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowShim = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/runtime/v2/task/shim.proto", fileDescriptorShim) +} + +var fileDescriptorShim = []byte{ + // 1240 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xdb, 0x6f, 0x1b, 0xc5, + 0x17, 0xee, 0xfa, 0xb2, 0xb6, 0x8f, 0xeb, 0x34, 0x9d, 0x5f, 0x9a, 0xdf, 0xd6, 0x95, 0x6c, 0x77, + 0x4b, 0x83, 0x01, 0xc9, 0x16, 0xae, 0xe0, 0x81, 0x48, 0xa0, 0xdc, 0xa8, 0x4c, 0x0b, 0x89, 0xb6, + 0x45, 0x45, 0xbc, 0x58, 0x1b, 0xef, 0xc4, 0x5e, 0xc5, 0xde, 0x59, 0x76, 0x66, 0x73, 0x41, 0x42, + 0xe2, 0x89, 0x07, 0x9e, 0xf8, 0xb3, 0xf2, 0xc0, 0x03, 0x12, 0x2f, 0xbc, 0x10, 0xa8, 0xff, 0x12, + 0x34, 0x17, 0xc7, 0x6b, 0x67, 0xd7, 0x4e, 0x2a, 0xbf, 0x44, 0x73, 0x76, 0xbe, 0x39, 0x33, 0xe7, + 0xcc, 0x77, 0xbe, 0x33, 0x31, 0x6c, 0xf6, 0x5c, 0xd6, 0x0f, 0x0f, 0x1b, 0x5d, 0x32, 0x6c, 0x76, + 0x89, 0xc7, 0x6c, 0xd7, 0xc3, 0x81, 0x13, 0x1d, 0x06, 0xa1, 0xc7, 0xdc, 0x21, 0x6e, 0x9e, 0xb4, + 0x9a, 0xcc, 0xa6, 0xc7, 0x4d, 0xda, 0x77, 0x87, 0x0d, 0x3f, 0x20, 0x8c, 0x20, 0x34, 0x81, 0x35, + 0xf8, 0x5c, 0xe3, 0xa4, 0x55, 0x7e, 0xd8, 0x23, 0xa4, 0x37, 0xc0, 0x4d, 0x81, 0x38, 0x0c, 0x8f, + 0x9a, 0xb6, 0x77, 0x2e, 0xe1, 0xe5, 0x47, 0xb3, 0x53, 0x78, 0xe8, 0xb3, 0xf1, 0xe4, 0x5a, 0x8f, + 0xf4, 0x88, 0x18, 0x36, 0xf9, 0x48, 0x7d, 0xad, 0xce, 0x2e, 0xe1, 0x47, 0xa1, 0xcc, 0x1e, 0xfa, + 0x0a, 0xf0, 0xe9, 0xc2, 0xf3, 0xdb, 0xbe, 0xdb, 0x64, 0xe7, 0x3e, 0xa6, 0xcd, 0x21, 0x09, 0x3d, + 0xa6, 0xd6, 0x7d, 0x76, 0x8b, 0x75, 0x22, 0x6c, 0x11, 0x9f, 0x58, 0x6b, 0xfe, 0x99, 0x82, 0xfb, + 0x3b, 0x01, 0xb6, 0x19, 0x7e, 0x6d, 0xd3, 0x63, 0x0b, 0xff, 0x10, 0x62, 0xca, 0xd0, 0x3a, 0xa4, + 0x5c, 0xc7, 0xd0, 0x6a, 0x5a, 0xbd, 0xb0, 0xad, 0x8f, 0x2e, 0xab, 0xa9, 0xf6, 0xae, 0x95, 0x72, + 0x1d, 0xb4, 0x0e, 0xfa, 0x61, 0xe8, 0x39, 0x03, 0x6c, 0xa4, 0xf8, 0x9c, 0xa5, 0x2c, 0xd4, 0x04, + 0x3d, 
0x20, 0x84, 0x1d, 0x51, 0x23, 0x5d, 0x4b, 0xd7, 0x8b, 0xad, 0xff, 0x37, 0xa2, 0xd9, 0xe4, + 0x1b, 0x37, 0xbe, 0xe6, 0x07, 0xb6, 0x14, 0x0c, 0x95, 0x21, 0xcf, 0x70, 0x30, 0x74, 0x3d, 0x7b, + 0x60, 0x64, 0x6a, 0x5a, 0x3d, 0x6f, 0x5d, 0xd9, 0x68, 0x0d, 0xb2, 0x94, 0x39, 0xae, 0x67, 0x64, + 0xc5, 0x1e, 0xd2, 0xe0, 0x5b, 0x53, 0xe6, 0x90, 0x90, 0x19, 0xba, 0xdc, 0x5a, 0x5a, 0xea, 0x3b, + 0x0e, 0x02, 0x23, 0x77, 0xf5, 0x1d, 0x07, 0x01, 0xaa, 0x00, 0x74, 0xfb, 0xb8, 0x7b, 0xec, 0x13, + 0xd7, 0x63, 0x46, 0x5e, 0xcc, 0x45, 0xbe, 0xa0, 0x8f, 0xe0, 0xbe, 0x6f, 0x07, 0xd8, 0x63, 0x9d, + 0x08, 0xac, 0x20, 0x60, 0xab, 0x72, 0x62, 0x67, 0x02, 0x6e, 0x40, 0x8e, 0xf8, 0xcc, 0x25, 0x1e, + 0x35, 0xa0, 0xa6, 0xd5, 0x8b, 0xad, 0xb5, 0x86, 0xbc, 0xcc, 0xc6, 0xf8, 0x32, 0x1b, 0x5b, 0xde, + 0xb9, 0x35, 0x06, 0x99, 0x1b, 0x80, 0xa2, 0x49, 0xa5, 0x3e, 0xf1, 0x28, 0x46, 0xab, 0x90, 0xf6, + 0x55, 0x5a, 0x4b, 0x16, 0x1f, 0x9a, 0x2f, 0xa1, 0xb4, 0x8b, 0x07, 0x98, 0xe1, 0x45, 0x89, 0x7f, + 0x02, 0x39, 0x7c, 0x86, 0xbb, 0x1d, 0xd7, 0x91, 0x99, 0xdf, 0x86, 0xd1, 0x65, 0x55, 0xdf, 0x3b, + 0xc3, 0xdd, 0xf6, 0xae, 0xa5, 0xf3, 0xa9, 0xb6, 0x63, 0xfe, 0xa2, 0xc1, 0xca, 0xd8, 0x5d, 0xd2, + 0x96, 0xa8, 0x0a, 0x45, 0x7c, 0xe6, 0xb2, 0x0e, 0x65, 0x36, 0x0b, 0xa9, 0xf0, 0x56, 0xb2, 0x80, + 0x7f, 0x7a, 0x25, 0xbe, 0xa0, 0x2d, 0x28, 0x70, 0x0b, 0x3b, 0x1d, 0x9b, 0x19, 0x69, 0x11, 0x6d, + 0xf9, 0x5a, 0xb4, 0xaf, 0xc7, 0xd4, 0xdd, 0xce, 0x5f, 0x5c, 0x56, 0xef, 0xfc, 0xf6, 0x4f, 0x55, + 0xb3, 0xf2, 0x72, 0xd9, 0x16, 0x33, 0xff, 0xd6, 0x00, 0xf1, 0xb3, 0x1d, 0x04, 0xa4, 0x8b, 0x29, + 0x5d, 0x46, 0x70, 0x53, 0x8c, 0x49, 0x27, 0x31, 0x26, 0x13, 0xcf, 0x98, 0x6c, 0x02, 0x63, 0xf4, + 0x29, 0xc6, 0xd4, 0x21, 0x43, 0x7d, 0xdc, 0x15, 0x3c, 0x4a, 0xba, 0x61, 0x81, 0x30, 0x1f, 0xc0, + 0xff, 0xa6, 0xc2, 0x93, 0xc9, 0x36, 0x7f, 0x82, 0x55, 0x0b, 0x53, 0xf7, 0x47, 0x7c, 0xc0, 0xce, + 0x97, 0x12, 0xf3, 0x1a, 0x64, 0x4f, 0x5d, 0x87, 0xf5, 0x45, 0xc0, 0x25, 0x4b, 0x1a, 0xfc, 0xfc, + 0x7d, 0xec, 0xf6, 0xfa, 0x4c, 0x84, 0x5b, 0xb2, 0x94, 0x65, 0xbe, 0x80, 0xbb, 0xfc, 0x0a, 0x97, + 0xc3, 0xa5, 0xdf, 0x53, 0x50, 0x52, 0xde, 0x14, 0x95, 0x6e, 0xab, 0x09, 0x8a, 0x7a, 0xe9, 0x09, + 0xf5, 0x9e, 0xf1, 0xc4, 0x0b, 0xd6, 0xf1, 0x83, 0xaf, 0xb4, 0x1e, 0x45, 0x55, 0xe2, 0xe4, 0x63, + 0x25, 0x14, 0x92, 0x86, 0x96, 0x82, 0x2e, 0x49, 0x0d, 0xa2, 0xec, 0xc9, 0xcf, 0xb0, 0x67, 0xa6, + 0x22, 0x0a, 0xf3, 0x2b, 0x02, 0xde, 0xa9, 0x22, 0x18, 0x14, 0x5f, 0xb8, 0x83, 0xc1, 0x52, 0x58, + 0xc1, 0x63, 0x74, 0x7b, 0xe3, 0x3a, 0x28, 0x59, 0xca, 0xe2, 0x09, 0xb7, 0x07, 0x63, 0x39, 0xe5, + 0x43, 0xb3, 0x0b, 0x2b, 0x3b, 0x03, 0x42, 0x71, 0x7b, 0x7f, 0x59, 0x74, 0x94, 0x57, 0x21, 0xeb, + 0x4f, 0x1a, 0xe6, 0x53, 0x28, 0x1e, 0xb8, 0xce, 0xa2, 0x22, 0x37, 0xbf, 0x81, 0xbb, 0x12, 0xa6, + 0xe8, 0xf4, 0x39, 0x14, 0x7c, 0x59, 0x3f, 0x98, 0x1a, 0x9a, 0xe8, 0x1a, 0xb5, 0x58, 0x3e, 0xa8, + 0x2a, 0x6b, 0x7b, 0x47, 0xc4, 0x9a, 0x2c, 0x31, 0x29, 0x3c, 0x98, 0x08, 0xf4, 0x4d, 0x7a, 0x17, + 0x82, 0x8c, 0x6f, 0xb3, 0xbe, 0x62, 0xa9, 0x18, 0x47, 0x75, 0x3d, 0x7d, 0x13, 0x5d, 0xef, 0xc0, + 0xfd, 0x6f, 0x7d, 0xe7, 0x86, 0xcd, 0xb2, 0x05, 0x85, 0x00, 0x53, 0x12, 0x06, 0x5d, 0x2c, 0x75, + 0x36, 0xc9, 0xfd, 0x04, 0xa6, 0x6a, 0x38, 0x60, 0x4b, 0xa9, 0xe1, 0xc7, 0xa2, 0x84, 0xb9, 0xb3, + 0xc4, 0x06, 0xf4, 0x15, 0x14, 0xdf, 0xd8, 0xee, 0x72, 0xb6, 0x0b, 0xe0, 0xae, 0xf4, 0xa5, 0x76, + 0x9b, 0xa9, 0x2b, 0x6d, 0x7e, 0x5d, 0xa5, 0xde, 0xa9, 0xae, 0x36, 0xa4, 0xe6, 0x2d, 0x64, 0xdf, + 0xa6, 0x54, 0xb3, 0x09, 0xfd, 0x3e, 0xe4, 0x5c, 0xb6, 0x99, 0x3c, 0x56, 0xd2, 0xc5, 0x48, 0x88, + 0x59, 0x87, 0x95, 0x1d, 0xe2, 
0x79, 0xb8, 0xbb, 0x28, 0x4f, 0xa6, 0x0d, 0xf7, 0xae, 0x90, 0x6a, + 0xa3, 0x87, 0x90, 0xe7, 0xaf, 0xcc, 0xce, 0x24, 0xf1, 0x39, 0x6e, 0x1f, 0xb8, 0x0e, 0x9f, 0xe2, + 0x2f, 0x31, 0x31, 0x25, 0xfb, 0x70, 0x8e, 0xdb, 0x7c, 0xca, 0x80, 0xdc, 0x09, 0x0e, 0xa8, 0x4b, + 0x64, 0xb1, 0x15, 0xac, 0xb1, 0x69, 0x6e, 0xc2, 0xbd, 0x57, 0xfd, 0x90, 0x39, 0xe4, 0xd4, 0x5b, + 0x74, 0x6b, 0xab, 0x90, 0xf6, 0xc8, 0xa9, 0x70, 0x9d, 0xb7, 0xf8, 0x90, 0xa7, 0xeb, 0xc0, 0x0e, + 0xe9, 0xa2, 0x16, 0x61, 0xbe, 0x0f, 0x25, 0x0b, 0xd3, 0x70, 0xb8, 0x08, 0xd8, 0xfa, 0x15, 0x20, + 0xc3, 0x6b, 0x01, 0xbd, 0x84, 0xac, 0x68, 0x17, 0x68, 0xaa, 0x88, 0xd5, 0x43, 0xba, 0x11, 0xed, + 0x4b, 0xe5, 0xc7, 0x73, 0x10, 0x2a, 0x69, 0x6f, 0x40, 0x97, 0xef, 0x27, 0xf4, 0x34, 0x0e, 0x7c, + 0xed, 0xc1, 0x5a, 0xde, 0x58, 0x04, 0x53, 0x8e, 0xe5, 0x31, 0x03, 0x96, 0x78, 0xcc, 0xab, 0xd2, + 0x4b, 0x3c, 0x66, 0xa4, 0x9e, 0xf6, 0x41, 0x97, 0xef, 0x2d, 0x14, 0x0b, 0x9e, 0x7a, 0xda, 0x95, + 0xcd, 0x79, 0x10, 0xe5, 0xb0, 0x0d, 0x19, 0x2e, 0x92, 0xa8, 0x1a, 0x87, 0x8d, 0xa8, 0x6c, 0xb9, + 0x96, 0x0c, 0x50, 0xae, 0xb6, 0x20, 0x2b, 0xae, 0x3a, 0x3e, 0xd2, 0x28, 0x0b, 0xca, 0xeb, 0xd7, + 0xc8, 0xbf, 0xc7, 0xff, 0x99, 0x41, 0x3b, 0xa0, 0x4b, 0x16, 0xc4, 0x87, 0x37, 0xc5, 0x90, 0x44, + 0x27, 0xfb, 0x00, 0x91, 0x87, 0xf4, 0x07, 0xb1, 0xf7, 0x14, 0xa7, 0xe3, 0x89, 0x0e, 0xbf, 0x80, + 0x0c, 0x6f, 0xa5, 0xf1, 0x39, 0x8a, 0x34, 0xd9, 0x44, 0x07, 0x5f, 0x42, 0x86, 0x2b, 0x17, 0x8a, + 0xe5, 0xcc, 0xf5, 0x67, 0x6b, 0xa2, 0x9f, 0x36, 0x14, 0xae, 0x9e, 0x7b, 0xe8, 0xbd, 0x84, 0x0c, + 0x4d, 0xbd, 0x06, 0x13, 0x5d, 0xed, 0x41, 0x4e, 0x35, 0x6a, 0x14, 0x4b, 0x93, 0xe9, 0x2e, 0x9e, + 0xe8, 0xe6, 0x39, 0xe8, 0xb2, 0x3d, 0xc5, 0x97, 0xcd, 0xb5, 0xd6, 0x35, 0x27, 0xb4, 0x0c, 0x97, + 0xf2, 0xf8, 0x1c, 0x47, 0x1a, 0x46, 0x3c, 0x0f, 0xa7, 0xba, 0x80, 0x12, 0x06, 0x9a, 0x2c, 0x0c, + 0x74, 0xa1, 0x30, 0x4c, 0x58, 0x6d, 0x41, 0x4e, 0x09, 0x6c, 0x42, 0xa2, 0xa6, 0x74, 0xba, 0xfc, + 0x64, 0x2e, 0x46, 0xf9, 0x7c, 0x0e, 0xf9, 0xb1, 0xa2, 0xa2, 0xd8, 0x05, 0x33, 0x7a, 0x9b, 0x94, + 0xb5, 0xed, 0xfd, 0x8b, 0xb7, 0x95, 0x3b, 0x7f, 0xbd, 0xad, 0xdc, 0xf9, 0x79, 0x54, 0xd1, 0x2e, + 0x46, 0x15, 0xed, 0x8f, 0x51, 0x45, 0xfb, 0x77, 0x54, 0xd1, 0xbe, 0xff, 0xe4, 0xb6, 0xbf, 0x4c, + 0x6c, 0xf2, 0x3f, 0xdf, 0xa5, 0x0e, 0x75, 0xb1, 0xc5, 0xb3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, + 0x11, 0x4c, 0x17, 0x02, 0xdb, 0x10, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto b/vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto new file mode 100644 index 000000000..5521ff799 --- /dev/null +++ b/vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto @@ -0,0 +1,184 @@ +syntax = "proto3"; + +package containerd.task.v2; + +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; +import weak "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "github.com/containerd/containerd/api/types/mount.proto"; +import "github.com/containerd/containerd/api/types/task/task.proto"; + +option go_package = "github.com/containerd/containerd/runtime/v2/task;task"; + +// Shim service is launched for each container and is responsible for owning the IO +// for the container and its additional processes. The shim is also the parent of +// each container and allows reattaching to the IO and receiving the exit status +// for the container processes. 
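+// Editorial note, not part of the upstream proto: as an illustrative sketch,
+// a containerd client typically drives a task through Create -> Start ->
+// Wait -> Delete, and finally calls Shutdown so the shim process can exit;
+// the precise ordering is enforced by containerd itself, not by this file.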
+service Task { + rpc State(StateRequest) returns (StateResponse); + rpc Create(CreateTaskRequest) returns (CreateTaskResponse); + rpc Start(StartRequest) returns (StartResponse); + rpc Delete(DeleteRequest) returns (DeleteResponse); + rpc Pids(PidsRequest) returns (PidsResponse); + rpc Pause(PauseRequest) returns (google.protobuf.Empty); + rpc Resume(ResumeRequest) returns (google.protobuf.Empty); + rpc Checkpoint(CheckpointTaskRequest) returns (google.protobuf.Empty); + rpc Kill(KillRequest) returns (google.protobuf.Empty); + rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty); + rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty); + rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty); + rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty); + rpc Wait(WaitRequest) returns (WaitResponse); + rpc Stats(StatsRequest) returns (StatsResponse); + rpc Connect(ConnectRequest) returns (ConnectResponse); + rpc Shutdown(ShutdownRequest) returns (google.protobuf.Empty); +} + +message CreateTaskRequest { + string id = 1; + string bundle = 2; + repeated containerd.types.Mount rootfs = 3; + bool terminal = 4; + string stdin = 5; + string stdout = 6; + string stderr = 7; + string checkpoint = 8; + string parent_checkpoint = 9; + google.protobuf.Any options = 10; +} + +message CreateTaskResponse { + uint32 pid = 1; +} + +message DeleteRequest { + string id = 1; + string exec_id = 2; +} + +message DeleteResponse { + uint32 pid = 1; + uint32 exit_status = 2; + google.protobuf.Timestamp exited_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message ExecProcessRequest { + string id = 1; + string exec_id = 2; + bool terminal = 3; + string stdin = 4; + string stdout = 5; + string stderr = 6; + google.protobuf.Any spec = 7; +} + +message ExecProcessResponse { +} + +message ResizePtyRequest { + string id = 1; + string exec_id = 2; + uint32 width = 3; + uint32 height = 4; +} + +message StateRequest { + string id = 1; + string exec_id = 2; +} + +message StateResponse { + string id = 1; + string bundle = 2; + uint32 pid = 3; + containerd.v1.types.Status status = 4; + string stdin = 5; + string stdout = 6; + string stderr = 7; + bool terminal = 8; + uint32 exit_status = 9; + google.protobuf.Timestamp exited_at = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message KillRequest { + string id = 1; + string exec_id = 2; + uint32 signal = 3; + bool all = 4; +} + +message CloseIORequest { + string id = 1; + string exec_id = 2; + bool stdin = 3; +} + +message PidsRequest { + string id = 1; +} + +message PidsResponse { + repeated containerd.v1.types.ProcessInfo processes = 1; +} + +message CheckpointTaskRequest { + string id = 1; + string path = 2; + google.protobuf.Any options = 3; +} + +message UpdateTaskRequest { + string id = 1; + google.protobuf.Any resources = 2; +} + +message StartRequest { + string id = 1; + string exec_id = 2; +} + +message StartResponse { + uint32 pid = 1; +} + +message WaitRequest { + string id = 1; + string exec_id = 2; +} + +message WaitResponse { + uint32 exit_status = 1; + google.protobuf.Timestamp exited_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message StatsRequest { + string id = 1; +} + +message StatsResponse { + google.protobuf.Any stats = 1; +} + +message ConnectRequest { + string id = 1; +} + +message ConnectResponse { + uint32 shim_pid = 1; + uint32 task_pid = 2; + string version = 3; +} + +message ShutdownRequest { + string id = 1; + bool now = 2; +} + +message 
PauseRequest { + string id = 1; +} + +message ResumeRequest { + string id = 1; +} diff --git a/vendor/github.com/containerd/cri/README.md b/vendor/github.com/containerd/cri/README.md index a97c2fe3d..e77abb51f 100644 --- a/vendor/github.com/containerd/cri/README.md +++ b/vendor/github.com/containerd/cri/README.md @@ -1,7 +1,7 @@ # cri


*Note: The standalone `cri-containerd` binary is end-of-life. `cri-containerd` is @@ -36,6 +36,7 @@ See [test dashboard](https://k8s-testgrid.appspot.com/sig-node-containerd) | v1.0.0-alpha.x | | 1.7, 1.8 | v1alpha1 | | v1.0.0-beta.x | | 1.9 | v1alpha1 | | End-Of-Life | v1.1 | 1.10+ | v1alpha2 | +| | v1.2 | 1.10+ | v1alpha2 | | | HEAD | 1.10+ | v1alpha2 | ## Production Quality Cluster on GCE @@ -149,7 +150,8 @@ implementation. For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development. -**Slack:** https://dockr.ly/community +**Slack:** Catch us in the #containerd and #containerd-dev channels on dockercommunity.slack.com. +[Click here for an invite to docker community slack.](https://join.slack.com/t/dockercommunity/shared_invite/enQtNDY4MDc1Mzc0MzIwLTgxZDBlMmM4ZGEyNDc1N2FkMzlhODJkYmE1YTVkYjM1MDE3ZjAwZjBkOGFlOTJkZjRmZGYzNjYyY2M3ZTUxYzQ) ## Other Communications As this project is tightly coupled to CRI and CRI-Tools and they are Kubernetes diff --git a/vendor/github.com/containerd/cri/pkg/api/runtimeoptions/v1/api.pb.go b/vendor/github.com/containerd/cri/pkg/api/runtimeoptions/v1/api.pb.go new file mode 100644 index 000000000..71341ec78 --- /dev/null +++ b/vendor/github.com/containerd/cri/pkg/api/runtimeoptions/v1/api.pb.go @@ -0,0 +1,394 @@ +/* +Copyright 2019 The containerd Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: api.proto + +/* + Package cri_runtimeoptions_v1 is a generated protocol buffer package. + + It is generated from these files: + api.proto + + It has these top-level messages: + Options +*/ +package cri_runtimeoptions_v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Options struct { + // TypeUrl specifies the type of the content inside the config file. + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // ConfigPath specifies the filesystem location of the config file + // used by the runtime. 
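+	// (Editorial note, assumed: TypeUrl and ConfigPath together let containerd
+	// hand a runtime-specific configuration file to the shim without having to
+	// interpret its contents itself.)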
+ ConfigPath string `protobuf:"bytes,2,opt,name=config_path,json=configPath,proto3" json:"config_path,omitempty"` +} + +func (m *Options) Reset() { *m = Options{} } +func (*Options) ProtoMessage() {} +func (*Options) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{0} } + +func (m *Options) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Options) GetConfigPath() string { + if m != nil { + return m.ConfigPath + } + return "" +} + +func init() { + proto.RegisterType((*Options)(nil), "cri.runtimeoptions.v1.Options") +} +func (m *Options) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Options) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TypeUrl) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.TypeUrl))) + i += copy(dAtA[i:], m.TypeUrl) + } + if len(m.ConfigPath) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.ConfigPath))) + i += copy(dAtA[i:], m.ConfigPath) + } + return i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Options) Size() (n int) { + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ConfigPath) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func sovApi(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Options) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Options{`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `ConfigPath:` + fmt.Sprintf("%v", this.ConfigPath) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Options) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Options: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Options: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthApi + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipApi(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("api.proto", fileDescriptorApi) } + +var fileDescriptorApi = []byte{ + // 183 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0x2c, 0xc8, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4d, 0x2e, 0xca, 0xd4, 0x2b, 0x2a, 0xcd, 0x2b, 0xc9, + 0xcc, 0x4d, 0xcd, 0x2f, 0x28, 0xc9, 0xcc, 0xcf, 0x2b, 0xd6, 0x2b, 0x33, 0x94, 0xd2, 0x4d, 0xcf, + 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, 0x07, 0xab, + 0x4e, 0x2a, 0x4d, 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0x62, 0x8a, 0x92, 0x2b, 0x17, 0xbb, 0x3f, + 0x44, 0xb3, 0x90, 0x24, 0x17, 0x47, 0x49, 0x65, 0x41, 0x6a, 0x7c, 0x69, 0x51, 0x8e, 0x04, 
0xa3, + 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, 0xcf, 0xc5, 0x9d, 0x9c, + 0x9f, 0x97, 0x96, 0x99, 0x1e, 0x5f, 0x90, 0x58, 0x92, 0x21, 0xc1, 0x04, 0x96, 0xe5, 0x82, 0x08, + 0x05, 0x24, 0x96, 0x64, 0x38, 0xc9, 0x9c, 0x78, 0x28, 0xc7, 0x78, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, + 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, + 0xc2, 0x63, 0x39, 0x86, 0x24, 0x36, 0xb0, 0x5d, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x07, + 0x00, 0xf2, 0x18, 0xbe, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/cri/pkg/api/runtimeoptions/v1/api.proto b/vendor/github.com/containerd/cri/pkg/api/runtimeoptions/v1/api.proto new file mode 100644 index 000000000..f907d609c --- /dev/null +++ b/vendor/github.com/containerd/cri/pkg/api/runtimeoptions/v1/api.proto @@ -0,0 +1,22 @@ +// To regenerate api.pb.go run `make proto` +syntax = "proto3"; + +package cri.runtimeoptions.v1; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option (gogoproto.goproto_getters_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_unrecognized_all) = false; + +message Options { + // TypeUrl specifies the type of the content inside the config file. + string type_url = 1; + // ConfigPath specifies the filesystem location of the config file + // used by the runtime. + string config_path = 2; +} diff --git a/vendor/github.com/containerd/cri/vendor.conf b/vendor/github.com/containerd/cri/vendor.conf index c81758046..f59340b54 100644 --- a/vendor/github.com/containerd/cri/vendor.conf +++ b/vendor/github.com/containerd/cri/vendor.conf @@ -3,7 +3,7 @@ github.com/blang/semver v3.1.0 github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 github.com/containerd/cgroups 5e610833b72089b37d0e615de9a92dfc043757c2 github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23 -github.com/containerd/containerd 15f19d7a67fa322e6de0ef4c6a1bf9da0f056554 +github.com/containerd/containerd 6937c5a3ba8280edff9e9030767e3b0cb742581c github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c github.com/containerd/go-cni 40bcf8ec8acd7372be1d77031d585d5d8e561c90 @@ -21,11 +21,9 @@ github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098 github.com/docker/go-units v0.3.1 github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528 github.com/emicklei/go-restful v2.2.1 -github.com/ghodss/yaml v1.0.0 github.com/godbus/dbus v3 github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef github.com/gogo/protobuf v1.0.0 -github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed github.com/golang/protobuf v1.1.0 github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c github.com/grpc-ecosystem/go-grpc-prometheus v1.1 @@ -33,15 +31,15 @@ github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f github.com/json-iterator/go 1.1.5 github.com/matttproud/golang_protobuf_extensions v1.0.0 -github.com/Microsoft/go-winio v0.4.10 -github.com/Microsoft/hcsshim v0.7.6 +github.com/Microsoft/go-winio v0.4.11 +github.com/Microsoft/hcsshim v0.8.2 github.com/modern-go/concurrent 1.0.3 github.com/modern-go/reflect2 1.0.1 github.com/opencontainers/go-digest 
c9281466c8b2f606084ac71339773efd177436e7 github.com/opencontainers/image-spec v1.0.1 -github.com/opencontainers/runc 00dc70017d222b178a002ed30e9321b12647af2d +github.com/opencontainers/runc v1.0.0-rc6 github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353 -github.com/opencontainers/runtime-tools v0.6.0 +github.com/opencontainers/runtime-tools fb101d5d42ab9c040f7d0a004e78336e5d5cb197 github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a github.com/pkg/errors v0.8.0 github.com/pmezard/go-difflib v1.0.0 @@ -50,7 +48,7 @@ github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 -github.com/sirupsen/logrus v1.0.0 +github.com/sirupsen/logrus v1.0.3 github.com/stretchr/testify v1.1.4 github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16 github.com/tchap/go-patricia v2.2.6 @@ -69,10 +67,12 @@ golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 google.golang.org/grpc v1.12.0 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 -gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77 -k8s.io/api kubernetes-1.12.0 -k8s.io/apimachinery kubernetes-1.12.0 -k8s.io/apiserver kubernetes-1.12.0 -k8s.io/client-go kubernetes-1.12.0 -k8s.io/kubernetes v1.12.0 -k8s.io/utils cd34563cd63c2bd7c6fe88a73c4dcf34ed8a67cb +gopkg.in/yaml.v2 v2.2.1 +k8s.io/api kubernetes-1.13.0 +k8s.io/apimachinery kubernetes-1.13.0 +k8s.io/apiserver kubernetes-1.13.0 +k8s.io/client-go kubernetes-1.13.0 +k8s.io/klog 8139d8cb77af419532b33dfa7dd09fbc5f1d344f +k8s.io/kubernetes v1.13.0 +k8s.io/utils 0d26856f57b32ec3398579285e5c8a2bfe8c5243 +sigs.k8s.io/yaml v1.1.0 diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. 
+ +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/README.md b/vendor/github.com/coreos/go-systemd/README.md new file mode 100644 index 000000000..cb87a1124 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/README.md @@ -0,0 +1,54 @@ +# go-systemd + +[![Build Status](https://travis-ci.org/coreos/go-systemd.png?branch=master)](https://travis-ci.org/coreos/go-systemd) +[![godoc](https://godoc.org/github.com/coreos/go-systemd?status.svg)](http://godoc.org/github.com/coreos/go-systemd) + +Go bindings to systemd. 
The project has several packages: + +- `activation` - for writing and using socket activation from Go +- `dbus` - for starting/stopping/inspecting running services and units +- `journal` - for writing to systemd's logging service, journald +- `sdjournal` - for reading from journald by wrapping its C API +- `machine1` - for registering machines/containers with systemd +- `unit` - for (de)serialization and comparison of unit files + +## Socket Activation + +An example HTTP server using socket activation can be quickly set up by following this README on a Linux machine running systemd: + +https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver + +## Journal + +Using the pure-Go `journal` package you can submit journal entries directly to systemd's journal, taking advantage of features like indexed key/value pairs for each log entry. +The `sdjournal` package provides read access to the journal by wrapping around journald's native C API; consequently it requires cgo and the journal headers to be available. + +## D-Bus + +The `dbus` package connects to the [systemd D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/dbus/) and lets you start, stop and introspect systemd units. The API docs are here: + +http://godoc.org/github.com/coreos/go-systemd/dbus + +### Debugging + +Create `/etc/dbus-1/system-local.conf` that looks like this: + +``` + + + + + + + +``` + +## machined + +The `machine1` package allows interaction with the [systemd machined D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/machined/). + +## Units + +The `unit` package provides various functions for working with [systemd unit files](http://www.freedesktop.org/software/systemd/man/systemd.unit.html). diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go new file mode 100644 index 000000000..c1694fb52 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/dbus.go @@ -0,0 +1,213 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ +package dbus + +import ( + "fmt" + "os" + "strconv" + "strings" + "sync" + + "github.com/godbus/dbus" +) + +const ( + alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` + num = `0123456789` + alphanum = alpha + num + signalBuffer = 100 +) + +// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped +func needsEscape(i int, b byte) bool { + // Escape everything that is not a-z-A-Z-0-9 + // Also escape 0-9 if it's the first character + return strings.IndexByte(alphanum, b) == -1 || + (i == 0 && strings.IndexByte(num, b) != -1) +} + +// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the +// rules that systemd uses for serializing special characters. 
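+// For example (illustrative, not in the original source):
+// PathBusEscape("foo-bar.service") yields "foo_2dbar_2eservice", because '-'
+// (0x2d) and '.' (0x2e) are not in [a-zA-Z0-9] and are therefore rewritten as
+// "_<hex>", while alphanumeric bytes are copied through unchanged.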
+func PathBusEscape(path string) string { + // Special case the empty string + if len(path) == 0 { + return "_" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if needsEscape(i, c) { + e := fmt.Sprintf("_%x", c) + n = append(n, []byte(e)...) + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. +type Conn struct { + // sysconn/sysobj are only used to call dbus methods + sysconn *dbus.Conn + sysobj dbus.BusObject + + // sigconn/sigobj are only used to receive dbus signals + sigconn *dbus.Conn + sigobj dbus.BusObject + + jobListener struct { + jobs map[dbus.ObjectPath]chan<- string + sync.Mutex + } + subscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } +} + +// New establishes a connection to any available bus and authenticates. +// Callers should call Close() when done with the connection. +func New() (*Conn, error) { + conn, err := NewSystemConnection() + if err != nil && os.Geteuid() == 0 { + return NewSystemdConnection() + } + return conn, err +} + +// NewSystemConnection establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection +func NewSystemConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SystemBusPrivate) + }) +} + +// NewUserConnection establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. +// Callers should call Close() when done with the connection. +func NewUserConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SessionBusPrivate) + }) +} + +// NewSystemdConnection establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. +func NewSystemdConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + // We skip Hello when talking directly to systemd. + return dbusAuthConnection(func() (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private") + }) + }) +} + +// Close closes an established connection +func (c *Conn) Close() { + c.sysconn.Close() + c.sigconn.Close() +} + +// NewConnection establishes a connection to a bus using a caller-supplied function. +// This allows connecting to remote buses through a user-supplied mechanism. +// The supplied function may be called multiple times, and should return independent connections. +// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, +// and any authentication should be handled by the function. 
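+// Editorial note (not in the original source): NewSystemConnection,
+// NewUserConnection and NewSystemdConnection above are thin wrappers around
+// this constructor; a caller needing a non-standard transport can supply its
+// own dial function in the same way, provided each call returns a fresh,
+// fully authenticated *dbus.Conn.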
+func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { + sysconn, err := dialBus() + if err != nil { + return nil, err + } + + sigconn, err := dialBus() + if err != nil { + sysconn.Close() + return nil, err + } + + c := &Conn{ + sysconn: sysconn, + sysobj: systemdObject(sysconn), + sigconn: sigconn, + sigobj: systemdObject(sigconn), + } + + c.subscriber.ignore = make(map[dbus.ObjectPath]int64) + c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + + // Setup the listeners on jobs so that we can get completions + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + + c.dispatch() + return c, nil +} + +// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager +// interface. The value is returned in its string representation, as defined at +// https://developer.gnome.org/glib/unstable/gvariant-text.html +func (c *Conn) GetManagerProperty(prop string) (string, error) { + variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop) + if err != nil { + return "", err + } + return variant.String(), nil +} + +func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus() + if err != nil { + return nil, err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = conn.Auth(methods) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(createBus) + if err != nil { + return nil, err + } + + if err = conn.Hello(); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func systemdObject(conn *dbus.Conn) dbus.BusObject { + return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go new file mode 100644 index 000000000..ab17f7cc7 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/methods.go @@ -0,0 +1,565 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbus + +import ( + "errors" + "path" + "strconv" + + "github.com/godbus/dbus" +) + +func (c *Conn) jobComplete(signal *dbus.Signal) { + var id uint32 + var job dbus.ObjectPath + var unit string + var result string + dbus.Store(signal.Body, &id, &job, &unit, &result) + c.jobListener.Lock() + out, ok := c.jobListener.jobs[job] + if ok { + out <- result + delete(c.jobListener.jobs, job) + } + c.jobListener.Unlock() +} + +func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) { + if ch != nil { + c.jobListener.Lock() + defer c.jobListener.Unlock() + } + + var p dbus.ObjectPath + err := c.sysobj.Call(job, 0, args...).Store(&p) + if err != nil { + return 0, err + } + + if ch != nil { + c.jobListener.jobs[p] = ch + } + + // ignore error since 0 is fine if conversion fails + jobID, _ := strconv.Atoi(path.Base(string(p))) + + return jobID, nil +} + +// StartUnit enqueues a start job and depending jobs, if any (unless otherwise +// specified by the mode string). +// +// Takes the unit to activate, plus a mode string. The mode needs to be one of +// replace, fail, isolate, ignore-dependencies, ignore-requirements. If +// "replace" the call will start the unit and its dependencies, possibly +// replacing already queued jobs that conflict with this. If "fail" the call +// will start the unit and its dependencies, but will fail if this would change +// an already queued job. If "isolate" the call will start the unit in question +// and terminate all units that aren't dependencies of it. If +// "ignore-dependencies" it will start a unit but ignore all its dependencies. +// If "ignore-requirements" it will start a unit but only ignore the +// requirement dependencies. It is not recommended to make use of the latter +// two options. +// +// If the provided channel is non-nil, a result string will be sent to it upon +// job completion: one of done, canceled, timeout, failed, dependency, skipped. +// done indicates successful execution of a job. canceled indicates that a job +// has been canceled before it finished execution. timeout indicates that the +// job timeout was reached. failed indicates that the job failed. dependency +// indicates that a job this job has been depending on failed and the job hence +// has been removed too. skipped indicates that a job was skipped because it +// didn't apply to the units current state. +// +// If no error occurs, the ID of the underlying systemd job will be returned. There +// does exist the possibility for no error to be returned, but for the returned job +// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint +// should not be considered authoritative. +// +// If an error does occur, it will be returned to the user alongside a job ID of 0. +func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) +} + +// StopUnit is similar to StartUnit but stops the specified unit rather +// than starting it. +func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) +} + +// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. +func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) +} + +// RestartUnit restarts a service. 
If a service is restarted that isn't +// running it will be started. +func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) +} + +// TryRestartUnit is like RestartUnit, except that a service that isn't running +// is not affected by the restart. +func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) +} + +// ReloadOrRestart attempts a reload if the unit supports it and use a restart +// otherwise. +func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) +} + +// ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try" +// flavored restart otherwise. +func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) +} + +// StartTransientUnit() may be used to create and start a transient unit, which +// will be released as soon as it is not running or referenced anymore or the +// system is rebooted. name is the unit name including suffix, and must be +// unique. mode is the same as in StartUnit(), properties contains properties +// of the unit. +func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +} + +// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's +// processes are killed. +func (c *Conn) KillUnit(name string, signal int32) { + c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() +} + +// ResetFailedUnit resets the "failed" state of a specific unit. +func (c *Conn) ResetFailedUnit(name string) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() +} + +// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface +func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) { + var err error + var props map[string]dbus.Variant + + path := unitPath(unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) + if err != nil { + return nil, err + } + + out := make(map[string]interface{}, len(props)) + for k, v := range props { + out[k] = v.Value() + } + + return out, nil +} + +// GetUnitProperties takes the unit name and returns all of its dbus object properties. 
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
+	return c.getProperties(unit, "org.freedesktop.systemd1.Unit")
+}
+
+func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
+	var err error
+	var prop dbus.Variant
+
+	path := unitPath(unit)
+	if !path.IsValid() {
+		return nil, errors.New("invalid unit name: " + unit)
+	}
+
+	obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+	err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Property{Name: propertyName, Value: prop}, nil
+}
+
+// GetUnitProperty takes a unit name and a property name, and returns the value
+// of that property on the org.freedesktop.systemd1.Unit interface.
+func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
+	return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
+}
+
+// GetServiceProperty returns the property for the given service name and property name.
+func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) {
+	return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName)
+}
+
+// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope.
+// Returns "dbus.Error: Unknown interface" if the unitType does not match the type of the unit.
+func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
+	return c.getProperties(unit, "org.freedesktop.systemd1."+unitType)
+}
+
+// SetUnitProperties may be used to modify certain unit properties at runtime.
+// Not all properties may be changed at runtime, but many resource management
+// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
+// instantly, and stored on disk for future boots, unless runtime is true, in which
+// case the settings only apply until the next reboot. name is the name of the unit
+// to modify. properties are the settings to set, encoded as an array of property
+// name and value pairs.
+func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
+	return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
+}
+
+// GetUnitTypeProperty is like GetUnitProperty, but reads the property from the
+// type-specific interface (org.freedesktop.systemd1.<unitType>).
+func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
+	return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
+}
+
+type UnitStatus struct {
+	Name        string // The primary unit name as string
+	Description string // The human readable description string
+	LoadState   string // The load state (i.e. whether the unit file has been loaded successfully)
+	ActiveState string // The active state (i.e. whether the unit is currently started or not)
+	SubState    string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
+	Followed    string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
+ Path dbus.ObjectPath // The unit object path + JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise + JobType string // The job type as string + JobPath dbus.ObjectPath // The job object path +} + +type storeFunc func(retvalues ...interface{}) error + +func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]UnitStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + err = dbus.Store(resultInterface, statusInterface...) + if err != nil { + return nil, err + } + + return status, nil +} + +// ListUnits returns an array with all currently loaded units. Note that +// units may be known by multiple names at the same time, and hence there might +// be more unit names loaded than actual units behind them. +func (c *Conn) ListUnits() ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store) +} + +// ListUnitsFiltered returns an array with units filtered by state. +// It takes a list of units' statuses to filter. +func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) +} + +// ListUnitsByPatterns returns an array with units. +// It takes a list of units' statuses and names to filter. +// Note that units may be known by multiple names at the same time, +// and hence there might be more unit names loaded than actual units behind them. +func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) +} + +// ListUnitsByNames returns an array with units. It takes a list of units' +// names and returns an UnitStatus array. Comparing to ListUnitsByPatterns +// method, this method returns statuses even for inactive or non-existing +// units. Input array should contain exact unit names, but not patterns. +func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) +} + +type UnitFile struct { + Path string + Type string +} + +func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + files := make([]UnitFile, len(result)) + fileInterface := make([]interface{}, len(files)) + for i := range files { + fileInterface[i] = &files[i] + } + + err = dbus.Store(resultInterface, fileInterface...) + if err != nil { + return nil, err + } + + return files, nil +} + +// ListUnitFiles returns an array of all available units on disk. +func (c *Conn) ListUnitFiles() ([]UnitFile, error) { + return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) +} + +// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns. 
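+//
+// For example (illustrative only; the state and pattern values are arbitrary):
+//
+//	files, err := conn.ListUnitFilesByPatterns([]string{"enabled"}, []string{"ssh*"})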
+func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) { + return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) +} + +type LinkUnitFileChange EnableUnitFileChange + +// LinkUnitFiles() links unit files (that are located outside of the +// usual unit search paths) into the unit search path. +// +// It takes a list of absolute paths to unit files to link and two +// booleans. The first boolean controls whether the unit shall be +// enabled for runtime only (true, /run), or persistently (false, +// /etc). +// The second controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns a list of the changes made. The list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]LinkUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +// EnableUnitFiles() may be used to enable one or more units in the system (by +// creating symlinks to them in /etc or /run). +// +// It takes a list of unit files to enable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and two booleans: the first controls whether the unit shall +// be enabled for runtime only (true, /run), or persistently (false, /etc). +// The second one controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns one boolean and an array with the changes made. The +// boolean signals whether the unit files contained any enablement +// information (i.e. an [Install]) section. The changes list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + var carries_install_info bool + + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) + if err != nil { + return false, nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]EnableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) 
+ if err != nil { + return false, nil, err + } + + return carries_install_info, changes, nil +} + +type EnableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// DisableUnitFiles() may be used to disable one or more units in the system (by +// removing symlinks to them from /etc or /run). +// +// It takes a list of unit files to disable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and one boolean: whether the unit was enabled for runtime +// only (true, /run), or persistently (false, /etc). +// +// This call returns an array with the changes made. The changes list +// consists of structures with three strings: the type of the change (one of +// symlink or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]DisableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type DisableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// MaskUnitFiles masks one or more units in the system +// +// It takes three arguments: +// * list of units to mask (either just file names or full +// absolute paths if the unit files are residing outside +// the usual unit search paths) +// * runtime to specify whether the unit was enabled for runtime +// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) +// * force flag +func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]MaskUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) 
+ if err != nil { + return nil, err + } + + return changes, nil +} + +type MaskUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// UnmaskUnitFiles unmasks one or more units in the system +// +// It takes two arguments: +// * list of unit files to mask (either just file names or full +// absolute paths if the unit files are residing outside +// the usual unit search paths) +// * runtime to specify whether the unit was enabled for runtime +// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) +func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]UnmaskUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type UnmaskUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Reload instructs systemd to scan for and reload unit files. This is +// equivalent to a 'systemctl daemon-reload'. +func (c *Conn) Reload() error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() +} + +func unitPath(name string) dbus.ObjectPath { + return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go new file mode 100644 index 000000000..6c8189587 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/properties.go @@ -0,0 +1,237 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "github.com/godbus/dbus" +) + +// From the systemd docs: +// +// The properties array of StartTransientUnit() may take many of the settings +// that may also be configured in unit files. Not all parameters are currently +// accepted though, but we plan to cover more properties with future release. +// Currently you may set the Description, Slice and all dependency types of +// units, as well as RemainAfterExit, ExecStart for service units, +// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, +// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, +// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, +// DevicePolicy, DeviceAllow for services/scopes/slices. 
These fields map +// directly to their counterparts in unit files and as normal D-Bus object +// properties. The exception here is the PIDs field of scope units which is +// used for construction of the scope only and specifies the initial PIDs to +// add to the scope object. + +type Property struct { + Name string + Value dbus.Variant +} + +type PropertyCollection struct { + Name string + Properties []Property +} + +type execStart struct { + Path string // the binary path to execute + Args []string // an array with all arguments to pass to the executed command, starting with argument 0 + UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly +} + +// PropExecStart sets the ExecStart service property. The first argument is a +// slice with the binary path to execute followed by the arguments to pass to +// the executed command. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= +func PropExecStart(command []string, uncleanIsFailure bool) Property { + execStarts := []execStart{ + execStart{ + Path: command[0], + Args: command, + UncleanIsFailure: uncleanIsFailure, + }, + } + + return Property{ + Name: "ExecStart", + Value: dbus.MakeVariant(execStarts), + } +} + +// PropRemainAfterExit sets the RemainAfterExit service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= +func PropRemainAfterExit(b bool) Property { + return Property{ + Name: "RemainAfterExit", + Value: dbus.MakeVariant(b), + } +} + +// PropType sets the Type service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type= +func PropType(t string) Property { + return Property{ + Name: "Type", + Value: dbus.MakeVariant(t), + } +} + +// PropDescription sets the Description unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= +func PropDescription(desc string) Property { + return Property{ + Name: "Description", + Value: dbus.MakeVariant(desc), + } +} + +func propDependency(name string, units []string) Property { + return Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + +// PropRequires sets the Requires unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= +func PropRequires(units ...string) Property { + return propDependency("Requires", units) +} + +// PropRequiresOverridable sets the RequiresOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= +func PropRequiresOverridable(units ...string) Property { + return propDependency("RequiresOverridable", units) +} + +// PropRequisite sets the Requisite unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= +func PropRequisite(units ...string) Property { + return propDependency("Requisite", units) +} + +// PropRequisiteOverridable sets the RequisiteOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= +func PropRequisiteOverridable(units ...string) Property { + return propDependency("RequisiteOverridable", units) +} + +// PropWants sets the Wants unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= +func PropWants(units ...string) Property { + return propDependency("Wants", units) +} + +// PropBindsTo sets the BindsTo unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= +func PropBindsTo(units ...string) Property { + return propDependency("BindsTo", units) +} + +// PropRequiredBy sets the RequiredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= +func PropRequiredBy(units ...string) Property { + return propDependency("RequiredBy", units) +} + +// PropRequiredByOverridable sets the RequiredByOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= +func PropRequiredByOverridable(units ...string) Property { + return propDependency("RequiredByOverridable", units) +} + +// PropWantedBy sets the WantedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= +func PropWantedBy(units ...string) Property { + return propDependency("WantedBy", units) +} + +// PropBoundBy sets the BoundBy unit property. See +// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= +func PropBoundBy(units ...string) Property { + return propDependency("BoundBy", units) +} + +// PropConflicts sets the Conflicts unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= +func PropConflicts(units ...string) Property { + return propDependency("Conflicts", units) +} + +// PropConflictedBy sets the ConflictedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= +func PropConflictedBy(units ...string) Property { + return propDependency("ConflictedBy", units) +} + +// PropBefore sets the Before unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= +func PropBefore(units ...string) Property { + return propDependency("Before", units) +} + +// PropAfter sets the After unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= +func PropAfter(units ...string) Property { + return propDependency("After", units) +} + +// PropOnFailure sets the OnFailure unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= +func PropOnFailure(units ...string) Property { + return propDependency("OnFailure", units) +} + +// PropTriggers sets the Triggers unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= +func PropTriggers(units ...string) Property { + return propDependency("Triggers", units) +} + +// PropTriggeredBy sets the TriggeredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= +func PropTriggeredBy(units ...string) Property { + return propDependency("TriggeredBy", units) +} + +// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= +func PropPropagatesReloadTo(units ...string) Property { + return propDependency("PropagatesReloadTo", units) +} + +// PropRequiresMountsFor sets the RequiresMountsFor unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= +func PropRequiresMountsFor(units ...string) Property { + return propDependency("RequiresMountsFor", units) +} + +// PropSlice sets the Slice unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= +func PropSlice(slice string) Property { + return Property{ + Name: "Slice", + Value: dbus.MakeVariant(slice), + } +} + +// PropPids sets the PIDs field of scope units used in the initial construction +// of the scope only and specifies the initial PIDs to add to the scope object. +// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties +func PropPids(pids ...uint32) Property { + return Property{ + Name: "PIDs", + Value: dbus.MakeVariant(pids), + } +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go new file mode 100644 index 000000000..f92e6fbed --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/set.go @@ -0,0 +1,47 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +type set struct { + data map[string]bool +} + +func (s *set) Add(value string) { + s.data[value] = true +} + +func (s *set) Remove(value string) { + delete(s.data, value) +} + +func (s *set) Contains(value string) (exists bool) { + _, exists = s.data[value] + return +} + +func (s *set) Length() int { + return len(s.data) +} + +func (s *set) Values() (values []string) { + for val, _ := range s.data { + values = append(values, val) + } + return +} + +func newSet() *set { + return &set{make(map[string]bool)} +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go new file mode 100644 index 000000000..996451445 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/subscription.go @@ -0,0 +1,250 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "errors" + "time" + + "github.com/godbus/dbus" +) + +const ( + cleanIgnoreInterval = int64(10 * time.Second) + ignoreInterval = int64(30 * time.Millisecond) +) + +// Subscribe sets up this connection to subscribe to all systemd dbus events. +// This is required before calling SubscribeUnits. When the connection closes +// systemd will automatically stop sending signals so there is no need to +// explicitly call Unsubscribe(). 
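+//
+// A minimal, illustrative sequence (error handling omitted) is:
+//
+//	_ = conn.Subscribe()
+//	statusCh, errCh := conn.SubscribeUnits(time.Second)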
+func (c *Conn) Subscribe() error { + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") + + err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() + if err != nil { + return err + } + + return nil +} + +// Unsubscribe this connection from systemd dbus events. +func (c *Conn) Unsubscribe() error { + err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() + if err != nil { + return err + } + + return nil +} + +func (c *Conn) dispatch() { + ch := make(chan *dbus.Signal, signalBuffer) + + c.sigconn.Signal(ch) + + go func() { + for { + signal, ok := <-ch + if !ok { + return + } + + if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { + c.jobComplete(signal) + } + + if c.subscriber.updateCh == nil { + continue + } + + var unitPath dbus.ObjectPath + switch signal.Name { + case "org.freedesktop.systemd1.Manager.JobRemoved": + unitName := signal.Body[2].(string) + c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + case "org.freedesktop.systemd1.Manager.UnitNew": + unitPath = signal.Body[1].(dbus.ObjectPath) + case "org.freedesktop.DBus.Properties.PropertiesChanged": + if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { + unitPath = signal.Path + } + } + + if unitPath == dbus.ObjectPath("") { + continue + } + + c.sendSubStateUpdate(unitPath) + } + }() +} + +// Returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. +func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) +} + +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// size of the channels, the comparison function for detecting changes and a filter +// function for cutting down on the noise that your channel receives. +func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { + old := make(map[string]*UnitStatus) + statusChan := make(chan map[string]*UnitStatus, buffer) + errChan := make(chan error, buffer) + + go func() { + for { + timerChan := time.After(interval) + + units, err := c.ListUnits() + if err == nil { + cur := make(map[string]*UnitStatus) + for i := range units { + if filterUnit != nil && filterUnit(units[i].Name) { + continue + } + cur[units[i].Name] = &units[i] + } + + // add all new or changed units + changed := make(map[string]*UnitStatus) + for n, u := range cur { + if oldU, ok := old[n]; !ok || isChanged(oldU, u) { + changed[n] = u + } + delete(old, n) + } + + // add all deleted units + for oldN := range old { + changed[oldN] = nil + } + + old = cur + + if len(changed) != 0 { + statusChan <- changed + } + } else { + errChan <- err + } + + <-timerChan + } + }() + + return statusChan, errChan +} + +type SubStateUpdate struct { + UnitName string + SubState string +} + +// SetSubStateSubscriber writes to updateCh when any unit's substate changes. 
+// Although this writes to updateCh on every state change, the reported state
+// may be more recent than the change that generated it (due to an unavoidable
+// race in the systemd dbus interface). That is, this method provides a good
+// way to keep a current view of all units' states, but is not guaranteed to
+// show every state transition they go through. Furthermore, state changes
+// will only be written to the channel with non-blocking writes. If updateCh
+// is full, it attempts to write an error to errCh; if errCh is full, the error
+// passes silently.
+func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
+	c.subscriber.Lock()
+	defer c.subscriber.Unlock()
+	c.subscriber.updateCh = updateCh
+	c.subscriber.errCh = errCh
+}
+
+func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
+	c.subscriber.Lock()
+	defer c.subscriber.Unlock()
+
+	if c.shouldIgnore(path) {
+		return
+	}
+
+	info, err := c.GetUnitProperties(string(path))
+	if err != nil {
+		select {
+		case c.subscriber.errCh <- err:
+		default:
+		}
+		// Bail out here: without the properties, the type assertions below
+		// would panic on a nil map.
+		return
+	}
+
+	name := info["Id"].(string)
+	substate := info["SubState"].(string)
+
+	update := &SubStateUpdate{name, substate}
+	select {
+	case c.subscriber.updateCh <- update:
+	default:
+		select {
+		case c.subscriber.errCh <- errors.New("update channel full!"):
+		default:
+		}
+	}
+
+	c.updateIgnore(path, info)
+}
+
+// The ignore functions work around a wart in the systemd dbus interface.
+// Requesting the properties of an unloaded unit will cause systemd to send a
+// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
+// properties on UnitNew (as that's the only indication of a new unit coming up
+// for the first time), we would enter an infinite loop if we did not attempt
+// to detect and ignore these spurious signals. The signals themselves are
+// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
+// unloaded unit's signals for a short time after requesting its properties.
+// This means that we will miss e.g. a transient unit being restarted
+// *immediately* upon failure and also a transient unit being started
+// immediately after requesting its status (with systemctl status, for example,
+// because this causes a UnitNew signal to be sent which then causes us to fetch
+// the properties).
+
+func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
+	t, ok := c.subscriber.ignore[path]
+	return ok && t >= time.Now().UnixNano()
+}
+
+func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
+	c.cleanIgnore()
+
+	// unit is unloaded - it will trigger bad systemd dbus behavior
+	if info["LoadState"].(string) == "not-found" {
+		c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
+	}
+}
+
+// without this, ignore would grow unboundedly over time
+func (c *Conn) cleanIgnore() {
+	now := time.Now().UnixNano()
+	if c.subscriber.cleanIgnore < now {
+		c.subscriber.cleanIgnore = now + cleanIgnoreInterval
+
+		for p, t := range c.subscriber.ignore {
+			if t < now {
+				delete(c.subscriber.ignore, p)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
new file mode 100644
index 000000000..5b408d584
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
@@ -0,0 +1,57 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "time" +) + +// SubscriptionSet returns a subscription set which is like conn.Subscribe but +// can filter to only return events for a set of units. +type SubscriptionSet struct { + *set + conn *Conn +} + +func (s *SubscriptionSet) filter(unit string) bool { + return !s.Contains(unit) +} + +// Subscribe starts listening for dbus events for all of the units in the set. +// Returns channels identical to conn.SubscribeUnits. +func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { + // TODO: Make fully evented by using systemd 209 with properties changed values + return s.conn.SubscribeUnitsCustom(time.Second, 0, + mismatchUnitStatus, + func(unit string) bool { return s.filter(unit) }, + ) +} + +// NewSubscriptionSet returns a new subscription set. +func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { + return &SubscriptionSet{newSet(), conn} +} + +// mismatchUnitStatus returns true if the provided UnitStatus objects +// are not equivalent. false is returned if the objects are equivalent. +// Only the Name, Description and state-related fields are used in +// the comparison. +func mismatchUnitStatus(u1, u2 *UnitStatus) bool { + return u1.Name != u2.Name || + u1.Description != u2.Description || + u1.LoadState != u2.LoadState || + u1.ActiveState != u2.ActiveState || + u1.SubState != u2.SubState +} diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE new file mode 100644 index 000000000..b55b37bc3 --- /dev/null +++ b/vendor/github.com/docker/go-units/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md new file mode 100644 index 000000000..4f70a4e13 --- /dev/null +++ b/vendor/github.com/docker/go-units/README.md @@ -0,0 +1,16 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human friendly measurements into machine friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. + +go-units is licensed under the Apache License, Version 2.0. +See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go new file mode 100644 index 000000000..c219a8a96 --- /dev/null +++ b/vendor/github.com/docker/go-units/duration.go @@ -0,0 +1,33 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). 
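+//
+// For example, with the thresholds used below:
+//
+//	HumanDuration(90 * time.Second) // "About a minute"
+//	HumanDuration(3 * time.Hour)    // "3 hours"
+//	HumanDuration(96 * time.Hour)   // "4 days"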
+func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours()); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*3 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go new file mode 100644 index 000000000..f5b82ea24 --- /dev/null +++ b/vendor/github.com/docker/go-units/size.go @@ -0,0 +1,96 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) +) + +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + i := 0 + unitsLimit := len(_map) - 1 + for size >= base && i < unitsLimit { + size = size / base + i++ + } + return fmt.Sprintf(format, size, _map[i]) +} + +// HumanSize returns a human-readable approximation of a size +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +func HumanSize(size float64) string { + return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. 
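+// For example, parseSize("32kb", binaryMap) yields 32*1024 = 32768, while
+// parseSize("32kb", decimalMap) yields 32000; FromHumanSize and RAMInBytes are
+// thin wrappers that choose the unit map.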
+func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 4 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[3]) + if mul, ok := uMap[unitPrefix]; ok { + size *= float64(mul) + } + + return int64(size), nil +} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go new file mode 100644 index 000000000..5ac7fd825 --- /dev/null +++ b/vendor/github.com/docker/go-units/ulimit.go @@ -0,0 +1,118 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. + // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. + "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// ParseUlimit parses and returns a Ulimit from the specified string. +func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. 
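+//
+// For example, given the name mapping above:
+//
+//	u, _ := ParseUlimit("nofile=512:1024")
+//	r, _ := u.GetRlimit() // &Rlimit{Type: rlimitNofile, Soft: 512, Hard: 1024}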
+func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/vendor/github.com/godbus/dbus/LICENSE b/vendor/github.com/godbus/dbus/LICENSE new file mode 100644 index 000000000..670d88fca --- /dev/null +++ b/vendor/github.com/godbus/dbus/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013, Georg Reinke (), Google +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/godbus/dbus/README.markdown b/vendor/github.com/godbus/dbus/README.markdown new file mode 100644 index 000000000..0a6e7e5bf --- /dev/null +++ b/vendor/github.com/godbus/dbus/README.markdown @@ -0,0 +1,41 @@ +dbus +---- + +dbus is a simple library that implements native Go client bindings for the +D-Bus message bus system. + +### Features + +* Complete native implementation of the D-Bus message protocol +* Go-like API (channels for signals / asynchronous method calls, Goroutine-safe connections) +* Subpackages that help with the introspection / property interfaces + +### Installation + +This packages requires Go 1.1. If you installed it and set up your GOPATH, just run: + +``` +go get github.com/godbus/dbus +``` + +If you want to use the subpackages, you can install them the same way. + +### Usage + +The complete package documentation and some simple examples are available at +[godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the +[_examples](https://github.com/godbus/dbus/tree/master/_examples) directory +gives a short overview over the basic usage. + +#### Projects using godbus +- [notify](https://github.com/esiqveland/notify) provides desktop notifications over dbus into a library. + +Please note that the API is considered unstable for now and may change without +further notice. + +### License + +go.dbus is available under the Simplified BSD License; see LICENSE for the full +text. + +Nearly all of the credit for this library goes to github.com/guelfey/go.dbus. 
diff --git a/vendor/github.com/godbus/dbus/auth.go b/vendor/github.com/godbus/dbus/auth.go new file mode 100644 index 000000000..98017b693 --- /dev/null +++ b/vendor/github.com/godbus/dbus/auth.go @@ -0,0 +1,253 @@ +package dbus + +import ( + "bufio" + "bytes" + "errors" + "io" + "os" + "strconv" +) + +// AuthStatus represents the Status of an authentication mechanism. +type AuthStatus byte + +const ( + // AuthOk signals that authentication is finished; the next command + // from the server should be an OK. + AuthOk AuthStatus = iota + + // AuthContinue signals that additional data is needed; the next command + // from the server should be a DATA. + AuthContinue + + // AuthError signals an error; the server sent invalid data or some + // other unexpected thing happened and the current authentication + // process should be aborted. + AuthError +) + +type authState byte + +const ( + waitingForData authState = iota + waitingForOk + waitingForReject +) + +// Auth defines the behaviour of an authentication mechanism. +type Auth interface { + // Return the name of the mechnism, the argument to the first AUTH command + // and the next status. + FirstData() (name, resp []byte, status AuthStatus) + + // Process the given DATA command, and return the argument to the DATA + // command and the next status. If len(resp) == 0, no DATA command is sent. + HandleData(data []byte) (resp []byte, status AuthStatus) +} + +// Auth authenticates the connection, trying the given list of authentication +// mechanisms (in that order). If nil is passed, the EXTERNAL and +// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private +// connections, this method must be called before sending any messages to the +// bus. Auth must not be called on shared connections. 
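+//
+// A typical sequence for a private connection is, roughly (the bus address is
+// illustrative and error handling is omitted):
+//
+//	conn, _ := Dial("unix:path=/run/user/1000/bus")
+//	_ = conn.Auth(nil)
+//	_ = conn.Hello()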
+func (conn *Conn) Auth(methods []Auth) error { + if methods == nil { + uid := strconv.Itoa(os.Getuid()) + methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())} + } + in := bufio.NewReader(conn.transport) + err := conn.transport.SendNullByte() + if err != nil { + return err + } + err = authWriteLine(conn.transport, []byte("AUTH")) + if err != nil { + return err + } + s, err := authReadLine(in) + if err != nil { + return err + } + if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) { + return errors.New("dbus: authentication protocol error") + } + s = s[1:] + for _, v := range s { + for _, m := range methods { + if name, data, status := m.FirstData(); bytes.Equal(v, name) { + var ok bool + err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data) + if err != nil { + return err + } + switch status { + case AuthOk: + err, ok = conn.tryAuth(m, waitingForOk, in) + case AuthContinue: + err, ok = conn.tryAuth(m, waitingForData, in) + default: + panic("dbus: invalid authentication status") + } + if err != nil { + return err + } + if ok { + if conn.transport.SupportsUnixFDs() { + err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD")) + if err != nil { + return err + } + line, err := authReadLine(in) + if err != nil { + return err + } + switch { + case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")): + conn.EnableUnixFDs() + conn.unixFD = true + case bytes.Equal(line[0], []byte("ERROR")): + default: + return errors.New("dbus: authentication protocol error") + } + } + err = authWriteLine(conn.transport, []byte("BEGIN")) + if err != nil { + return err + } + go conn.inWorker() + go conn.outWorker() + return nil + } + } + } + } + return errors.New("dbus: authentication failed") +} + +// tryAuth tries to authenticate with m as the mechanism, using state as the +// initial authState and in for reading input. It returns (nil, true) on +// success, (nil, false) on a REJECTED and (someErr, false) if some other +// error occured. 
+func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) { + for { + s, err := authReadLine(in) + if err != nil { + return err, false + } + switch { + case state == waitingForData && string(s[0]) == "DATA": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + continue + } + data, status := m.HandleData(s[1]) + switch status { + case AuthOk, AuthContinue: + if len(data) != 0 { + err = authWriteLine(conn.transport, []byte("DATA"), data) + if err != nil { + return err, false + } + } + if status == AuthOk { + state = waitingForOk + } + case AuthError: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + } + case state == waitingForData && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForData && string(s[0]) == "ERROR": + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + case state == waitingForData && string(s[0]) == "OK": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + } + conn.uuid = string(s[1]) + return nil, true + case state == waitingForData: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + case state == waitingForOk && string(s[0]) == "OK": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + } + conn.uuid = string(s[1]) + return nil, true + case state == waitingForOk && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForOk && (string(s[0]) == "DATA" || + string(s[0]) == "ERROR"): + + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + case state == waitingForOk: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + case state == waitingForReject && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForReject: + return errors.New("dbus: authentication protocol error"), false + default: + panic("dbus: invalid auth state") + } + } +} + +// authReadLine reads a line and separates it into its fields. +func authReadLine(in *bufio.Reader) ([][]byte, error) { + data, err := in.ReadBytes('\n') + if err != nil { + return nil, err + } + data = bytes.TrimSuffix(data, []byte("\r\n")) + return bytes.Split(data, []byte{' '}), nil +} + +// authWriteLine writes the given line in the authentication protocol format +// (elements of data separated by a " " and terminated by "\r\n"). +func authWriteLine(out io.Writer, data ...[]byte) error { + buf := make([]byte, 0) + for i, v := range data { + buf = append(buf, v...) + if i != len(data)-1 { + buf = append(buf, ' ') + } + } + buf = append(buf, '\r') + buf = append(buf, '\n') + n, err := out.Write(buf) + if err != nil { + return err + } + if n != len(buf) { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/godbus/dbus/auth_external.go b/vendor/github.com/godbus/dbus/auth_external.go new file mode 100644 index 000000000..7e376d3ef --- /dev/null +++ b/vendor/github.com/godbus/dbus/auth_external.go @@ -0,0 +1,26 @@ +package dbus + +import ( + "encoding/hex" +) + +// AuthExternal returns an Auth that authenticates as the given user with the +// EXTERNAL mechanism. 
+func AuthExternal(user string) Auth { + return authExternal{user} +} + +// AuthExternal implements the EXTERNAL authentication mechanism. +type authExternal struct { + user string +} + +func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) { + b := make([]byte, 2*len(a.user)) + hex.Encode(b, []byte(a.user)) + return []byte("EXTERNAL"), b, AuthOk +} + +func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) { + return nil, AuthError +} diff --git a/vendor/github.com/godbus/dbus/auth_sha1.go b/vendor/github.com/godbus/dbus/auth_sha1.go new file mode 100644 index 000000000..df15b4611 --- /dev/null +++ b/vendor/github.com/godbus/dbus/auth_sha1.go @@ -0,0 +1,102 @@ +package dbus + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/hex" + "os" +) + +// AuthCookieSha1 returns an Auth that authenticates as the given user with the +// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home +// directory of the user. +func AuthCookieSha1(user, home string) Auth { + return authCookieSha1{user, home} +} + +type authCookieSha1 struct { + user, home string +} + +func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) { + b := make([]byte, 2*len(a.user)) + hex.Encode(b, []byte(a.user)) + return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue +} + +func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) { + challenge := make([]byte, len(data)/2) + _, err := hex.Decode(challenge, data) + if err != nil { + return nil, AuthError + } + b := bytes.Split(challenge, []byte{' '}) + if len(b) != 3 { + return nil, AuthError + } + context := b[0] + id := b[1] + svchallenge := b[2] + cookie := a.getCookie(context, id) + if cookie == nil { + return nil, AuthError + } + clchallenge := a.generateChallenge() + if clchallenge == nil { + return nil, AuthError + } + hash := sha1.New() + hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'})) + hexhash := make([]byte, 2*hash.Size()) + hex.Encode(hexhash, hash.Sum(nil)) + data = append(clchallenge, ' ') + data = append(data, hexhash...) + resp := make([]byte, 2*len(data)) + hex.Encode(resp, data) + return resp, AuthOk +} + +// getCookie searches for the cookie identified by id in context and returns +// the cookie content or nil. (Since HandleData can't return a specific error, +// but only whether an error occured, this function also doesn't bother to +// return an error.) +func (a authCookieSha1) getCookie(context, id []byte) []byte { + file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context)) + if err != nil { + return nil + } + defer file.Close() + rd := bufio.NewReader(file) + for { + line, err := rd.ReadBytes('\n') + if err != nil { + return nil + } + line = line[:len(line)-1] + b := bytes.Split(line, []byte{' '}) + if len(b) != 3 { + return nil + } + if bytes.Equal(b[0], id) { + return b[2] + } + } +} + +// generateChallenge returns a random, hex-encoded challenge, or nil on error +// (see above). +func (a authCookieSha1) generateChallenge() []byte { + b := make([]byte, 16) + n, err := rand.Read(b) + if err != nil { + return nil + } + if n != 16 { + return nil + } + enc := make([]byte, 32) + hex.Encode(enc, b) + return enc +} diff --git a/vendor/github.com/godbus/dbus/call.go b/vendor/github.com/godbus/dbus/call.go new file mode 100644 index 000000000..ba6e73f60 --- /dev/null +++ b/vendor/github.com/godbus/dbus/call.go @@ -0,0 +1,36 @@ +package dbus + +import ( + "errors" +) + +// Call represents a pending or completed method call. 
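The Call type declared just below carries the Done channel that makes asynchronous calls possible. A hedged sketch, assuming the package's Object.Go method (the asynchronous counterpart of Object.Call, taken here to be available on the object returned by Conn.Object):

```go
package dbusexample

import "github.com/godbus/dbus"

// listNamesAsync issues a method call without blocking and collects the reply
// from the Call's Done channel once it strobes.
func listNamesAsync(conn *dbus.Conn) ([]string, error) {
	obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")

	done := make(chan *dbus.Call, 1) // must be buffered
	obj.Go("org.freedesktop.DBus.ListNames", 0, done)

	call := <-done
	if call.Err != nil {
		return nil, call.Err
	}
	var names []string
	if err := call.Store(&names); err != nil {
		return nil, err
	}
	return names, nil
}
```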
+type Call struct { + Destination string + Path ObjectPath + Method string + Args []interface{} + + // Strobes when the call is complete. + Done chan *Call + + // After completion, the error status. If this is non-nil, it may be an + // error message from the peer (with Error as its type) or some other error. + Err error + + // Holds the response once the call is done. + Body []interface{} +} + +var errSignature = errors.New("dbus: mismatched signature") + +// Store stores the body of the reply into the provided pointers. It returns +// an error if the signatures of the body and retvalues don't match, or if +// the error status is not nil. +func (c *Call) Store(retvalues ...interface{}) error { + if c.Err != nil { + return c.Err + } + + return Store(c.Body, retvalues...) +} diff --git a/vendor/github.com/godbus/dbus/conn.go b/vendor/github.com/godbus/dbus/conn.go new file mode 100644 index 000000000..a4f539401 --- /dev/null +++ b/vendor/github.com/godbus/dbus/conn.go @@ -0,0 +1,625 @@ +package dbus + +import ( + "errors" + "io" + "os" + "reflect" + "strings" + "sync" +) + +const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket" + +var ( + systemBus *Conn + systemBusLck sync.Mutex + sessionBus *Conn + sessionBusLck sync.Mutex +) + +// ErrClosed is the error returned by calls on a closed connection. +var ErrClosed = errors.New("dbus: connection closed by user") + +// Conn represents a connection to a message bus (usually, the system or +// session bus). +// +// Connections are either shared or private. Shared connections +// are shared between calls to the functions that return them. As a result, +// the methods Close, Auth and Hello must not be called on them. +// +// Multiple goroutines may invoke methods on a connection simultaneously. +type Conn struct { + transport + + busObj BusObject + unixFD bool + uuid string + + names []string + namesLck sync.RWMutex + + serialLck sync.Mutex + nextSerial uint32 + serialUsed map[uint32]bool + + calls map[uint32]*Call + callsLck sync.RWMutex + + handlers map[ObjectPath]map[string]exportWithMapping + handlersLck sync.RWMutex + + out chan *Message + closed bool + outLck sync.RWMutex + + signals []chan<- *Signal + signalsLck sync.Mutex + + eavesdropped chan<- *Message + eavesdroppedLck sync.Mutex +} + +// SessionBus returns a shared connection to the session bus, connecting to it +// if not already done. +func SessionBus() (conn *Conn, err error) { + sessionBusLck.Lock() + defer sessionBusLck.Unlock() + if sessionBus != nil { + return sessionBus, nil + } + defer func() { + if conn != nil { + sessionBus = conn + } + }() + conn, err = SessionBusPrivate() + if err != nil { + return + } + if err = conn.Auth(nil); err != nil { + conn.Close() + conn = nil + return + } + if err = conn.Hello(); err != nil { + conn.Close() + conn = nil + } + return +} + +// SessionBusPrivate returns a new private connection to the session bus. +func SessionBusPrivate() (*Conn, error) { + address := os.Getenv("DBUS_SESSION_BUS_ADDRESS") + if address != "" && address != "autolaunch:" { + return Dial(address) + } + + return sessionBusPlatform() +} + +// SystemBus returns a shared connection to the system bus, connecting to it if +// not already done. 
+func SystemBus() (conn *Conn, err error) { + systemBusLck.Lock() + defer systemBusLck.Unlock() + if systemBus != nil { + return systemBus, nil + } + defer func() { + if conn != nil { + systemBus = conn + } + }() + conn, err = SystemBusPrivate() + if err != nil { + return + } + if err = conn.Auth(nil); err != nil { + conn.Close() + conn = nil + return + } + if err = conn.Hello(); err != nil { + conn.Close() + conn = nil + } + return +} + +// SystemBusPrivate returns a new private connection to the system bus. +func SystemBusPrivate() (*Conn, error) { + address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") + if address != "" { + return Dial(address) + } + return Dial(defaultSystemBusAddress) +} + +// Dial establishes a new private connection to the message bus specified by address. +func Dial(address string) (*Conn, error) { + tr, err := getTransport(address) + if err != nil { + return nil, err + } + return newConn(tr) +} + +// NewConn creates a new private *Conn from an already established connection. +func NewConn(conn io.ReadWriteCloser) (*Conn, error) { + return newConn(genericTransport{conn}) +} + +// newConn creates a new *Conn from a transport. +func newConn(tr transport) (*Conn, error) { + conn := new(Conn) + conn.transport = tr + conn.calls = make(map[uint32]*Call) + conn.out = make(chan *Message, 10) + conn.handlers = make(map[ObjectPath]map[string]exportWithMapping) + conn.nextSerial = 1 + conn.serialUsed = map[uint32]bool{0: true} + conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus") + return conn, nil +} + +// BusObject returns the object owned by the bus daemon which handles +// administrative requests. +func (conn *Conn) BusObject() BusObject { + return conn.busObj +} + +// Close closes the connection. Any blocked operations will return with errors +// and the channels passed to Eavesdrop and Signal are closed. This method must +// not be called on shared connections. +func (conn *Conn) Close() error { + conn.outLck.Lock() + if conn.closed { + // inWorker calls Close on read error, the read error may + // be caused by another caller calling Close to shutdown the + // dbus connection, a double-close scenario we prevent here. + conn.outLck.Unlock() + return nil + } + close(conn.out) + conn.closed = true + conn.outLck.Unlock() + conn.signalsLck.Lock() + for _, ch := range conn.signals { + close(ch) + } + conn.signalsLck.Unlock() + conn.eavesdroppedLck.Lock() + if conn.eavesdropped != nil { + close(conn.eavesdropped) + } + conn.eavesdroppedLck.Unlock() + return conn.transport.Close() +} + +// Eavesdrop causes conn to send all incoming messages to the given channel +// without further processing. Method replies, errors and signals will not be +// sent to the appropiate channels and method calls will not be handled. If nil +// is passed, the normal behaviour is restored. +// +// The caller has to make sure that ch is sufficiently buffered; +// if a message arrives when a write to ch is not possible, the message is +// discarded. +func (conn *Conn) Eavesdrop(ch chan<- *Message) { + conn.eavesdroppedLck.Lock() + conn.eavesdropped = ch + conn.eavesdroppedLck.Unlock() +} + +// getSerial returns an unused serial. +func (conn *Conn) getSerial() uint32 { + conn.serialLck.Lock() + defer conn.serialLck.Unlock() + n := conn.nextSerial + for conn.serialUsed[n] { + n++ + } + conn.serialUsed[n] = true + conn.nextSerial = n + 1 + return n +} + +// Hello sends the initial org.freedesktop.DBus.Hello call. 
This method must be +// called after authentication, but before sending any other messages to the +// bus. Hello must not be called for shared connections. +func (conn *Conn) Hello() error { + var s string + err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s) + if err != nil { + return err + } + conn.namesLck.Lock() + conn.names = make([]string, 1) + conn.names[0] = s + conn.namesLck.Unlock() + return nil +} + +// inWorker runs in an own goroutine, reading incoming messages from the +// transport and dispatching them appropiately. +func (conn *Conn) inWorker() { + for { + msg, err := conn.ReadMessage() + if err == nil { + conn.eavesdroppedLck.Lock() + if conn.eavesdropped != nil { + select { + case conn.eavesdropped <- msg: + default: + } + conn.eavesdroppedLck.Unlock() + continue + } + conn.eavesdroppedLck.Unlock() + dest, _ := msg.Headers[FieldDestination].value.(string) + found := false + if dest == "" { + found = true + } else { + conn.namesLck.RLock() + if len(conn.names) == 0 { + found = true + } + for _, v := range conn.names { + if dest == v { + found = true + break + } + } + conn.namesLck.RUnlock() + } + if !found { + // Eavesdropped a message, but no channel for it is registered. + // Ignore it. + continue + } + switch msg.Type { + case TypeMethodReply, TypeError: + serial := msg.Headers[FieldReplySerial].value.(uint32) + conn.callsLck.Lock() + if c, ok := conn.calls[serial]; ok { + if msg.Type == TypeError { + name, _ := msg.Headers[FieldErrorName].value.(string) + c.Err = Error{name, msg.Body} + } else { + c.Body = msg.Body + } + c.Done <- c + conn.serialLck.Lock() + delete(conn.serialUsed, serial) + conn.serialLck.Unlock() + delete(conn.calls, serial) + } + conn.callsLck.Unlock() + case TypeSignal: + iface := msg.Headers[FieldInterface].value.(string) + member := msg.Headers[FieldMember].value.(string) + // as per http://dbus.freedesktop.org/doc/dbus-specification.html , + // sender is optional for signals. + sender, _ := msg.Headers[FieldSender].value.(string) + if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" { + if member == "NameLost" { + // If we lost the name on the bus, remove it from our + // tracking list. + name, ok := msg.Body[0].(string) + if !ok { + panic("Unable to read the lost name") + } + conn.namesLck.Lock() + for i, v := range conn.names { + if v == name { + conn.names = append(conn.names[:i], + conn.names[i+1:]...) + } + } + conn.namesLck.Unlock() + } else if member == "NameAcquired" { + // If we acquired the name on the bus, add it to our + // tracking list. + name, ok := msg.Body[0].(string) + if !ok { + panic("Unable to read the acquired name") + } + conn.namesLck.Lock() + conn.names = append(conn.names, name) + conn.namesLck.Unlock() + } + } + signal := &Signal{ + Sender: sender, + Path: msg.Headers[FieldPath].value.(ObjectPath), + Name: iface + "." + member, + Body: msg.Body, + } + conn.signalsLck.Lock() + for _, ch := range conn.signals { + ch <- signal + } + conn.signalsLck.Unlock() + case TypeMethodCall: + go conn.handleCall(msg) + } + } else if _, ok := err.(InvalidMessageError); !ok { + // Some read error occured (usually EOF); we can't really do + // anything but to shut down all stuff and returns errors to all + // pending replies. + conn.Close() + conn.callsLck.RLock() + for _, v := range conn.calls { + v.Err = err + v.Done <- v + } + conn.callsLck.RUnlock() + return + } + // invalid messages are ignored + } +} + +// Names returns the list of all names that are currently owned by this +// connection. 
The slice is always at least one element long, the first element +// being the unique name of the connection. +func (conn *Conn) Names() []string { + conn.namesLck.RLock() + // copy the slice so it can't be modified + s := make([]string, len(conn.names)) + copy(s, conn.names) + conn.namesLck.RUnlock() + return s +} + +// Object returns the object identified by the given destination name and path. +func (conn *Conn) Object(dest string, path ObjectPath) BusObject { + return &Object{conn, dest, path} +} + +// outWorker runs in an own goroutine, encoding and sending messages that are +// sent to conn.out. +func (conn *Conn) outWorker() { + for msg := range conn.out { + err := conn.SendMessage(msg) + conn.callsLck.RLock() + if err != nil { + if c := conn.calls[msg.serial]; c != nil { + c.Err = err + c.Done <- c + } + conn.serialLck.Lock() + delete(conn.serialUsed, msg.serial) + conn.serialLck.Unlock() + } else if msg.Type != TypeMethodCall { + conn.serialLck.Lock() + delete(conn.serialUsed, msg.serial) + conn.serialLck.Unlock() + } + conn.callsLck.RUnlock() + } +} + +// Send sends the given message to the message bus. You usually don't need to +// use this; use the higher-level equivalents (Call / Go, Emit and Export) +// instead. If msg is a method call and NoReplyExpected is not set, a non-nil +// call is returned and the same value is sent to ch (which must be buffered) +// once the call is complete. Otherwise, ch is ignored and a Call structure is +// returned of which only the Err member is valid. +func (conn *Conn) Send(msg *Message, ch chan *Call) *Call { + var call *Call + + msg.serial = conn.getSerial() + if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 { + if ch == nil { + ch = make(chan *Call, 5) + } else if cap(ch) == 0 { + panic("dbus: unbuffered channel passed to (*Conn).Send") + } + call = new(Call) + call.Destination, _ = msg.Headers[FieldDestination].value.(string) + call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath) + iface, _ := msg.Headers[FieldInterface].value.(string) + member, _ := msg.Headers[FieldMember].value.(string) + call.Method = iface + "." + member + call.Args = msg.Body + call.Done = ch + conn.callsLck.Lock() + conn.calls[msg.serial] = call + conn.callsLck.Unlock() + conn.outLck.RLock() + if conn.closed { + call.Err = ErrClosed + call.Done <- call + } else { + conn.out <- msg + } + conn.outLck.RUnlock() + } else { + conn.outLck.RLock() + if conn.closed { + call = &Call{Err: ErrClosed} + } else { + conn.out <- msg + call = &Call{Err: nil} + } + conn.outLck.RUnlock() + } + return call +} + +// sendError creates an error message corresponding to the parameters and sends +// it to conn.out. +func (conn *Conn) sendError(e Error, dest string, serial uint32) { + msg := new(Message) + msg.Type = TypeError + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + if dest != "" { + msg.Headers[FieldDestination] = MakeVariant(dest) + } + msg.Headers[FieldErrorName] = MakeVariant(e.Name) + msg.Headers[FieldReplySerial] = MakeVariant(serial) + msg.Body = e.Body + if len(e.Body) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- msg + } + conn.outLck.RUnlock() +} + +// sendReply creates a method reply message corresponding to the parameters and +// sends it to conn.out. 
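Send can also be driven directly with a hand-built message. The sketch below is illustrative only; it fills the header fields the same way this file's sendError and sendReply helpers do, including the signature header for a non-empty body:

```go
package dbusexample

import "github.com/godbus/dbus"

// getNameOwner performs org.freedesktop.DBus.GetNameOwner via Conn.Send with a
// manually constructed method-call message.
func getNameOwner(conn *dbus.Conn, name string) (string, error) {
	msg := new(dbus.Message)
	msg.Type = dbus.TypeMethodCall
	msg.Headers = map[dbus.HeaderField]dbus.Variant{
		dbus.FieldDestination: dbus.MakeVariant("org.freedesktop.DBus"),
		dbus.FieldPath:        dbus.MakeVariant(dbus.ObjectPath("/org/freedesktop/DBus")),
		dbus.FieldInterface:   dbus.MakeVariant("org.freedesktop.DBus"),
		dbus.FieldMember:      dbus.MakeVariant("GetNameOwner"),
		dbus.FieldSignature:   dbus.MakeVariant(dbus.SignatureOf(name)),
	}
	msg.Body = []interface{}{name}

	call := conn.Send(msg, make(chan *dbus.Call, 1)) // the channel must be buffered
	<-call.Done
	if call.Err != nil {
		return "", call.Err
	}
	var owner string
	err := call.Store(&owner)
	return owner, err
}
```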
+func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { + msg := new(Message) + msg.Type = TypeMethodReply + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + if dest != "" { + msg.Headers[FieldDestination] = MakeVariant(dest) + } + msg.Headers[FieldReplySerial] = MakeVariant(serial) + msg.Body = values + if len(values) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- msg + } + conn.outLck.RUnlock() +} + +// Signal registers the given channel to be passed all received signal messages. +// The caller has to make sure that ch is sufficiently buffered; if a message +// arrives when a write to c is not possible, it is discarded. +// +// Multiple of these channels can be registered at the same time. Passing a +// channel that already is registered will remove it from the list of the +// registered channels. +// +// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a +// channel for eavesdropped messages, this channel receives all signals, and +// none of the channels passed to Signal will receive any signals. +func (conn *Conn) Signal(ch chan<- *Signal) { + conn.signalsLck.Lock() + conn.signals = append(conn.signals, ch) + conn.signalsLck.Unlock() +} + +// SupportsUnixFDs returns whether the underlying transport supports passing of +// unix file descriptors. If this is false, method calls containing unix file +// descriptors will return an error and emitted signals containing them will +// not be sent. +func (conn *Conn) SupportsUnixFDs() bool { + return conn.unixFD +} + +// Error represents a D-Bus message of type Error. +type Error struct { + Name string + Body []interface{} +} + +func NewError(name string, body []interface{}) *Error { + return &Error{name, body} +} + +func (e Error) Error() string { + if len(e.Body) >= 1 { + s, ok := e.Body[0].(string) + if ok { + return s + } + } + return e.Name +} + +// Signal represents a D-Bus message of type Signal. The name member is given in +// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost. +type Signal struct { + Sender string + Path ObjectPath + Name string + Body []interface{} +} + +// transport is a D-Bus transport. +type transport interface { + // Read and Write raw data (for example, for the authentication protocol). + io.ReadWriteCloser + + // Send the initial null byte used for the EXTERNAL mechanism. + SendNullByte() error + + // Returns whether this transport supports passing Unix FDs. + SupportsUnixFDs() bool + + // Signal the transport that Unix FD passing is enabled for this connection. + EnableUnixFDs() + + // Read / send a message, handling things like Unix FDs. 
+ ReadMessage() (*Message, error) + SendMessage(*Message) error +} + +var ( + transports = make(map[string]func(string) (transport, error)) +) + +func getTransport(address string) (transport, error) { + var err error + var t transport + + addresses := strings.Split(address, ";") + for _, v := range addresses { + i := strings.IndexRune(v, ':') + if i == -1 { + err = errors.New("dbus: invalid bus address (no transport)") + continue + } + f := transports[v[:i]] + if f == nil { + err = errors.New("dbus: invalid bus address (invalid or unsupported transport)") + continue + } + t, err = f(v[i+1:]) + if err == nil { + return t, nil + } + } + return nil, err +} + +// dereferenceAll returns a slice that, assuming that vs is a slice of pointers +// of arbitrary types, containes the values that are obtained from dereferencing +// all elements in vs. +func dereferenceAll(vs []interface{}) []interface{} { + for i := range vs { + v := reflect.ValueOf(vs[i]) + v = v.Elem() + vs[i] = v.Interface() + } + return vs +} + +// getKey gets a key from a the list of keys. Returns "" on error / not found... +func getKey(s, key string) string { + i := strings.Index(s, key) + if i == -1 { + return "" + } + if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' { + return "" + } + j := strings.Index(s, ",") + if j == -1 { + j = len(s) + } + return s[i+len(key)+1 : j] +} diff --git a/vendor/github.com/godbus/dbus/conn_darwin.go b/vendor/github.com/godbus/dbus/conn_darwin.go new file mode 100644 index 000000000..b67bb1b81 --- /dev/null +++ b/vendor/github.com/godbus/dbus/conn_darwin.go @@ -0,0 +1,21 @@ +package dbus + +import ( + "errors" + "os/exec" +) + +func sessionBusPlatform() (*Conn, error) { + cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET") + b, err := cmd.CombinedOutput() + + if err != nil { + return nil, err + } + + if len(b) == 0 { + return nil, errors.New("dbus: couldn't determine address of session bus") + } + + return Dial("unix:path=" + string(b[:len(b)-1])) +} diff --git a/vendor/github.com/godbus/dbus/conn_other.go b/vendor/github.com/godbus/dbus/conn_other.go new file mode 100644 index 000000000..f74b8758d --- /dev/null +++ b/vendor/github.com/godbus/dbus/conn_other.go @@ -0,0 +1,27 @@ +// +build !darwin + +package dbus + +import ( + "bytes" + "errors" + "os/exec" +) + +func sessionBusPlatform() (*Conn, error) { + cmd := exec.Command("dbus-launch") + b, err := cmd.CombinedOutput() + + if err != nil { + return nil, err + } + + i := bytes.IndexByte(b, '=') + j := bytes.IndexByte(b, '\n') + + if i == -1 || j == -1 { + return nil, errors.New("dbus: couldn't determine address of session bus") + } + + return Dial(string(b[i+1 : j])) +} diff --git a/vendor/github.com/godbus/dbus/dbus.go b/vendor/github.com/godbus/dbus/dbus.go new file mode 100644 index 000000000..2ce68735c --- /dev/null +++ b/vendor/github.com/godbus/dbus/dbus.go @@ -0,0 +1,258 @@ +package dbus + +import ( + "errors" + "reflect" + "strings" +) + +var ( + byteType = reflect.TypeOf(byte(0)) + boolType = reflect.TypeOf(false) + uint8Type = reflect.TypeOf(uint8(0)) + int16Type = reflect.TypeOf(int16(0)) + uint16Type = reflect.TypeOf(uint16(0)) + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf("") + signatureType = reflect.TypeOf(Signature{""}) + objectPathType = reflect.TypeOf(ObjectPath("")) + variantType = 
reflect.TypeOf(Variant{Signature{""}, nil}) + interfacesType = reflect.TypeOf([]interface{}{}) + unixFDType = reflect.TypeOf(UnixFD(0)) + unixFDIndexType = reflect.TypeOf(UnixFDIndex(0)) +) + +// An InvalidTypeError signals that a value which cannot be represented in the +// D-Bus wire format was passed to a function. +type InvalidTypeError struct { + Type reflect.Type +} + +func (e InvalidTypeError) Error() string { + return "dbus: invalid type " + e.Type.String() +} + +// Store copies the values contained in src to dest, which must be a slice of +// pointers. It converts slices of interfaces from src to corresponding structs +// in dest. An error is returned if the lengths of src and dest or the types of +// their elements don't match. +func Store(src []interface{}, dest ...interface{}) error { + if len(src) != len(dest) { + return errors.New("dbus.Store: length mismatch") + } + + for i := range src { + if err := store(src[i], dest[i]); err != nil { + return err + } + } + return nil +} + +func store(src, dest interface{}) error { + if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) { + reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src)) + return nil + } else if hasStruct(dest) { + rv := reflect.ValueOf(dest).Elem() + switch rv.Kind() { + case reflect.Struct: + vs, ok := src.([]interface{}) + if !ok { + return errors.New("dbus.Store: type mismatch") + } + t := rv.Type() + ndest := make([]interface{}, 0, rv.NumField()) + for i := 0; i < rv.NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + ndest = append(ndest, rv.Field(i).Addr().Interface()) + } + } + if len(vs) != len(ndest) { + return errors.New("dbus.Store: type mismatch") + } + err := Store(vs, ndest...) + if err != nil { + return errors.New("dbus.Store: type mismatch") + } + case reflect.Slice: + sv := reflect.ValueOf(src) + if sv.Kind() != reflect.Slice { + return errors.New("dbus.Store: type mismatch") + } + rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len())) + for i := 0; i < sv.Len(); i++ { + if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil { + return err + } + } + case reflect.Map: + sv := reflect.ValueOf(src) + if sv.Kind() != reflect.Map { + return errors.New("dbus.Store: type mismatch") + } + keys := sv.MapKeys() + rv.Set(reflect.MakeMap(sv.Type())) + for _, key := range keys { + v := reflect.New(sv.Type().Elem()) + if err := store(v, sv.MapIndex(key).Interface()); err != nil { + return err + } + rv.SetMapIndex(key, v.Elem()) + } + default: + return errors.New("dbus.Store: type mismatch") + } + return nil + } else { + return errors.New("dbus.Store: type mismatch") + } +} + +func hasStruct(v interface{}) bool { + t := reflect.TypeOf(v) + for { + switch t.Kind() { + case reflect.Struct: + return true + case reflect.Slice, reflect.Ptr, reflect.Map: + t = t.Elem() + default: + return false + } + } +} + +// An ObjectPath is an object path as defined by the D-Bus spec. +type ObjectPath string + +// IsValid returns whether the object path is valid. 
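Store's struct conversion described above turns an incoming STRUCT, which arrives as a []interface{}, into a Go struct whose exported fields match the wire order; a small illustration with made-up values:

```go
package dbusexample

import "github.com/godbus/dbus"

// version mirrors a hypothetical (s u u) STRUCT received from the bus.
type version struct {
	Name  string
	Major uint32
	Minor uint32
}

func storeExample() (version, error) {
	// A decoded reply body holding one STRUCT value.
	body := []interface{}{
		[]interface{}{"runsc", uint32(1), uint32(0)},
	}
	var v version
	err := dbus.Store(body, &v)
	return v, err
}
```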
+func (o ObjectPath) IsValid() bool { + s := string(o) + if len(s) == 0 { + return false + } + if s[0] != '/' { + return false + } + if s[len(s)-1] == '/' && len(s) != 1 { + return false + } + // probably not used, but technically possible + if s == "/" { + return true + } + split := strings.Split(s[1:], "/") + for _, v := range split { + if len(v) == 0 { + return false + } + for _, c := range v { + if !isMemberChar(c) { + return false + } + } + } + return true +} + +// A UnixFD is a Unix file descriptor sent over the wire. See the package-level +// documentation for more information about Unix file descriptor passsing. +type UnixFD int32 + +// A UnixFDIndex is the representation of a Unix file descriptor in a message. +type UnixFDIndex uint32 + +// alignment returns the alignment of values of type t. +func alignment(t reflect.Type) int { + switch t { + case variantType: + return 1 + case objectPathType: + return 4 + case signatureType: + return 1 + case interfacesType: // sometimes used for structs + return 8 + } + switch t.Kind() { + case reflect.Uint8: + return 1 + case reflect.Uint16, reflect.Int16: + return 2 + case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map: + return 4 + case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct: + return 8 + case reflect.Ptr: + return alignment(t.Elem()) + } + return 1 +} + +// isKeyType returns whether t is a valid type for a D-Bus dict. +func isKeyType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64, + reflect.String: + + return true + } + return false +} + +// isValidInterface returns whether s is a valid name for an interface. +func isValidInterface(s string) bool { + if len(s) == 0 || len(s) > 255 || s[0] == '.' { + return false + } + elem := strings.Split(s, ".") + if len(elem) < 2 { + return false + } + for _, v := range elem { + if len(v) == 0 { + return false + } + if v[0] >= '0' && v[0] <= '9' { + return false + } + for _, c := range v { + if !isMemberChar(c) { + return false + } + } + } + return true +} + +// isValidMember returns whether s is a valid name for a member. +func isValidMember(s string) bool { + if len(s) == 0 || len(s) > 255 { + return false + } + i := strings.Index(s, ".") + if i != -1 { + return false + } + if s[0] >= '0' && s[0] <= '9' { + return false + } + for _, c := range s { + if !isMemberChar(c) { + return false + } + } + return true +} + +func isMemberChar(c rune) bool { + return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || + (c >= 'a' && c <= 'z') || c == '_' +} diff --git a/vendor/github.com/godbus/dbus/decoder.go b/vendor/github.com/godbus/dbus/decoder.go new file mode 100644 index 000000000..ef50dcab9 --- /dev/null +++ b/vendor/github.com/godbus/dbus/decoder.go @@ -0,0 +1,228 @@ +package dbus + +import ( + "encoding/binary" + "io" + "reflect" +) + +type decoder struct { + in io.Reader + order binary.ByteOrder + pos int +} + +// newDecoder returns a new decoder that reads values from in. The input is +// expected to be in the given byte order. +func newDecoder(in io.Reader, order binary.ByteOrder) *decoder { + dec := new(decoder) + dec.in = in + dec.order = order + return dec +} + +// align aligns the input to the given boundary and panics on error. 
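The rounding used by align (and by the encoder's padding calculation later in this patch) is the usual power-of-two align-up trick; isolated here for clarity:

```go
package dbusexample

// alignUp rounds pos up to the next multiple of n, where n is a power of two;
// it is the same bit manipulation decoder.align and encoder.padding rely on.
func alignUp(pos, n int) int {
	return (pos + n - 1) &^ (n - 1)
}

// For example: alignUp(5, 8) == 8, alignUp(8, 8) == 8, alignUp(9, 4) == 12.
```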
+func (dec *decoder) align(n int) { + if dec.pos%n != 0 { + newpos := (dec.pos + n - 1) & ^(n - 1) + empty := make([]byte, newpos-dec.pos) + if _, err := io.ReadFull(dec.in, empty); err != nil { + panic(err) + } + dec.pos = newpos + } +} + +// Calls binary.Read(dec.in, dec.order, v) and panics on read errors. +func (dec *decoder) binread(v interface{}) { + if err := binary.Read(dec.in, dec.order, v); err != nil { + panic(err) + } +} + +func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) { + defer func() { + var ok bool + v := recover() + if err, ok = v.(error); ok { + if err == io.EOF || err == io.ErrUnexpectedEOF { + err = FormatError("unexpected EOF") + } + } + }() + vs = make([]interface{}, 0) + s := sig.str + for s != "" { + err, rem := validSingle(s, 0) + if err != nil { + return nil, err + } + v := dec.decode(s[:len(s)-len(rem)], 0) + vs = append(vs, v) + s = rem + } + return vs, nil +} + +func (dec *decoder) decode(s string, depth int) interface{} { + dec.align(alignment(typeFor(s))) + switch s[0] { + case 'y': + var b [1]byte + if _, err := dec.in.Read(b[:]); err != nil { + panic(err) + } + dec.pos++ + return b[0] + case 'b': + i := dec.decode("u", depth).(uint32) + switch { + case i == 0: + return false + case i == 1: + return true + default: + panic(FormatError("invalid value for boolean")) + } + case 'n': + var i int16 + dec.binread(&i) + dec.pos += 2 + return i + case 'i': + var i int32 + dec.binread(&i) + dec.pos += 4 + return i + case 'x': + var i int64 + dec.binread(&i) + dec.pos += 8 + return i + case 'q': + var i uint16 + dec.binread(&i) + dec.pos += 2 + return i + case 'u': + var i uint32 + dec.binread(&i) + dec.pos += 4 + return i + case 't': + var i uint64 + dec.binread(&i) + dec.pos += 8 + return i + case 'd': + var f float64 + dec.binread(&f) + dec.pos += 8 + return f + case 's': + length := dec.decode("u", depth).(uint32) + b := make([]byte, int(length)+1) + if _, err := io.ReadFull(dec.in, b); err != nil { + panic(err) + } + dec.pos += int(length) + 1 + return string(b[:len(b)-1]) + case 'o': + return ObjectPath(dec.decode("s", depth).(string)) + case 'g': + length := dec.decode("y", depth).(byte) + b := make([]byte, int(length)+1) + if _, err := io.ReadFull(dec.in, b); err != nil { + panic(err) + } + dec.pos += int(length) + 1 + sig, err := ParseSignature(string(b[:len(b)-1])) + if err != nil { + panic(err) + } + return sig + case 'v': + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + var variant Variant + sig := dec.decode("g", depth).(Signature) + if len(sig.str) == 0 { + panic(FormatError("variant signature is empty")) + } + err, rem := validSingle(sig.str, 0) + if err != nil { + panic(err) + } + if rem != "" { + panic(FormatError("variant signature has multiple types")) + } + variant.sig = sig + variant.value = dec.decode(sig.str, depth+1) + return variant + case 'h': + return UnixFDIndex(dec.decode("u", depth).(uint32)) + case 'a': + if len(s) > 1 && s[1] == '{' { + ksig := s[2:3] + vsig := s[3 : len(s)-1] + v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig))) + if depth >= 63 { + panic(FormatError("input exceeds container depth limit")) + } + length := dec.decode("u", depth).(uint32) + // Even for empty maps, the correct padding must be included + dec.align(8) + spos := dec.pos + for dec.pos < spos+int(length) { + dec.align(8) + if !isKeyType(v.Type().Key()) { + panic(InvalidTypeError{v.Type()}) + } + kv := dec.decode(ksig, depth+2) + vv := dec.decode(vsig, depth+2) + 
v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv)) + } + return v.Interface() + } + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + length := dec.decode("u", depth).(uint32) + v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length)) + // Even for empty arrays, the correct padding must be included + dec.align(alignment(typeFor(s[1:]))) + spos := dec.pos + for dec.pos < spos+int(length) { + ev := dec.decode(s[1:], depth+1) + v = reflect.Append(v, reflect.ValueOf(ev)) + } + return v.Interface() + case '(': + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + dec.align(8) + v := make([]interface{}, 0) + s = s[1 : len(s)-1] + for s != "" { + err, rem := validSingle(s, 0) + if err != nil { + panic(err) + } + ev := dec.decode(s[:len(s)-len(rem)], depth+1) + v = append(v, ev) + s = rem + } + return v + default: + panic(SignatureError{Sig: s}) + } +} + +// A FormatError is an error in the wire format. +type FormatError string + +func (e FormatError) Error() string { + return "dbus: wire format error: " + string(e) +} diff --git a/vendor/github.com/godbus/dbus/doc.go b/vendor/github.com/godbus/dbus/doc.go new file mode 100644 index 000000000..deff554a3 --- /dev/null +++ b/vendor/github.com/godbus/dbus/doc.go @@ -0,0 +1,63 @@ +/* +Package dbus implements bindings to the D-Bus message bus system. + +To use the message bus API, you first need to connect to a bus (usually the +session or system bus). The acquired connection then can be used to call methods +on remote objects and emit or receive signals. Using the Export method, you can +arrange D-Bus methods calls to be directly translated to method calls on a Go +value. + +Conversion Rules + +For outgoing messages, Go types are automatically converted to the +corresponding D-Bus types. The following types are directly encoded as their +respective D-Bus equivalents: + + Go type | D-Bus type + ------------+----------- + byte | BYTE + bool | BOOLEAN + int16 | INT16 + uint16 | UINT16 + int32 | INT32 + uint32 | UINT32 + int64 | INT64 + uint64 | UINT64 + float64 | DOUBLE + string | STRING + ObjectPath | OBJECT_PATH + Signature | SIGNATURE + Variant | VARIANT + UnixFDIndex | UNIX_FD + +Slices and arrays encode as ARRAYs of their element type. + +Maps encode as DICTs, provided that their key type can be used as a key for +a DICT. + +Structs other than Variant and Signature encode as a STRUCT containing their +exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will +be skipped. + +Pointers encode as the value they're pointed to. + +Trying to encode any other type or a slice, map or struct containing an +unsupported type will result in an InvalidTypeError. + +For incoming messages, the inverse of these rules are used, with the exception +of STRUCTs. Incoming STRUCTS are represented as a slice of empty interfaces +containing the struct fields in the correct order. The Store function can be +used to convert such values to Go structs. + +Unix FD passing + +Handling Unix file descriptors deserves special mention. To use them, you should +first check that they are supported on a connection by calling SupportsUnixFDs. +If it returns true, all method of Connection will translate messages containing +UnixFD's to messages that are accompanied by the given file descriptors with the +UnixFD values being substituted by the correct indices. Similarily, the indices +of incoming messages are automatically resolved. It shouldn't be necessary to use +UnixFDIndex. 
+ +*/ +package dbus diff --git a/vendor/github.com/godbus/dbus/encoder.go b/vendor/github.com/godbus/dbus/encoder.go new file mode 100644 index 000000000..9f0a9e89e --- /dev/null +++ b/vendor/github.com/godbus/dbus/encoder.go @@ -0,0 +1,208 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "io" + "reflect" +) + +// An encoder encodes values to the D-Bus wire format. +type encoder struct { + out io.Writer + order binary.ByteOrder + pos int +} + +// NewEncoder returns a new encoder that writes to out in the given byte order. +func newEncoder(out io.Writer, order binary.ByteOrder) *encoder { + return newEncoderAtOffset(out, 0, order) +} + +// newEncoderAtOffset returns a new encoder that writes to out in the given +// byte order. Specify the offset to initialize pos for proper alignment +// computation. +func newEncoderAtOffset(out io.Writer, offset int, order binary.ByteOrder) *encoder { + enc := new(encoder) + enc.out = out + enc.order = order + enc.pos = offset + return enc +} + +// Aligns the next output to be on a multiple of n. Panics on write errors. +func (enc *encoder) align(n int) { + pad := enc.padding(0, n) + if pad > 0 { + empty := make([]byte, pad) + if _, err := enc.out.Write(empty); err != nil { + panic(err) + } + enc.pos += pad + } +} + +// pad returns the number of bytes of padding, based on current position and additional offset. +// and alignment. +func (enc *encoder) padding(offset, algn int) int { + abs := enc.pos + offset + if abs%algn != 0 { + newabs := (abs + algn - 1) & ^(algn - 1) + return newabs - abs + } + return 0 +} + +// Calls binary.Write(enc.out, enc.order, v) and panics on write errors. +func (enc *encoder) binwrite(v interface{}) { + if err := binary.Write(enc.out, enc.order, v); err != nil { + panic(err) + } +} + +// Encode encodes the given values to the underyling reader. All written values +// are aligned properly as required by the D-Bus spec. +func (enc *encoder) Encode(vs ...interface{}) (err error) { + defer func() { + err, _ = recover().(error) + }() + for _, v := range vs { + enc.encode(reflect.ValueOf(v), 0) + } + return nil +} + +// encode encodes the given value to the writer and panics on error. depth holds +// the depth of the container nesting. 
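The conversion rules listed in doc.go above can be checked directly with SignatureOf, which reports the D-Bus signature the encoder will produce for a set of Go values; the expected outputs in the comments follow the standard type codes:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// Basic types and containers map straight onto signature codes.
	fmt.Println(dbus.SignatureOf(uint32(7)))                  // u
	fmt.Println(dbus.SignatureOf([]string{"a", "b"}))         // as
	fmt.Println(dbus.SignatureOf(map[string]int32{"k": 1}))   // a{si}
	fmt.Println(dbus.SignatureOf(dbus.ObjectPath("/x"), 1.5)) // od

	// Structs encode as STRUCTs of their exported fields.
	type point struct{ X, Y int32 }
	fmt.Println(dbus.SignatureOf(point{1, 2})) // (ii)
}
```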
+func (enc *encoder) encode(v reflect.Value, depth int) { + enc.align(alignment(v.Type())) + switch v.Kind() { + case reflect.Uint8: + var b [1]byte + b[0] = byte(v.Uint()) + if _, err := enc.out.Write(b[:]); err != nil { + panic(err) + } + enc.pos++ + case reflect.Bool: + if v.Bool() { + enc.encode(reflect.ValueOf(uint32(1)), depth) + } else { + enc.encode(reflect.ValueOf(uint32(0)), depth) + } + case reflect.Int16: + enc.binwrite(int16(v.Int())) + enc.pos += 2 + case reflect.Uint16: + enc.binwrite(uint16(v.Uint())) + enc.pos += 2 + case reflect.Int32: + enc.binwrite(int32(v.Int())) + enc.pos += 4 + case reflect.Uint32: + enc.binwrite(uint32(v.Uint())) + enc.pos += 4 + case reflect.Int64: + enc.binwrite(v.Int()) + enc.pos += 8 + case reflect.Uint64: + enc.binwrite(v.Uint()) + enc.pos += 8 + case reflect.Float64: + enc.binwrite(v.Float()) + enc.pos += 8 + case reflect.String: + enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth) + b := make([]byte, v.Len()+1) + copy(b, v.String()) + b[len(b)-1] = 0 + n, err := enc.out.Write(b) + if err != nil { + panic(err) + } + enc.pos += n + case reflect.Ptr: + enc.encode(v.Elem(), depth) + case reflect.Slice, reflect.Array: + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + // Lookahead offset: 4 bytes for uint32 length (with alignment), + // plus alignment for elements. + n := enc.padding(0, 4) + 4 + offset := enc.pos + n + enc.padding(n, alignment(v.Type().Elem())) + + var buf bytes.Buffer + bufenc := newEncoderAtOffset(&buf, offset, enc.order) + + for i := 0; i < v.Len(); i++ { + bufenc.encode(v.Index(i), depth+1) + } + enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) + length := buf.Len() + enc.align(alignment(v.Type().Elem())) + if _, err := buf.WriteTo(enc.out); err != nil { + panic(err) + } + enc.pos += length + case reflect.Struct: + if depth >= 64 && v.Type() != signatureType { + panic(FormatError("input exceeds container depth limit")) + } + switch t := v.Type(); t { + case signatureType: + str := v.Field(0) + enc.encode(reflect.ValueOf(byte(str.Len())), depth+1) + b := make([]byte, str.Len()+1) + copy(b, str.String()) + b[len(b)-1] = 0 + n, err := enc.out.Write(b) + if err != nil { + panic(err) + } + enc.pos += n + case variantType: + variant := v.Interface().(Variant) + enc.encode(reflect.ValueOf(variant.sig), depth+1) + enc.encode(reflect.ValueOf(variant.value), depth+1) + default: + for i := 0; i < v.Type().NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + enc.encode(v.Field(i), depth+1) + } + } + } + case reflect.Map: + // Maps are arrays of structures, so they actually increase the depth by + // 2. 
+ if depth >= 63 { + panic(FormatError("input exceeds container depth limit")) + } + if !isKeyType(v.Type().Key()) { + panic(InvalidTypeError{v.Type()}) + } + keys := v.MapKeys() + // Lookahead offset: 4 bytes for uint32 length (with alignment), + // plus 8-byte alignment + n := enc.padding(0, 4) + 4 + offset := enc.pos + n + enc.padding(n, 8) + + var buf bytes.Buffer + bufenc := newEncoderAtOffset(&buf, offset, enc.order) + for _, k := range keys { + bufenc.align(8) + bufenc.encode(k, depth+2) + bufenc.encode(v.MapIndex(k), depth+2) + } + enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) + length := buf.Len() + enc.align(8) + if _, err := buf.WriteTo(enc.out); err != nil { + panic(err) + } + enc.pos += length + default: + panic(InvalidTypeError{v.Type()}) + } +} diff --git a/vendor/github.com/godbus/dbus/export.go b/vendor/github.com/godbus/dbus/export.go new file mode 100644 index 000000000..c6440a741 --- /dev/null +++ b/vendor/github.com/godbus/dbus/export.go @@ -0,0 +1,411 @@ +package dbus + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +var ( + errmsgInvalidArg = Error{ + "org.freedesktop.DBus.Error.InvalidArgs", + []interface{}{"Invalid type / number of args"}, + } + errmsgNoObject = Error{ + "org.freedesktop.DBus.Error.NoSuchObject", + []interface{}{"No such object"}, + } + errmsgUnknownMethod = Error{ + "org.freedesktop.DBus.Error.UnknownMethod", + []interface{}{"Unknown / invalid method"}, + } +) + +// exportWithMapping represents an exported struct along with a method name +// mapping to allow for exporting lower-case methods, etc. +type exportWithMapping struct { + export interface{} + + // Method name mapping; key -> struct method, value -> dbus method. + mapping map[string]string + + // Whether or not this export is for the entire subtree + includeSubtree bool +} + +// Sender is a type which can be used in exported methods to receive the message +// sender. +type Sender string + +func exportedMethod(export exportWithMapping, name string) reflect.Value { + if export.export == nil { + return reflect.Value{} + } + + // If a mapping was included in the export, check the map to see if we + // should be looking for a different method in the export. + if export.mapping != nil { + for key, value := range export.mapping { + if value == name { + name = key + break + } + + // Catch the case where a method is aliased but the client is calling + // the original, e.g. the "Foo" method was exported mapped to + // "foo," and dbus client called the original "Foo." + if key == name { + return reflect.Value{} + } + } + } + + value := reflect.ValueOf(export.export) + m := value.MethodByName(name) + + // Catch the case of attempting to call an unexported method + method, ok := value.Type().MethodByName(name) + + if !m.IsValid() || !ok || method.PkgPath != "" { + return reflect.Value{} + } + t := m.Type() + if t.NumOut() == 0 || + t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) { + + return reflect.Value{} + } + return m +} + +// searchHandlers will look through all registered handlers looking for one +// to handle the given path. If a verbatim one isn't found, it will check for +// a subtree registration for the path as well. 
+func (conn *Conn) searchHandlers(path ObjectPath) (map[string]exportWithMapping, bool) { + conn.handlersLck.RLock() + defer conn.handlersLck.RUnlock() + + handlers, ok := conn.handlers[path] + if ok { + return handlers, ok + } + + // If handlers weren't found for this exact path, look for a matching subtree + // registration + handlers = make(map[string]exportWithMapping) + path = path[:strings.LastIndex(string(path), "/")] + for len(path) > 0 { + var subtreeHandlers map[string]exportWithMapping + subtreeHandlers, ok = conn.handlers[path] + if ok { + for iface, handler := range subtreeHandlers { + // Only include this handler if it registered for the subtree + if handler.includeSubtree { + handlers[iface] = handler + } + } + + break + } + + path = path[:strings.LastIndex(string(path), "/")] + } + + return handlers, ok +} + +// handleCall handles the given method call (i.e. looks if it's one of the +// pre-implemented ones and searches for a corresponding handler if not). +func (conn *Conn) handleCall(msg *Message) { + name := msg.Headers[FieldMember].value.(string) + path := msg.Headers[FieldPath].value.(ObjectPath) + ifaceName, hasIface := msg.Headers[FieldInterface].value.(string) + sender, hasSender := msg.Headers[FieldSender].value.(string) + serial := msg.serial + if ifaceName == "org.freedesktop.DBus.Peer" { + switch name { + case "Ping": + conn.sendReply(sender, serial) + case "GetMachineId": + conn.sendReply(sender, serial, conn.uuid) + default: + conn.sendError(errmsgUnknownMethod, sender, serial) + } + return + } + if len(name) == 0 { + conn.sendError(errmsgUnknownMethod, sender, serial) + } + + // Find the exported handler (if any) for this path + handlers, ok := conn.searchHandlers(path) + if !ok { + conn.sendError(errmsgNoObject, sender, serial) + return + } + + var m reflect.Value + if hasIface { + iface := handlers[ifaceName] + m = exportedMethod(iface, name) + } else { + for _, v := range handlers { + m = exportedMethod(v, name) + if m.IsValid() { + break + } + } + } + + if !m.IsValid() { + conn.sendError(errmsgUnknownMethod, sender, serial) + return + } + + t := m.Type() + vs := msg.Body + pointers := make([]interface{}, t.NumIn()) + decode := make([]interface{}, 0, len(vs)) + for i := 0; i < t.NumIn(); i++ { + tp := t.In(i) + val := reflect.New(tp) + pointers[i] = val.Interface() + if tp == reflect.TypeOf((*Sender)(nil)).Elem() { + val.Elem().SetString(sender) + } else if tp == reflect.TypeOf((*Message)(nil)).Elem() { + val.Elem().Set(reflect.ValueOf(*msg)) + } else { + decode = append(decode, pointers[i]) + } + } + + if len(decode) != len(vs) { + conn.sendError(errmsgInvalidArg, sender, serial) + return + } + + if err := Store(vs, decode...); err != nil { + conn.sendError(errmsgInvalidArg, sender, serial) + return + } + + // Extract parameters + params := make([]reflect.Value, len(pointers)) + for i := 0; i < len(pointers); i++ { + params[i] = reflect.ValueOf(pointers[i]).Elem() + } + + // Call method + ret := m.Call(params) + if em := ret[t.NumOut()-1].Interface().(*Error); em != nil { + conn.sendError(*em, sender, serial) + return + } + + if msg.Flags&FlagNoReplyExpected == 0 { + reply := new(Message) + reply.Type = TypeMethodReply + reply.serial = conn.getSerial() + reply.Headers = make(map[HeaderField]Variant) + if hasSender { + reply.Headers[FieldDestination] = msg.Headers[FieldSender] + } + reply.Headers[FieldReplySerial] = MakeVariant(msg.serial) + reply.Body = make([]interface{}, len(ret)-1) + for i := 0; i < len(ret)-1; i++ { + reply.Body[i] = 
ret[i].Interface() + } + if len(ret) != 1 { + reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- reply + } + conn.outLck.RUnlock() + } +} + +// Emit emits the given signal on the message bus. The name parameter must be +// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost". +func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error { + if !path.IsValid() { + return errors.New("dbus: invalid object path") + } + i := strings.LastIndex(name, ".") + if i == -1 { + return errors.New("dbus: invalid method name") + } + iface := name[:i] + member := name[i+1:] + if !isValidMember(member) { + return errors.New("dbus: invalid method name") + } + if !isValidInterface(iface) { + return errors.New("dbus: invalid interface name") + } + msg := new(Message) + msg.Type = TypeSignal + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + msg.Headers[FieldInterface] = MakeVariant(iface) + msg.Headers[FieldMember] = MakeVariant(member) + msg.Headers[FieldPath] = MakeVariant(path) + msg.Body = values + if len(values) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) + } + conn.outLck.RLock() + defer conn.outLck.RUnlock() + if conn.closed { + return ErrClosed + } + conn.out <- msg + return nil +} + +// Export registers the given value to be exported as an object on the +// message bus. +// +// If a method call on the given path and interface is received, an exported +// method with the same name is called with v as the receiver if the +// parameters match and the last return value is of type *Error. If this +// *Error is not nil, it is sent back to the caller as an error. +// Otherwise, a method reply is sent with the other return values as its body. +// +// Any parameters with the special type Sender are set to the sender of the +// dbus message when the method is called. Parameters of this type do not +// contribute to the dbus signature of the method (i.e. the method is exposed +// as if the parameters of type Sender were not there). +// +// Similarly, any parameters with the type Message are set to the raw message +// received on the bus. Again, parameters of this type do not contribute to the +// dbus signature of the method. +// +// Every method call is executed in a new goroutine, so the method may be called +// in multiple goroutines at once. +// +// Method calls on the interface org.freedesktop.DBus.Peer will be automatically +// handled for every object. +// +// Passing nil as the first parameter will cause conn to cease handling calls on +// the given combination of path and interface. +// +// Export returns an error if path is not a valid path name. +func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error { + return conn.ExportWithMap(v, nil, path, iface) +} + +// ExportWithMap works exactly like Export but provides the ability to remap +// method names (e.g. export a lower-case method). +// +// The keys in the map are the real method names (exported on the struct), and +// the values are the method names to be exported on DBus. +func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error { + return conn.exportWithMap(v, mapping, path, iface, false) +} + +// ExportSubtree works exactly like Export but registers the given value for +// an entire subtree rather under the root path provided. 
+// +// In order to make this useful, one parameter in each of the value's exported +// methods should be a Message, in which case it will contain the raw message +// (allowing one to get access to the path that caused the method to be called). +// +// Note that more specific export paths take precedence over less specific. For +// example, a method call using the ObjectPath /foo/bar/baz will call a method +// exported on /foo/bar before a method exported on /foo. +func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) error { + return conn.ExportSubtreeWithMap(v, nil, path, iface) +} + +// ExportSubtreeWithMap works exactly like ExportSubtree but provides the +// ability to remap method names (e.g. export a lower-case method). +// +// The keys in the map are the real method names (exported on the struct), and +// the values are the method names to be exported on DBus. +func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error { + return conn.exportWithMap(v, mapping, path, iface, true) +} + +// exportWithMap is the worker function for all exports/registrations. +func (conn *Conn) exportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string, includeSubtree bool) error { + if !path.IsValid() { + return fmt.Errorf(`dbus: Invalid path name: "%s"`, path) + } + + conn.handlersLck.Lock() + defer conn.handlersLck.Unlock() + + // Remove a previous export if the interface is nil + if v == nil { + if _, ok := conn.handlers[path]; ok { + delete(conn.handlers[path], iface) + if len(conn.handlers[path]) == 0 { + delete(conn.handlers, path) + } + } + + return nil + } + + // If this is the first handler for this path, make a new map to hold all + // handlers for this path. + if _, ok := conn.handlers[path]; !ok { + conn.handlers[path] = make(map[string]exportWithMapping) + } + + // Finally, save this handler + conn.handlers[path][iface] = exportWithMapping{export: v, mapping: mapping, includeSubtree: includeSubtree} + + return nil +} + +// ReleaseName calls org.freedesktop.DBus.ReleaseName and awaits a response. +func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) { + var r uint32 + err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r) + if err != nil { + return 0, err + } + return ReleaseNameReply(r), nil +} + +// RequestName calls org.freedesktop.DBus.RequestName and awaits a response. +func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) { + var r uint32 + err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r) + if err != nil { + return 0, err + } + return RequestNameReply(r), nil +} + +// ReleaseNameReply is the reply to a ReleaseName call. +type ReleaseNameReply uint32 + +const ( + ReleaseNameReplyReleased ReleaseNameReply = 1 + iota + ReleaseNameReplyNonExistent + ReleaseNameReplyNotOwner +) + +// RequestNameFlags represents the possible flags for a RequestName call. +type RequestNameFlags uint32 + +const ( + NameFlagAllowReplacement RequestNameFlags = 1 << iota + NameFlagReplaceExisting + NameFlagDoNotQueue +) + +// RequestNameReply is the reply to a RequestName call. 
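+//
+// A minimal sketch of how the reply is commonly checked (the well-known name
+// "com.example.Demo" is only an example):
+//
+//	reply, err := conn.RequestName("com.example.Demo", NameFlagDoNotQueue)
+//	if err == nil && reply != RequestNameReplyPrimaryOwner {
+//		// the name is already owned by another connection
+//	}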
+type RequestNameReply uint32 + +const ( + RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota + RequestNameReplyInQueue + RequestNameReplyExists + RequestNameReplyAlreadyOwner +) diff --git a/vendor/github.com/godbus/dbus/homedir.go b/vendor/github.com/godbus/dbus/homedir.go new file mode 100644 index 000000000..0b745f931 --- /dev/null +++ b/vendor/github.com/godbus/dbus/homedir.go @@ -0,0 +1,28 @@ +package dbus + +import ( + "os" + "sync" +) + +var ( + homeDir string + homeDirLock sync.Mutex +) + +func getHomeDir() string { + homeDirLock.Lock() + defer homeDirLock.Unlock() + + if homeDir != "" { + return homeDir + } + + homeDir = os.Getenv("HOME") + if homeDir != "" { + return homeDir + } + + homeDir = lookupHomeDir() + return homeDir +} diff --git a/vendor/github.com/godbus/dbus/homedir_dynamic.go b/vendor/github.com/godbus/dbus/homedir_dynamic.go new file mode 100644 index 000000000..2732081e7 --- /dev/null +++ b/vendor/github.com/godbus/dbus/homedir_dynamic.go @@ -0,0 +1,15 @@ +// +build !static_build + +package dbus + +import ( + "os/user" +) + +func lookupHomeDir() string { + u, err := user.Current() + if err != nil { + return "/" + } + return u.HomeDir +} diff --git a/vendor/github.com/godbus/dbus/homedir_static.go b/vendor/github.com/godbus/dbus/homedir_static.go new file mode 100644 index 000000000..b9d9cb552 --- /dev/null +++ b/vendor/github.com/godbus/dbus/homedir_static.go @@ -0,0 +1,45 @@ +// +build static_build + +package dbus + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +func lookupHomeDir() string { + myUid := os.Getuid() + + f, err := os.Open("/etc/passwd") + if err != nil { + return "/" + } + defer f.Close() + + s := bufio.NewScanner(f) + + for s.Scan() { + if err := s.Err(); err != nil { + break + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + parts := strings.Split(line, ":") + + if len(parts) >= 6 { + uid, err := strconv.Atoi(parts[2]) + if err == nil && uid == myUid { + return parts[5] + } + } + } + + // Default to / if we can't get a better value + return "/" +} diff --git a/vendor/github.com/godbus/dbus/message.go b/vendor/github.com/godbus/dbus/message.go new file mode 100644 index 000000000..075d6e38b --- /dev/null +++ b/vendor/github.com/godbus/dbus/message.go @@ -0,0 +1,346 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "reflect" + "strconv" +) + +const protoVersion byte = 1 + +// Flags represents the possible flags of a D-Bus message. +type Flags byte + +const ( + // FlagNoReplyExpected signals that the message is not expected to generate + // a reply. If this flag is set on outgoing messages, any possible reply + // will be discarded. + FlagNoReplyExpected Flags = 1 << iota + // FlagNoAutoStart signals that the message bus should not automatically + // start an application when handling this message. + FlagNoAutoStart +) + +// Type represents the possible types of a D-Bus message. +type Type byte + +const ( + TypeMethodCall Type = 1 + iota + TypeMethodReply + TypeError + TypeSignal + typeMax +) + +func (t Type) String() string { + switch t { + case TypeMethodCall: + return "method call" + case TypeMethodReply: + return "reply" + case TypeError: + return "error" + case TypeSignal: + return "signal" + } + return "invalid" +} + +// HeaderField represents the possible byte codes for the headers +// of a D-Bus message. 
+type HeaderField byte + +const ( + FieldPath HeaderField = 1 + iota + FieldInterface + FieldMember + FieldErrorName + FieldReplySerial + FieldDestination + FieldSender + FieldSignature + FieldUnixFDs + fieldMax +) + +// An InvalidMessageError describes the reason why a D-Bus message is regarded as +// invalid. +type InvalidMessageError string + +func (e InvalidMessageError) Error() string { + return "dbus: invalid message: " + string(e) +} + +// fieldType are the types of the various header fields. +var fieldTypes = [fieldMax]reflect.Type{ + FieldPath: objectPathType, + FieldInterface: stringType, + FieldMember: stringType, + FieldErrorName: stringType, + FieldReplySerial: uint32Type, + FieldDestination: stringType, + FieldSender: stringType, + FieldSignature: signatureType, + FieldUnixFDs: uint32Type, +} + +// requiredFields lists the header fields that are required by the different +// message types. +var requiredFields = [typeMax][]HeaderField{ + TypeMethodCall: {FieldPath, FieldMember}, + TypeMethodReply: {FieldReplySerial}, + TypeError: {FieldErrorName, FieldReplySerial}, + TypeSignal: {FieldPath, FieldInterface, FieldMember}, +} + +// Message represents a single D-Bus message. +type Message struct { + Type + Flags + Headers map[HeaderField]Variant + Body []interface{} + + serial uint32 +} + +type header struct { + Field byte + Variant +} + +// DecodeMessage tries to decode a single message in the D-Bus wire format +// from the given reader. The byte order is figured out from the first byte. +// The possibly returned error can be an error of the underlying reader, an +// InvalidMessageError or a FormatError. +func DecodeMessage(rd io.Reader) (msg *Message, err error) { + var order binary.ByteOrder + var hlength, length uint32 + var typ, flags, proto byte + var headers []header + + b := make([]byte, 1) + _, err = rd.Read(b) + if err != nil { + return + } + switch b[0] { + case 'l': + order = binary.LittleEndian + case 'B': + order = binary.BigEndian + default: + return nil, InvalidMessageError("invalid byte order") + } + + dec := newDecoder(rd, order) + dec.pos = 1 + + msg = new(Message) + vs, err := dec.Decode(Signature{"yyyuu"}) + if err != nil { + return nil, err + } + if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil { + return nil, err + } + msg.Type = Type(typ) + msg.Flags = Flags(flags) + + // get the header length separately because we need it later + b = make([]byte, 4) + _, err = io.ReadFull(rd, b) + if err != nil { + return nil, err + } + binary.Read(bytes.NewBuffer(b), order, &hlength) + if hlength+length+16 > 1<<27 { + return nil, InvalidMessageError("message is too long") + } + dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order) + dec.pos = 12 + vs, err = dec.Decode(Signature{"a(yv)"}) + if err != nil { + return nil, err + } + if err = Store(vs, &headers); err != nil { + return nil, err + } + + msg.Headers = make(map[HeaderField]Variant) + for _, v := range headers { + msg.Headers[HeaderField(v.Field)] = v.Variant + } + + dec.align(8) + body := make([]byte, int(length)) + if length != 0 { + _, err := io.ReadFull(rd, body) + if err != nil { + return nil, err + } + } + + if err = msg.IsValid(); err != nil { + return nil, err + } + sig, _ := msg.Headers[FieldSignature].value.(Signature) + if sig.str != "" { + buf := bytes.NewBuffer(body) + dec = newDecoder(buf, order) + vs, err := dec.Decode(sig) + if err != nil { + return nil, err + } + msg.Body = vs + } + + return +} + +// EncodeTo encodes and sends a message to the given writer. 
The byte order must +// be either binary.LittleEndian or binary.BigEndian. If the message is not +// valid or an error occurs when writing, an error is returned. +func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error { + if err := msg.IsValid(); err != nil { + return err + } + var vs [7]interface{} + switch order { + case binary.LittleEndian: + vs[0] = byte('l') + case binary.BigEndian: + vs[0] = byte('B') + default: + return errors.New("dbus: invalid byte order") + } + body := new(bytes.Buffer) + enc := newEncoder(body, order) + if len(msg.Body) != 0 { + enc.Encode(msg.Body...) + } + vs[1] = msg.Type + vs[2] = msg.Flags + vs[3] = protoVersion + vs[4] = uint32(len(body.Bytes())) + vs[5] = msg.serial + headers := make([]header, 0, len(msg.Headers)) + for k, v := range msg.Headers { + headers = append(headers, header{byte(k), v}) + } + vs[6] = headers + var buf bytes.Buffer + enc = newEncoder(&buf, order) + enc.Encode(vs[:]...) + enc.align(8) + body.WriteTo(&buf) + if buf.Len() > 1<<27 { + return InvalidMessageError("message is too long") + } + if _, err := buf.WriteTo(out); err != nil { + return err + } + return nil +} + +// IsValid checks whether msg is a valid message and returns an +// InvalidMessageError if it is not. +func (msg *Message) IsValid() error { + if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected) != 0 { + return InvalidMessageError("invalid flags") + } + if msg.Type == 0 || msg.Type >= typeMax { + return InvalidMessageError("invalid message type") + } + for k, v := range msg.Headers { + if k == 0 || k >= fieldMax { + return InvalidMessageError("invalid header") + } + if reflect.TypeOf(v.value) != fieldTypes[k] { + return InvalidMessageError("invalid type of header field") + } + } + for _, v := range requiredFields[msg.Type] { + if _, ok := msg.Headers[v]; !ok { + return InvalidMessageError("missing required header") + } + } + if path, ok := msg.Headers[FieldPath]; ok { + if !path.value.(ObjectPath).IsValid() { + return InvalidMessageError("invalid path name") + } + } + if iface, ok := msg.Headers[FieldInterface]; ok { + if !isValidInterface(iface.value.(string)) { + return InvalidMessageError("invalid interface name") + } + } + if member, ok := msg.Headers[FieldMember]; ok { + if !isValidMember(member.value.(string)) { + return InvalidMessageError("invalid member name") + } + } + if errname, ok := msg.Headers[FieldErrorName]; ok { + if !isValidInterface(errname.value.(string)) { + return InvalidMessageError("invalid error name") + } + } + if len(msg.Body) != 0 { + if _, ok := msg.Headers[FieldSignature]; !ok { + return InvalidMessageError("missing signature") + } + } + return nil +} + +// Serial returns the message's serial number. The returned value is only valid +// for messages received by eavesdropping. +func (msg *Message) Serial() uint32 { + return msg.serial +} + +// String returns a string representation of a message similar to the format of +// dbus-monitor. 
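+//
+// As a sketch, a body-less signal prints roughly as (all names are examples):
+//
+//	signal from :1.42 serial 7 path /org/example interface org.example.Iface member Changed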
+func (msg *Message) String() string { + if err := msg.IsValid(); err != nil { + return "" + } + s := msg.Type.String() + if v, ok := msg.Headers[FieldSender]; ok { + s += " from " + v.value.(string) + } + if v, ok := msg.Headers[FieldDestination]; ok { + s += " to " + v.value.(string) + } + s += " serial " + strconv.FormatUint(uint64(msg.serial), 10) + if v, ok := msg.Headers[FieldReplySerial]; ok { + s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10) + } + if v, ok := msg.Headers[FieldUnixFDs]; ok { + s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10) + } + if v, ok := msg.Headers[FieldPath]; ok { + s += " path " + string(v.value.(ObjectPath)) + } + if v, ok := msg.Headers[FieldInterface]; ok { + s += " interface " + v.value.(string) + } + if v, ok := msg.Headers[FieldErrorName]; ok { + s += " error " + v.value.(string) + } + if v, ok := msg.Headers[FieldMember]; ok { + s += " member " + v.value.(string) + } + if len(msg.Body) != 0 { + s += "\n" + } + for i, v := range msg.Body { + s += " " + MakeVariant(v).String() + if i != len(msg.Body)-1 { + s += "\n" + } + } + return s +} diff --git a/vendor/github.com/godbus/dbus/object.go b/vendor/github.com/godbus/dbus/object.go new file mode 100644 index 000000000..7ef45da4c --- /dev/null +++ b/vendor/github.com/godbus/dbus/object.go @@ -0,0 +1,126 @@ +package dbus + +import ( + "errors" + "strings" +) + +// BusObject is the interface of a remote object on which methods can be +// invoked. +type BusObject interface { + Call(method string, flags Flags, args ...interface{}) *Call + Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call + GetProperty(p string) (Variant, error) + Destination() string + Path() ObjectPath +} + +// Object represents a remote object on which methods can be invoked. +type Object struct { + conn *Conn + dest string + path ObjectPath +} + +// Call calls a method with (*Object).Go and waits for its reply. +func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call { + return <-o.Go(method, flags, make(chan *Call, 1), args...).Done +} + +// Go calls a method with the given arguments asynchronously. It returns a +// Call structure representing this method call. The passed channel will +// return the same value once the call is done. If ch is nil, a new channel +// will be allocated. Otherwise, ch has to be buffered or Go will panic. +// +// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure +// is returned of which only the Err member is valid. +// +// If the method parameter contains a dot ('.'), the part before the last dot +// specifies the interface on which the method is called. 
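+//
+// A minimal sketch of an asynchronous call and decoding of its reply (the
+// method name is only an example):
+//
+//	call := <-o.Go("org.freedesktop.DBus.ListNames", 0, make(chan *Call, 1)).Done
+//	var names []string
+//	err := call.Store(&names)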
+func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call { + iface := "" + i := strings.LastIndex(method, ".") + if i != -1 { + iface = method[:i] + } + method = method[i+1:] + msg := new(Message) + msg.Type = TypeMethodCall + msg.serial = o.conn.getSerial() + msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected) + msg.Headers = make(map[HeaderField]Variant) + msg.Headers[FieldPath] = MakeVariant(o.path) + msg.Headers[FieldDestination] = MakeVariant(o.dest) + msg.Headers[FieldMember] = MakeVariant(method) + if iface != "" { + msg.Headers[FieldInterface] = MakeVariant(iface) + } + msg.Body = args + if len(args) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...)) + } + if msg.Flags&FlagNoReplyExpected == 0 { + if ch == nil { + ch = make(chan *Call, 10) + } else if cap(ch) == 0 { + panic("dbus: unbuffered channel passed to (*Object).Go") + } + call := &Call{ + Destination: o.dest, + Path: o.path, + Method: method, + Args: args, + Done: ch, + } + o.conn.callsLck.Lock() + o.conn.calls[msg.serial] = call + o.conn.callsLck.Unlock() + o.conn.outLck.RLock() + if o.conn.closed { + call.Err = ErrClosed + call.Done <- call + } else { + o.conn.out <- msg + } + o.conn.outLck.RUnlock() + return call + } + o.conn.outLck.RLock() + defer o.conn.outLck.RUnlock() + if o.conn.closed { + return &Call{Err: ErrClosed} + } + o.conn.out <- msg + return &Call{Err: nil} +} + +// GetProperty calls org.freedesktop.DBus.Properties.GetProperty on the given +// object. The property name must be given in interface.member notation. +func (o *Object) GetProperty(p string) (Variant, error) { + idx := strings.LastIndex(p, ".") + if idx == -1 || idx+1 == len(p) { + return Variant{}, errors.New("dbus: invalid property " + p) + } + + iface := p[:idx] + prop := p[idx+1:] + + result := Variant{} + err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result) + + if err != nil { + return Variant{}, err + } + + return result, nil +} + +// Destination returns the destination that calls on o are sent to. +func (o *Object) Destination() string { + return o.dest +} + +// Path returns the path that calls on o are sent to. +func (o *Object) Path() ObjectPath { + return o.path +} diff --git a/vendor/github.com/godbus/dbus/sig.go b/vendor/github.com/godbus/dbus/sig.go new file mode 100644 index 000000000..f45b53ce1 --- /dev/null +++ b/vendor/github.com/godbus/dbus/sig.go @@ -0,0 +1,257 @@ +package dbus + +import ( + "fmt" + "reflect" + "strings" +) + +var sigToType = map[byte]reflect.Type{ + 'y': byteType, + 'b': boolType, + 'n': int16Type, + 'q': uint16Type, + 'i': int32Type, + 'u': uint32Type, + 'x': int64Type, + 't': uint64Type, + 'd': float64Type, + 's': stringType, + 'g': signatureType, + 'o': objectPathType, + 'v': variantType, + 'h': unixFDIndexType, +} + +// Signature represents a correct type signature as specified by the D-Bus +// specification. The zero value represents the empty signature, "". +type Signature struct { + str string +} + +// SignatureOf returns the concatenation of all the signatures of the given +// values. It panics if one of them is not representable in D-Bus. +func SignatureOf(vs ...interface{}) Signature { + var s string + for _, v := range vs { + s += getSignature(reflect.TypeOf(v)) + } + return Signature{s} +} + +// SignatureOfType returns the signature of the given type. It panics if the +// type is not representable in D-Bus. 
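+//
+// For example (a sketch):
+//
+//	SignatureOfType(reflect.TypeOf(map[string]int32(nil))) // Signature{"a{si}"}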
+func SignatureOfType(t reflect.Type) Signature { + return Signature{getSignature(t)} +} + +// getSignature returns the signature of the given type and panics on unknown types. +func getSignature(t reflect.Type) string { + // handle simple types first + switch t.Kind() { + case reflect.Uint8: + return "y" + case reflect.Bool: + return "b" + case reflect.Int16: + return "n" + case reflect.Uint16: + return "q" + case reflect.Int32: + if t == unixFDType { + return "h" + } + return "i" + case reflect.Uint32: + if t == unixFDIndexType { + return "h" + } + return "u" + case reflect.Int64: + return "x" + case reflect.Uint64: + return "t" + case reflect.Float64: + return "d" + case reflect.Ptr: + return getSignature(t.Elem()) + case reflect.String: + if t == objectPathType { + return "o" + } + return "s" + case reflect.Struct: + if t == variantType { + return "v" + } else if t == signatureType { + return "g" + } + var s string + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + s += getSignature(t.Field(i).Type) + } + } + return "(" + s + ")" + case reflect.Array, reflect.Slice: + return "a" + getSignature(t.Elem()) + case reflect.Map: + if !isKeyType(t.Key()) { + panic(InvalidTypeError{t}) + } + return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}" + } + panic(InvalidTypeError{t}) +} + +// ParseSignature returns the signature represented by this string, or a +// SignatureError if the string is not a valid signature. +func ParseSignature(s string) (sig Signature, err error) { + if len(s) == 0 { + return + } + if len(s) > 255 { + return Signature{""}, SignatureError{s, "too long"} + } + sig.str = s + for err == nil && len(s) != 0 { + err, s = validSingle(s, 0) + } + if err != nil { + sig = Signature{""} + } + + return +} + +// ParseSignatureMust behaves like ParseSignature, except that it panics if s +// is not valid. +func ParseSignatureMust(s string) Signature { + sig, err := ParseSignature(s) + if err != nil { + panic(err) + } + return sig +} + +// Empty retruns whether the signature is the empty signature. +func (s Signature) Empty() bool { + return s.str == "" +} + +// Single returns whether the signature represents a single, complete type. +func (s Signature) Single() bool { + err, r := validSingle(s.str, 0) + return err != nil && r == "" +} + +// String returns the signature's string representation. +func (s Signature) String() string { + return s.str +} + +// A SignatureError indicates that a signature passed to a function or received +// on a connection is not a valid signature. +type SignatureError struct { + Sig string + Reason string +} + +func (e SignatureError) Error() string { + return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason) +} + +// Try to read a single type from this string. If it was successfull, err is nil +// and rem is the remaining unparsed part. Otherwise, err is a non-nil +// SignatureError and rem is "". depth is the current recursion depth which may +// not be greater than 64 and should be given as 0 on the first call. 
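+//
+// For example (a sketch): validSingle("a{sv}u", 0) yields (nil, "u"), because
+// "a{sv}" is one complete type and "u" is the unparsed remainder.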
+func validSingle(s string, depth int) (err error, rem string) { + if s == "" { + return SignatureError{Sig: s, Reason: "empty signature"}, "" + } + if depth > 64 { + return SignatureError{Sig: s, Reason: "container nesting too deep"}, "" + } + switch s[0] { + case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h': + return nil, s[1:] + case 'a': + if len(s) > 1 && s[1] == '{' { + i := findMatching(s[1:], '{', '}') + if i == -1 { + return SignatureError{Sig: s, Reason: "unmatched '{'"}, "" + } + i++ + rem = s[i+1:] + s = s[2:i] + if err, _ = validSingle(s[:1], depth+1); err != nil { + return err, "" + } + err, nr := validSingle(s[1:], depth+1) + if err != nil { + return err, "" + } + if nr != "" { + return SignatureError{Sig: s, Reason: "too many types in dict"}, "" + } + return nil, rem + } + return validSingle(s[1:], depth+1) + case '(': + i := findMatching(s, '(', ')') + if i == -1 { + return SignatureError{Sig: s, Reason: "unmatched ')'"}, "" + } + rem = s[i+1:] + s = s[1:i] + for err == nil && s != "" { + err, s = validSingle(s, depth+1) + } + if err != nil { + rem = "" + } + return + } + return SignatureError{Sig: s, Reason: "invalid type character"}, "" +} + +func findMatching(s string, left, right rune) int { + n := 0 + for i, v := range s { + if v == left { + n++ + } else if v == right { + n-- + } + if n == 0 { + return i + } + } + return -1 +} + +// typeFor returns the type of the given signature. It ignores any left over +// characters and panics if s doesn't start with a valid type signature. +func typeFor(s string) (t reflect.Type) { + err, _ := validSingle(s, 0) + if err != nil { + panic(err) + } + + if t, ok := sigToType[s[0]]; ok { + return t + } + switch s[0] { + case 'a': + if s[1] == '{' { + i := strings.LastIndex(s, "}") + t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i])) + } else { + t = reflect.SliceOf(typeFor(s[1:])) + } + case '(': + t = interfacesType + } + return +} diff --git a/vendor/github.com/godbus/dbus/transport_darwin.go b/vendor/github.com/godbus/dbus/transport_darwin.go new file mode 100644 index 000000000..1bba0d6bf --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_darwin.go @@ -0,0 +1,6 @@ +package dbus + +func (t *unixTransport) SendNullByte() error { + _, err := t.Write([]byte{0}) + return err +} diff --git a/vendor/github.com/godbus/dbus/transport_generic.go b/vendor/github.com/godbus/dbus/transport_generic.go new file mode 100644 index 000000000..46f8f49d6 --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_generic.go @@ -0,0 +1,35 @@ +package dbus + +import ( + "encoding/binary" + "errors" + "io" +) + +type genericTransport struct { + io.ReadWriteCloser +} + +func (t genericTransport) SendNullByte() error { + _, err := t.Write([]byte{0}) + return err +} + +func (t genericTransport) SupportsUnixFDs() bool { + return false +} + +func (t genericTransport) EnableUnixFDs() {} + +func (t genericTransport) ReadMessage() (*Message, error) { + return DecodeMessage(t) +} + +func (t genericTransport) SendMessage(msg *Message) error { + for _, v := range msg.Body { + if _, ok := v.(UnixFD); ok { + return errors.New("dbus: unix fd passing not enabled") + } + } + return msg.EncodeTo(t, binary.LittleEndian) +} diff --git a/vendor/github.com/godbus/dbus/transport_unix.go b/vendor/github.com/godbus/dbus/transport_unix.go new file mode 100644 index 000000000..3fafeabb1 --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_unix.go @@ -0,0 +1,196 @@ +//+build !windows + +package dbus + +import ( + "bytes" + 
"encoding/binary" + "errors" + "io" + "net" + "syscall" +) + +type oobReader struct { + conn *net.UnixConn + oob []byte + buf [4096]byte +} + +func (o *oobReader) Read(b []byte) (n int, err error) { + n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:]) + if err != nil { + return n, err + } + if flags&syscall.MSG_CTRUNC != 0 { + return n, errors.New("dbus: control data truncated (too many fds received)") + } + o.oob = append(o.oob, o.buf[:oobn]...) + return n, nil +} + +type unixTransport struct { + *net.UnixConn + hasUnixFDs bool +} + +func newUnixTransport(keys string) (transport, error) { + var err error + + t := new(unixTransport) + abstract := getKey(keys, "abstract") + path := getKey(keys, "path") + switch { + case abstract == "" && path == "": + return nil, errors.New("dbus: invalid address (neither path nor abstract set)") + case abstract != "" && path == "": + t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"}) + if err != nil { + return nil, err + } + return t, nil + case abstract == "" && path != "": + t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"}) + if err != nil { + return nil, err + } + return t, nil + default: + return nil, errors.New("dbus: invalid address (both path and abstract set)") + } +} + +func init() { + transports["unix"] = newUnixTransport +} + +func (t *unixTransport) EnableUnixFDs() { + t.hasUnixFDs = true +} + +func (t *unixTransport) ReadMessage() (*Message, error) { + var ( + blen, hlen uint32 + csheader [16]byte + headers []header + order binary.ByteOrder + unixfds uint32 + ) + // To be sure that all bytes of out-of-band data are read, we use a special + // reader that uses ReadUnix on the underlying connection instead of Read + // and gathers the out-of-band data in a buffer. 
+ rd := &oobReader{conn: t.UnixConn} + // read the first 16 bytes (the part of the header that has a constant size), + // from which we can figure out the length of the rest of the message + if _, err := io.ReadFull(rd, csheader[:]); err != nil { + return nil, err + } + switch csheader[0] { + case 'l': + order = binary.LittleEndian + case 'B': + order = binary.BigEndian + default: + return nil, InvalidMessageError("invalid byte order") + } + // csheader[4:8] -> length of message body, csheader[12:16] -> length of + // header fields (without alignment) + binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen) + binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen) + if hlen%8 != 0 { + hlen += 8 - (hlen % 8) + } + + // decode headers and look for unix fds + headerdata := make([]byte, hlen+4) + copy(headerdata, csheader[12:]) + if _, err := io.ReadFull(t, headerdata[4:]); err != nil { + return nil, err + } + dec := newDecoder(bytes.NewBuffer(headerdata), order) + dec.pos = 12 + vs, err := dec.Decode(Signature{"a(yv)"}) + if err != nil { + return nil, err + } + Store(vs, &headers) + for _, v := range headers { + if v.Field == byte(FieldUnixFDs) { + unixfds, _ = v.Variant.value.(uint32) + } + } + all := make([]byte, 16+hlen+blen) + copy(all, csheader[:]) + copy(all[16:], headerdata[4:]) + if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil { + return nil, err + } + if unixfds != 0 { + if !t.hasUnixFDs { + return nil, errors.New("dbus: got unix fds on unsupported transport") + } + // read the fds from the OOB data + scms, err := syscall.ParseSocketControlMessage(rd.oob) + if err != nil { + return nil, err + } + if len(scms) != 1 { + return nil, errors.New("dbus: received more than one socket control message") + } + fds, err := syscall.ParseUnixRights(&scms[0]) + if err != nil { + return nil, err + } + msg, err := DecodeMessage(bytes.NewBuffer(all)) + if err != nil { + return nil, err + } + // substitute the values in the message body (which are indices for the + // array receiver via OOB) with the actual values + for i, v := range msg.Body { + if j, ok := v.(UnixFDIndex); ok { + if uint32(j) >= unixfds { + return nil, InvalidMessageError("invalid index for unix fd") + } + msg.Body[i] = UnixFD(fds[j]) + } + } + return msg, nil + } + return DecodeMessage(bytes.NewBuffer(all)) +} + +func (t *unixTransport) SendMessage(msg *Message) error { + fds := make([]int, 0) + for i, v := range msg.Body { + if fd, ok := v.(UnixFD); ok { + msg.Body[i] = UnixFDIndex(len(fds)) + fds = append(fds, int(fd)) + } + } + if len(fds) != 0 { + if !t.hasUnixFDs { + return errors.New("dbus: unix fd passing not enabled") + } + msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds))) + oob := syscall.UnixRights(fds...) 
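+		// The encoded message and the SCM_RIGHTS control data below are handed to
+		// a single WriteMsgUnix call, so the file descriptors arrive together with
+		// this message.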
+ buf := new(bytes.Buffer) + msg.EncodeTo(buf, binary.LittleEndian) + n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil) + if err != nil { + return err + } + if n != buf.Len() || oobn != len(oob) { + return io.ErrShortWrite + } + } else { + if err := msg.EncodeTo(t, binary.LittleEndian); err != nil { + return nil + } + } + return nil +} + +func (t *unixTransport) SupportsUnixFDs() bool { + return true +} diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go b/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go new file mode 100644 index 000000000..a8cd39395 --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go @@ -0,0 +1,95 @@ +// The UnixCredentials system call is currently only implemented on Linux +// http://golang.org/src/pkg/syscall/sockcmsg_linux.go +// https://golang.org/s/go1.4-syscall +// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys + +// Local implementation of the UnixCredentials system call for DragonFly BSD + +package dbus + +/* +#include +*/ +import "C" + +import ( + "io" + "os" + "syscall" + "unsafe" +) + +// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go +// http://golang.org/src/pkg/syscall/ztypes_dragonfly_amd64.go +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +// http://golang.org/src/pkg/syscall/types_linux.go +// http://golang.org/src/pkg/syscall/types_dragonfly.go +// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/ucred.h +const ( + SizeofUcred = C.sizeof_struct_ucred +) + +// http://golang.org/src/pkg/syscall/sockcmsg_unix.go +func cmsgAlignOf(salen int) int { + // From http://golang.org/src/pkg/syscall/sockcmsg_unix.go + //salign := sizeofPtr + // NOTE: It seems like 64-bit Darwin and DragonFly BSD kernels + // still require 32-bit aligned access to network subsystem. + //if darwin64Bit || dragonfly64Bit { + // salign = 4 + //} + salign := 4 + return (salen + salign - 1) & ^(salign - 1) +} + +// http://golang.org/src/pkg/syscall/sockcmsg_unix.go +func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer { + return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr))) +} + +// http://golang.org/src/pkg/syscall/sockcmsg_linux.go +// UnixCredentials encodes credentials into a socket control message +// for sending to another process. This can be used for +// authentication. +func UnixCredentials(ucred *Ucred) []byte { + b := make([]byte, syscall.CmsgSpace(SizeofUcred)) + h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = syscall.SOL_SOCKET + h.Type = syscall.SCM_CREDS + h.SetLen(syscall.CmsgLen(SizeofUcred)) + *((*Ucred)(cmsgData(h))) = *ucred + return b +} + +// http://golang.org/src/pkg/syscall/sockcmsg_linux.go +// ParseUnixCredentials decodes a socket control message that contains +// credentials in a Ucred structure. To receive such a message, the +// SO_PASSCRED option must be enabled on the socket. 
+func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) { + if m.Header.Level != syscall.SOL_SOCKET { + return nil, syscall.EINVAL + } + if m.Header.Type != syscall.SCM_CREDS { + return nil, syscall.EINVAL + } + ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) + return &ucred, nil +} + +func (t *unixTransport) SendNullByte() error { + ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} + b := UnixCredentials(ucred) + _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) + if err != nil { + return err + } + if oobn != len(b) { + return io.ErrShortWrite + } + return nil +} diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_linux.go b/vendor/github.com/godbus/dbus/transport_unixcred_linux.go new file mode 100644 index 000000000..d9dfdf698 --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_unixcred_linux.go @@ -0,0 +1,25 @@ +// The UnixCredentials system call is currently only implemented on Linux +// http://golang.org/src/pkg/syscall/sockcmsg_linux.go +// https://golang.org/s/go1.4-syscall +// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys + +package dbus + +import ( + "io" + "os" + "syscall" +) + +func (t *unixTransport) SendNullByte() error { + ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} + b := syscall.UnixCredentials(ucred) + _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) + if err != nil { + return err + } + if oobn != len(b) { + return io.ErrShortWrite + } + return nil +} diff --git a/vendor/github.com/godbus/dbus/variant.go b/vendor/github.com/godbus/dbus/variant.go new file mode 100644 index 000000000..b7b13ae90 --- /dev/null +++ b/vendor/github.com/godbus/dbus/variant.go @@ -0,0 +1,139 @@ +package dbus + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" +) + +// Variant represents the D-Bus variant type. +type Variant struct { + sig Signature + value interface{} +} + +// MakeVariant converts the given value to a Variant. It panics if v cannot be +// represented as a D-Bus type. +func MakeVariant(v interface{}) Variant { + return Variant{SignatureOf(v), v} +} + +// ParseVariant parses the given string as a variant as described at +// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not +// empty, it is taken to be the expected signature for the variant. +func ParseVariant(s string, sig Signature) (Variant, error) { + tokens := varLex(s) + p := &varParser{tokens: tokens} + n, err := varMakeNode(p) + if err != nil { + return Variant{}, err + } + if sig.str == "" { + sig, err = varInfer(n) + if err != nil { + return Variant{}, err + } + } + v, err := n.Value(sig) + if err != nil { + return Variant{}, err + } + return MakeVariant(v), nil +} + +// format returns a formatted version of v and whether this string can be parsed +// unambigously. 
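+//
+// For example (a sketch): MakeVariant("foo").format() yields `"foo"`, true,
+// while MakeVariant(uint32(5)).format() yields "5", false.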
+func (v Variant) format() (string, bool) { + switch v.sig.str[0] { + case 'b', 'i': + return fmt.Sprint(v.value), true + case 'n', 'q', 'u', 'x', 't', 'd', 'h': + return fmt.Sprint(v.value), false + case 's': + return strconv.Quote(v.value.(string)), true + case 'o': + return strconv.Quote(string(v.value.(ObjectPath))), false + case 'g': + return strconv.Quote(v.value.(Signature).str), false + case 'v': + s, unamb := v.value.(Variant).format() + if !unamb { + return "<@" + v.value.(Variant).sig.str + " " + s + ">", true + } + return "<" + s + ">", true + case 'y': + return fmt.Sprintf("%#x", v.value.(byte)), false + } + rv := reflect.ValueOf(v.value) + switch rv.Kind() { + case reflect.Slice: + if rv.Len() == 0 { + return "[]", false + } + unamb := true + buf := bytes.NewBuffer([]byte("[")) + for i := 0; i < rv.Len(); i++ { + // TODO: slooow + s, b := MakeVariant(rv.Index(i).Interface()).format() + unamb = unamb && b + buf.WriteString(s) + if i != rv.Len()-1 { + buf.WriteString(", ") + } + } + buf.WriteByte(']') + return buf.String(), unamb + case reflect.Map: + if rv.Len() == 0 { + return "{}", false + } + unamb := true + var buf bytes.Buffer + kvs := make([]string, rv.Len()) + for i, k := range rv.MapKeys() { + s, b := MakeVariant(k.Interface()).format() + unamb = unamb && b + buf.Reset() + buf.WriteString(s) + buf.WriteString(": ") + s, b = MakeVariant(rv.MapIndex(k).Interface()).format() + unamb = unamb && b + buf.WriteString(s) + kvs[i] = buf.String() + } + buf.Reset() + buf.WriteByte('{') + sort.Strings(kvs) + for i, kv := range kvs { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(kv) + } + buf.WriteByte('}') + return buf.String(), unamb + } + return `"INVALID"`, true +} + +// Signature returns the D-Bus signature of the underlying value of v. +func (v Variant) Signature() Signature { + return v.sig +} + +// String returns the string representation of the underlying value of v as +// described at https://developer.gnome.org/glib/unstable/gvariant-text.html. +func (v Variant) String() string { + s, unamb := v.format() + if !unamb { + return "@" + v.sig.str + " " + s + } + return s +} + +// Value returns the underlying value of v. +func (v Variant) Value() interface{} { + return v.value +} diff --git a/vendor/github.com/godbus/dbus/variant_lexer.go b/vendor/github.com/godbus/dbus/variant_lexer.go new file mode 100644 index 000000000..332007d6f --- /dev/null +++ b/vendor/github.com/godbus/dbus/variant_lexer.go @@ -0,0 +1,284 @@ +package dbus + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +// Heavily inspired by the lexer from text/template. 
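+//
+// As a rough sketch of the accepted text, the input
+//
+//	[<'hi'>, <@u 5>]
+//
+// lexes into array, variant, string, type and number tokens, which the parser
+// in variant_parser.go then assembles into a []Variant.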
+ +type varToken struct { + typ varTokenType + val string +} + +type varTokenType byte + +const ( + tokEOF varTokenType = iota + tokError + tokNumber + tokString + tokBool + tokArrayStart + tokArrayEnd + tokDictStart + tokDictEnd + tokVariantStart + tokVariantEnd + tokComma + tokColon + tokType + tokByteString +) + +type varLexer struct { + input string + start int + pos int + width int + tokens []varToken +} + +type lexState func(*varLexer) lexState + +func varLex(s string) []varToken { + l := &varLexer{input: s} + l.run() + return l.tokens +} + +func (l *varLexer) accept(valid string) bool { + if strings.IndexRune(valid, l.next()) >= 0 { + return true + } + l.backup() + return false +} + +func (l *varLexer) backup() { + l.pos -= l.width +} + +func (l *varLexer) emit(t varTokenType) { + l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]}) + l.start = l.pos +} + +func (l *varLexer) errorf(format string, v ...interface{}) lexState { + l.tokens = append(l.tokens, varToken{ + tokError, + fmt.Sprintf(format, v...), + }) + return nil +} + +func (l *varLexer) ignore() { + l.start = l.pos +} + +func (l *varLexer) next() rune { + var r rune + + if l.pos >= len(l.input) { + l.width = 0 + return -1 + } + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + l.pos += l.width + return r +} + +func (l *varLexer) run() { + for state := varLexNormal; state != nil; { + state = state(l) + } +} + +func (l *varLexer) peek() rune { + r := l.next() + l.backup() + return r +} + +func varLexNormal(l *varLexer) lexState { + for { + r := l.next() + switch { + case r == -1: + l.emit(tokEOF) + return nil + case r == '[': + l.emit(tokArrayStart) + case r == ']': + l.emit(tokArrayEnd) + case r == '{': + l.emit(tokDictStart) + case r == '}': + l.emit(tokDictEnd) + case r == '<': + l.emit(tokVariantStart) + case r == '>': + l.emit(tokVariantEnd) + case r == ':': + l.emit(tokColon) + case r == ',': + l.emit(tokComma) + case r == '\'' || r == '"': + l.backup() + return varLexString + case r == '@': + l.backup() + return varLexType + case unicode.IsSpace(r): + l.ignore() + case unicode.IsNumber(r) || r == '+' || r == '-': + l.backup() + return varLexNumber + case r == 'b': + pos := l.start + if n := l.peek(); n == '"' || n == '\'' { + return varLexByteString + } + // not a byte string; try to parse it as a type or bool below + l.pos = pos + 1 + l.width = 1 + fallthrough + default: + // either a bool or a type. Try bools first. + l.backup() + if l.pos+4 <= len(l.input) { + if l.input[l.pos:l.pos+4] == "true" { + l.pos += 4 + l.emit(tokBool) + continue + } + } + if l.pos+5 <= len(l.input) { + if l.input[l.pos:l.pos+5] == "false" { + l.pos += 5 + l.emit(tokBool) + continue + } + } + // must be a type. 
+ return varLexType + } + } +} + +var varTypeMap = map[string]string{ + "boolean": "b", + "byte": "y", + "int16": "n", + "uint16": "q", + "int32": "i", + "uint32": "u", + "int64": "x", + "uint64": "t", + "double": "f", + "string": "s", + "objectpath": "o", + "signature": "g", +} + +func varLexByteString(l *varLexer) lexState { + q := l.next() +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != -1 { + break + } + fallthrough + case -1: + return l.errorf("unterminated bytestring") + case q: + break Loop + } + } + l.emit(tokByteString) + return varLexNormal +} + +func varLexNumber(l *varLexer) lexState { + l.accept("+-") + digits := "0123456789" + if l.accept("0") { + if l.accept("x") { + digits = "0123456789abcdefABCDEF" + } else { + digits = "01234567" + } + } + for strings.IndexRune(digits, l.next()) >= 0 { + } + l.backup() + if l.accept(".") { + for strings.IndexRune(digits, l.next()) >= 0 { + } + l.backup() + } + if l.accept("eE") { + l.accept("+-") + for strings.IndexRune("0123456789", l.next()) >= 0 { + } + l.backup() + } + if r := l.peek(); unicode.IsLetter(r) { + l.next() + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + l.emit(tokNumber) + return varLexNormal +} + +func varLexString(l *varLexer) lexState { + q := l.next() +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != -1 { + break + } + fallthrough + case -1: + return l.errorf("unterminated string") + case q: + break Loop + } + } + l.emit(tokString) + return varLexNormal +} + +func varLexType(l *varLexer) lexState { + at := l.accept("@") + for { + r := l.next() + if r == -1 { + break + } + if unicode.IsSpace(r) { + l.backup() + break + } + } + if at { + if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil { + return l.errorf("%s", err) + } + } else { + if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok { + l.emit(tokType) + return varLexNormal + } + return l.errorf("unrecognized type %q", l.input[l.start:l.pos]) + } + l.emit(tokType) + return varLexNormal +} diff --git a/vendor/github.com/godbus/dbus/variant_parser.go b/vendor/github.com/godbus/dbus/variant_parser.go new file mode 100644 index 000000000..d20f5da6d --- /dev/null +++ b/vendor/github.com/godbus/dbus/variant_parser.go @@ -0,0 +1,817 @@ +package dbus + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +type varParser struct { + tokens []varToken + i int +} + +func (p *varParser) backup() { + p.i-- +} + +func (p *varParser) next() varToken { + if p.i < len(p.tokens) { + t := p.tokens[p.i] + p.i++ + return t + } + return varToken{typ: tokEOF} +} + +type varNode interface { + Infer() (Signature, error) + String() string + Sigs() sigSet + Value(Signature) (interface{}, error) +} + +func varMakeNode(p *varParser) (varNode, error) { + var sig Signature + + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokNumber: + return varMakeNumNode(t, sig) + case tokString: + return varMakeStringNode(t, sig) + case tokBool: + if sig.str != "" && sig.str != "b" { + return nil, varTypeError{t.val, sig} + } + b, err := strconv.ParseBool(t.val) + if err != nil { + return nil, err + } + return boolNode(b), nil + case tokArrayStart: + return varMakeArrayNode(p, sig) + case tokVariantStart: + return varMakeVariantNode(p, sig) + case tokDictStart: + return varMakeDictNode(p, sig) + case tokType: + if sig.str != "" { + return nil, errors.New("unexpected type 
annotation") + } + if t.val[0] == '@' { + sig.str = t.val[1:] + } else { + sig.str = varTypeMap[t.val] + } + case tokByteString: + if sig.str != "" && sig.str != "ay" { + return nil, varTypeError{t.val, sig} + } + b, err := varParseByteString(t.val) + if err != nil { + return nil, err + } + return byteStringNode(b), nil + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } +} + +type varTypeError struct { + val string + sig Signature +} + +func (e varTypeError) Error() string { + return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str) +} + +type sigSet map[Signature]bool + +func (s sigSet) Empty() bool { + return len(s) == 0 +} + +func (s sigSet) Intersect(s2 sigSet) sigSet { + r := make(sigSet) + for k := range s { + if s2[k] { + r[k] = true + } + } + return r +} + +func (s sigSet) Single() (Signature, bool) { + if len(s) == 1 { + for k := range s { + return k, true + } + } + return Signature{}, false +} + +func (s sigSet) ToArray() sigSet { + r := make(sigSet, len(s)) + for k := range s { + r[Signature{"a" + k.str}] = true + } + return r +} + +type numNode struct { + sig Signature + str string + val interface{} +} + +var numSigSet = sigSet{ + Signature{"y"}: true, + Signature{"n"}: true, + Signature{"q"}: true, + Signature{"i"}: true, + Signature{"u"}: true, + Signature{"x"}: true, + Signature{"t"}: true, + Signature{"d"}: true, +} + +func (n numNode) Infer() (Signature, error) { + if strings.ContainsAny(n.str, ".e") { + return Signature{"d"}, nil + } + return Signature{"i"}, nil +} + +func (n numNode) String() string { + return n.str +} + +func (n numNode) Sigs() sigSet { + if n.sig.str != "" { + return sigSet{n.sig: true} + } + if strings.ContainsAny(n.str, ".e") { + return sigSet{Signature{"d"}: true} + } + return numSigSet +} + +func (n numNode) Value(sig Signature) (interface{}, error) { + if n.sig.str != "" && n.sig != sig { + return nil, varTypeError{n.str, sig} + } + if n.val != nil { + return n.val, nil + } + return varNumAs(n.str, sig) +} + +func varMakeNumNode(tok varToken, sig Signature) (varNode, error) { + if sig.str == "" { + return numNode{str: tok.val}, nil + } + num, err := varNumAs(tok.val, sig) + if err != nil { + return nil, err + } + return numNode{sig: sig, val: num}, nil +} + +func varNumAs(s string, sig Signature) (interface{}, error) { + isUnsigned := false + size := 32 + switch sig.str { + case "n": + size = 16 + case "i": + case "x": + size = 64 + case "y": + size = 8 + isUnsigned = true + case "q": + size = 16 + isUnsigned = true + case "u": + isUnsigned = true + case "t": + size = 64 + isUnsigned = true + case "d": + d, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, err + } + return d, nil + default: + return nil, varTypeError{s, sig} + } + base := 10 + if strings.HasPrefix(s, "0x") { + base = 16 + s = s[2:] + } + if strings.HasPrefix(s, "0") && len(s) != 1 { + base = 8 + s = s[1:] + } + if isUnsigned { + i, err := strconv.ParseUint(s, base, size) + if err != nil { + return nil, err + } + var v interface{} = i + switch sig.str { + case "y": + v = byte(i) + case "q": + v = uint16(i) + case "u": + v = uint32(i) + } + return v, nil + } + i, err := strconv.ParseInt(s, base, size) + if err != nil { + return nil, err + } + var v interface{} = i + switch sig.str { + case "n": + v = int16(i) + case "i": + v = int32(i) + } + return v, nil +} + +type stringNode struct { + sig Signature + str string // parsed + val interface{} // has correct type +} + +var stringSigSet = sigSet{ + Signature{"s"}: true, + Signature{"g"}: 
true, + Signature{"o"}: true, +} + +func (n stringNode) Infer() (Signature, error) { + return Signature{"s"}, nil +} + +func (n stringNode) String() string { + return n.str +} + +func (n stringNode) Sigs() sigSet { + if n.sig.str != "" { + return sigSet{n.sig: true} + } + return stringSigSet +} + +func (n stringNode) Value(sig Signature) (interface{}, error) { + if n.sig.str != "" && n.sig != sig { + return nil, varTypeError{n.str, sig} + } + if n.val != nil { + return n.val, nil + } + switch { + case sig.str == "g": + return Signature{n.str}, nil + case sig.str == "o": + return ObjectPath(n.str), nil + case sig.str == "s": + return n.str, nil + default: + return nil, varTypeError{n.str, sig} + } +} + +func varMakeStringNode(tok varToken, sig Signature) (varNode, error) { + if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" { + return nil, fmt.Errorf("invalid type %q for string", sig.str) + } + s, err := varParseString(tok.val) + if err != nil { + return nil, err + } + n := stringNode{str: s} + if sig.str == "" { + return stringNode{str: s}, nil + } + n.sig = sig + switch sig.str { + case "o": + n.val = ObjectPath(s) + case "g": + n.val = Signature{s} + case "s": + n.val = s + } + return n, nil +} + +func varParseString(s string) (string, error) { + // quotes are guaranteed to be there + s = s[1 : len(s)-1] + buf := new(bytes.Buffer) + for len(s) != 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && size == 1 { + return "", errors.New("invalid UTF-8") + } + s = s[size:] + if r != '\\' { + buf.WriteRune(r) + continue + } + r, size = utf8.DecodeRuneInString(s) + if r == utf8.RuneError && size == 1 { + return "", errors.New("invalid UTF-8") + } + s = s[size:] + switch r { + case 'a': + buf.WriteRune(0x7) + case 'b': + buf.WriteRune(0x8) + case 'f': + buf.WriteRune(0xc) + case 'n': + buf.WriteRune('\n') + case 'r': + buf.WriteRune('\r') + case 't': + buf.WriteRune('\t') + case '\n': + case 'u': + if len(s) < 4 { + return "", errors.New("short unicode escape") + } + r, err := strconv.ParseUint(s[:4], 16, 32) + if err != nil { + return "", err + } + buf.WriteRune(rune(r)) + s = s[4:] + case 'U': + if len(s) < 8 { + return "", errors.New("short unicode escape") + } + r, err := strconv.ParseUint(s[:8], 16, 32) + if err != nil { + return "", err + } + buf.WriteRune(rune(r)) + s = s[8:] + default: + buf.WriteRune(r) + } + } + return buf.String(), nil +} + +var boolSigSet = sigSet{Signature{"b"}: true} + +type boolNode bool + +func (boolNode) Infer() (Signature, error) { + return Signature{"b"}, nil +} + +func (b boolNode) String() string { + if b { + return "true" + } + return "false" +} + +func (boolNode) Sigs() sigSet { + return boolSigSet +} + +func (b boolNode) Value(sig Signature) (interface{}, error) { + if sig.str != "b" { + return nil, varTypeError{b.String(), sig} + } + return bool(b), nil +} + +type arrayNode struct { + set sigSet + children []varNode + val interface{} +} + +func (n arrayNode) Infer() (Signature, error) { + for _, v := range n.children { + csig, err := varInfer(v) + if err != nil { + continue + } + return Signature{"a" + csig.str}, nil + } + return Signature{}, fmt.Errorf("can't infer type for %q", n.String()) +} + +func (n arrayNode) String() string { + s := "[" + for i, v := range n.children { + s += v.String() + if i != len(n.children)-1 { + s += ", " + } + } + return s + "]" +} + +func (n arrayNode) Sigs() sigSet { + return n.set +} + +func (n arrayNode) Value(sig Signature) (interface{}, error) { + if n.set.Empty() { + // no 
type information whatsoever, so this must be an empty slice + return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil + } + if !n.set[sig] { + return nil, varTypeError{n.String(), sig} + } + s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children)) + for i, v := range n.children { + rv, err := v.Value(Signature{sig.str[1:]}) + if err != nil { + return nil, err + } + s.Index(i).Set(reflect.ValueOf(rv)) + } + return s.Interface(), nil +} + +func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) { + var n arrayNode + if sig.str != "" { + n.set = sigSet{sig: true} + } + if t := p.next(); t.typ == tokArrayEnd { + return n, nil + } else { + p.backup() + } +Loop: + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + cn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if cset := cn.Sigs(); !cset.Empty() { + if n.set.Empty() { + n.set = cset.ToArray() + } else { + nset := cset.ToArray().Intersect(n.set) + if nset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", cn.String()) + } + n.set = nset + } + } + n.children = append(n.children, cn) + switch t := p.next(); t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokArrayEnd: + break Loop + case tokComma: + continue + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } + return n, nil +} + +type variantNode struct { + n varNode +} + +var variantSet = sigSet{ + Signature{"v"}: true, +} + +func (variantNode) Infer() (Signature, error) { + return Signature{"v"}, nil +} + +func (n variantNode) String() string { + return "<" + n.n.String() + ">" +} + +func (variantNode) Sigs() sigSet { + return variantSet +} + +func (n variantNode) Value(sig Signature) (interface{}, error) { + if sig.str != "v" { + return nil, varTypeError{n.String(), sig} + } + sig, err := varInfer(n.n) + if err != nil { + return nil, err + } + v, err := n.n.Value(sig) + if err != nil { + return nil, err + } + return MakeVariant(v), nil +} + +func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) { + n, err := varMakeNode(p) + if err != nil { + return nil, err + } + if t := p.next(); t.typ != tokVariantEnd { + return nil, fmt.Errorf("unexpected %q", t.val) + } + vn := variantNode{n} + if sig.str != "" && sig.str != "v" { + return nil, varTypeError{vn.String(), sig} + } + return variantNode{n}, nil +} + +type dictEntry struct { + key, val varNode +} + +type dictNode struct { + kset, vset sigSet + children []dictEntry + val interface{} +} + +func (n dictNode) Infer() (Signature, error) { + for _, v := range n.children { + ksig, err := varInfer(v.key) + if err != nil { + continue + } + vsig, err := varInfer(v.val) + if err != nil { + continue + } + return Signature{"a{" + ksig.str + vsig.str + "}"}, nil + } + return Signature{}, fmt.Errorf("can't infer type for %q", n.String()) +} + +func (n dictNode) String() string { + s := "{" + for i, v := range n.children { + s += v.key.String() + ": " + v.val.String() + if i != len(n.children)-1 { + s += ", " + } + } + return s + "}" +} + +func (n dictNode) Sigs() sigSet { + r := sigSet{} + for k := range n.kset { + for v := range n.vset { + sig := "a{" + k.str + v.str + "}" + r[Signature{sig}] = true + } + } + return r +} + +func (n dictNode) Value(sig Signature) (interface{}, error) { + set := n.Sigs() + if set.Empty() { + // no type information -> empty dict + return 
reflect.MakeMap(typeFor(sig.str)).Interface(), nil + } + if !set[sig] { + return nil, varTypeError{n.String(), sig} + } + m := reflect.MakeMap(typeFor(sig.str)) + ksig := Signature{sig.str[2:3]} + vsig := Signature{sig.str[3 : len(sig.str)-1]} + for _, v := range n.children { + kv, err := v.key.Value(ksig) + if err != nil { + return nil, err + } + vv, err := v.val.Value(vsig) + if err != nil { + return nil, err + } + m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv)) + } + return m.Interface(), nil +} + +func varMakeDictNode(p *varParser, sig Signature) (varNode, error) { + var n dictNode + + if sig.str != "" { + if len(sig.str) < 5 { + return nil, fmt.Errorf("invalid signature %q for dict type", sig) + } + ksig := Signature{string(sig.str[2])} + vsig := Signature{sig.str[3 : len(sig.str)-1]} + n.kset = sigSet{ksig: true} + n.vset = sigSet{vsig: true} + } + if t := p.next(); t.typ == tokDictEnd { + return n, nil + } else { + p.backup() + } +Loop: + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + kn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if kset := kn.Sigs(); !kset.Empty() { + if n.kset.Empty() { + n.kset = kset + } else { + n.kset = kset.Intersect(n.kset) + if n.kset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", kn.String()) + } + } + } + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokColon: + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + vn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if vset := vn.Sigs(); !vset.Empty() { + if n.vset.Empty() { + n.vset = vset + } else { + n.vset = n.vset.Intersect(vset) + if n.vset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", vn.String()) + } + } + } + n.children = append(n.children, dictEntry{kn, vn}) + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokDictEnd: + break Loop + case tokComma: + continue + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } + return n, nil +} + +type byteStringNode []byte + +var byteStringSet = sigSet{ + Signature{"ay"}: true, +} + +func (byteStringNode) Infer() (Signature, error) { + return Signature{"ay"}, nil +} + +func (b byteStringNode) String() string { + return string(b) +} + +func (b byteStringNode) Sigs() sigSet { + return byteStringSet +} + +func (b byteStringNode) Value(sig Signature) (interface{}, error) { + if sig.str != "ay" { + return nil, varTypeError{b.String(), sig} + } + return []byte(b), nil +} + +func varParseByteString(s string) ([]byte, error) { + // quotes and b at start are guaranteed to be there + b := make([]byte, 0, 1) + s = s[2 : len(s)-1] + for len(s) != 0 { + c := s[0] + s = s[1:] + if c != '\\' { + b = append(b, c) + continue + } + c = s[0] + s = s[1:] + switch c { + case 'a': + b = append(b, 0x7) + case 'b': + b = append(b, 0x8) + case 'f': + b = append(b, 0xc) + case 'n': + b = append(b, '\n') + case 'r': + b = append(b, '\r') + case 't': + b = append(b, '\t') + case 'x': + if len(s) < 2 { + return nil, errors.New("short escape") + } + n, err := strconv.ParseUint(s[:2], 16, 8) + if err != nil { + return nil, err + 
} + b = append(b, byte(n)) + s = s[2:] + case '0': + if len(s) < 3 { + return nil, errors.New("short escape") + } + n, err := strconv.ParseUint(s[:3], 8, 8) + if err != nil { + return nil, err + } + b = append(b, byte(n)) + s = s[3:] + default: + b = append(b, c) + } + } + return append(b, 0), nil +} + +func varInfer(n varNode) (Signature, error) { + if sig, ok := n.Sigs().Single(); ok { + return sig, nil + } + return n.Infer() +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 000000000..147b5ecc6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. 
All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums.
+  - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
+  - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
+
+Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset.
+
+Let us look at:
+
+  github.com/gogo/protobuf/test/example/example.proto
+
+for a quicker overview.
+
+The following message:
+
+  package test;
+
+  import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+  message A {
+    optional string Description = 1 [(gogoproto.nullable) = false];
+    optional int64 Number = 2 [(gogoproto.nullable) = false];
+    optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
+  }
+
+Will generate a go struct which looks a lot like this:
+
+  type A struct {
+    Description string
+    Number int64
+    Id github_com_gogo_protobuf_test_custom.Uuid
+  }
+
+You will see there are no pointers, since all fields are non-nullable.
+You will also see a custom type which marshals to a string.
+Be warned it is your responsibility to test your custom types thoroughly.
+You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.
+
+Next we will embed the message A in message B.
+
+  message B {
+    optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+    repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+  }
+
+See below that A is embedded in B.
+
+  type B struct {
+    A
+    G []github_com_gogo_protobuf_test_custom.Uint128
+  }
+
+Also see the repeated custom type.
+
+  type Uint128 [2]uint64
+
+Next we will create a custom name for one of our fields.
+
+  message C {
+    optional int64 size = 1 [(gogoproto.customname) = "MySize"];
+  }
+
+See below that the field's name is MySize and not Size.
+
+  type C struct {
+    MySize *int64
+  }
+
+This is useful when a protocol buffer message has a field name which conflicts with a generated method.
+As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error.
+Using customname you can fix this error without changing the field name.
+This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available.
+
+Gogoprotobuf also has some more subtle changes; these could be changed back:
+
+  - the generated package names for imports do not have the extra /filename.pb,
+  but are actually the imports specified in the .proto file.
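As a rough sketch of how the generated code described above is consumed from Go (the import path for the generated example package is illustrative, and the snippet assumes the messages A and C from the examples above together with the default getters and the sizer plugin):

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"

        // Illustrative import path for a package generated from the example
        // messages above; adjust to wherever the generated code actually lives.
        test "github.com/gogo/protobuf/test/example"
    )

    func main() {
        // nullable=false: Description and Number are plain values, not pointers,
        // so no proto.String/proto.Int64 helpers are needed for them.
        a := &test.A{Description: "hello", Number: 1}

        // customname: the proto field "size" is generated as MySize, so it does
        // not collide with the Size() method emitted by the sizer plugin.
        c := &test.C{MySize: proto.Int64(42)}

        data, err := proto.Marshal(a)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(data), c.GetMySize())
    }

Note that, as the nullable warning above explains, presence information is given up: after unmarshalling you cannot distinguish an unset Description from an empty string.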
+
+Gogoprotobuf also has lost some features which should be brought back with time:
+
+  - Marshalling and unmarshalling with reflect and without the unsafe package,
+  this requires work in pointer_reflect.go
+
+Why does nullable break protocol buffer specifications:
+
+The protocol buffer specification states, somewhere, that you should be able to tell whether a
+field is set or unset. With the option nullable=false this feature is lost,
+since your non-nullable fields will always be set. It can be seen as a layer on top of
+protocol buffers, where before and after marshalling all non-nullable fields are set
+and they cannot be unset.
+
+Goprotobuf Compatibility:
+
+Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
+Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
+The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf:
+
+  - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
+  - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix
+  - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method; this is useful if you would rather use enum_stringer, or want to write your own string method.
+  - goproto_getters, if false, the message is generated without get methods; this is useful when you would rather use face
+  - goproto_stringer, if false, the message is generated without the default string method; this is useful if you would rather use stringer, or want to write your own string method.
+  - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension
+  - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
+  - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway).
+
+Less Typing and Peace of Mind are explained in the godoc of their specific plugin folders:
+
+  - github.com/gogo/protobuf/plugin/
+
+If you do not use any of these extensions the code that is generated
+will be the same as if goprotobuf had generated it.
+
+The most complete way to see examples is to look at
+
+  github.com/gogo/protobuf/test/thetest.proto
+
+Gogoprototest is a separate project,
+because we want to keep gogoprotobuf independent of goprotobuf,
+but we still want to test it thoroughly.
+
+*/
+package gogoproto
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
new file mode 100644
index 000000000..5765acb15
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
@@ -0,0 +1,804 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: gogo.proto
+
+/*
+Package gogoproto is a generated protocol buffer package.
+ +It is generated from these files: + gogo.proto + +It has these top-level messages: +*/ +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: 
"varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all,json=faceAll", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all,json=gostringAll", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all,json=populateAll", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all,json=stringerAll", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all,json=equalAll", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all,json=descriptionAll", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all,json=testgenAll", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: 
"varint,63020,opt,name=sizer_all,json=sizerAll", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all,json=compareAll", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all,json=typedeclAll", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all,json=enumdeclAll", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: 
"varint,63032,opt,name=goproto_registration,json=goprotoRegistration", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: "gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: 
"gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var 
E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + proto.RegisterExtension(E_Stringer) + 
proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) } + +var fileDescriptorGogo = []byte{ + // 1220 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x4b, 0x6f, 0x1c, 0x45, + 0x10, 0x80, 0x85, 0x48, 0x14, 0x6f, 0xd9, 0x8e, 0xf1, 0xda, 0x98, 0x10, 0x81, 0x08, 0x9c, 0x38, + 0xd9, 0xa7, 0x08, 0xa5, 0xad, 0xc8, 0x72, 0x2c, 0xc7, 0x4a, 0x84, 0xc1, 0x98, 0x38, 0xbc, 0x0e, + 0xab, 0xd9, 0xdd, 0xf6, 0x78, 0x60, 0x66, 0x7a, 0x98, 0xe9, 0x89, 0xe2, 0xdc, 0x50, 0x78, 0x08, + 0x21, 0xde, 0x48, 0x90, 0x90, 0x04, 0x38, 0xf0, 0x7e, 0x86, 0xf7, 0x91, 0x0b, 0x8f, 0x2b, 0xff, + 0x81, 0x0b, 0x60, 0xde, 0xbe, 0xf9, 0x82, 0x6a, 0xb6, 0x6a, 0xb6, 0x67, 0xbd, 0x52, 0xf7, 0xde, + 0xc6, 0xeb, 0xfe, 0xbe, 0xad, 0xa9, 0x9a, 0xae, 0xea, 0x59, 0x00, 0x5f, 0xf9, 0x6a, 0x3a, 0x49, + 0x95, 0x56, 0xf5, 0x1a, 0x5e, 0x17, 0x97, 0x07, 0x0f, 0xf9, 0x4a, 0xf9, 0xa1, 0x9c, 0x29, 0xfe, + 0x6a, 0xe6, 0xeb, 0x33, 0x6d, 0x99, 0xb5, 0xd2, 0x20, 0xd1, 0x2a, 0xed, 0x2c, 0x16, 0x77, 0xc1, + 0x04, 0x2d, 0x6e, 0xc8, 0x38, 0x8f, 0x1a, 0x49, 0x2a, 0xd7, 0x83, 0xb3, 0xf5, 0x9b, 0xa6, 0x3b, + 0xe4, 0x34, 0x93, 0xd3, 0x8b, 0x71, 0x1e, 0xdd, 0x9d, 0xe8, 0x40, 0xc5, 0xd9, 0x81, 0xab, 0xbf, + 0x5c, 0x7b, 0xe8, 0x9a, 0xdb, 0x87, 0x56, 0xc7, 0x09, 0xc5, 0xff, 0xad, 0x14, 0xa0, 0x58, 0x85, + 0xeb, 0x2b, 0xbe, 0x4c, 0xa7, 0x41, 0xec, 0xcb, 0xd4, 0x62, 0xfc, 0x9e, 0x8c, 0x13, 0x86, 0xf1, + 0x5e, 0x42, 0xc5, 0x02, 0x8c, 0x0e, 0xe2, 0xfa, 0x81, 0x5c, 0x23, 0xd2, 0x94, 0x2c, 0xc1, 0x58, + 0x21, 0x69, 0xe5, 0x99, 0x56, 0x51, 0xec, 0x45, 0xd2, 0xa2, 0xf9, 0xb1, 0xd0, 0xd4, 0x56, 0xf7, + 0x23, 0xb6, 0x50, 0x52, 0x42, 0xc0, 0x10, 0x7e, 0xd2, 0x96, 0xad, 0xd0, 0x62, 0xf8, 0x89, 0x02, + 0x29, 0xd7, 0x8b, 0xd3, 0x30, 0x89, 0xd7, 0x67, 0xbc, 0x30, 0x97, 0x66, 0x24, 0xb7, 0xf6, 0xf5, + 0x9c, 0xc6, 0x65, 0x2c, 0xfb, 0xf9, 0xfc, 0x9e, 0x22, 0x9c, 0x89, 0x52, 0x60, 0xc4, 0x64, 0x54, + 0xd1, 0x97, 0x5a, 0xcb, 0x34, 0x6b, 0x78, 0x61, 0xbf, 0xf0, 0x8e, 0x07, 0x61, 0x69, 0xbc, 0xb0, + 0x55, 0xad, 0xe2, 0x52, 0x87, 0x9c, 0x0f, 0x43, 0xb1, 0x06, 0x37, 0xf4, 0x79, 0x2a, 0x1c, 0x9c, + 0x17, 0xc9, 0x39, 0xb9, 0xeb, 0xc9, 0x40, 0xed, 0x0a, 0xf0, 0xe7, 0x65, 0x2d, 0x1d, 0x9c, 0xaf, + 0x93, 0xb3, 0x4e, 0x2c, 0x97, 0x14, 0x8d, 0x27, 0x61, 0xfc, 0x8c, 0x4c, 0x9b, 0x2a, 0x93, 0x0d, + 0xf9, 0x68, 0xee, 0x85, 0x0e, 0xba, 0x4b, 0xa4, 0x1b, 0x23, 0x70, 0x11, 0x39, 0x74, 0x1d, 0x81, + 0xa1, 0x75, 0xaf, 0x25, 0x1d, 0x14, 0x97, 0x49, 0xb1, 0x0f, 0xd7, 
0x23, 0x3a, 0x0f, 0x23, 0xbe, + 0xea, 0xdc, 0x92, 0x03, 0x7e, 0x85, 0xf0, 0x61, 0x66, 0x48, 0x91, 0xa8, 0x24, 0x0f, 0x3d, 0xed, + 0x12, 0xc1, 0x1b, 0xac, 0x60, 0x86, 0x14, 0x03, 0xa4, 0xf5, 0x4d, 0x56, 0x64, 0x46, 0x3e, 0xe7, + 0x60, 0x58, 0xc5, 0xe1, 0xa6, 0x8a, 0x5d, 0x82, 0x78, 0x8b, 0x0c, 0x40, 0x08, 0x0a, 0x66, 0xa1, + 0xe6, 0x5a, 0x88, 0xb7, 0xb7, 0x78, 0x7b, 0x70, 0x05, 0x96, 0x60, 0x8c, 0x1b, 0x54, 0xa0, 0x62, + 0x07, 0xc5, 0x3b, 0xa4, 0xd8, 0x6f, 0x60, 0x74, 0x1b, 0x5a, 0x66, 0xda, 0x97, 0x2e, 0x92, 0x77, + 0xf9, 0x36, 0x08, 0xa1, 0x54, 0x36, 0x65, 0xdc, 0xda, 0x70, 0x33, 0xbc, 0xc7, 0xa9, 0x64, 0x06, + 0x15, 0x0b, 0x30, 0x1a, 0x79, 0x69, 0xb6, 0xe1, 0x85, 0x4e, 0xe5, 0x78, 0x9f, 0x1c, 0x23, 0x25, + 0x44, 0x19, 0xc9, 0xe3, 0x41, 0x34, 0x1f, 0x70, 0x46, 0x0c, 0x8c, 0xb6, 0x5e, 0xa6, 0xbd, 0x66, + 0x28, 0x1b, 0x83, 0xd8, 0x3e, 0xe4, 0xad, 0xd7, 0x61, 0x97, 0x4d, 0xe3, 0x2c, 0xd4, 0xb2, 0xe0, + 0x9c, 0x93, 0xe6, 0x23, 0xae, 0x74, 0x01, 0x20, 0xfc, 0x00, 0xdc, 0xd8, 0x77, 0x4c, 0x38, 0xc8, + 0x3e, 0x26, 0xd9, 0x54, 0x9f, 0x51, 0x41, 0x2d, 0x61, 0x50, 0xe5, 0x27, 0xdc, 0x12, 0x64, 0x8f, + 0x6b, 0x05, 0x26, 0xf3, 0x38, 0xf3, 0xd6, 0x07, 0xcb, 0xda, 0xa7, 0x9c, 0xb5, 0x0e, 0x5b, 0xc9, + 0xda, 0x29, 0x98, 0x22, 0xe3, 0x60, 0x75, 0xfd, 0x8c, 0x1b, 0x6b, 0x87, 0x5e, 0xab, 0x56, 0xf7, + 0x21, 0x38, 0x58, 0xa6, 0xf3, 0xac, 0x96, 0x71, 0x86, 0x4c, 0x23, 0xf2, 0x12, 0x07, 0xf3, 0x55, + 0x32, 0x73, 0xc7, 0x5f, 0x2c, 0x05, 0xcb, 0x5e, 0x82, 0xf2, 0xfb, 0xe1, 0x00, 0xcb, 0xf3, 0x38, + 0x95, 0x2d, 0xe5, 0xc7, 0xc1, 0x39, 0xd9, 0x76, 0x50, 0x7f, 0xde, 0x53, 0xaa, 0x35, 0x03, 0x47, + 0xf3, 0x09, 0xb8, 0xae, 0x3c, 0xab, 0x34, 0x82, 0x28, 0x51, 0xa9, 0xb6, 0x18, 0xbf, 0xe0, 0x4a, + 0x95, 0xdc, 0x89, 0x02, 0x13, 0x8b, 0xb0, 0xbf, 0xf8, 0xd3, 0xf5, 0x91, 0xfc, 0x92, 0x44, 0xa3, + 0x5d, 0x8a, 0x1a, 0x47, 0x4b, 0x45, 0x89, 0x97, 0xba, 0xf4, 0xbf, 0xaf, 0xb8, 0x71, 0x10, 0x42, + 0x8d, 0x43, 0x6f, 0x26, 0x12, 0xa7, 0xbd, 0x83, 0xe1, 0x6b, 0x6e, 0x1c, 0xcc, 0x90, 0x82, 0x0f, + 0x0c, 0x0e, 0x8a, 0x6f, 0x58, 0xc1, 0x0c, 0x2a, 0xee, 0xe9, 0x0e, 0xda, 0x54, 0xfa, 0x41, 0xa6, + 0x53, 0x0f, 0x57, 0x5b, 0x54, 0xdf, 0x6e, 0x55, 0x0f, 0x61, 0xab, 0x06, 0x2a, 0x4e, 0xc2, 0x58, + 0xcf, 0x11, 0xa3, 0x7e, 0xcb, 0x2e, 0xdb, 0xb2, 0xcc, 0x32, 0xcf, 0x2f, 0x85, 0x8f, 0x6d, 0x53, + 0x33, 0xaa, 0x9e, 0x30, 0xc4, 0x9d, 0x58, 0xf7, 0xea, 0x39, 0xc0, 0x2e, 0x3b, 0xbf, 0x5d, 0x96, + 0xbe, 0x72, 0x0c, 0x10, 0xc7, 0x61, 0xb4, 0x72, 0x06, 0xb0, 0xab, 0x1e, 0x27, 0xd5, 0x88, 0x79, + 0x04, 0x10, 0x87, 0x61, 0x0f, 0xce, 0x73, 0x3b, 0xfe, 0x04, 0xe1, 0xc5, 0x72, 0x71, 0x14, 0x86, + 0x78, 0x8e, 0xdb, 0xd1, 0x27, 0x09, 0x2d, 0x11, 0xc4, 0x79, 0x86, 0xdb, 0xf1, 0xa7, 0x18, 0x67, + 0x04, 0x71, 0xf7, 0x14, 0x7e, 0xf7, 0xcc, 0x1e, 0xea, 0xc3, 0x9c, 0xbb, 0x59, 0xd8, 0x47, 0xc3, + 0xdb, 0x4e, 0x3f, 0x4d, 0x5f, 0xce, 0x84, 0xb8, 0x03, 0xf6, 0x3a, 0x26, 0xfc, 0x59, 0x42, 0x3b, + 0xeb, 0xc5, 0x02, 0x0c, 0x1b, 0x03, 0xdb, 0x8e, 0x3f, 0x47, 0xb8, 0x49, 0x61, 0xe8, 0x34, 0xb0, + 0xed, 0x82, 0xe7, 0x39, 0x74, 0x22, 0x30, 0x6d, 0x3c, 0xab, 0xed, 0xf4, 0x0b, 0x9c, 0x75, 0x46, + 0xc4, 0x1c, 0xd4, 0xca, 0xfe, 0x6b, 0xe7, 0x5f, 0x24, 0xbe, 0xcb, 0x60, 0x06, 0x8c, 0xfe, 0x6f, + 0x57, 0xbc, 0xc4, 0x19, 0x30, 0x28, 0xdc, 0x46, 0xbd, 0x33, 0xdd, 0x6e, 0x7a, 0x99, 0xb7, 0x51, + 0xcf, 0x48, 0xc7, 0x6a, 0x16, 0x6d, 0xd0, 0xae, 0x78, 0x85, 0xab, 0x59, 0xac, 0xc7, 0x30, 0x7a, + 0x87, 0xa4, 0xdd, 0xf1, 0x2a, 0x87, 0xd1, 0x33, 0x23, 0xc5, 0x0a, 0xd4, 0x77, 0x0f, 0x48, 0xbb, + 0xef, 0x35, 0xf2, 0x8d, 0xef, 0x9a, 0x8f, 0xe2, 0x3e, 0x98, 0xea, 0x3f, 0x1c, 0xed, 0xd6, 
0x0b, + 0xdb, 0x3d, 0xaf, 0x33, 0xe6, 0x6c, 0x14, 0xa7, 0xba, 0x5d, 0xd6, 0x1c, 0x8c, 0x76, 0xed, 0xc5, + 0xed, 0x6a, 0xa3, 0x35, 0xe7, 0xa2, 0x98, 0x07, 0xe8, 0xce, 0x24, 0xbb, 0xeb, 0x12, 0xb9, 0x0c, + 0x08, 0xb7, 0x06, 0x8d, 0x24, 0x3b, 0x7f, 0x99, 0xb7, 0x06, 0x11, 0xb8, 0x35, 0x78, 0x1a, 0xd9, + 0xe9, 0x2b, 0xbc, 0x35, 0x18, 0x11, 0xb3, 0x30, 0x14, 0xe7, 0x61, 0x88, 0xcf, 0x56, 0xfd, 0xe6, + 0x3e, 0xe3, 0x46, 0x86, 0x6d, 0x86, 0x7f, 0xdd, 0x21, 0x98, 0x01, 0x71, 0x18, 0xf6, 0xca, 0xa8, + 0x29, 0xdb, 0x36, 0xf2, 0xb7, 0x1d, 0xee, 0x27, 0xb8, 0x5a, 0xcc, 0x01, 0x74, 0x5e, 0xa6, 0x31, + 0x0a, 0x1b, 0xfb, 0xfb, 0x4e, 0xe7, 0xbd, 0xde, 0x40, 0xba, 0x82, 0xe2, 0x6d, 0xdc, 0x22, 0xd8, + 0xaa, 0x0a, 0x8a, 0x17, 0xf0, 0x23, 0xb0, 0xef, 0xe1, 0x4c, 0xc5, 0xda, 0xf3, 0x6d, 0xf4, 0x1f, + 0x44, 0xf3, 0x7a, 0x4c, 0x58, 0xa4, 0x52, 0xa9, 0x3d, 0x3f, 0xb3, 0xb1, 0x7f, 0x12, 0x5b, 0x02, + 0x08, 0xb7, 0xbc, 0x4c, 0xbb, 0xdc, 0xf7, 0x5f, 0x0c, 0x33, 0x80, 0x41, 0xe3, 0xf5, 0x23, 0x72, + 0xd3, 0xc6, 0xfe, 0xcd, 0x41, 0xd3, 0x7a, 0x71, 0x14, 0x6a, 0x78, 0x59, 0xfc, 0x0e, 0x61, 0x83, + 0xff, 0x21, 0xb8, 0x4b, 0xe0, 0x37, 0x67, 0xba, 0xad, 0x03, 0x7b, 0xb2, 0xff, 0xa5, 0x4a, 0xf3, + 0x7a, 0x31, 0x0f, 0xc3, 0x99, 0x6e, 0xb7, 0x73, 0x3a, 0xd1, 0x58, 0xf0, 0xff, 0x76, 0xca, 0x97, + 0xdc, 0x92, 0x39, 0xb6, 0x08, 0x13, 0x2d, 0x15, 0xf5, 0x82, 0xc7, 0x60, 0x49, 0x2d, 0xa9, 0x95, + 0x62, 0x17, 0x3d, 0x78, 0x9b, 0x1f, 0xe8, 0x8d, 0xbc, 0x39, 0xdd, 0x52, 0xd1, 0x0c, 0x1e, 0x35, + 0xbb, 0xbf, 0xa0, 0x95, 0x07, 0xcf, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xed, 0x5f, 0x6c, 0x20, + 0x74, 0x13, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 000000000..7f0997935 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,133 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 000000000..6b851c562 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,357 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. 
All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) 
string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return 
proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message 
*google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + if IsProto3(file) { + return false + } + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 000000000..a85bf1984 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? 
+func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 000000000..4174cbd9f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2280 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +/* +Package descriptor is a generated protocol buffer package. + +It is generated from these files: + descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + ExtensionRangeOptions + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ +package descriptor + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} } + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. 
+ SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{1} } + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{2} } + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{2, 0} +} + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} } + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? 
+ DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} } + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. +type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} } + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. 
+type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{6} } + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. +type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{6, 0} +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{7} +} + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. +type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} } + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} } + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. 
+ JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. 
+ CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} } + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m 
*FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. 
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ // map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageOptions) Reset() { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage() {}
+func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} }
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MessageOptions
+}
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+ if m != nil && m.MessageSetWireFormat != nil {
+ return *m.MessageSetWireFormat
+ }
+ return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+ if m != nil && m.NoStandardDescriptorAccessor != nil {
+ return *m.NoStandardDescriptorAccessor
+ }
+ return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+ if m != nil && m.MapEntry != nil {
+ return *m.MapEntry
+ }
+ return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type FieldOptions struct {
+ // The ctype option instructs the C++ code generator to use a different
+ // representation of the field than it normally would. See the specific
+ // options below. This option is not yet implemented in the open source
+ // release -- sorry, we'll try to include it in a future version!
+ Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+ // The packed option can be enabled for repeated primitive fields to enable
+ // a more efficient representation on the wire.
Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. 
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. 
+type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{18} } + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{18, 0} +} + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. 
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{19} } + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. 
Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{19, 0} +} + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{20} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{20, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, 
FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) } + +var fileDescriptorDescriptor = []byte{ + // 2487 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0xe5, 0xf1, 0xd8, 0x9b, 0x30, 0xde, 0x8f, 0x38, 0xda, 0x8f, + 0x38, 0x49, 0xab, 0x2c, 0x9c, 0xc4, 0xc9, 0x3a, 0xc5, 0xb6, 0xb2, 0xc4, 0x78, 0x95, 0xca, 0x92, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0xd4, 0xff, 0xa0, 0x28, 0x8a, 0x1e, 0x7a, 0x59, 0xa0, 0xd7, 0x02, 0x05, + 0xda, 0x7b, 0xaf, 0x05, 0x7a, 0xef, 0xa1, 0x40, 0x0b, 0xb4, 0x7f, 0x42, 0x8f, 0xc5, 0xcc, 0x90, + 0x14, 0xf5, 0x95, 0x78, 0x17, 0x48, 0xf6, 0x64, 0xcf, 0xef, 0xfd, 0xde, 0xe3, 0x9b, 0x37, 0x6f, + 0xde, 0xbc, 0x19, 0x01, 0xd2, 0xa9, 0xa7, 0xb9, 0x86, 0xe3, 0xdb, 0x6e, 0xc5, 0x71, 0x6d, 0xdf, + 0xc6, 0x6b, 0x03, 0xdb, 0x1e, 0x98, 0x54, 0x8c, 0x4e, 0xc6, 0xfd, 0xf2, 0x11, 0xac, 0xdf, 0x33, + 0x4c, 0x5a, 0x8f, 0x88, 0x5d, 0xea, 0xe3, 0x3b, 0x90, 0xee, 0x1b, 0x26, 0x95, 0x12, 0xdb, 0xa9, + 0x9d, 0xc2, 0xee, 0x87, 0x95, 0x19, 0xa5, 0xca, 0xb4, 0x46, 0x87, 0xc1, 0x0a, 0xd7, 0x28, 0xff, + 0x3b, 0x0d, 0x1b, 0x0b, 0xa4, 0x18, 0x43, 0xda, 0x22, 0x23, 0x66, 0x31, 0xb1, 0x93, 0x57, 0xf8, + 0xff, 0x58, 0x82, 0x15, 0x87, 0x68, 0x8f, 0xc9, 0x80, 0x4a, 0x49, 0x0e, 0x87, 0x43, 0xfc, 0x3e, + 0x80, 0x4e, 0x1d, 0x6a, 0xe9, 0xd4, 0xd2, 0x4e, 0xa5, 0xd4, 0x76, 0x6a, 0x27, 0xaf, 0xc4, 0x10, + 0x7c, 0x0d, 0xd6, 0x9d, 0xf1, 0x89, 0x69, 0x68, 0x6a, 0x8c, 0x06, 0xdb, 0xa9, 0x9d, 0x8c, 0x82, + 0x84, 0xa0, 0x3e, 0x21, 0x5f, 0x86, 0xb5, 0xa7, 0x94, 0x3c, 0x8e, 0x53, 0x0b, 0x9c, 0x5a, 0x62, + 0x70, 0x8c, 0x58, 0x83, 0xe2, 0x88, 0x7a, 0x1e, 0x19, 0x50, 0xd5, 0x3f, 0x75, 0xa8, 0x94, 0xe6, + 0xb3, 0xdf, 0x9e, 0x9b, 0xfd, 0xec, 0xcc, 0x0b, 0x81, 0x56, 0xef, 0xd4, 0xa1, 0xb8, 0x0a, 0x79, + 0x6a, 0x8d, 0x47, 0xc2, 0x42, 0x66, 0x49, 0xfc, 0x64, 0x6b, 0x3c, 0x9a, 0xb5, 0x92, 0x63, 0x6a, + 0x81, 0x89, 0x15, 0x8f, 0xba, 0x4f, 0x0c, 0x8d, 0x4a, 0x59, 0x6e, 0xe0, 0xf2, 0x9c, 0x81, 0xae, + 0x90, 0xcf, 0xda, 0x08, 0xf5, 0x70, 0x0d, 0xf2, 0xf4, 0x99, 0x4f, 0x2d, 0xcf, 0xb0, 0x2d, 0x69, + 0x85, 0x1b, 0xf9, 0x68, 0xc1, 0x2a, 0x52, 0x53, 0x9f, 0x35, 0x31, 0xd1, 0xc3, 0x7b, 0xb0, 0x62, + 0x3b, 0xbe, 0x61, 0x5b, 0x9e, 0x94, 0xdb, 0x4e, 0xec, 0x14, 0x76, 0xdf, 0x5d, 0x98, 0x08, 0x6d, + 0xc1, 0x51, 0x42, 0x32, 0x6e, 0x00, 0xf2, 0xec, 0xb1, 0xab, 0x51, 0x55, 0xb3, 0x75, 0xaa, 0x1a, + 0x56, 0xdf, 0x96, 0xf2, 0xdc, 0xc0, 0xc5, 0xf9, 0x89, 0x70, 0x62, 0xcd, 0xd6, 0x69, 0xc3, 0xea, + 0xdb, 0x4a, 0xc9, 0x9b, 0x1a, 0xe3, 0x73, 0x90, 0xf5, 0x4e, 0x2d, 0x9f, 0x3c, 0x93, 0x8a, 0x3c, + 0x43, 0x82, 0x51, 0xf9, 0xcf, 0x59, 0x58, 0x3b, 0x4b, 0x8a, 0xdd, 0x85, 0x4c, 0x9f, 0xcd, 0x52, + 0x4a, 0x7e, 0x93, 0x18, 0x08, 0x9d, 0xe9, 0x20, 0x66, 0xbf, 
0x65, 0x10, 0xab, 0x50, 0xb0, 0xa8, + 0xe7, 0x53, 0x5d, 0x64, 0x44, 0xea, 0x8c, 0x39, 0x05, 0x42, 0x69, 0x3e, 0xa5, 0xd2, 0xdf, 0x2a, + 0xa5, 0x1e, 0xc0, 0x5a, 0xe4, 0x92, 0xea, 0x12, 0x6b, 0x10, 0xe6, 0xe6, 0xf5, 0x57, 0x79, 0x52, + 0x91, 0x43, 0x3d, 0x85, 0xa9, 0x29, 0x25, 0x3a, 0x35, 0xc6, 0x75, 0x00, 0xdb, 0xa2, 0x76, 0x5f, + 0xd5, 0xa9, 0x66, 0x4a, 0xb9, 0x25, 0x51, 0x6a, 0x33, 0xca, 0x5c, 0x94, 0x6c, 0x81, 0x6a, 0x26, + 0xfe, 0x74, 0x92, 0x6a, 0x2b, 0x4b, 0x32, 0xe5, 0x48, 0x6c, 0xb2, 0xb9, 0x6c, 0x3b, 0x86, 0x92, + 0x4b, 0x59, 0xde, 0x53, 0x3d, 0x98, 0x59, 0x9e, 0x3b, 0x51, 0x79, 0xe5, 0xcc, 0x94, 0x40, 0x4d, + 0x4c, 0x6c, 0xd5, 0x8d, 0x0f, 0xf1, 0x07, 0x10, 0x01, 0x2a, 0x4f, 0x2b, 0xe0, 0x55, 0xa8, 0x18, + 0x82, 0x2d, 0x32, 0xa2, 0x5b, 0xcf, 0xa1, 0x34, 0x1d, 0x1e, 0xbc, 0x09, 0x19, 0xcf, 0x27, 0xae, + 0xcf, 0xb3, 0x30, 0xa3, 0x88, 0x01, 0x46, 0x90, 0xa2, 0x96, 0xce, 0xab, 0x5c, 0x46, 0x61, 0xff, + 0xe2, 0x1f, 0x4d, 0x26, 0x9c, 0xe2, 0x13, 0xfe, 0x78, 0x7e, 0x45, 0xa7, 0x2c, 0xcf, 0xce, 0x7b, + 0xeb, 0x36, 0xac, 0x4e, 0x4d, 0xe0, 0xac, 0x9f, 0x2e, 0xff, 0x02, 0xde, 0x5e, 0x68, 0x1a, 0x3f, + 0x80, 0xcd, 0xb1, 0x65, 0x58, 0x3e, 0x75, 0x1d, 0x97, 0xb2, 0x8c, 0x15, 0x9f, 0x92, 0xfe, 0xb3, + 0xb2, 0x24, 0xe7, 0x8e, 0xe3, 0x6c, 0x61, 0x45, 0xd9, 0x18, 0xcf, 0x83, 0x57, 0xf3, 0xb9, 0xff, + 0xae, 0xa0, 0x17, 0x2f, 0x5e, 0xbc, 0x48, 0x96, 0x7f, 0x9d, 0x85, 0xcd, 0x45, 0x7b, 0x66, 0xe1, + 0xf6, 0x3d, 0x07, 0x59, 0x6b, 0x3c, 0x3a, 0xa1, 0x2e, 0x0f, 0x52, 0x46, 0x09, 0x46, 0xb8, 0x0a, + 0x19, 0x93, 0x9c, 0x50, 0x53, 0x4a, 0x6f, 0x27, 0x76, 0x4a, 0xbb, 0xd7, 0xce, 0xb4, 0x2b, 0x2b, + 0x4d, 0xa6, 0xa2, 0x08, 0x4d, 0xfc, 0x19, 0xa4, 0x83, 0x12, 0xcd, 0x2c, 0x5c, 0x3d, 0x9b, 0x05, + 0xb6, 0x97, 0x14, 0xae, 0x87, 0xdf, 0x81, 0x3c, 0xfb, 0x2b, 0x72, 0x23, 0xcb, 0x7d, 0xce, 0x31, + 0x80, 0xe5, 0x05, 0xde, 0x82, 0x1c, 0xdf, 0x26, 0x3a, 0x0d, 0x8f, 0xb6, 0x68, 0xcc, 0x12, 0x4b, + 0xa7, 0x7d, 0x32, 0x36, 0x7d, 0xf5, 0x09, 0x31, 0xc7, 0x94, 0x27, 0x7c, 0x5e, 0x29, 0x06, 0xe0, + 0x4f, 0x19, 0x86, 0x2f, 0x42, 0x41, 0xec, 0x2a, 0xc3, 0xd2, 0xe9, 0x33, 0x5e, 0x3d, 0x33, 0x8a, + 0xd8, 0x68, 0x0d, 0x86, 0xb0, 0xcf, 0x3f, 0xf2, 0x6c, 0x2b, 0x4c, 0x4d, 0xfe, 0x09, 0x06, 0xf0, + 0xcf, 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x9e, 0xde, 0x6c, 0x4e, 0x95, 0xff, 0x94, 0x84, 0x34, + 0xaf, 0x17, 0x6b, 0x50, 0xe8, 0x3d, 0xec, 0xc8, 0x6a, 0xbd, 0x7d, 0x7c, 0xd0, 0x94, 0x51, 0x02, + 0x97, 0x00, 0x38, 0x70, 0xaf, 0xd9, 0xae, 0xf6, 0x50, 0x32, 0x1a, 0x37, 0x5a, 0xbd, 0xbd, 0x9b, + 0x28, 0x15, 0x29, 0x1c, 0x0b, 0x20, 0x1d, 0x27, 0xdc, 0xd8, 0x45, 0x19, 0x8c, 0xa0, 0x28, 0x0c, + 0x34, 0x1e, 0xc8, 0xf5, 0xbd, 0x9b, 0x28, 0x3b, 0x8d, 0xdc, 0xd8, 0x45, 0x2b, 0x78, 0x15, 0xf2, + 0x1c, 0x39, 0x68, 0xb7, 0x9b, 0x28, 0x17, 0xd9, 0xec, 0xf6, 0x94, 0x46, 0xeb, 0x10, 0xe5, 0x23, + 0x9b, 0x87, 0x4a, 0xfb, 0xb8, 0x83, 0x20, 0xb2, 0x70, 0x24, 0x77, 0xbb, 0xd5, 0x43, 0x19, 0x15, + 0x22, 0xc6, 0xc1, 0xc3, 0x9e, 0xdc, 0x45, 0xc5, 0x29, 0xb7, 0x6e, 0xec, 0xa2, 0xd5, 0xe8, 0x13, + 0x72, 0xeb, 0xf8, 0x08, 0x95, 0xf0, 0x3a, 0xac, 0x8a, 0x4f, 0x84, 0x4e, 0xac, 0xcd, 0x40, 0x7b, + 0x37, 0x11, 0x9a, 0x38, 0x22, 0xac, 0xac, 0x4f, 0x01, 0x7b, 0x37, 0x11, 0x2e, 0xd7, 0x20, 0xc3, + 0xb3, 0x0b, 0x63, 0x28, 0x35, 0xab, 0x07, 0x72, 0x53, 0x6d, 0x77, 0x7a, 0x8d, 0x76, 0xab, 0xda, + 0x44, 0x89, 0x09, 0xa6, 0xc8, 0x3f, 0x39, 0x6e, 0x28, 0x72, 0x1d, 0x25, 0xe3, 0x58, 0x47, 0xae, + 0xf6, 0xe4, 0x3a, 0x4a, 0x95, 0x35, 0xd8, 0x5c, 0x54, 0x27, 0x17, 0xee, 0x8c, 0xd8, 0x12, 0x27, + 0x97, 0x2c, 0x31, 0xb7, 0x35, 0xb7, 0xc4, 0xff, 0x4a, 0xc2, 0xc6, 0x82, 0xb3, 0x62, 
0xe1, 0x47, + 0x7e, 0x08, 0x19, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb2, 0xf0, 0xd0, 0xe1, 0x09, 0x3b, 0x77, 0x82, + 0x72, 0xbd, 0x78, 0x07, 0x91, 0x5a, 0xd2, 0x41, 0x30, 0x13, 0x73, 0x35, 0xfd, 0xe7, 0x73, 0x35, + 0x5d, 0x1c, 0x7b, 0x7b, 0x67, 0x39, 0xf6, 0x38, 0xf6, 0xcd, 0x6a, 0x7b, 0x66, 0x41, 0x6d, 0xbf, + 0x0b, 0xeb, 0x73, 0x86, 0xce, 0x5c, 0x63, 0x7f, 0x99, 0x00, 0x69, 0x59, 0x70, 0x5e, 0x51, 0xe9, + 0x92, 0x53, 0x95, 0xee, 0xee, 0x6c, 0x04, 0x2f, 0x2d, 0x5f, 0x84, 0xb9, 0xb5, 0xfe, 0x3a, 0x01, + 0xe7, 0x16, 0x77, 0x8a, 0x0b, 0x7d, 0xf8, 0x0c, 0xb2, 0x23, 0xea, 0x0f, 0xed, 0xb0, 0x5b, 0xfa, + 0x78, 0xc1, 0x19, 0xcc, 0xc4, 0xb3, 0x8b, 0x1d, 0x68, 0xc5, 0x0f, 0xf1, 0xd4, 0xb2, 0x76, 0x4f, + 0x78, 0x33, 0xe7, 0xe9, 0xaf, 0x92, 0xf0, 0xf6, 0x42, 0xe3, 0x0b, 0x1d, 0x7d, 0x0f, 0xc0, 0xb0, + 0x9c, 0xb1, 0x2f, 0x3a, 0x22, 0x51, 0x60, 0xf3, 0x1c, 0xe1, 0xc5, 0x8b, 0x15, 0xcf, 0xb1, 0x1f, + 0xc9, 0x53, 0x5c, 0x0e, 0x02, 0xe2, 0x84, 0x3b, 0x13, 0x47, 0xd3, 0xdc, 0xd1, 0xf7, 0x97, 0xcc, + 0x74, 0x2e, 0x31, 0x3f, 0x01, 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x46, 0x86, + 0x35, 0xe0, 0x27, 0x48, 0x6e, 0x3f, 0xd3, 0x27, 0xa6, 0x47, 0x95, 0x35, 0x21, 0xee, 0x86, 0x52, + 0xa6, 0xc1, 0x13, 0xc8, 0x8d, 0x69, 0x64, 0xa7, 0x34, 0x84, 0x38, 0xd2, 0x28, 0xff, 0x31, 0x07, + 0x85, 0x58, 0x5f, 0x8d, 0x2f, 0x41, 0xf1, 0x11, 0x79, 0x42, 0xd4, 0xf0, 0xae, 0x24, 0x22, 0x51, + 0x60, 0x58, 0x27, 0xb8, 0x2f, 0x7d, 0x02, 0x9b, 0x9c, 0x62, 0x8f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, + 0x3c, 0x8f, 0x07, 0x2d, 0xc7, 0xa9, 0x98, 0xc9, 0xda, 0x4c, 0x54, 0x0b, 0x25, 0xf8, 0x16, 0x6c, + 0x70, 0x8d, 0xd1, 0xd8, 0xf4, 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xbd, 0x79, 0xfc, 0x24, 0x89, 0x3c, + 0x5b, 0x67, 0x8c, 0xa3, 0x80, 0xc0, 0x3c, 0xf2, 0x70, 0x1d, 0xde, 0xe3, 0x6a, 0x03, 0x6a, 0x51, + 0x97, 0xf8, 0x54, 0xa5, 0x5f, 0x8d, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x21, 0xf1, 0x86, 0xd2, + 0x26, 0x33, 0x70, 0x90, 0x94, 0x12, 0xca, 0x05, 0x46, 0x3c, 0x0c, 0x78, 0x32, 0xa7, 0x55, 0x2d, + 0xfd, 0x73, 0xe2, 0x0d, 0xf1, 0x3e, 0x9c, 0xe3, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x81, 0xaa, 0x0d, + 0xa9, 0xf6, 0x58, 0x1d, 0xfb, 0xfd, 0x3b, 0xd2, 0x3b, 0xf1, 0xef, 0x73, 0x0f, 0xbb, 0x9c, 0x53, + 0x63, 0x94, 0x63, 0xbf, 0x7f, 0x07, 0x77, 0xa1, 0xc8, 0x16, 0x63, 0x64, 0x3c, 0xa7, 0x6a, 0xdf, + 0x76, 0xf9, 0xd1, 0x58, 0x5a, 0x50, 0x9a, 0x62, 0x11, 0xac, 0xb4, 0x03, 0x85, 0x23, 0x5b, 0xa7, + 0xfb, 0x99, 0x6e, 0x47, 0x96, 0xeb, 0x4a, 0x21, 0xb4, 0x72, 0xcf, 0x76, 0x59, 0x42, 0x0d, 0xec, + 0x28, 0xc0, 0x05, 0x91, 0x50, 0x03, 0x3b, 0x0c, 0xef, 0x2d, 0xd8, 0xd0, 0x34, 0x31, 0x67, 0x43, + 0x53, 0x83, 0x3b, 0x96, 0x27, 0xa1, 0xa9, 0x60, 0x69, 0xda, 0xa1, 0x20, 0x04, 0x39, 0xee, 0xe1, + 0x4f, 0xe1, 0xed, 0x49, 0xb0, 0xe2, 0x8a, 0xeb, 0x73, 0xb3, 0x9c, 0x55, 0xbd, 0x05, 0x1b, 0xce, + 0xe9, 0xbc, 0x22, 0x9e, 0xfa, 0xa2, 0x73, 0x3a, 0xab, 0x76, 0x1b, 0x36, 0x9d, 0xa1, 0x33, 0xaf, + 0x77, 0x35, 0xae, 0x87, 0x9d, 0xa1, 0x33, 0xab, 0xf8, 0x11, 0xbf, 0x70, 0xbb, 0x54, 0x23, 0x3e, + 0xd5, 0xa5, 0xf3, 0x71, 0x7a, 0x4c, 0x80, 0xaf, 0x03, 0xd2, 0x34, 0x95, 0x5a, 0xe4, 0xc4, 0xa4, + 0x2a, 0x71, 0xa9, 0x45, 0x3c, 0xe9, 0x62, 0x9c, 0x5c, 0xd2, 0x34, 0x99, 0x4b, 0xab, 0x5c, 0x88, + 0xaf, 0xc2, 0xba, 0x7d, 0xf2, 0x48, 0x13, 0x29, 0xa9, 0x3a, 0x2e, 0xed, 0x1b, 0xcf, 0xa4, 0x0f, + 0x79, 0x7c, 0xd7, 0x98, 0x80, 0x27, 0x64, 0x87, 0xc3, 0xf8, 0x0a, 0x20, 0xcd, 0x1b, 0x12, 0xd7, + 0xe1, 0x35, 0xd9, 0x73, 0x88, 0x46, 0xa5, 0x8f, 0x04, 0x55, 0xe0, 0xad, 0x10, 0x66, 0x5b, 0xc2, + 0x7b, 0x6a, 0xf4, 0xfd, 0xd0, 0xe2, 0x65, 0xb1, 0x25, 0x38, 0x16, 0x58, 0xdb, 0x01, 0xc4, 0x42, + 0x31, 0xf5, 
0xe1, 0x1d, 0x4e, 0x2b, 0x39, 0x43, 0x27, 0xfe, 0xdd, 0x0f, 0x60, 0x95, 0x31, 0x27, + 0x1f, 0xbd, 0x22, 0x1a, 0x32, 0x67, 0x18, 0xfb, 0xe2, 0x6b, 0xeb, 0x8d, 0xcb, 0xfb, 0x50, 0x8c, + 0xe7, 0x27, 0xce, 0x83, 0xc8, 0x50, 0x94, 0x60, 0xcd, 0x4a, 0xad, 0x5d, 0x67, 0x6d, 0xc6, 0x97, + 0x32, 0x4a, 0xb2, 0x76, 0xa7, 0xd9, 0xe8, 0xc9, 0xaa, 0x72, 0xdc, 0xea, 0x35, 0x8e, 0x64, 0x94, + 0x8a, 0xf7, 0xd5, 0x7f, 0x4d, 0x42, 0x69, 0xfa, 0x8a, 0x84, 0x7f, 0x00, 0xe7, 0xc3, 0xf7, 0x0c, + 0x8f, 0xfa, 0xea, 0x53, 0xc3, 0xe5, 0x5b, 0x66, 0x44, 0xc4, 0xf1, 0x15, 0x2d, 0xda, 0x66, 0xc0, + 0xea, 0x52, 0xff, 0x0b, 0xc3, 0x65, 0x1b, 0x62, 0x44, 0x7c, 0xdc, 0x84, 0x8b, 0x96, 0xad, 0x7a, + 0x3e, 0xb1, 0x74, 0xe2, 0xea, 0xea, 0xe4, 0x25, 0x49, 0x25, 0x9a, 0x46, 0x3d, 0xcf, 0x16, 0x47, + 0x55, 0x64, 0xe5, 0x5d, 0xcb, 0xee, 0x06, 0xe4, 0x49, 0x0d, 0xaf, 0x06, 0xd4, 0x99, 0x04, 0x4b, + 0x2d, 0x4b, 0xb0, 0x77, 0x20, 0x3f, 0x22, 0x8e, 0x4a, 0x2d, 0xdf, 0x3d, 0xe5, 0x8d, 0x71, 0x4e, + 0xc9, 0x8d, 0x88, 0x23, 0xb3, 0xf1, 0x9b, 0xb9, 0x9f, 0xfc, 0x23, 0x05, 0xc5, 0x78, 0x73, 0xcc, + 0xee, 0x1a, 0x1a, 0x3f, 0x47, 0x12, 0xbc, 0xd2, 0x7c, 0xf0, 0xd2, 0x56, 0xba, 0x52, 0x63, 0x07, + 0xcc, 0x7e, 0x56, 0xb4, 0xac, 0x8a, 0xd0, 0x64, 0x87, 0x3b, 0xab, 0x2d, 0x54, 0xb4, 0x08, 0x39, + 0x25, 0x18, 0xe1, 0x43, 0xc8, 0x3e, 0xf2, 0xb8, 0xed, 0x2c, 0xb7, 0xfd, 0xe1, 0xcb, 0x6d, 0xdf, + 0xef, 0x72, 0xe3, 0xf9, 0xfb, 0x5d, 0xb5, 0xd5, 0x56, 0x8e, 0xaa, 0x4d, 0x25, 0x50, 0xc7, 0x17, + 0x20, 0x6d, 0x92, 0xe7, 0xa7, 0xd3, 0x47, 0x11, 0x87, 0xce, 0x1a, 0xf8, 0x0b, 0x90, 0x7e, 0x4a, + 0xc9, 0xe3, 0xe9, 0x03, 0x80, 0x43, 0xaf, 0x31, 0xf5, 0xaf, 0x43, 0x86, 0xc7, 0x0b, 0x03, 0x04, + 0x11, 0x43, 0x6f, 0xe1, 0x1c, 0xa4, 0x6b, 0x6d, 0x85, 0xa5, 0x3f, 0x82, 0xa2, 0x40, 0xd5, 0x4e, + 0x43, 0xae, 0xc9, 0x28, 0x59, 0xbe, 0x05, 0x59, 0x11, 0x04, 0xb6, 0x35, 0xa2, 0x30, 0xa0, 0xb7, + 0x82, 0x61, 0x60, 0x23, 0x11, 0x4a, 0x8f, 0x8f, 0x0e, 0x64, 0x05, 0x25, 0xe3, 0xcb, 0xeb, 0x41, + 0x31, 0xde, 0x17, 0xbf, 0x99, 0x9c, 0xfa, 0x4b, 0x02, 0x0a, 0xb1, 0x3e, 0x97, 0x35, 0x28, 0xc4, + 0x34, 0xed, 0xa7, 0x2a, 0x31, 0x0d, 0xe2, 0x05, 0x49, 0x01, 0x1c, 0xaa, 0x32, 0xe4, 0xac, 0x8b, + 0xf6, 0x46, 0x9c, 0xff, 0x5d, 0x02, 0xd0, 0x6c, 0x8b, 0x39, 0xe3, 0x60, 0xe2, 0x3b, 0x75, 0xf0, + 0xb7, 0x09, 0x28, 0x4d, 0xf7, 0x95, 0x33, 0xee, 0x5d, 0xfa, 0x4e, 0xdd, 0xfb, 0x67, 0x12, 0x56, + 0xa7, 0xba, 0xc9, 0xb3, 0x7a, 0xf7, 0x15, 0xac, 0x1b, 0x3a, 0x1d, 0x39, 0xb6, 0x4f, 0x2d, 0xed, + 0x54, 0x35, 0xe9, 0x13, 0x6a, 0x4a, 0x65, 0x5e, 0x28, 0xae, 0xbf, 0xbc, 0x5f, 0xad, 0x34, 0x26, + 0x7a, 0x4d, 0xa6, 0xb6, 0xbf, 0xd1, 0xa8, 0xcb, 0x47, 0x9d, 0x76, 0x4f, 0x6e, 0xd5, 0x1e, 0xaa, + 0xc7, 0xad, 0x1f, 0xb7, 0xda, 0x5f, 0xb4, 0x14, 0x64, 0xcc, 0xd0, 0x5e, 0xe3, 0x56, 0xef, 0x00, + 0x9a, 0x75, 0x0a, 0x9f, 0x87, 0x45, 0x6e, 0xa1, 0xb7, 0xf0, 0x06, 0xac, 0xb5, 0xda, 0x6a, 0xb7, + 0x51, 0x97, 0x55, 0xf9, 0xde, 0x3d, 0xb9, 0xd6, 0xeb, 0x8a, 0x17, 0x88, 0x88, 0xdd, 0x9b, 0xde, + 0xd4, 0xbf, 0x49, 0xc1, 0xc6, 0x02, 0x4f, 0x70, 0x35, 0xb8, 0x3b, 0x88, 0xeb, 0xcc, 0xf7, 0xcf, + 0xe2, 0x7d, 0x85, 0x1d, 0xf9, 0x1d, 0xe2, 0xfa, 0xc1, 0x55, 0xe3, 0x0a, 0xb0, 0x28, 0x59, 0xbe, + 0xd1, 0x37, 0xa8, 0x1b, 0x3c, 0xd8, 0x88, 0x0b, 0xc5, 0xda, 0x04, 0x17, 0x6f, 0x36, 0xdf, 0x03, + 0xec, 0xd8, 0x9e, 0xe1, 0x1b, 0x4f, 0xa8, 0x6a, 0x58, 0xe1, 0xeb, 0x0e, 0xbb, 0x60, 0xa4, 0x15, + 0x14, 0x4a, 0x1a, 0x96, 0x1f, 0xb1, 0x2d, 0x3a, 0x20, 0x33, 0x6c, 0x56, 0xc0, 0x53, 0x0a, 0x0a, + 0x25, 0x11, 0xfb, 0x12, 0x14, 0x75, 0x7b, 0xcc, 0xba, 0x2e, 0xc1, 0x63, 0xe7, 0x45, 0x42, 0x29, + 0x08, 0x2c, 0xa2, 0x04, 0xfd, 0xf4, 
0xe4, 0x59, 0xa9, 0xa8, 0x14, 0x04, 0x26, 0x28, 0x97, 0x61, + 0x8d, 0x0c, 0x06, 0x2e, 0x33, 0x1e, 0x1a, 0x12, 0x37, 0x84, 0x52, 0x04, 0x73, 0xe2, 0xd6, 0x7d, + 0xc8, 0x85, 0x71, 0x60, 0x47, 0x32, 0x8b, 0x84, 0xea, 0x88, 0x6b, 0x6f, 0x72, 0x27, 0xaf, 0xe4, + 0xac, 0x50, 0x78, 0x09, 0x8a, 0x86, 0xa7, 0x4e, 0x5e, 0xc9, 0x93, 0xdb, 0xc9, 0x9d, 0x9c, 0x52, + 0x30, 0xbc, 0xe8, 0x85, 0xb1, 0xfc, 0x75, 0x12, 0x4a, 0xd3, 0xaf, 0xfc, 0xb8, 0x0e, 0x39, 0xd3, + 0xd6, 0x08, 0x4f, 0x2d, 0xf1, 0x13, 0xd3, 0xce, 0x2b, 0x7e, 0x18, 0xa8, 0x34, 0x03, 0xbe, 0x12, + 0x69, 0x6e, 0xfd, 0x2d, 0x01, 0xb9, 0x10, 0xc6, 0xe7, 0x20, 0xed, 0x10, 0x7f, 0xc8, 0xcd, 0x65, + 0x0e, 0x92, 0x28, 0xa1, 0xf0, 0x31, 0xc3, 0x3d, 0x87, 0x58, 0x3c, 0x05, 0x02, 0x9c, 0x8d, 0xd9, + 0xba, 0x9a, 0x94, 0xe8, 0xfc, 0xfa, 0x61, 0x8f, 0x46, 0xd4, 0xf2, 0xbd, 0x70, 0x5d, 0x03, 0xbc, + 0x16, 0xc0, 0xf8, 0x1a, 0xac, 0xfb, 0x2e, 0x31, 0xcc, 0x29, 0x6e, 0x9a, 0x73, 0x51, 0x28, 0x88, + 0xc8, 0xfb, 0x70, 0x21, 0xb4, 0xab, 0x53, 0x9f, 0x68, 0x43, 0xaa, 0x4f, 0x94, 0xb2, 0xfc, 0x99, + 0xe1, 0x7c, 0x40, 0xa8, 0x07, 0xf2, 0x50, 0xb7, 0xfc, 0xf7, 0x04, 0xac, 0x87, 0x17, 0x26, 0x3d, + 0x0a, 0xd6, 0x11, 0x00, 0xb1, 0x2c, 0xdb, 0x8f, 0x87, 0x6b, 0x3e, 0x95, 0xe7, 0xf4, 0x2a, 0xd5, + 0x48, 0x49, 0x89, 0x19, 0xd8, 0x1a, 0x01, 0x4c, 0x24, 0x4b, 0xc3, 0x76, 0x11, 0x0a, 0xc1, 0x4f, + 0x38, 0xfc, 0x77, 0x40, 0x71, 0xc5, 0x06, 0x01, 0xb1, 0x9b, 0x15, 0xde, 0x84, 0xcc, 0x09, 0x1d, + 0x18, 0x56, 0xf0, 0x30, 0x2b, 0x06, 0xe1, 0x43, 0x48, 0x3a, 0x7a, 0x08, 0x39, 0xf8, 0x19, 0x6c, + 0x68, 0xf6, 0x68, 0xd6, 0xdd, 0x03, 0x34, 0x73, 0xcd, 0xf7, 0x3e, 0x4f, 0x7c, 0x09, 0x93, 0x16, + 0xf3, 0x7f, 0x89, 0xc4, 0xef, 0x93, 0xa9, 0xc3, 0xce, 0xc1, 0x1f, 0x92, 0x5b, 0x87, 0x42, 0xb5, + 0x13, 0xce, 0x54, 0xa1, 0x7d, 0x93, 0x6a, 0xcc, 0xfb, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xa3, + 0x58, 0x22, 0x30, 0xdf, 0x1c, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 000000000..3b95a7757 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,772 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +/* +Package descriptor is a generated protocol buffer package. + +It is generated from these files: + descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + ExtensionRangeOptions + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ +package descriptor + +import fmt "fmt" +import strings "strings" +import proto "github.com/gogo/protobuf/proto" +import sort "sort" +import strconv "strconv" +import reflect "reflect" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, 
"}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, 
"&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = 
append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 23) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", 
this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = 
append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name != nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { 
+ s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = 
append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m proto.Message) string { + e := proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 000000000..e0846a357 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' 
{ + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range 
msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} -- cgit v1.2.3 From 7f8172edf583e0d26bee5e06578a442c7507ba6f Mon Sep 17 00:00:00 2001 From: 
Adin Scannell Date: Tue, 5 May 2020 18:06:46 -0700 Subject: Restructure shim packages for merging. --- .gitignore | 1 - .travis.yml | 29 - CODEOWNERS | 1 - CONTRIBUTING.md | 28 - LICENSE | 202 ------- Makefile | 42 -- README.md | 26 - cmd/containerd-shim-runsc-v1/main.go | 24 - cmd/gvisor-containerd-shim/config.go | 42 -- cmd/gvisor-containerd-shim/main.go | 320 ---------- docs/README.md | 9 - docs/configure-containerd-shim-runsc-v1.md | 72 --- docs/configure-gvisor-containerd-shim.md | 42 -- docs/runtime-handler-quickstart.md | 251 -------- docs/runtime-handler-shim-v2-quickstart.md | 232 -------- docs/untrusted-workload-quickstart.md | 212 ------- go.mod | 35 -- go.sum | 67 --- pkg/go-runsc/runsc.go | 513 ---------------- pkg/go-runsc/utils.go | 46 -- pkg/shim/runsc/runsc.go | 511 ++++++++++++++++ pkg/shim/runsc/utils.go | 44 ++ pkg/shim/v1/proc/deleted_state.go | 49 ++ pkg/shim/v1/proc/exec.go | 282 +++++++++ pkg/shim/v1/proc/exec_state.go | 154 +++++ pkg/shim/v1/proc/init.go | 462 ++++++++++++++ pkg/shim/v1/proc/init_state.go | 182 ++++++ pkg/shim/v1/proc/io.go | 167 ++++++ pkg/shim/v1/proc/process.go | 37 ++ pkg/shim/v1/proc/types.go | 70 +++ pkg/shim/v1/proc/utils.go | 92 +++ pkg/shim/v1/shim/platform.go | 110 ++++ pkg/shim/v1/shim/service.go | 579 ++++++++++++++++++ pkg/shim/v1/utils/utils.go | 57 ++ pkg/shim/v1/utils/volumes.go | 156 +++++ pkg/shim/v1/utils/volumes_test.go | 309 ++++++++++ pkg/shim/v2/epoll.go | 128 ++++ pkg/shim/v2/options/options.go | 33 + pkg/shim/v2/service.go | 821 +++++++++++++++++++++++++ pkg/shim/v2/service_linux.go | 112 ++++ pkg/v1/proc/deleted_state.go | 54 -- pkg/v1/proc/exec.go | 284 --------- pkg/v1/proc/exec_state.go | 156 ----- pkg/v1/proc/init.go | 464 --------------- pkg/v1/proc/init_state.go | 184 ------ pkg/v1/proc/io.go | 169 ------ pkg/v1/proc/process.go | 39 -- pkg/v1/proc/types.go | 72 --- pkg/v1/proc/utils.go | 94 --- pkg/v1/shim/platform.go | 112 ---- pkg/v1/shim/service.go | 581 ------------------ pkg/v1/utils/utils.go | 59 -- pkg/v1/utils/volumes.go | 158 ----- pkg/v1/utils/volumes_test.go | 311 ---------- pkg/v2/epoll.go | 129 ---- pkg/v2/options/options.go | 34 -- pkg/v2/service.go | 826 -------------------------- pkg/v2/service_linux.go | 111 ---- shim/README.md | 16 + shim/configure-containerd-shim-runsc-v1.md | 72 +++ shim/configure-gvisor-containerd-shim.md | 42 ++ shim/runtime-handler-quickstart.md | 251 ++++++++ shim/runtime-handler-shim-v2-quickstart.md | 232 ++++++++ shim/untrusted-workload-quickstart.md | 212 +++++++ shim/v1/main.go | 26 + shim/v2/config.go | 40 ++ shim/v2/main.go | 318 ++++++++++ test/e2e/containerd-install.sh | 44 -- test/e2e/crictl-install.sh | 17 - test/e2e/run-container.sh | 30 - test/e2e/runsc-install.sh | 8 - test/e2e/runtime-handler-shim-v2/install.sh | 21 - test/e2e/runtime-handler-shim-v2/test.sh | 34 -- test/e2e/runtime-handler-shim-v2/validate.sh | 7 - test/e2e/runtime-handler/install.sh | 24 - test/e2e/runtime-handler/test.sh | 33 - test/e2e/runtime-handler/usage.sh | 30 - test/e2e/runtimeclass-install.sh | 33 - test/e2e/shim-install.sh | 28 - test/e2e/untrusted-workload/install.sh | 27 - test/e2e/untrusted-workload/test.sh | 33 - test/e2e/untrusted-workload/usage.sh | 33 - test/e2e/validate.sh | 17 - test/shim/containerd-install.sh | 44 ++ test/shim/crictl-install.sh | 17 + test/shim/run-container.sh | 30 + test/shim/runsc-install.sh | 8 + test/shim/runtime-handler-shim-v2/install.sh | 21 + test/shim/runtime-handler-shim-v2/test.sh | 34 ++ test/shim/runtime-handler-shim-v2/validate.sh | 7 + 
test/shim/runtime-handler/install.sh | 24 + test/shim/runtime-handler/test.sh | 33 + test/shim/runtime-handler/usage.sh | 30 + test/shim/runtimeclass-install.sh | 33 + test/shim/shim-install.sh | 28 + test/shim/untrusted-workload/install.sh | 27 + test/shim/untrusted-workload/test.sh | 33 + test/shim/untrusted-workload/usage.sh | 33 + test/shim/validate.sh | 17 + 99 files changed, 5983 insertions(+), 6450 deletions(-) delete mode 100644 .gitignore delete mode 100644 .travis.yml delete mode 100644 CODEOWNERS delete mode 100644 CONTRIBUTING.md delete mode 100644 LICENSE delete mode 100644 Makefile delete mode 100644 README.md delete mode 100644 cmd/containerd-shim-runsc-v1/main.go delete mode 100644 cmd/gvisor-containerd-shim/config.go delete mode 100644 cmd/gvisor-containerd-shim/main.go delete mode 100644 docs/README.md delete mode 100644 docs/configure-containerd-shim-runsc-v1.md delete mode 100644 docs/configure-gvisor-containerd-shim.md delete mode 100644 docs/runtime-handler-quickstart.md delete mode 100644 docs/runtime-handler-shim-v2-quickstart.md delete mode 100644 docs/untrusted-workload-quickstart.md delete mode 100644 go.mod delete mode 100644 go.sum delete mode 100644 pkg/go-runsc/runsc.go delete mode 100644 pkg/go-runsc/utils.go create mode 100644 pkg/shim/runsc/runsc.go create mode 100644 pkg/shim/runsc/utils.go create mode 100644 pkg/shim/v1/proc/deleted_state.go create mode 100644 pkg/shim/v1/proc/exec.go create mode 100644 pkg/shim/v1/proc/exec_state.go create mode 100644 pkg/shim/v1/proc/init.go create mode 100644 pkg/shim/v1/proc/init_state.go create mode 100644 pkg/shim/v1/proc/io.go create mode 100644 pkg/shim/v1/proc/process.go create mode 100644 pkg/shim/v1/proc/types.go create mode 100644 pkg/shim/v1/proc/utils.go create mode 100644 pkg/shim/v1/shim/platform.go create mode 100644 pkg/shim/v1/shim/service.go create mode 100644 pkg/shim/v1/utils/utils.go create mode 100644 pkg/shim/v1/utils/volumes.go create mode 100644 pkg/shim/v1/utils/volumes_test.go create mode 100644 pkg/shim/v2/epoll.go create mode 100644 pkg/shim/v2/options/options.go create mode 100644 pkg/shim/v2/service.go create mode 100644 pkg/shim/v2/service_linux.go delete mode 100644 pkg/v1/proc/deleted_state.go delete mode 100644 pkg/v1/proc/exec.go delete mode 100644 pkg/v1/proc/exec_state.go delete mode 100644 pkg/v1/proc/init.go delete mode 100644 pkg/v1/proc/init_state.go delete mode 100644 pkg/v1/proc/io.go delete mode 100644 pkg/v1/proc/process.go delete mode 100644 pkg/v1/proc/types.go delete mode 100644 pkg/v1/proc/utils.go delete mode 100644 pkg/v1/shim/platform.go delete mode 100644 pkg/v1/shim/service.go delete mode 100644 pkg/v1/utils/utils.go delete mode 100644 pkg/v1/utils/volumes.go delete mode 100644 pkg/v1/utils/volumes_test.go delete mode 100644 pkg/v2/epoll.go delete mode 100644 pkg/v2/options/options.go delete mode 100644 pkg/v2/service.go delete mode 100644 pkg/v2/service_linux.go create mode 100644 shim/README.md create mode 100644 shim/configure-containerd-shim-runsc-v1.md create mode 100644 shim/configure-gvisor-containerd-shim.md create mode 100644 shim/runtime-handler-quickstart.md create mode 100644 shim/runtime-handler-shim-v2-quickstart.md create mode 100644 shim/untrusted-workload-quickstart.md create mode 100644 shim/v1/main.go create mode 100644 shim/v2/config.go create mode 100644 shim/v2/main.go delete mode 100755 test/e2e/containerd-install.sh delete mode 100755 test/e2e/crictl-install.sh delete mode 100755 test/e2e/run-container.sh delete mode 100755 
test/e2e/runsc-install.sh delete mode 100755 test/e2e/runtime-handler-shim-v2/install.sh delete mode 100755 test/e2e/runtime-handler-shim-v2/test.sh delete mode 100755 test/e2e/runtime-handler-shim-v2/validate.sh delete mode 100755 test/e2e/runtime-handler/install.sh delete mode 100755 test/e2e/runtime-handler/test.sh delete mode 100755 test/e2e/runtime-handler/usage.sh delete mode 100755 test/e2e/runtimeclass-install.sh delete mode 100755 test/e2e/shim-install.sh delete mode 100755 test/e2e/untrusted-workload/install.sh delete mode 100755 test/e2e/untrusted-workload/test.sh delete mode 100755 test/e2e/untrusted-workload/usage.sh delete mode 100755 test/e2e/validate.sh create mode 100755 test/shim/containerd-install.sh create mode 100755 test/shim/crictl-install.sh create mode 100755 test/shim/run-container.sh create mode 100755 test/shim/runsc-install.sh create mode 100755 test/shim/runtime-handler-shim-v2/install.sh create mode 100755 test/shim/runtime-handler-shim-v2/test.sh create mode 100755 test/shim/runtime-handler-shim-v2/validate.sh create mode 100755 test/shim/runtime-handler/install.sh create mode 100755 test/shim/runtime-handler/test.sh create mode 100755 test/shim/runtime-handler/usage.sh create mode 100755 test/shim/runtimeclass-install.sh create mode 100755 test/shim/shim-install.sh create mode 100755 test/shim/untrusted-workload/install.sh create mode 100755 test/shim/untrusted-workload/test.sh create mode 100755 test/shim/untrusted-workload/usage.sh create mode 100755 test/shim/validate.sh (limited to 'pkg/shim') diff --git a/.gitignore b/.gitignore deleted file mode 100644 index ae3c17260..000000000 --- a/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/bin/ diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index d9ef5e33f..000000000 --- a/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -dist: bionic -sudo: required -language: go -go: "1.14.x" - -env: - - CONTAINERD_VERSION=1.1.8 RUNSC_VERSION=release/20200219.0 TEST=untrusted-workload - - CONTAINERD_VERSION=1.2.13 RUNSC_VERSION=release/20200219.0 TEST=untrusted-workload - - CONTAINERD_VERSION=1.2.13 RUNSC_VERSION=release/20200219.0 TEST=runtime-handler - - CONTAINERD_VERSION=1.2.13 RUNSC_VERSION=release/20200219.0 TEST=runtime-handler-shim-v2 - - CONTAINERD_VERSION=1.3.3 RUNSC_VERSION=release/20200219.0 TEST=runtime-handler - - CONTAINERD_VERSION=1.3.3 RUNSC_VERSION=release/20200219.0 TEST=runtime-handler-shim-v2 - -go_import_path: github.com/google/gvisor-containerd-shim - -addons: - apt: - packages: - - socat - - conntrack - - ipset - - libseccomp-dev - -before_install: - - uname -r - -script: - - make test - - ./test/e2e/${TEST}/test.sh diff --git a/CODEOWNERS b/CODEOWNERS deleted file mode 100644 index 7bf84e6dd..000000000 --- a/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ -* @google/gvisor diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 939e5341e..000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,28 +0,0 @@ -# How to Contribute - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement. You (or your employer) retain the copyright to your contribution; -this simply gives us permission to use and redistribute your contributions as -part of the project. Head over to to see -your current agreements on file or to sign a new one. 
- -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. - -## Code reviews - -All submissions, including submissions by project members, require review. We -use GitHub pull requests for this purpose. Consult -[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more -information on using pull requests. - -## Community Guidelines - -This project follows [Google's Open Source Community -Guidelines](https://opensource.google.com/conduct/). diff --git a/LICENSE b/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Makefile b/Makefile deleted file mode 100644 index abfadc1b5..000000000 --- a/Makefile +++ /dev/null @@ -1,42 +0,0 @@ -# Base path used to install. -DESTDIR=/usr/local -GC=go -GO_BUILD_FLAGS= -GO_TAGS= -GO_LDFLAGS=-ldflags '-s -w -extldflags "-static"' -SOURCES=$(shell find cmd/ pkg/ -name '*.go') -DEPLOY_PATH=cri-containerd-staging/gvisor-containerd-shim -VERSION=$(shell git rev-parse HEAD) -GO_MODULE=on - -all: binaries - -binaries: bin/gvisor-containerd-shim bin/containerd-shim-runsc-v1 - -bin/gvisor-containerd-shim: $(SOURCES) - GO111MODULE=${GO_MODULE} CGO_ENABLED=0 ${GC} build ${GO_BUILD_FLAGS} -o bin/gvisor-containerd-shim ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/gvisor-containerd-shim - -bin/containerd-shim-runsc-v1: $(SOURCES) - GO111MODULE=${GO_MODULE} CGO_ENABLED=0 ${GC} build ${GO_BUILD_FLAGS} -o bin/containerd-shim-runsc-v1 ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runsc-v1 - -install: bin/gvisor-containerd-shim - mkdir -p $(DESTDIR)/bin - install bin/gvisor-containerd-shim $(DESTDIR)/bin - install bin/containerd-shim-runsc-v1 $(DESTDIR)/bin - -uninstall: - rm -f $(DESTDIR)/bin/gvisor-containerd-shim - rm -f $(DESTDIR)/bin/containerd-shim-runsc-v1 - -clean: - rm -rf bin/* - -push: binaries - gsutil cp ./bin/gvisor-containerd-shim gs://$(DEPLOY_PATH)/gvisor-containerd-shim-$(VERSION) - gsutil cp ./bin/containerd-shim-runsc-v1 gs://$(DEPLOY_PATH)/containerd-shim-runsc-v1-$(VERSION) - echo "$(VERSION)" | gsutil cp - "gs://$(DEPLOY_PATH)/latest" - -.PHONY: test - -test: - GO111MODULE=${GO_MODULE} CGO_ENABLED=0 ${GC} test -v ./pkg/... ${SHIM_GO_LDFLAGS} ${GO_TAGS} diff --git a/README.md b/README.md deleted file mode 100644 index 40a474c4f..000000000 --- a/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# gvisor-containerd-shim - -[![Build Status](https://travis-ci.org/google/gvisor-containerd-shim.svg?branch=master)](https://travis-ci.org/google/gvisor-containerd-shim) -[![Go Report Card](https://goreportcard.com/badge/github.com/google/gvisor-containerd-shim)](https://goreportcard.com/report/github.com/google/gvisor-containerd-shim) - -gvisor-containerd-shim is a containerd shim for [gVisor](https://github.com/google/gvisor/). It implements the containerd v1 shim API. 
It can be used as a drop-in replacement for [containerd-shim](https://github.com/containerd/containerd/tree/master/cmd/containerd-shim) (though containerd-shim must still be installed). It allows the use of both gVisor (runsc) and normal containers in the same containerd installation by deferring to the runc shim if the desired runtime engine is not runsc. - -## Requirements - -- gvisor (runsc) >= 2018-12-07 -- containerd, containerd-shim >= 1.1 - -## Installation - -- [Untrusted Workload Quick Start (containerd >=1.1)](docs/untrusted-workload-quickstart.md) -- [Runtime Handler/RuntimeClass Quick Start (containerd >=1.2)](docs/runtime-handler-quickstart.md) -- [Runtime Handler/RuntimeClass Quick Start (shim v2) (containerd >=1.2)](docs/runtime-handler-shim-v2-quickstart.md) - -## Configuration - -- [Configure containerd-shim-runsc-v1 (shim v2) (containerd >= 1.3)](docs/configure-containerd-shim-runsc-v1.md) -- [Configure gvisor-containerd-shim (shim v1) (containerd <= 1.2)](docs/configure-gvisor-containerd-shim.md) - -# Contributing - -See [CONTRIBUTING.md](CONTRIBUTING.md). diff --git a/cmd/containerd-shim-runsc-v1/main.go b/cmd/containerd-shim-runsc-v1/main.go deleted file mode 100644 index ec73edbd2..000000000 --- a/cmd/containerd-shim-runsc-v1/main.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - Copyright The containerd Authors. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package main - -import ( - "github.com/containerd/containerd/runtime/v2/shim" - - runsc "github.com/google/gvisor-containerd-shim/pkg/v2" -) - -func main() { - shim.Run("io.containerd.runsc.v1", runsc.New) -} diff --git a/cmd/gvisor-containerd-shim/config.go b/cmd/gvisor-containerd-shim/config.go deleted file mode 100644 index f03690304..000000000 --- a/cmd/gvisor-containerd-shim/config.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import "github.com/BurntSushi/toml" - -// config is the configuration for gvisor containerd shim. -type config struct { - // RuncShim is the shim binary path for standard containerd-shim for runc. - // When the runtime is `runc`, gvisor containerd shim will exec current - // process to standard containerd-shim. This is a work around for containerd - // 1.1. In containerd 1.2, containerd will choose different containerd-shims - // based on runtime. - RuncShim string `toml:"runc_shim"` - // RunscConfig is configuration for runsc. The key value will be converted - // to runsc flags --key=value directly. 
- RunscConfig map[string]string `toml:"runsc_config"` -} - -// loadConfig load gvisor containerd shim config from config file. -func loadConfig(path string) (*config, error) { - var c config - _, err := toml.DecodeFile(path, &c) - if err != nil { - return &c, err - } - return &c, nil -} diff --git a/cmd/gvisor-containerd-shim/main.go b/cmd/gvisor-containerd-shim/main.go deleted file mode 100644 index ea26aa105..000000000 --- a/cmd/gvisor-containerd-shim/main.go +++ /dev/null @@ -1,320 +0,0 @@ -/* -Copyright The containerd Authors. -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "bytes" - "context" - "flag" - "fmt" - "net" - "os" - "os/exec" - "os/signal" - "path/filepath" - "runtime" - "runtime/debug" - "strings" - "sync" - "syscall" - "time" - - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/runtime/v1/linux/proc" - containerdshim "github.com/containerd/containerd/runtime/v1/shim" - shimapi "github.com/containerd/containerd/runtime/v1/shim/v1" - "github.com/containerd/ttrpc" - "github.com/containerd/typeurl" - ptypes "github.com/gogo/protobuf/types" - "github.com/opencontainers/runc/libcontainer/system" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" - - runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" - "github.com/google/gvisor-containerd-shim/pkg/v1/shim" -) - -var ( - debugFlag bool - namespaceFlag string - socketFlag string - addressFlag string - workdirFlag string - runtimeRootFlag string - containerdBinaryFlag string - shimConfigFlag string -) - -// ShimConfigPath is the default shim config file path. -const ShimConfigPath = "/etc/containerd/gvisor-containerd-shim.toml" - -func init() { - flag.BoolVar(&debugFlag, "debug", false, "enable debug output in logs") - flag.StringVar(&namespaceFlag, "namespace", "", "namespace that owns the shim") - flag.StringVar(&socketFlag, "socket", "", "abstract socket path to serve") - flag.StringVar(&addressFlag, "address", "", "grpc address back to main containerd") - flag.StringVar(&workdirFlag, "workdir", "", "path used to store large temporary data") - // Containerd defaults to runc, unless another runtime is explicitly specified. - // We keep the same default to make the default behavior consistent. - flag.StringVar(&runtimeRootFlag, "runtime-root", proc.RuncRoot, "root directory for the runtime") - // currently, the `containerd publish` utility is embedded in the daemon binary. - // The daemon invokes `containerd-shim -containerd-binary ...` with its own os.Executable() path. - flag.StringVar(&containerdBinaryFlag, "containerd-binary", "containerd", "path to containerd binary (used for `containerd publish`)") - flag.StringVar(&shimConfigFlag, "config", ShimConfigPath, "path to the shim configuration file") - flag.Parse() -} - -func main() { - // This is a hack. Exec current process to run standard containerd-shim - // if runtime root is not `runsc`.
We don't need this for shim v2 api. - if filepath.Base(runtimeRootFlag) != "runsc" { - if err := executeRuncShim(); err != nil { - fmt.Fprintf(os.Stderr, "gvisor-containerd-shim: %s\n", err) - os.Exit(1) - } - } - - debug.SetGCPercent(40) - go func() { - for range time.Tick(30 * time.Second) { - debug.FreeOSMemory() - } - }() - - if debugFlag { - logrus.SetLevel(logrus.DebugLevel) - } - - if os.Getenv("GOMAXPROCS") == "" { - // If GOMAXPROCS hasn't been set, we default to a value of 2 to reduce - // the number of Go stacks present in the shim. - runtime.GOMAXPROCS(2) - } - - // Run regular shim if needed. - if err := executeShim(); err != nil { - fmt.Fprintf(os.Stderr, "gvisor-containerd-shim: %s\n", err) - os.Exit(1) - } -} - -// executeRuncShim execs current process to a containerd-shim process and -// retains all flags and envs. -func executeRuncShim() error { - c, err := loadConfig(shimConfigFlag) - if err != nil && !os.IsNotExist(err) { - return errors.Wrap(err, "failed to load shim config") - } - shimPath := c.RuncShim - if shimPath == "" { - shimPath, err = exec.LookPath("containerd-shim") - if err != nil { - return errors.Wrapf(err, "lookup containerd-shim") - } - } - - args := append([]string{shimPath}, os.Args[1:]...) - if err := syscall.Exec(shimPath, args, os.Environ()); err != nil { - return errors.Wrapf(err, "exec containerd-shim %q", shimPath) - } - return nil -} - -func executeShim() error { - // start handling signals as soon as possible so that things are properly reaped - // or if runtime exits before we hit the handler - signals, err := setupSignals() - if err != nil { - return err - } - dump := make(chan os.Signal, 32) - signal.Notify(dump, syscall.SIGUSR1) - - path, err := os.Getwd() - if err != nil { - return err - } - server, err := ttrpc.NewServer(ttrpc.WithServerHandshaker(ttrpc.UnixSocketRequireSameUser())) - if err != nil { - return errors.Wrap(err, "failed creating server") - } - c, err := loadConfig(shimConfigFlag) - if err != nil && !os.IsNotExist(err) { - return errors.Wrap(err, "failed to load shim config") - } - sv, err := shim.NewService( - shim.Config{ - Path: path, - Namespace: namespaceFlag, - WorkDir: workdirFlag, - RuntimeRoot: runtimeRootFlag, - RunscConfig: c.RunscConfig, - }, - &remoteEventsPublisher{address: addressFlag}, - ) - if err != nil { - return err - } - logrus.Debug("registering ttrpc server") - shimapi.RegisterShimService(server, sv) - - socket := socketFlag - if err := serve(server, socket); err != nil { - return err - } - logger := logrus.WithFields(logrus.Fields{ - "pid": os.Getpid(), - "path": path, - "namespace": namespaceFlag, - }) - go func() { - for range dump { - dumpStacks(logger) - } - }() - return handleSignals(logger, signals, server, sv) -} - -// serve serves the ttrpc API over a unix socket at the provided path -// this function does not block -func serve(server *ttrpc.Server, path string) error { - var ( - l net.Listener - err error - ) - if path == "" { - l, err = net.FileListener(os.NewFile(3, "socket")) - path = "[inherited from parent]" - } else { - if len(path) > 106 { - return errors.Errorf("%q: unix socket path too long (> 106)", path) - } - l, err = net.Listen("unix", "\x00"+path) - } - if err != nil { - return err - } - logrus.WithField("socket", path).Debug("serving api on unix socket") - go func() { - defer l.Close() - if err := server.Serve(context.Background(), l); err != nil && - !strings.Contains(err.Error(), "use of closed network connection") { - logrus.WithError(err).Fatal("gvisor-containerd-shim: ttrpc 
server failure") - } - }() - return nil -} - -// setupSignals creates a new signal handler for all signals and sets the shim as a -// sub-reaper so that the container processes are reparented -func setupSignals() (chan os.Signal, error) { - signals := make(chan os.Signal, 32) - signal.Notify(signals, unix.SIGTERM, unix.SIGINT, unix.SIGCHLD, unix.SIGPIPE) - // make sure runc is setup to use the monitor - // for waiting on processes - // TODO(random-liu): Move shim/reaper.go to a separate package. - runsc.Monitor = containerdshim.Default - // set the shim as the subreaper for all orphaned processes created by the container - if err := system.SetSubreaper(1); err != nil { - return nil, err - } - return signals, nil -} - -func handleSignals(logger *logrus.Entry, signals chan os.Signal, server *ttrpc.Server, sv *shim.Service) error { - var ( - termOnce sync.Once - done = make(chan struct{}) - ) - - for { - select { - case <-done: - return nil - case s := <-signals: - switch s { - case unix.SIGCHLD: - if err := containerdshim.Reap(); err != nil { - logger.WithError(err).Error("reap exit status") - } - case unix.SIGTERM, unix.SIGINT: - go termOnce.Do(func() { - ctx := context.TODO() - if err := server.Shutdown(ctx); err != nil { - logger.WithError(err).Error("failed to shutdown server") - } - // Ensure our child is dead if any - sv.Kill(ctx, &shimapi.KillRequest{ - Signal: uint32(syscall.SIGKILL), - All: true, - }) - sv.Delete(context.Background(), &ptypes.Empty{}) - close(done) - }) - case unix.SIGPIPE: - } - } - } -} - -func dumpStacks(logger *logrus.Entry) { - var ( - buf []byte - stackSize int - ) - bufferLen := 16384 - for stackSize == len(buf) { - buf = make([]byte, bufferLen) - stackSize = runtime.Stack(buf, true) - bufferLen *= 2 - } - buf = buf[:stackSize] - logger.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) -} - -type remoteEventsPublisher struct { - address string -} - -func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error { - ns, _ := namespaces.Namespace(ctx) - encoded, err := typeurl.MarshalAny(event) - if err != nil { - return err - } - data, err := encoded.Marshal() - if err != nil { - return err - } - cmd := exec.CommandContext(ctx, containerdBinaryFlag, "--address", l.address, "publish", "--topic", topic, "--namespace", ns) - cmd.Stdin = bytes.NewReader(data) - c, err := containerdshim.Default.Start(cmd) - if err != nil { - return err - } - status, err := containerdshim.Default.Wait(cmd, c) - if err != nil { - return err - } - if status != 0 { - return errors.New("failed to publish event") - } - return nil -} diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index f1091f934..000000000 --- a/docs/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# gvisor-containerd-shim docs - -Everything you need to know about gvisor-containerd-shim - -- [Untrusted Workload Quick Start (containerd >=1.1)](untrusted-workload-quickstart.md) -- [Runtime Handler Quick Start (containerd >=1.2)](runtime-handler-quickstart.md) -- [Runtime Handler Quick Start (shim v2) (containerd >=1.2)](runtime-handler-shim-v2-quickstart.md) -- [Configure containerd-shim-runsc-v1 (shim v2) (containerd >= 1.3)](configure-containerd-shim-runsc-v1.md) -- [Configure gvisor-containerd-shim (shim v1) (containerd <= 1.2)](configure-gvisor-containerd-shim.md) diff --git a/docs/configure-containerd-shim-runsc-v1.md b/docs/configure-containerd-shim-runsc-v1.md deleted file mode 100644 index 977ceacbd..000000000 --- 
a/docs/configure-containerd-shim-runsc-v1.md +++ /dev/null @@ -1,72 +0,0 @@ -# Configure containerd-shim-runsc-v1 (Shim V2) - -This document describes how to configure runtime options for -`containerd-shim-runsc-v1`. This is follows on to the instructions of -[Runtime Handler Quick Start (shim v2) (containerd >=1.2)](runtime-handler-shim-v2-quickstart.md) -and requires containerd 1.3 or later. - -### Update `/etc/containerd/config.toml` to point to a configuration file for `containerd-shim-runsc-v1`. - -`containerd-shim-runsc-v1` supports a few different configuration options based -on the version of containerd that is used. For versions >= 1.3, it supports a -configurable config path in the containerd runtime configuration. - -```shell -{ # Step 1: Update runtime options for runsc in containerd config.toml -cat < -[embedmd]:# (../test/e2e/shim-install.sh shell /{ # Step 1\(dev\)/ /^}/) -```shell -{ # Step 1(dev): Build and install gvisor-containerd-shim and containerd-shim-runsc-v1 - make - sudo make install -} -``` - -### Configure containerd - -1. Update `/etc/containerd/config.toml`. Make sure `containerd-shim-runsc-v1` is - in `${PATH}`. - -[embedmd]:# (../test/e2e/runtime-handler-shim-v2/install.sh shell /{ # Step 1/ /^}/) -```shell -{ # Step 1: Create containerd config.toml -cat <: -// -func (r *Runsc) runOrError(cmd *exec.Cmd) error { - if cmd.Stdout != nil || cmd.Stderr != nil { - ec, err := Monitor.Start(cmd) - if err != nil { - return err - } - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) - } - return err - } - data, err := cmdOutput(cmd, true) - if err != nil { - return fmt.Errorf("%s: %s", err, data) - } - return nil -} - -func (r *Runsc) command(context context.Context, args ...string) *exec.Cmd { - command := r.Command - if command == "" { - command = DefaultCommand - } - cmd := exec.CommandContext(context, command, append(r.args(), args...)...) - cmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: r.Setpgid, - } - if r.PdeathSignal != 0 { - cmd.SysProcAttr.Pdeathsig = r.PdeathSignal - } - - return cmd -} - -func cmdOutput(cmd *exec.Cmd, combined bool) ([]byte, error) { - b := getBuf() - defer putBuf(b) - - cmd.Stdout = b - if combined { - cmd.Stderr = b - } - ec, err := Monitor.Start(cmd) - if err != nil { - return nil, err - } - - status, err := Monitor.Wait(cmd, ec) - if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) - } - - return b.Bytes(), err -} diff --git a/pkg/go-runsc/utils.go b/pkg/go-runsc/utils.go deleted file mode 100644 index e4c3c937a..000000000 --- a/pkg/go-runsc/utils.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The containerd Authors. -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package runsc - -import ( - "bytes" - "strings" - "sync" -) - -var bytesBufferPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(nil) - }, -} - -func getBuf() *bytes.Buffer { - return bytesBufferPool.Get().(*bytes.Buffer) -} - -func putBuf(b *bytes.Buffer) { - b.Reset() - bytesBufferPool.Put(b) -} - -// FormatLogPath parses runsc config, and fill in %ID% in the log path. -func FormatLogPath(id string, config map[string]string) { - if path, ok := config["debug-log"]; ok { - config["debug-log"] = strings.Replace(path, "%ID%", id, -1) - } -} diff --git a/pkg/shim/runsc/runsc.go b/pkg/shim/runsc/runsc.go new file mode 100644 index 000000000..41ba9c3af --- /dev/null +++ b/pkg/shim/runsc/runsc.go @@ -0,0 +1,511 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2018 The gVisor authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runsc + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "syscall" + "time" + + runc "github.com/containerd/go-runc" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +var Monitor runc.ProcessMonitor = runc.Monitor + +// DefaultCommand is the default command for Runsc +const DefaultCommand = "runsc" + +// Runsc is the client to the runsc cli +type Runsc struct { + Command string + PdeathSignal syscall.Signal + Setpgid bool + Root string + Log string + LogFormat runc.Format + Config map[string]string +} + +// List returns all containers created inside the provided runsc root directory +func (r *Runsc) List(context context.Context) ([]*runc.Container, error) { + data, err := cmdOutput(r.command(context, "list", "--format=json"), false) + if err != nil { + return nil, err + } + var out []*runc.Container + if err := json.Unmarshal(data, &out); err != nil { + return nil, err + } + return out, nil +} + +// State returns the state for the container provided by id +func (r *Runsc) State(context context.Context, id string) (*runc.Container, error) { + data, err := cmdOutput(r.command(context, "state", id), true) + if err != nil { + return nil, fmt.Errorf("%s: %s", err, data) + } + var c runc.Container + if err := json.Unmarshal(data, &c); err != nil { + return nil, err + } + return &c, nil +} + +type CreateOpts struct { + runc.IO + // PidFile is a path to where a pid file should be created + PidFile string + ConsoleSocket runc.ConsoleSocket + // UserLog is a path to where runsc user log should be generated. 
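+	// It is forwarded to `runsc create` via the --user-log flag (see CreateOpts.args).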
+ UserLog string +} + +func (o *CreateOpts) args() (out []string, err error) { + if o.PidFile != "" { + abs, err := filepath.Abs(o.PidFile) + if err != nil { + return nil, err + } + out = append(out, "--pid-file", abs) + } + if o.ConsoleSocket != nil { + out = append(out, "--console-socket", o.ConsoleSocket.Path()) + } + if o.UserLog != "" { + out = append(out, "--user-log", o.UserLog) + } + return out, nil +} + +// Create creates a new container and returns its pid if it was created successfully +func (r *Runsc) Create(context context.Context, id, bundle string, opts *CreateOpts) error { + args := []string{"create", "--bundle", bundle} + if opts != nil { + oargs, err := opts.args() + if err != nil { + return err + } + args = append(args, oargs...) + } + cmd := r.command(context, append(args, id)...) + if opts != nil && opts.IO != nil { + opts.Set(cmd) + } + + if cmd.Stdout == nil && cmd.Stderr == nil { + data, err := cmdOutput(cmd, true) + if err != nil { + return fmt.Errorf("%s: %s", err, data) + } + return nil + } + ec, err := Monitor.Start(cmd) + if err != nil { + return err + } + if opts != nil && opts.IO != nil { + if c, ok := opts.IO.(runc.StartCloser); ok { + if err := c.CloseAfterStart(); err != nil { + return err + } + } + } + status, err := Monitor.Wait(cmd, ec) + if err == nil && status != 0 { + err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) + } + + return err +} + +// Start will start an already created container +func (r *Runsc) Start(context context.Context, id string, cio runc.IO) error { + cmd := r.command(context, "start", id) + if cio != nil { + cio.Set(cmd) + } + + if cmd.Stdout == nil && cmd.Stderr == nil { + data, err := cmdOutput(cmd, true) + if err != nil { + return fmt.Errorf("%s: %s", err, data) + } + return nil + } + + ec, err := Monitor.Start(cmd) + if err != nil { + return err + } + if cio != nil { + if c, ok := cio.(runc.StartCloser); ok { + if err := c.CloseAfterStart(); err != nil { + return err + } + } + } + status, err := Monitor.Wait(cmd, ec) + if err == nil && status != 0 { + err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) + } + + return err +} + +type waitResult struct { + ID string `json:"id"` + ExitStatus int `json:"exitStatus"` +} + +// Wait will wait for a running container, and return its exit status. +// TODO(random-liu): Add exec process support. 
+func (r *Runsc) Wait(context context.Context, id string) (int, error) {
+	data, err := cmdOutput(r.command(context, "wait", id), true)
+	if err != nil {
+		return 0, fmt.Errorf("%s: %s", err, data)
+	}
+	var res waitResult
+	if err := json.Unmarshal(data, &res); err != nil {
+		return 0, err
+	}
+	return res.ExitStatus, nil
+}
+
+type ExecOpts struct {
+	runc.IO
+	PidFile         string
+	InternalPidFile string
+	ConsoleSocket   runc.ConsoleSocket
+	Detach          bool
+}
+
+func (o *ExecOpts) args() (out []string, err error) {
+	if o.ConsoleSocket != nil {
+		out = append(out, "--console-socket", o.ConsoleSocket.Path())
+	}
+	if o.Detach {
+		out = append(out, "--detach")
+	}
+	if o.PidFile != "" {
+		abs, err := filepath.Abs(o.PidFile)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, "--pid-file", abs)
+	}
+	if o.InternalPidFile != "" {
+		abs, err := filepath.Abs(o.InternalPidFile)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, "--internal-pid-file", abs)
+	}
+	return out, nil
+}
+
+// Exec executes an additional process inside the container based on a full
+// OCI Process specification
+func (r *Runsc) Exec(context context.Context, id string, spec specs.Process, opts *ExecOpts) error {
+	f, err := ioutil.TempFile(os.Getenv("XDG_RUNTIME_DIR"), "runsc-process")
+	if err != nil {
+		return err
+	}
+	defer os.Remove(f.Name())
+	err = json.NewEncoder(f).Encode(spec)
+	f.Close()
+	if err != nil {
+		return err
+	}
+	args := []string{"exec", "--process", f.Name()}
+	if opts != nil {
+		oargs, err := opts.args()
+		if err != nil {
+			return err
+		}
+		args = append(args, oargs...)
+	}
+	cmd := r.command(context, append(args, id)...)
+	if opts != nil && opts.IO != nil {
+		opts.Set(cmd)
+	}
+	if cmd.Stdout == nil && cmd.Stderr == nil {
+		data, err := cmdOutput(cmd, true)
+		if err != nil {
+			return fmt.Errorf("%s: %s", err, data)
+		}
+		return nil
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return err
+	}
+	if opts != nil && opts.IO != nil {
+		if c, ok := opts.IO.(runc.StartCloser); ok {
+			if err := c.CloseAfterStart(); err != nil {
+				return err
+			}
+		}
+	}
+	status, err := Monitor.Wait(cmd, ec)
+	if err == nil && status != 0 {
+		err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0])
+	}
+	return err
+}
+
+// Run runs the create, start, delete lifecycle of the container
+// and returns its exit status after it has exited
+func (r *Runsc) Run(context context.Context, id, bundle string, opts *CreateOpts) (int, error) {
+	args := []string{"run", "--bundle", bundle}
+	if opts != nil {
+		oargs, err := opts.args()
+		if err != nil {
+			return -1, err
+		}
+		args = append(args, oargs...)
+	}
+	cmd := r.command(context, append(args, id)...)
+	if opts != nil && opts.IO != nil {
+		opts.Set(cmd)
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return -1, err
+	}
+	return Monitor.Wait(cmd, ec)
+}
+
+type DeleteOpts struct {
+	Force bool
+}
+
+func (o *DeleteOpts) args() (out []string) {
+	if o.Force {
+		out = append(out, "--force")
+	}
+	return out
+}
+
+// Delete deletes the container
+func (r *Runsc) Delete(context context.Context, id string, opts *DeleteOpts) error {
+	args := []string{"delete"}
+	if opts != nil {
+		args = append(args, opts.args()...)
+ } + return r.runOrError(r.command(context, append(args, id)...)) +} + +// KillOpts specifies options for killing a container and its processes +type KillOpts struct { + All bool + Pid int +} + +func (o *KillOpts) args() (out []string) { + if o.All { + out = append(out, "--all") + } + if o.Pid != 0 { + out = append(out, "--pid", strconv.Itoa(o.Pid)) + } + return out +} + +// Kill sends the specified signal to the container +func (r *Runsc) Kill(context context.Context, id string, sig int, opts *KillOpts) error { + args := []string{ + "kill", + } + if opts != nil { + args = append(args, opts.args()...) + } + return r.runOrError(r.command(context, append(args, id, strconv.Itoa(sig))...)) +} + +// Stats return the stats for a container like cpu, memory, and io +func (r *Runsc) Stats(context context.Context, id string) (*runc.Stats, error) { + cmd := r.command(context, "events", "--stats", id) + rd, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + ec, err := Monitor.Start(cmd) + if err != nil { + return nil, err + } + defer func() { + rd.Close() + Monitor.Wait(cmd, ec) + }() + var e runc.Event + if err := json.NewDecoder(rd).Decode(&e); err != nil { + return nil, err + } + return e.Stats, nil +} + +// Events returns an event stream from runsc for a container with stats and OOM notifications +func (r *Runsc) Events(context context.Context, id string, interval time.Duration) (chan *runc.Event, error) { + cmd := r.command(context, "events", fmt.Sprintf("--interval=%ds", int(interval.Seconds())), id) + rd, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + ec, err := Monitor.Start(cmd) + if err != nil { + rd.Close() + return nil, err + } + var ( + dec = json.NewDecoder(rd) + c = make(chan *runc.Event, 128) + ) + go func() { + defer func() { + close(c) + rd.Close() + Monitor.Wait(cmd, ec) + }() + for { + var e runc.Event + if err := dec.Decode(&e); err != nil { + if err == io.EOF { + return + } + e = runc.Event{ + Type: "error", + Err: err, + } + } + c <- &e + } + }() + return c, nil +} + +// Ps lists all the processes inside the container returning their pids +func (r *Runsc) Ps(context context.Context, id string) ([]int, error) { + data, err := cmdOutput(r.command(context, "ps", "--format", "json", id), true) + if err != nil { + return nil, fmt.Errorf("%s: %s", err, data) + } + var pids []int + if err := json.Unmarshal(data, &pids); err != nil { + return nil, err + } + return pids, nil +} + +// Top lists all the processes inside the container returning the full ps data +func (r *Runsc) Top(context context.Context, id string) (*runc.TopResults, error) { + data, err := cmdOutput(r.command(context, "ps", "--format", "table", id), true) + if err != nil { + return nil, fmt.Errorf("%s: %s", err, data) + } + + topResults, err := runc.ParsePSOutput(data) + if err != nil { + return nil, fmt.Errorf("%s: ", err) + } + return topResults, nil +} + +func (r *Runsc) args() []string { + var args []string + if r.Root != "" { + args = append(args, fmt.Sprintf("--root=%s", r.Root)) + } + if r.Log != "" { + args = append(args, fmt.Sprintf("--log=%s", r.Log)) + } + if r.LogFormat != "" { + args = append(args, fmt.Sprintf("--log-format=%s", r.LogFormat)) + } + for k, v := range r.Config { + args = append(args, fmt.Sprintf("--%s=%s", k, v)) + } + return args +} + +// runOrError will run the provided command. 
If an error is +// encountered and neither Stdout or Stderr was set the error and the +// stderr of the command will be returned in the format of : +// +func (r *Runsc) runOrError(cmd *exec.Cmd) error { + if cmd.Stdout != nil || cmd.Stderr != nil { + ec, err := Monitor.Start(cmd) + if err != nil { + return err + } + status, err := Monitor.Wait(cmd, ec) + if err == nil && status != 0 { + err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) + } + return err + } + data, err := cmdOutput(cmd, true) + if err != nil { + return fmt.Errorf("%s: %s", err, data) + } + return nil +} + +func (r *Runsc) command(context context.Context, args ...string) *exec.Cmd { + command := r.Command + if command == "" { + command = DefaultCommand + } + cmd := exec.CommandContext(context, command, append(r.args(), args...)...) + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: r.Setpgid, + } + if r.PdeathSignal != 0 { + cmd.SysProcAttr.Pdeathsig = r.PdeathSignal + } + + return cmd +} + +func cmdOutput(cmd *exec.Cmd, combined bool) ([]byte, error) { + b := getBuf() + defer putBuf(b) + + cmd.Stdout = b + if combined { + cmd.Stderr = b + } + ec, err := Monitor.Start(cmd) + if err != nil { + return nil, err + } + + status, err := Monitor.Wait(cmd, ec) + if err == nil && status != 0 { + err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0]) + } + + return b.Bytes(), err +} diff --git a/pkg/shim/runsc/utils.go b/pkg/shim/runsc/utils.go new file mode 100644 index 000000000..2732d5e98 --- /dev/null +++ b/pkg/shim/runsc/utils.go @@ -0,0 +1,44 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2018 The gVisor authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runsc + +import ( + "bytes" + "strings" + "sync" +) + +var bytesBufferPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(nil) + }, +} + +func getBuf() *bytes.Buffer { + return bytesBufferPool.Get().(*bytes.Buffer) +} + +func putBuf(b *bytes.Buffer) { + b.Reset() + bytesBufferPool.Put(b) +} + +// FormatLogPath parses runsc config, and fill in %ID% in the log path. +func FormatLogPath(id string, config map[string]string) { + if path, ok := config["debug-log"]; ok { + config["debug-log"] = strings.Replace(path, "%ID%", id, -1) + } +} diff --git a/pkg/shim/v1/proc/deleted_state.go b/pkg/shim/v1/proc/deleted_state.go new file mode 100644 index 000000000..0196c96dd --- /dev/null +++ b/pkg/shim/v1/proc/deleted_state.go @@ -0,0 +1,49 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "context" + + "github.com/containerd/console" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/runtime/proc" + "github.com/pkg/errors" +) + +type deletedState struct{} + +func (*deletedState) Resize(ws console.WinSize) error { + return errors.Errorf("cannot resize a deleted process") +} + +func (*deletedState) Start(ctx context.Context) error { + return errors.Errorf("cannot start a deleted process") +} + +func (*deletedState) Delete(ctx context.Context) error { + return errors.Wrap(errdefs.ErrNotFound, "cannot delete a deleted process") +} + +func (*deletedState) Kill(ctx context.Context, sig uint32, all bool) error { + return errors.Wrap(errdefs.ErrNotFound, "cannot kill a deleted process") +} + +func (*deletedState) SetExited(status int) {} + +func (*deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { + return nil, errors.Errorf("cannot exec in a deleted state") +} diff --git a/pkg/shim/v1/proc/exec.go b/pkg/shim/v1/proc/exec.go new file mode 100644 index 000000000..17199960e --- /dev/null +++ b/pkg/shim/v1/proc/exec.go @@ -0,0 +1,282 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proc + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "syscall" + "time" + + "github.com/containerd/console" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/runtime/proc" + "github.com/containerd/fifo" + runc "github.com/containerd/go-runc" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "golang.org/x/sys/unix" + + runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" +) + +type execProcess struct { + wg sync.WaitGroup + + execState execState + + mu sync.Mutex + id string + console console.Console + io runc.IO + status int + exited time.Time + pid int + internalPid int + closers []io.Closer + stdin io.Closer + stdio proc.Stdio + path string + spec specs.Process + + parent *Init + waitBlock chan struct{} +} + +func (e *execProcess) Wait() { + <-e.waitBlock +} + +func (e *execProcess) ID() string { + return e.id +} + +func (e *execProcess) Pid() int { + e.mu.Lock() + defer e.mu.Unlock() + return e.pid +} + +func (e *execProcess) ExitStatus() int { + e.mu.Lock() + defer e.mu.Unlock() + return e.status +} + +func (e *execProcess) ExitedAt() time.Time { + e.mu.Lock() + defer e.mu.Unlock() + return e.exited +} + +func (e *execProcess) SetExited(status int) { + e.mu.Lock() + defer e.mu.Unlock() + + e.execState.SetExited(status) +} + +func (e *execProcess) setExited(status int) { + e.status = status + e.exited = time.Now() + e.parent.Platform.ShutdownConsole(context.Background(), e.console) + close(e.waitBlock) +} + +func (e *execProcess) Delete(ctx context.Context) error { + e.mu.Lock() + defer e.mu.Unlock() + + return e.execState.Delete(ctx) +} + +func (e *execProcess) delete(ctx context.Context) error { + e.wg.Wait() + if e.io != nil { + for _, c := range e.closers { + c.Close() + } + e.io.Close() + } + pidfile := filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id)) + // silently ignore error + os.Remove(pidfile) + internalPidfile := filepath.Join(e.path, fmt.Sprintf("%s-internal.pid", e.id)) + // silently ignore error + os.Remove(internalPidfile) + return nil +} + +func (e *execProcess) Resize(ws console.WinSize) error { + e.mu.Lock() + defer e.mu.Unlock() + + return e.execState.Resize(ws) +} + +func (e *execProcess) resize(ws console.WinSize) error { + if e.console == nil { + return nil + } + return e.console.Resize(ws) +} + +func (e *execProcess) Kill(ctx context.Context, sig uint32, _ bool) error { + e.mu.Lock() + defer e.mu.Unlock() + + return e.execState.Kill(ctx, sig, false) +} + +func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error { + internalPid := e.internalPid + if internalPid != 0 { + if err := e.parent.runtime.Kill(ctx, e.parent.id, int(sig), &runsc.KillOpts{ + Pid: internalPid, + }); err != nil { + // If this returns error, consider the process has already stopped. + // TODO: Fix after signal handling is fixed. 
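+			// errdefs.ErrNotFound maps to a NotFound status at the API boundary, so callers treat the exec'd process as already gone.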
+ return errors.Wrapf(errdefs.ErrNotFound, err.Error()) + } + } + return nil +} + +func (e *execProcess) Stdin() io.Closer { + return e.stdin +} + +func (e *execProcess) Stdio() proc.Stdio { + return e.stdio +} + +func (e *execProcess) Start(ctx context.Context) error { + e.mu.Lock() + defer e.mu.Unlock() + + return e.execState.Start(ctx) +} + +func (e *execProcess) start(ctx context.Context) (err error) { + var ( + socket *runc.Socket + pidfile = filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id)) + internalPidfile = filepath.Join(e.path, fmt.Sprintf("%s-internal.pid", e.id)) + ) + if e.stdio.Terminal { + if socket, err = runc.NewTempConsoleSocket(); err != nil { + return errors.Wrap(err, "failed to create runc console socket") + } + defer socket.Close() + } else if e.stdio.IsNull() { + if e.io, err = runc.NewNullIO(); err != nil { + return errors.Wrap(err, "creating new NULL IO") + } + } else { + if e.io, err = runc.NewPipeIO(e.parent.IoUID, e.parent.IoGID, withConditionalIO(e.stdio)); err != nil { + return errors.Wrap(err, "failed to create runc io pipes") + } + } + opts := &runsc.ExecOpts{ + PidFile: pidfile, + InternalPidFile: internalPidfile, + IO: e.io, + Detach: true, + } + if socket != nil { + opts.ConsoleSocket = socket + } + eventCh := e.parent.Monitor.Subscribe() + defer func() { + // Unsubscribe if an error is returned. + if err != nil { + e.parent.Monitor.Unsubscribe(eventCh) + } + }() + if err := e.parent.runtime.Exec(ctx, e.parent.id, e.spec, opts); err != nil { + close(e.waitBlock) + return e.parent.runtimeError(err, "OCI runtime exec failed") + } + if e.stdio.Stdin != "" { + sc, err := fifo.OpenFifo(context.Background(), e.stdio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) + if err != nil { + return errors.Wrapf(err, "failed to open stdin fifo %s", e.stdio.Stdin) + } + e.closers = append(e.closers, sc) + e.stdin = sc + } + var copyWaitGroup sync.WaitGroup + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + if socket != nil { + console, err := socket.ReceiveMaster() + if err != nil { + return errors.Wrap(err, "failed to retrieve console master") + } + if e.console, err = e.parent.Platform.CopyConsole(ctx, console, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil { + return errors.Wrap(err, "failed to start console copy") + } + } else if !e.stdio.IsNull() { + if err := copyPipes(ctx, e.io, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil { + return errors.Wrap(err, "failed to start io pipe copy") + } + } + copyWaitGroup.Wait() + pid, err := runc.ReadPidFile(opts.PidFile) + if err != nil { + return errors.Wrap(err, "failed to retrieve OCI runtime exec pid") + } + e.pid = pid + internalPid, err := runc.ReadPidFile(opts.InternalPidFile) + if err != nil { + return errors.Wrap(err, "failed to retrieve OCI runtime exec internal pid") + } + e.internalPid = internalPid + go func() { + defer e.parent.Monitor.Unsubscribe(eventCh) + for event := range eventCh { + if event.Pid == e.pid { + ExitCh <- Exit{ + Timestamp: event.Timestamp, + ID: e.id, + Status: event.Status, + } + break + } + } + }() + return nil +} + +func (e *execProcess) Status(ctx context.Context) (string, error) { + e.mu.Lock() + defer e.mu.Unlock() + // if we don't have a pid then the exec process has just been created + if e.pid == 0 { + return "created", nil + } + // if we have a pid and it can be signaled, the process is running + // TODO(random-liu): Use `runsc kill --pid`. 
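+	// Signal 0 performs error checking only: it reports whether the pid exists and can be signaled without delivering a signal.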
+ if err := unix.Kill(e.pid, 0); err == nil { + return "running", nil + } + // else if we have a pid but it can nolonger be signaled, it has stopped + return "stopped", nil +} diff --git a/pkg/shim/v1/proc/exec_state.go b/pkg/shim/v1/proc/exec_state.go new file mode 100644 index 000000000..5416cb601 --- /dev/null +++ b/pkg/shim/v1/proc/exec_state.go @@ -0,0 +1,154 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "context" + + "github.com/containerd/console" + "github.com/pkg/errors" +) + +type execState interface { + Resize(console.WinSize) error + Start(context.Context) error + Delete(context.Context) error + Kill(context.Context, uint32, bool) error + SetExited(int) +} + +type execCreatedState struct { + p *execProcess +} + +func (s *execCreatedState) transition(name string) error { + switch name { + case "running": + s.p.execState = &execRunningState{p: s.p} + case "stopped": + s.p.execState = &execStoppedState{p: s.p} + case "deleted": + s.p.execState = &deletedState{} + default: + return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *execCreatedState) Resize(ws console.WinSize) error { + return s.p.resize(ws) +} + +func (s *execCreatedState) Start(ctx context.Context) error { + if err := s.p.start(ctx); err != nil { + return err + } + return s.transition("running") +} + +func (s *execCreatedState) Delete(ctx context.Context) error { + if err := s.p.delete(ctx); err != nil { + return err + } + return s.transition("deleted") +} + +func (s *execCreatedState) Kill(ctx context.Context, sig uint32, all bool) error { + return s.p.kill(ctx, sig, all) +} + +func (s *execCreatedState) SetExited(status int) { + s.p.setExited(status) + + if err := s.transition("stopped"); err != nil { + panic(err) + } +} + +type execRunningState struct { + p *execProcess +} + +func (s *execRunningState) transition(name string) error { + switch name { + case "stopped": + s.p.execState = &execStoppedState{p: s.p} + default: + return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *execRunningState) Resize(ws console.WinSize) error { + return s.p.resize(ws) +} + +func (s *execRunningState) Start(ctx context.Context) error { + return errors.Errorf("cannot start a running process") +} + +func (s *execRunningState) Delete(ctx context.Context) error { + return errors.Errorf("cannot delete a running process") +} + +func (s *execRunningState) Kill(ctx context.Context, sig uint32, all bool) error { + return s.p.kill(ctx, sig, all) +} + +func (s *execRunningState) SetExited(status int) { + s.p.setExited(status) + + if err := s.transition("stopped"); err != nil { + panic(err) + } +} + +type execStoppedState struct { + p *execProcess +} + +func (s *execStoppedState) transition(name string) error { + switch name { + case "deleted": + s.p.execState = &deletedState{} + default: + return 
errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *execStoppedState) Resize(ws console.WinSize) error { + return errors.Errorf("cannot resize a stopped container") +} + +func (s *execStoppedState) Start(ctx context.Context) error { + return errors.Errorf("cannot start a stopped process") +} + +func (s *execStoppedState) Delete(ctx context.Context) error { + if err := s.p.delete(ctx); err != nil { + return err + } + return s.transition("deleted") +} + +func (s *execStoppedState) Kill(ctx context.Context, sig uint32, all bool) error { + return s.p.kill(ctx, sig, all) +} + +func (s *execStoppedState) SetExited(status int) { + // no op +} diff --git a/pkg/shim/v1/proc/init.go b/pkg/shim/v1/proc/init.go new file mode 100644 index 000000000..fadfc9146 --- /dev/null +++ b/pkg/shim/v1/proc/init.go @@ -0,0 +1,462 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "context" + "encoding/json" + "io" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + "github.com/containerd/console" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/runtime/proc" + "github.com/containerd/fifo" + runc "github.com/containerd/go-runc" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + + runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" +) + +// InitPidFile name of the file that contains the init pid +const InitPidFile = "init.pid" + +// Init represents an initial process for a container +type Init struct { + wg sync.WaitGroup + initState initState + + // mu is used to ensure that `Start()` and `Exited()` calls return in + // the right order when invoked in separate go routines. + // This is the case within the shim implementation as it makes use of + // the reaper interface. 
+ mu sync.Mutex + + waitBlock chan struct{} + + WorkDir string + + id string + Bundle string + console console.Console + Platform proc.Platform + io runc.IO + runtime *runsc.Runsc + status int + exited time.Time + pid int + closers []io.Closer + stdin io.Closer + stdio proc.Stdio + Rootfs string + IoUID int + IoGID int + Sandbox bool + UserLog string + Monitor ProcessMonitor +} + +// NewRunsc returns a new runsc instance for a process +func NewRunsc(root, path, namespace, runtime string, config map[string]string) *runsc.Runsc { + if root == "" { + root = RunscRoot + } + return &runsc.Runsc{ + Command: runtime, + PdeathSignal: syscall.SIGKILL, + Log: filepath.Join(path, "log.json"), + LogFormat: runc.JSON, + Root: filepath.Join(root, namespace), + Config: config, + } +} + +// New returns a new init process +func New(id string, runtime *runsc.Runsc, stdio proc.Stdio) *Init { + p := &Init{ + id: id, + runtime: runtime, + stdio: stdio, + status: 0, + waitBlock: make(chan struct{}), + } + p.initState = &createdState{p: p} + return p +} + +// Create the process with the provided config +func (p *Init) Create(ctx context.Context, r *CreateConfig) (err error) { + var socket *runc.Socket + if r.Terminal { + if socket, err = runc.NewTempConsoleSocket(); err != nil { + return errors.Wrap(err, "failed to create OCI runtime console socket") + } + defer socket.Close() + } else if hasNoIO(r) { + if p.io, err = runc.NewNullIO(); err != nil { + return errors.Wrap(err, "creating new NULL IO") + } + } else { + if p.io, err = runc.NewPipeIO(p.IoUID, p.IoGID, withConditionalIO(p.stdio)); err != nil { + return errors.Wrap(err, "failed to create OCI runtime io pipes") + } + } + pidFile := filepath.Join(p.Bundle, InitPidFile) + opts := &runsc.CreateOpts{ + PidFile: pidFile, + } + if socket != nil { + opts.ConsoleSocket = socket + } + if p.Sandbox { + opts.IO = p.io + // UserLog is only useful for sandbox. 
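+		// Non-sandbox containers have their IO attached at start time instead (see Init.start).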
+ opts.UserLog = p.UserLog + } + if err := p.runtime.Create(ctx, r.ID, r.Bundle, opts); err != nil { + return p.runtimeError(err, "OCI runtime create failed") + } + if r.Stdin != "" { + sc, err := fifo.OpenFifo(context.Background(), r.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) + if err != nil { + return errors.Wrapf(err, "failed to open stdin fifo %s", r.Stdin) + } + p.stdin = sc + p.closers = append(p.closers, sc) + } + var copyWaitGroup sync.WaitGroup + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + if socket != nil { + console, err := socket.ReceiveMaster() + if err != nil { + return errors.Wrap(err, "failed to retrieve console master") + } + console, err = p.Platform.CopyConsole(ctx, console, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup) + if err != nil { + return errors.Wrap(err, "failed to start console copy") + } + p.console = console + } else if !hasNoIO(r) { + if err := copyPipes(ctx, p.io, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup); err != nil { + return errors.Wrap(err, "failed to start io pipe copy") + } + } + + copyWaitGroup.Wait() + pid, err := runc.ReadPidFile(pidFile) + if err != nil { + return errors.Wrap(err, "failed to retrieve OCI runtime container pid") + } + p.pid = pid + return nil +} + +// Wait for the process to exit +func (p *Init) Wait() { + <-p.waitBlock +} + +// ID of the process +func (p *Init) ID() string { + return p.id +} + +// Pid of the process +func (p *Init) Pid() int { + return p.pid +} + +// ExitStatus of the process +func (p *Init) ExitStatus() int { + p.mu.Lock() + defer p.mu.Unlock() + return p.status +} + +// ExitedAt at time when the process exited +func (p *Init) ExitedAt() time.Time { + p.mu.Lock() + defer p.mu.Unlock() + return p.exited +} + +// Status of the process +func (p *Init) Status(ctx context.Context) (string, error) { + p.mu.Lock() + defer p.mu.Unlock() + c, err := p.runtime.State(ctx, p.id) + if err != nil { + if strings.Contains(err.Error(), "does not exist") { + return "stopped", nil + } + return "", p.runtimeError(err, "OCI runtime state failed") + } + return p.convertStatus(c.Status), nil +} + +// Start the init process +func (p *Init) Start(ctx context.Context) error { + p.mu.Lock() + defer p.mu.Unlock() + + return p.initState.Start(ctx) +} + +func (p *Init) start(ctx context.Context) error { + var cio runc.IO + if !p.Sandbox { + cio = p.io + } + if err := p.runtime.Start(ctx, p.id, cio); err != nil { + return p.runtimeError(err, "OCI runtime start failed") + } + go func() { + status, err := p.runtime.Wait(context.Background(), p.id) + if err != nil { + log.G(ctx).WithError(err).Errorf("Failed to wait for container %q", p.id) + // TODO(random-liu): Handle runsc kill error. 
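+			// The exit status is unknown when wait fails, so kill whatever is left in the sandbox and report the internal error code.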
+ if err := p.killAll(ctx); err != nil { + log.G(ctx).WithError(err).Errorf("Failed to kill container %q", p.id) + } + status = internalErrorCode + } + ExitCh <- Exit{ + Timestamp: time.Now(), + ID: p.id, + Status: status, + } + }() + return nil +} + +// SetExited of the init process with the next status +func (p *Init) SetExited(status int) { + p.mu.Lock() + defer p.mu.Unlock() + + p.initState.SetExited(status) +} + +func (p *Init) setExited(status int) { + p.exited = time.Now() + p.status = status + p.Platform.ShutdownConsole(context.Background(), p.console) + close(p.waitBlock) +} + +// Delete the init process +func (p *Init) Delete(ctx context.Context) error { + p.mu.Lock() + defer p.mu.Unlock() + + return p.initState.Delete(ctx) +} + +func (p *Init) delete(ctx context.Context) error { + p.killAll(ctx) + p.wg.Wait() + err := p.runtime.Delete(ctx, p.id, nil) + // ignore errors if a runtime has already deleted the process + // but we still hold metadata and pipes + // + // this is common during a checkpoint, runc will delete the container state + // after a checkpoint and the container will no longer exist within runc + if err != nil { + if strings.Contains(err.Error(), "does not exist") { + err = nil + } else { + err = p.runtimeError(err, "failed to delete task") + } + } + if p.io != nil { + for _, c := range p.closers { + c.Close() + } + p.io.Close() + } + if err2 := mount.UnmountAll(p.Rootfs, 0); err2 != nil { + log.G(ctx).WithError(err2).Warn("failed to cleanup rootfs mount") + if err == nil { + err = errors.Wrap(err2, "failed rootfs umount") + } + } + return err +} + +// Resize the init processes console +func (p *Init) Resize(ws console.WinSize) error { + p.mu.Lock() + defer p.mu.Unlock() + + if p.console == nil { + return nil + } + return p.console.Resize(ws) +} + +func (p *Init) resize(ws console.WinSize) error { + if p.console == nil { + return nil + } + return p.console.Resize(ws) +} + +// Kill the init process +func (p *Init) Kill(ctx context.Context, signal uint32, all bool) error { + p.mu.Lock() + defer p.mu.Unlock() + + return p.initState.Kill(ctx, signal, all) +} + +func (p *Init) kill(context context.Context, signal uint32, all bool) error { + var ( + killErr error + backoff = 100 * time.Millisecond + ) + timeout := 1 * time.Second + for start := time.Now(); time.Now().Sub(start) < timeout; { + c, err := p.runtime.State(context, p.id) + if err != nil { + if strings.Contains(err.Error(), "does not exist") { + return errors.Wrapf(errdefs.ErrNotFound, "no such process") + } + return p.runtimeError(err, "OCI runtime state failed") + } + // For runsc, signal only works when container is running state. + // If the container is not in running state, directly return + // "no such process" + if p.convertStatus(c.Status) == "stopped" { + return errors.Wrapf(errdefs.ErrNotFound, "no such process") + } + killErr = p.runtime.Kill(context, p.id, int(signal), &runsc.KillOpts{ + All: all, + }) + if killErr == nil { + return nil + } + time.Sleep(backoff) + backoff *= 2 + } + return p.runtimeError(killErr, "kill timeout") +} + +// KillAll processes belonging to the init process +func (p *Init) KillAll(context context.Context) error { + p.mu.Lock() + defer p.mu.Unlock() + return p.killAll(context) +} + +func (p *Init) killAll(context context.Context) error { + p.runtime.Kill(context, p.id, int(syscall.SIGKILL), &runsc.KillOpts{ + All: true, + }) + // Ignore error handling for `runsc kill --all` for now. 
+ // * If it doesn't return error, it is good; + // * If it returns error, consider the container has already stopped. + // TODO: Fix `runsc kill --all` error handling. + return nil +} + +// Stdin of the process +func (p *Init) Stdin() io.Closer { + return p.stdin +} + +// Runtime returns the OCI runtime configured for the init process +func (p *Init) Runtime() *runsc.Runsc { + return p.runtime +} + +// Exec returns a new child process +func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { + p.mu.Lock() + defer p.mu.Unlock() + + return p.initState.Exec(ctx, path, r) +} + +// exec returns a new exec'd process +func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { + // process exec request + var spec specs.Process + if err := json.Unmarshal(r.Spec.Value, &spec); err != nil { + return nil, err + } + spec.Terminal = r.Terminal + + e := &execProcess{ + id: r.ID, + path: path, + parent: p, + spec: spec, + stdio: proc.Stdio{ + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Terminal: r.Terminal, + }, + waitBlock: make(chan struct{}), + } + e.execState = &execCreatedState{p: e} + return e, nil +} + +// Stdio of the process +func (p *Init) Stdio() proc.Stdio { + return p.stdio +} + +func (p *Init) runtimeError(rErr error, msg string) error { + if rErr == nil { + return nil + } + + rMsg, err := getLastRuntimeError(p.runtime) + switch { + case err != nil: + return errors.Wrapf(rErr, "%s: %s (%s)", msg, "unable to retrieve OCI runtime error", err.Error()) + case rMsg == "": + return errors.Wrap(rErr, msg) + default: + return errors.Errorf("%s: %s", msg, rMsg) + } +} + +func (p *Init) convertStatus(status string) string { + if status == "created" && !p.Sandbox && p.status == internalErrorCode { + // Treat start failure state for non-root container as stopped. + return "stopped" + } + return status +} + +func withConditionalIO(c proc.Stdio) runc.IOOpt { + return func(o *runc.IOOption) { + o.OpenStdin = c.Stdin != "" + o.OpenStdout = c.Stdout != "" + o.OpenStderr = c.Stderr != "" + } +} diff --git a/pkg/shim/v1/proc/init_state.go b/pkg/shim/v1/proc/init_state.go new file mode 100644 index 000000000..868646b6c --- /dev/null +++ b/pkg/shim/v1/proc/init_state.go @@ -0,0 +1,182 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proc + +import ( + "context" + + "github.com/containerd/console" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/runtime/proc" + "github.com/pkg/errors" +) + +type initState interface { + Resize(console.WinSize) error + Start(context.Context) error + Delete(context.Context) error + Exec(context.Context, string, *ExecConfig) (proc.Process, error) + Kill(context.Context, uint32, bool) error + SetExited(int) +} + +type createdState struct { + p *Init +} + +func (s *createdState) transition(name string) error { + switch name { + case "running": + s.p.initState = &runningState{p: s.p} + case "stopped": + s.p.initState = &stoppedState{p: s.p} + case "deleted": + s.p.initState = &deletedState{} + default: + return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *createdState) Resize(ws console.WinSize) error { + return s.p.resize(ws) +} + +func (s *createdState) Start(ctx context.Context) error { + if err := s.p.start(ctx); err != nil { + // Containerd doesn't allow deleting container in created state. + // However, for gvisor, a non-root container in created state can + // only go to running state. If the container can't be started, + // it can only stay in created state, and never be deleted. + // To work around that, we treat non-root container in start failure + // state as stopped. + if !s.p.Sandbox { + s.p.io.Close() + s.p.setExited(internalErrorCode) + if err := s.transition("stopped"); err != nil { + panic(err) + } + } + return err + } + return s.transition("running") +} + +func (s *createdState) Delete(ctx context.Context) error { + if err := s.p.delete(ctx); err != nil { + return err + } + return s.transition("deleted") +} + +func (s *createdState) Kill(ctx context.Context, sig uint32, all bool) error { + return s.p.kill(ctx, sig, all) +} + +func (s *createdState) SetExited(status int) { + s.p.setExited(status) + + if err := s.transition("stopped"); err != nil { + panic(err) + } +} + +func (s *createdState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { + return s.p.exec(ctx, path, r) +} + +type runningState struct { + p *Init +} + +func (s *runningState) transition(name string) error { + switch name { + case "stopped": + s.p.initState = &stoppedState{p: s.p} + default: + return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *runningState) Resize(ws console.WinSize) error { + return s.p.resize(ws) +} + +func (s *runningState) Start(ctx context.Context) error { + return errors.Errorf("cannot start a running process") +} + +func (s *runningState) Delete(ctx context.Context) error { + return errors.Errorf("cannot delete a running process") +} + +func (s *runningState) Kill(ctx context.Context, sig uint32, all bool) error { + return s.p.kill(ctx, sig, all) +} + +func (s *runningState) SetExited(status int) { + s.p.setExited(status) + + if err := s.transition("stopped"); err != nil { + panic(err) + } +} + +func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { + return s.p.exec(ctx, path, r) +} + +type stoppedState struct { + p *Init +} + +func (s *stoppedState) transition(name string) error { + switch name { + case "deleted": + s.p.initState = &deletedState{} + default: + return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + } + return nil +} + +func (s *stoppedState) Resize(ws console.WinSize) error { + return errors.Errorf("cannot resize 
a stopped container") +} + +func (s *stoppedState) Start(ctx context.Context) error { + return errors.Errorf("cannot start a stopped process") +} + +func (s *stoppedState) Delete(ctx context.Context) error { + if err := s.p.delete(ctx); err != nil { + return err + } + return s.transition("deleted") +} + +func (s *stoppedState) Kill(ctx context.Context, sig uint32, all bool) error { + return errdefs.ToGRPCf(errdefs.ErrNotFound, "process %s not found", s.p.id) +} + +func (s *stoppedState) SetExited(status int) { + // no op +} + +func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { + return nil, errors.Errorf("cannot exec in a stopped state") +} diff --git a/pkg/shim/v1/proc/io.go b/pkg/shim/v1/proc/io.go new file mode 100644 index 000000000..2677b4e54 --- /dev/null +++ b/pkg/shim/v1/proc/io.go @@ -0,0 +1,167 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "context" + "fmt" + "io" + "os" + "sync" + "sync/atomic" + "syscall" + + "github.com/containerd/containerd/log" + "github.com/containerd/fifo" + runc "github.com/containerd/go-runc" +) + +// TODO(random-liu): This file can be a util. 
+ +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32<<10) + return &buffer + }, +} + +func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error { + var sameFile *countingWriteCloser + for _, i := range []struct { + name string + dest func(wc io.WriteCloser, rc io.Closer) + }{ + { + name: stdout, + dest: func(wc io.WriteCloser, rc io.Closer) { + wg.Add(1) + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + if _, err := io.CopyBuffer(wc, rio.Stdout(), *p); err != nil { + log.G(ctx).Warn("error copying stdout") + } + wg.Done() + wc.Close() + if rc != nil { + rc.Close() + } + }() + }, + }, { + name: stderr, + dest: func(wc io.WriteCloser, rc io.Closer) { + wg.Add(1) + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + if _, err := io.CopyBuffer(wc, rio.Stderr(), *p); err != nil { + log.G(ctx).Warn("error copying stderr") + } + wg.Done() + wc.Close() + if rc != nil { + rc.Close() + } + }() + }, + }, + } { + ok, err := isFifo(i.name) + if err != nil { + return err + } + var ( + fw io.WriteCloser + fr io.Closer + ) + if ok { + if fw, err = fifo.OpenFifo(ctx, i.name, syscall.O_WRONLY, 0); err != nil { + return fmt.Errorf("gvisor-containerd-shim: opening %s failed: %s", i.name, err) + } + if fr, err = fifo.OpenFifo(ctx, i.name, syscall.O_RDONLY, 0); err != nil { + return fmt.Errorf("gvisor-containerd-shim: opening %s failed: %s", i.name, err) + } + } else { + if sameFile != nil { + sameFile.count++ + i.dest(sameFile, nil) + continue + } + if fw, err = os.OpenFile(i.name, syscall.O_WRONLY|syscall.O_APPEND, 0); err != nil { + return fmt.Errorf("gvisor-containerd-shim: opening %s failed: %s", i.name, err) + } + if stdout == stderr { + sameFile = &countingWriteCloser{ + WriteCloser: fw, + count: 1, + } + } + } + i.dest(fw, fr) + } + if stdin == "" { + return nil + } + f, err := fifo.OpenFifo(context.Background(), stdin, syscall.O_RDONLY|syscall.O_NONBLOCK, 0) + if err != nil { + return fmt.Errorf("gvisor-containerd-shim: opening %s failed: %s", stdin, err) + } + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(rio.Stdin(), f, *p) + rio.Stdin().Close() + f.Close() + }() + return nil +} + +// countingWriteCloser masks io.Closer() until close has been invoked a certain number of times. +type countingWriteCloser struct { + io.WriteCloser + count int64 +} + +func (c *countingWriteCloser) Close() error { + if atomic.AddInt64(&c.count, -1) > 0 { + return nil + } + return c.WriteCloser.Close() +} + +// isFifo checks if a file is a fifo +// if the file does not exist then it returns false +func isFifo(path string) (bool, error) { + stat, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + if stat.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return true, nil + } + return false, nil +} diff --git a/pkg/shim/v1/proc/process.go b/pkg/shim/v1/proc/process.go new file mode 100644 index 000000000..1bfa99f4c --- /dev/null +++ b/pkg/shim/v1/proc/process.go @@ -0,0 +1,37 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "github.com/pkg/errors" +) + +// RunscRoot is the path to the root runsc state directory +const RunscRoot = "/run/containerd/runsc" + +func stateName(v interface{}) string { + switch v.(type) { + case *runningState, *execRunningState: + return "running" + case *createdState, *execCreatedState: + return "created" + case *deletedState: + return "deleted" + case *stoppedState: + return "stopped" + } + panic(errors.Errorf("invalid state %v", v)) +} diff --git a/pkg/shim/v1/proc/types.go b/pkg/shim/v1/proc/types.go new file mode 100644 index 000000000..dcd43bcca --- /dev/null +++ b/pkg/shim/v1/proc/types.go @@ -0,0 +1,70 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "time" + + google_protobuf "github.com/gogo/protobuf/types" + + runc "github.com/containerd/go-runc" +) + +// Mount holds filesystem mount configuration +type Mount struct { + Type string + Source string + Target string + Options []string +} + +// CreateConfig hold task creation configuration +type CreateConfig struct { + ID string + Bundle string + Runtime string + Rootfs []Mount + Terminal bool + Stdin string + Stdout string + Stderr string + Options *google_protobuf.Any +} + +// ExecConfig holds exec creation configuration +type ExecConfig struct { + ID string + Terminal bool + Stdin string + Stdout string + Stderr string + Spec *google_protobuf.Any +} + +// Exit is the type of exit events +type Exit struct { + Timestamp time.Time + ID string + Status int +} + +// ProcessMonitor monitors process exit changes +type ProcessMonitor interface { + // Subscribe to process exit changes + Subscribe() chan runc.Exit + // Unsubscribe to process exit changes + Unsubscribe(c chan runc.Exit) +} diff --git a/pkg/shim/v1/proc/utils.go b/pkg/shim/v1/proc/utils.go new file mode 100644 index 000000000..84bd702ab --- /dev/null +++ b/pkg/shim/v1/proc/utils.go @@ -0,0 +1,92 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "encoding/json" + "io" + "os" + "strings" + "time" + + runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" +) + +const ( + internalErrorCode = 128 + bufferSize = 32 +) + +// ExitCh is the exit events channel for containers and exec processes +// inside the sandbox. +var ExitCh = make(chan Exit, bufferSize) + +// TODO(random-liu): This can be a utility. + +// TODO(mlaventure): move to runc package? +func getLastRuntimeError(r *runsc.Runsc) (string, error) { + if r.Log == "" { + return "", nil + } + + f, err := os.OpenFile(r.Log, os.O_RDONLY, 0400) + if err != nil { + return "", err + } + + var ( + errMsg string + log struct { + Level string + Msg string + Time time.Time + } + ) + + dec := json.NewDecoder(f) + for err = nil; err == nil; { + if err = dec.Decode(&log); err != nil && err != io.EOF { + return "", err + } + if log.Level == "error" { + errMsg = strings.TrimSpace(log.Msg) + } + } + + return errMsg, nil +} + +func copyFile(to, from string) error { + ff, err := os.Open(from) + if err != nil { + return err + } + defer ff.Close() + tt, err := os.Create(to) + if err != nil { + return err + } + defer tt.Close() + + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + _, err = io.CopyBuffer(tt, ff, *p) + return err +} + +func hasNoIO(r *CreateConfig) bool { + return r.Stdin == "" && r.Stdout == "" && r.Stderr == "" +} diff --git a/pkg/shim/v1/shim/platform.go b/pkg/shim/v1/shim/platform.go new file mode 100644 index 000000000..86252c3f5 --- /dev/null +++ b/pkg/shim/v1/shim/platform.go @@ -0,0 +1,110 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2019 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package shim + +import ( + "context" + "io" + "sync" + "syscall" + + "github.com/containerd/console" + "github.com/containerd/fifo" + "github.com/pkg/errors" +) + +type linuxPlatform struct { + epoller *console.Epoller +} + +func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { + if p.epoller == nil { + return nil, errors.New("uninitialized epoller") + } + + epollConsole, err := p.epoller.Add(console) + if err != nil { + return nil, err + } + + if stdin != "" { + in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0) + if err != nil { + return nil, err + } + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + io.CopyBuffer(epollConsole, in, *p) + }() + } + + outw, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0) + if err != nil { + return nil, err + } + outr, err := fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY, 0) + if err != nil { + return nil, err + } + wg.Add(1) + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + io.CopyBuffer(outw, epollConsole, *p) + epollConsole.Close() + outr.Close() + outw.Close() + wg.Done() + }() + return epollConsole, nil +} + +func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error { + if p.epoller == nil { + return errors.New("uninitialized epoller") + } + epollConsole, ok := cons.(*console.EpollConsole) + if !ok { + return errors.Errorf("expected EpollConsole, got %#v", cons) + } + return epollConsole.Shutdown(p.epoller.CloseConsole) +} + +func (p *linuxPlatform) Close() error { + return p.epoller.Close() +} + +// initialize a single epoll fd to manage our consoles. `initPlatform` should +// only be called once. +func (s *Service) initPlatform() error { + if s.platform != nil { + return nil + } + epoller, err := console.NewEpoller() + if err != nil { + return errors.Wrap(err, "failed to initialize epoller") + } + s.platform = &linuxPlatform{ + epoller: epoller, + } + go epoller.Wait() + return nil +} diff --git a/pkg/shim/v1/shim/service.go b/pkg/shim/v1/shim/service.go new file mode 100644 index 000000000..27e915c14 --- /dev/null +++ b/pkg/shim/v1/shim/service.go @@ -0,0 +1,579 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2019 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
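+
+// This file implements the containerd shim v1 task service (Create, Start,
+// Exec, Kill, Wait, ...) on top of runsc, forwarding process exit events back
+// to containerd.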
+ +package shim + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/containerd/console" + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/api/types/task" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/runtime" + "github.com/containerd/containerd/runtime/linux/runctypes" + rproc "github.com/containerd/containerd/runtime/proc" + "github.com/containerd/containerd/runtime/v1/shim" + shimapi "github.com/containerd/containerd/runtime/v1/shim/v1" + "github.com/containerd/typeurl" + ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" + "github.com/google/gvisor-containerd-shim/pkg/v1/proc" + "github.com/google/gvisor-containerd-shim/pkg/v1/utils" +) + +var ( + empty = &ptypes.Empty{} + bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32<<10) + return &buffer + }, + } +) + +// Config contains shim specific configuration +type Config struct { + Path string + Namespace string + WorkDir string + RuntimeRoot string + RunscConfig map[string]string +} + +// NewService returns a new shim service that can be used via GRPC +func NewService(config Config, publisher events.Publisher) (*Service, error) { + if config.Namespace == "" { + return nil, fmt.Errorf("shim namespace cannot be empty") + } + ctx := namespaces.WithNamespace(context.Background(), config.Namespace) + ctx = log.WithLogger(ctx, logrus.WithFields(logrus.Fields{ + "namespace": config.Namespace, + "path": config.Path, + "pid": os.Getpid(), + })) + s := &Service{ + config: config, + context: ctx, + processes: make(map[string]rproc.Process), + events: make(chan interface{}, 128), + ec: proc.ExitCh, + } + go s.processExits() + if err := s.initPlatform(); err != nil { + return nil, errors.Wrap(err, "failed to initialized platform behavior") + } + go s.forward(publisher) + return s, nil +} + +// Service is the shim implementation of a remote shim over GRPC +type Service struct { + mu sync.Mutex + + config Config + context context.Context + processes map[string]rproc.Process + events chan interface{} + platform rproc.Platform + ec chan proc.Exit + + // Filled by Create() + id string + bundle string +} + +// Create a new initial process and container with the underlying OCI runtime +func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *shimapi.CreateTaskResponse, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + var mounts []proc.Mount + for _, m := range r.Rootfs { + mounts = append(mounts, proc.Mount{ + Type: m.Type, + Source: m.Source, + Target: m.Target, + Options: m.Options, + }) + } + + rootfs := filepath.Join(r.Bundle, "rootfs") + if err := os.Mkdir(rootfs, 0711); err != nil && !os.IsExist(err) { + return nil, err + } + + config := &proc.CreateConfig{ + ID: r.ID, + Bundle: r.Bundle, + Runtime: r.Runtime, + Rootfs: mounts, + Terminal: r.Terminal, + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Options: r.Options, + } + defer func() { + if err != nil { + if err2 := mount.UnmountAll(rootfs, 0); err2 != nil { + log.G(ctx).WithError(err2).Warn("Failed to cleanup rootfs mount") + } + } + }() + for _, rm := range mounts { + m := 
&mount.Mount{ + Type: rm.Type, + Source: rm.Source, + Options: rm.Options, + } + if err := m.Mount(rootfs); err != nil { + return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m) + } + } + process, err := newInit( + ctx, + s.config.Path, + s.config.WorkDir, + s.config.RuntimeRoot, + s.config.Namespace, + s.config.RunscConfig, + s.platform, + config, + ) + if err := process.Create(ctx, config); err != nil { + return nil, errdefs.ToGRPC(err) + } + // save the main task id and bundle to the shim for additional requests + s.id = r.ID + s.bundle = r.Bundle + pid := process.Pid() + s.processes[r.ID] = process + return &shimapi.CreateTaskResponse{ + Pid: uint32(pid), + }, nil +} + +// Start a process +func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi.StartResponse, error) { + p, err := s.getExecProcess(r.ID) + if err != nil { + return nil, err + } + if err := p.Start(ctx); err != nil { + return nil, err + } + return &shimapi.StartResponse{ + ID: p.ID(), + Pid: uint32(p.Pid()), + }, nil +} + +// Delete the initial process and container +func (s *Service) Delete(ctx context.Context, r *ptypes.Empty) (*shimapi.DeleteResponse, error) { + p, err := s.getInitProcess() + if err != nil { + return nil, err + } + if err := p.Delete(ctx); err != nil { + return nil, err + } + s.mu.Lock() + delete(s.processes, s.id) + s.mu.Unlock() + s.platform.Close() + return &shimapi.DeleteResponse{ + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + Pid: uint32(p.Pid()), + }, nil +} + +// DeleteProcess deletes an exec'd process +func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessRequest) (*shimapi.DeleteResponse, error) { + if r.ID == s.id { + return nil, status.Errorf(codes.InvalidArgument, "cannot delete init process with DeleteProcess") + } + p, err := s.getExecProcess(r.ID) + if err != nil { + return nil, err + } + if err := p.Delete(ctx); err != nil { + return nil, err + } + s.mu.Lock() + delete(s.processes, r.ID) + s.mu.Unlock() + return &shimapi.DeleteResponse{ + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + Pid: uint32(p.Pid()), + }, nil +} + +// Exec an additional process inside the container +func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*ptypes.Empty, error) { + s.mu.Lock() + + if p := s.processes[r.ID]; p != nil { + s.mu.Unlock() + return nil, errdefs.ToGRPCf(errdefs.ErrAlreadyExists, "id %s", r.ID) + } + + p := s.processes[s.id] + s.mu.Unlock() + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + + process, err := p.(*proc.Init).Exec(ctx, s.config.Path, &proc.ExecConfig{ + ID: r.ID, + Terminal: r.Terminal, + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Spec: r.Spec, + }) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + s.mu.Lock() + s.processes[r.ID] = process + s.mu.Unlock() + return empty, nil +} + +// ResizePty of a process +func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*ptypes.Empty, error) { + if r.ID == "" { + return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "id not provided") + } + ws := console.WinSize{ + Width: uint16(r.Width), + Height: uint16(r.Height), + } + p, err := s.getExecProcess(r.ID) + if err != nil { + return nil, err + } + if err := p.Resize(ws); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +// State returns runtime state information for a process +func (s *Service) State(ctx context.Context, r 
*shimapi.StateRequest) (*shimapi.StateResponse, error) { + p, err := s.getExecProcess(r.ID) + if err != nil { + return nil, err + } + st, err := p.Status(ctx) + if err != nil { + return nil, err + } + status := task.StatusUnknown + switch st { + case "created": + status = task.StatusCreated + case "running": + status = task.StatusRunning + case "stopped": + status = task.StatusStopped + } + sio := p.Stdio() + return &shimapi.StateResponse{ + ID: p.ID(), + Bundle: s.bundle, + Pid: uint32(p.Pid()), + Status: status, + Stdin: sio.Stdin, + Stdout: sio.Stdout, + Stderr: sio.Stderr, + Terminal: sio.Terminal, + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + }, nil +} + +// Pause the container +func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { + return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) +} + +// Resume the container +func (s *Service) Resume(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { + return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) +} + +// Kill a process with the provided signal +func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*ptypes.Empty, error) { + if r.ID == "" { + p, err := s.getInitProcess() + if err != nil { + return nil, err + } + if err := p.Kill(ctx, r.Signal, r.All); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil + } + + p, err := s.getExecProcess(r.ID) + if err != nil { + return nil, err + } + if err := p.Kill(ctx, r.Signal, r.All); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +// ListPids returns all pids inside the container +func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*shimapi.ListPidsResponse, error) { + pids, err := s.getContainerPids(ctx, r.ID) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + var processes []*task.ProcessInfo + for _, pid := range pids { + pInfo := task.ProcessInfo{ + Pid: pid, + } + for _, p := range s.processes { + if p.Pid() == int(pid) { + d := &runctypes.ProcessDetails{ + ExecID: p.ID(), + } + a, err := typeurl.MarshalAny(d) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) + } + pInfo.Info = a + break + } + } + processes = append(processes, &pInfo) + } + return &shimapi.ListPidsResponse{ + Processes: processes, + }, nil +} + +// CloseIO of a process +func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*ptypes.Empty, error) { + p, err := s.getExecProcess(r.ID) + if err != nil { + return nil, err + } + if stdin := p.Stdin(); stdin != nil { + if err := stdin.Close(); err != nil { + return nil, errors.Wrap(err, "close stdin") + } + } + return empty, nil +} + +// Checkpoint the container +func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*ptypes.Empty, error) { + return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) +} + +// ShimInfo returns shim information such as the shim's pid +func (s *Service) ShimInfo(ctx context.Context, r *ptypes.Empty) (*shimapi.ShimInfoResponse, error) { + return &shimapi.ShimInfoResponse{ + ShimPid: uint32(os.Getpid()), + }, nil +} + +// Update a running container +func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*ptypes.Empty, error) { + return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) +} + +// Wait for a process to exit +func (s *Service) Wait(ctx context.Context, r *shimapi.WaitRequest) (*shimapi.WaitResponse, error) { + p, err := s.getExecProcess(r.ID) + if err != nil { + return 
nil, err + } + p.Wait() + + return &shimapi.WaitResponse{ + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + }, nil +} + +func (s *Service) processExits() { + for e := range s.ec { + s.checkProcesses(e) + } +} + +func (s *Service) allProcesses() []rproc.Process { + s.mu.Lock() + defer s.mu.Unlock() + + res := make([]rproc.Process, 0, len(s.processes)) + for _, p := range s.processes { + res = append(res, p) + } + return res +} + +func (s *Service) checkProcesses(e proc.Exit) { + for _, p := range s.allProcesses() { + if p.ID() == e.ID { + if ip, ok := p.(*proc.Init); ok { + // Ensure all children are killed + if err := ip.KillAll(s.context); err != nil { + log.G(s.context).WithError(err).WithField("id", ip.ID()). + Error("failed to kill init's children") + } + } + p.SetExited(e.Status) + s.events <- &eventstypes.TaskExit{ + ContainerID: s.id, + ID: p.ID(), + Pid: uint32(p.Pid()), + ExitStatus: uint32(e.Status), + ExitedAt: p.ExitedAt(), + } + return + } + } +} + +func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, error) { + p, err := s.getInitProcess() + if err != nil { + return nil, err + } + + ps, err := p.(*proc.Init).Runtime().Ps(ctx, id) + if err != nil { + return nil, err + } + pids := make([]uint32, 0, len(ps)) + for _, pid := range ps { + pids = append(pids, uint32(pid)) + } + return pids, nil +} + +func (s *Service) forward(publisher events.Publisher) { + for e := range s.events { + if err := publisher.Publish(s.context, getTopic(s.context, e), e); err != nil { + log.G(s.context).WithError(err).Error("post event") + } + } +} + +// getInitProcess returns initial process +func (s *Service) getInitProcess() (rproc.Process, error) { + s.mu.Lock() + defer s.mu.Unlock() + p := s.processes[s.id] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + return p, nil +} + +// getExecProcess returns exec process +func (s *Service) getExecProcess(id string) (rproc.Process, error) { + s.mu.Lock() + defer s.mu.Unlock() + p := s.processes[id] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process %s does not exist", id) + } + return p, nil +} + +func getTopic(ctx context.Context, e interface{}) string { + switch e.(type) { + case *eventstypes.TaskCreate: + return runtime.TaskCreateEventTopic + case *eventstypes.TaskStart: + return runtime.TaskStartEventTopic + case *eventstypes.TaskOOM: + return runtime.TaskOOMEventTopic + case *eventstypes.TaskExit: + return runtime.TaskExitEventTopic + case *eventstypes.TaskDelete: + return runtime.TaskDeleteEventTopic + case *eventstypes.TaskExecAdded: + return runtime.TaskExecAddedEventTopic + case *eventstypes.TaskExecStarted: + return runtime.TaskExecStartedEventTopic + default: + logrus.Warnf("no topic for type %#v", e) + } + return runtime.TaskUnknownTopic +} + +func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace string, config map[string]string, platform rproc.Platform, r *proc.CreateConfig) (*proc.Init, error) { + var options runctypes.CreateOptions + if r.Options != nil { + v, err := typeurl.UnmarshalAny(r.Options) + if err != nil { + return nil, err + } + options = *v.(*runctypes.CreateOptions) + } + + spec, err := utils.ReadSpec(r.Bundle) + if err != nil { + return nil, errors.Wrap(err, "read oci spec") + } + if err := utils.UpdateVolumeAnnotations(r.Bundle, spec); err != nil { + return nil, errors.Wrap(err, "update volume annotations") + } + + runsc.FormatLogPath(r.ID, config) + rootfs := filepath.Join(path, 
"rootfs") + runtime := proc.NewRunsc(runtimeRoot, path, namespace, r.Runtime, config) + p := proc.New(r.ID, runtime, rproc.Stdio{ + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Terminal: r.Terminal, + }) + p.Bundle = r.Bundle + p.Platform = platform + p.Rootfs = rootfs + p.WorkDir = workDir + p.IoUID = int(options.IoUid) + p.IoGID = int(options.IoGid) + p.Sandbox = utils.IsSandbox(spec) + p.UserLog = utils.UserLogPath(spec) + p.Monitor = shim.Default + return p, nil +} diff --git a/pkg/shim/v1/utils/utils.go b/pkg/shim/v1/utils/utils.go new file mode 100644 index 000000000..7a400af1c --- /dev/null +++ b/pkg/shim/v1/utils/utils.go @@ -0,0 +1,57 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containerd/cri/pkg/annotations" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// ReadSpec reads OCI spec from the bundle directory. +func ReadSpec(bundle string) (*specs.Spec, error) { + f, err := os.Open(filepath.Join(bundle, "config.json")) + if err != nil { + return nil, err + } + b, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + var spec specs.Spec + if err := json.Unmarshal(b, &spec); err != nil { + return nil, err + } + return &spec, nil +} + +// IsSandbox checks whether a container is a sandbox container. +func IsSandbox(spec *specs.Spec) bool { + t, ok := spec.Annotations[annotations.ContainerType] + return !ok || t == annotations.ContainerTypeSandbox +} + +// UserLogPath gets user log path from OCI annotation. +func UserLogPath(spec *specs.Spec) string { + sandboxLogDir := spec.Annotations[annotations.SandboxLogDir] + if sandboxLogDir == "" { + return "" + } + return filepath.Join(sandboxLogDir, "gvisor.log") +} diff --git a/pkg/shim/v1/utils/volumes.go b/pkg/shim/v1/utils/volumes.go new file mode 100644 index 000000000..7323e7245 --- /dev/null +++ b/pkg/shim/v1/utils/volumes.go @@ -0,0 +1,156 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "strings" + + "github.com/containerd/cri/pkg/annotations" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const volumeKeyPrefix = "dev.gvisor.spec.mount." 
+ +var kubeletPodsDir = "/var/lib/kubelet/pods" + +// volumeName gets volume name from volume annotation key, example: +// dev.gvisor.spec.mount.NAME.share +func volumeName(k string) string { + return strings.SplitN(strings.TrimPrefix(k, volumeKeyPrefix), ".", 2)[0] +} + +// volumeFieldName gets volume field name from volume annotation key, example: +// `type` is the field of dev.gvisor.spec.mount.NAME.type +func volumeFieldName(k string) string { + parts := strings.Split(strings.TrimPrefix(k, volumeKeyPrefix), ".") + return parts[len(parts)-1] +} + +// podUID gets pod UID from the pod log path. +func podUID(s *specs.Spec) (string, error) { + sandboxLogDir := s.Annotations[annotations.SandboxLogDir] + if sandboxLogDir == "" { + return "", errors.New("no sandbox log path annotation") + } + fields := strings.Split(filepath.Base(sandboxLogDir), "_") + switch len(fields) { + case 1: // This is the old CRI logging path + return fields[0], nil + case 3: // This is the new CRI logging path + return fields[2], nil + } + return "", errors.Errorf("unexpected sandbox log path %q", sandboxLogDir) +} + +// isVolumeKey checks whether an annotation key is for volume. +func isVolumeKey(k string) bool { + return strings.HasPrefix(k, volumeKeyPrefix) +} + +// volumeSourceKey constructs the annotation key for volume source. +func volumeSourceKey(volume string) string { + return volumeKeyPrefix + volume + ".source" +} + +// volumePath searches the volume path in the kubelet pod directory. +func volumePath(volume, uid string) (string, error) { + // TODO: Support subpath when gvisor supports pod volume bind mount. + volumeSearchPath := fmt.Sprintf("%s/%s/volumes/*/%s", kubeletPodsDir, uid, volume) + dirs, err := filepath.Glob(volumeSearchPath) + if err != nil { + return "", err + } + if len(dirs) != 1 { + return "", errors.Errorf("unexpected matched volume list %v", dirs) + } + return dirs[0], nil +} + +// isVolumePath checks whether a string is the volume path. +func isVolumePath(volume, path string) (bool, error) { + // TODO: Support subpath when gvisor supports pod volume bind mount. + volumeSearchPath := fmt.Sprintf("%s/*/volumes/*/%s", kubeletPodsDir, volume) + return filepath.Match(volumeSearchPath, path) +} + +// UpdateVolumeAnnotations add necessary OCI annotations for gvisor +// volume optimization. +func UpdateVolumeAnnotations(bundle string, s *specs.Spec) error { + var ( + uid string + err error + ) + if IsSandbox(s) { + uid, err = podUID(s) + if err != nil { + // Skip if we can't get pod UID, because this doesn't work + // for containerd 1.1. + logrus.WithError(err).Error("Can't get pod uid") + return nil + } + } + var updated bool + for k, v := range s.Annotations { + if !isVolumeKey(k) { + continue + } + if volumeFieldName(k) != "type" { + continue + } + volume := volumeName(k) + if uid != "" { + // This is a sandbox + path, err := volumePath(volume, uid) + if err != nil { + return errors.Wrapf(err, "get volume path for %q", volume) + } + s.Annotations[volumeSourceKey(volume)] = path + updated = true + } else { + // This is a container + for i := range s.Mounts { + // An error is returned for sandbox if source annotation + // is not successfully applied, so it is guaranteed that + // the source annotation for sandbox has already been + // successfully applied at this point. + // The volume name is unique inside a pod, so matching without + // podUID is fine here. + // TODO: Pass podUID down to shim for containers to do + // more accurate matching. 
+ if yes, _ := isVolumePath(volume, s.Mounts[i].Source); yes { + // gVisor requires the container mount type to match + // sandbox mount type. + s.Mounts[i].Type = v + updated = true + } + } + } + } + if !updated { + return nil + } + // Update bundle + b, err := json.Marshal(s) + if err != nil { + return err + } + return ioutil.WriteFile(filepath.Join(bundle, "config.json"), b, 0666) +} diff --git a/pkg/shim/v1/utils/volumes_test.go b/pkg/shim/v1/utils/volumes_test.go new file mode 100644 index 000000000..4b2639545 --- /dev/null +++ b/pkg/shim/v1/utils/volumes_test.go @@ -0,0 +1,309 @@ +// Copyright 2019 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/containerd/cri/pkg/annotations" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func TestUpdateVolumeAnnotations(t *testing.T) { + dir, err := ioutil.TempDir("", "test-update-volume-annotations") + if err != nil { + t.Fatalf("create tempdir: %v", err) + } + defer os.RemoveAll(dir) + kubeletPodsDir = dir + + const ( + testPodUID = "testuid" + testVolumeName = "testvolume" + testLogDirPath = "/var/log/pods/testns_testname_" + testPodUID + testLegacyLogDirPath = "/var/log/pods/" + testPodUID + ) + testVolumePath := fmt.Sprintf("%s/%s/volumes/kubernetes.io~empty-dir/%s", dir, testPodUID, testVolumeName) + + if err := os.MkdirAll(testVolumePath, 0755); err != nil { + t.Fatalf("Create test volume: %v", err) + } + + for _, test := range []struct { + desc string + spec *specs.Spec + expected *specs.Spec + expectErr bool + expectUpdate bool + }{ + { + desc: "volume annotations for sandbox", + spec: &specs.Spec{ + Annotations: map[string]string{ + annotations.SandboxLogDir: testLogDirPath, + annotations.ContainerType: annotations.ContainerTypeSandbox, + "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", + "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", + "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", + }, + }, + expected: &specs.Spec{ + Annotations: map[string]string{ + annotations.SandboxLogDir: testLogDirPath, + annotations.ContainerType: annotations.ContainerTypeSandbox, + "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", + "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", + "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", + "dev.gvisor.spec.mount." + testVolumeName + ".source": testVolumePath, + }, + }, + expectUpdate: true, + }, + { + desc: "volume annotations for sandbox with legacy log path", + spec: &specs.Spec{ + Annotations: map[string]string{ + annotations.SandboxLogDir: testLegacyLogDirPath, + annotations.ContainerType: annotations.ContainerTypeSandbox, + "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", + "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", + "dev.gvisor.spec.mount." 
+ testVolumeName + ".options": "ro", + }, + }, + expected: &specs.Spec{ + Annotations: map[string]string{ + annotations.SandboxLogDir: testLegacyLogDirPath, + annotations.ContainerType: annotations.ContainerTypeSandbox, + "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", + "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", + "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", + "dev.gvisor.spec.mount." + testVolumeName + ".source": testVolumePath, + }, + }, + expectUpdate: true, + }, + { + desc: "tmpfs: volume annotations for container", + spec: &specs.Spec{ + Mounts: []specs.Mount{ + { + Destination: "/test", + Type: "bind", + Source: testVolumePath, + Options: []string{"ro"}, + }, + { + Destination: "/random", + Type: "bind", + Source: "/random", + Options: []string{"ro"}, + }, + }, + Annotations: map[string]string{ + annotations.ContainerType: annotations.ContainerTypeContainer, + "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", + "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", + "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", + }, + }, + expected: &specs.Spec{ + Mounts: []specs.Mount{ + { + Destination: "/test", + Type: "tmpfs", + Source: testVolumePath, + Options: []string{"ro"}, + }, + { + Destination: "/random", + Type: "bind", + Source: "/random", + Options: []string{"ro"}, + }, + }, + Annotations: map[string]string{ + annotations.ContainerType: annotations.ContainerTypeContainer, + "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", + "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", + "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", + }, + }, + expectUpdate: true, + }, + { + desc: "bind: volume annotations for container", + spec: &specs.Spec{ + Mounts: []specs.Mount{ + { + Destination: "/test", + Type: "bind", + Source: testVolumePath, + Options: []string{"ro"}, + }, + }, + Annotations: map[string]string{ + annotations.ContainerType: annotations.ContainerTypeContainer, + "dev.gvisor.spec.mount." + testVolumeName + ".share": "container", + "dev.gvisor.spec.mount." + testVolumeName + ".type": "bind", + "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", + }, + }, + expected: &specs.Spec{ + Mounts: []specs.Mount{ + { + Destination: "/test", + Type: "bind", + Source: testVolumePath, + Options: []string{"ro"}, + }, + }, + Annotations: map[string]string{ + annotations.ContainerType: annotations.ContainerTypeContainer, + "dev.gvisor.spec.mount." + testVolumeName + ".share": "container", + "dev.gvisor.spec.mount." + testVolumeName + ".type": "bind", + "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", + }, + }, + expectUpdate: true, + }, + { + desc: "should not return error without pod log directory", + spec: &specs.Spec{ + Annotations: map[string]string{ + annotations.ContainerType: annotations.ContainerTypeSandbox, + "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", + "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", + "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", + }, + }, + expected: &specs.Spec{ + Annotations: map[string]string{ + annotations.ContainerType: annotations.ContainerTypeSandbox, + "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", + "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", + "dev.gvisor.spec.mount." 
+ testVolumeName + ".options": "ro", + }, + }, + }, + { + desc: "should return error if volume path does not exist", + spec: &specs.Spec{ + Annotations: map[string]string{ + annotations.SandboxLogDir: testLogDirPath, + annotations.ContainerType: annotations.ContainerTypeSandbox, + "dev.gvisor.spec.mount.notexist.share": "pod", + "dev.gvisor.spec.mount.notexist.type": "tmpfs", + "dev.gvisor.spec.mount.notexist.options": "ro", + }, + }, + expectErr: true, + }, + { + desc: "no volume annotations for sandbox", + spec: &specs.Spec{ + Annotations: map[string]string{ + annotations.SandboxLogDir: testLogDirPath, + annotations.ContainerType: annotations.ContainerTypeSandbox, + }, + }, + expected: &specs.Spec{ + Annotations: map[string]string{ + annotations.SandboxLogDir: testLogDirPath, + annotations.ContainerType: annotations.ContainerTypeSandbox, + }, + }, + }, + { + desc: "no volume annotations for container", + spec: &specs.Spec{ + Mounts: []specs.Mount{ + { + Destination: "/test", + Type: "bind", + Source: "/test", + Options: []string{"ro"}, + }, + { + Destination: "/random", + Type: "bind", + Source: "/random", + Options: []string{"ro"}, + }, + }, + Annotations: map[string]string{ + annotations.ContainerType: annotations.ContainerTypeContainer, + }, + }, + expected: &specs.Spec{ + Mounts: []specs.Mount{ + { + Destination: "/test", + Type: "bind", + Source: "/test", + Options: []string{"ro"}, + }, + { + Destination: "/random", + Type: "bind", + Source: "/random", + Options: []string{"ro"}, + }, + }, + Annotations: map[string]string{ + annotations.ContainerType: annotations.ContainerTypeContainer, + }, + }, + }, + } { + t.Run(test.desc, func(t *testing.T) { + bundle, err := ioutil.TempDir(dir, "test-bundle") + if err != nil { + t.Fatalf("Create test bundle: %v", err) + } + err = UpdateVolumeAnnotations(bundle, test.spec) + if test.expectErr { + if err == nil { + t.Fatal("Expected error, but got nil") + } + return + } + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expected, test.spec) { + t.Fatalf("Expected %+v, got %+v", test.expected, test.spec) + } + if test.expectUpdate { + b, err := ioutil.ReadFile(filepath.Join(bundle, "config.json")) + if err != nil { + t.Fatalf("Read spec from bundle: %v", err) + } + var spec specs.Spec + if err := json.Unmarshal(b, &spec); err != nil { + t.Fatalf("Unmarshal spec: %v", err) + } + if !reflect.DeepEqual(test.expected, &spec) { + t.Fatalf("Expected %+v, got %+v", test.expected, &spec) + } + } + }) + } +} diff --git a/pkg/shim/v2/epoll.go b/pkg/shim/v2/epoll.go new file mode 100644 index 000000000..57a2c5452 --- /dev/null +++ b/pkg/shim/v2/epoll.go @@ -0,0 +1,128 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
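+
+// This file registers sandbox cgroup OOM eventfds with an epoll loop and
+// republishes OOM notifications to containerd as TaskOOM events.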
+ +// +build linux + +package v2 + +import ( + "context" + "sync" + + "github.com/containerd/cgroups" + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/runtime" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +func newOOMEpoller(publisher events.Publisher) (*epoller, error) { + fd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if err != nil { + return nil, err + } + return &epoller{ + fd: fd, + publisher: publisher, + set: make(map[uintptr]*item), + }, nil +} + +type epoller struct { + mu sync.Mutex + + fd int + publisher events.Publisher + set map[uintptr]*item +} + +type item struct { + id string + cg cgroups.Cgroup +} + +func (e *epoller) Close() error { + return unix.Close(e.fd) +} + +func (e *epoller) run(ctx context.Context) { + var events [128]unix.EpollEvent + for { + select { + case <-ctx.Done(): + e.Close() + return + default: + n, err := unix.EpollWait(e.fd, events[:], -1) + if err != nil { + if err == unix.EINTR { + continue + } + logrus.WithError(err).Error("cgroups: epoll wait") + } + for i := 0; i < n; i++ { + e.process(ctx, uintptr(events[i].Fd)) + } + } + } +} + +func (e *epoller) add(id string, cg cgroups.Cgroup) error { + e.mu.Lock() + defer e.mu.Unlock() + fd, err := cg.OOMEventFD() + if err != nil { + return err + } + e.set[fd] = &item{ + id: id, + cg: cg, + } + event := unix.EpollEvent{ + Fd: int32(fd), + Events: unix.EPOLLHUP | unix.EPOLLIN | unix.EPOLLERR, + } + return unix.EpollCtl(e.fd, unix.EPOLL_CTL_ADD, int(fd), &event) +} + +func (e *epoller) process(ctx context.Context, fd uintptr) { + flush(fd) + e.mu.Lock() + i, ok := e.set[fd] + if !ok { + e.mu.Unlock() + return + } + e.mu.Unlock() + if i.cg.State() == cgroups.Deleted { + e.mu.Lock() + delete(e.set, fd) + e.mu.Unlock() + unix.Close(int(fd)) + return + } + if err := e.publisher.Publish(ctx, runtime.TaskOOMEventTopic, &eventstypes.TaskOOM{ + ContainerID: i.id, + }); err != nil { + logrus.WithError(err).Error("publish OOM event") + } +} + +func flush(fd uintptr) error { + var buf [8]byte + _, err := unix.Read(int(fd), buf[:]) + return err +} diff --git a/pkg/shim/v2/options/options.go b/pkg/shim/v2/options/options.go new file mode 100644 index 000000000..de09f2f79 --- /dev/null +++ b/pkg/shim/v2/options/options.go @@ -0,0 +1,33 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package options + +const OptionType = "io.containerd.runsc.v1.options" + +// Options is runtime options for io.containerd.runsc.v1. +type Options struct { + // ShimCgroup is the cgroup the shim should be in. + ShimCgroup string `toml:"shim_cgroup"` + // IoUid is the I/O's pipes uid. + IoUid uint32 `toml:"io_uid"` + // IoUid is the I/O's pipes gid. + IoGid uint32 `toml:"io_gid"` + // BinaryName is the binary name of the runsc binary. + BinaryName string `toml:"binary_name"` + // Root is the runsc root directory. 
+ Root string `toml:"root"` + // RunscConfig is a key/value map of all runsc flags. + RunscConfig map[string]string `toml:"runsc_config"` +} diff --git a/pkg/shim/v2/service.go b/pkg/shim/v2/service.go new file mode 100644 index 000000000..3d226e29c --- /dev/null +++ b/pkg/shim/v2/service.go @@ -0,0 +1,821 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2 + +import ( + "context" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sync" + "syscall" + "time" + + "github.com/BurntSushi/toml" + "github.com/containerd/cgroups" + "github.com/containerd/console" + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/api/types/task" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/runtime" + "github.com/containerd/containerd/runtime/linux/runctypes" + rproc "github.com/containerd/containerd/runtime/proc" + "github.com/containerd/containerd/runtime/v2/shim" + taskAPI "github.com/containerd/containerd/runtime/v2/task" + runtimeoptions "github.com/containerd/cri/pkg/api/runtimeoptions/v1" + "github.com/containerd/typeurl" + ptypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" + + runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" + "github.com/google/gvisor-containerd-shim/pkg/v1/proc" + "github.com/google/gvisor-containerd-shim/pkg/v1/utils" + "github.com/google/gvisor-containerd-shim/pkg/v2/options" +) + +var ( + empty = &ptypes.Empty{} + bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32<<10) + return &buffer + }, + } +) + +var _ = (taskAPI.TaskService)(&service{}) + +// configFile is the default config file name. For containerd 1.2, +// we assume that a config.toml should exist in the runtime root. 
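+// When the file is present it is decoded into options.Options during Create.
+// A minimal, illustrative (assumed) config might look like:
+//
+//	binary_name = "runsc"
+//	root = "/run/containerd/runsc"
+//	[runsc_config]
+//	  debug = "true"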
+const configFile = "config.toml" + +// New returns a new shim service that can be used via GRPC +func New(ctx context.Context, id string, publisher events.Publisher) (shim.Shim, error) { + ep, err := newOOMEpoller(publisher) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(ctx) + go ep.run(ctx) + s := &service{ + id: id, + context: ctx, + processes: make(map[string]rproc.Process), + events: make(chan interface{}, 128), + ec: proc.ExitCh, + oomPoller: ep, + cancel: cancel, + } + go s.processExits() + runsc.Monitor = shim.Default + if err := s.initPlatform(); err != nil { + cancel() + return nil, errors.Wrap(err, "failed to initialized platform behavior") + } + go s.forward(publisher) + return s, nil +} + +// service is the shim implementation of a remote shim over GRPC +type service struct { + mu sync.Mutex + + context context.Context + task rproc.Process + processes map[string]rproc.Process + events chan interface{} + platform rproc.Platform + opts options.Options + ec chan proc.Exit + oomPoller *epoller + + id string + bundle string + cancel func() +} + +func newCommand(ctx context.Context, containerdBinary, containerdAddress string) (*exec.Cmd, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + self, err := os.Executable() + if err != nil { + return nil, err + } + cwd, err := os.Getwd() + if err != nil { + return nil, err + } + args := []string{ + "-namespace", ns, + "-address", containerdAddress, + "-publish-binary", containerdBinary, + } + cmd := exec.Command(self, args...) + cmd.Dir = cwd + cmd.Env = append(os.Environ(), "GOMAXPROCS=2") + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + return cmd, nil +} + +func (s *service) StartShim(ctx context.Context, id, containerdBinary, containerdAddress string) (string, error) { + cmd, err := newCommand(ctx, containerdBinary, containerdAddress) + if err != nil { + return "", err + } + address, err := shim.SocketAddress(ctx, id) + if err != nil { + return "", err + } + socket, err := shim.NewSocket(address) + if err != nil { + return "", err + } + defer socket.Close() + f, err := socket.File() + if err != nil { + return "", err + } + defer f.Close() + + cmd.ExtraFiles = append(cmd.ExtraFiles, f) + + if err := cmd.Start(); err != nil { + return "", err + } + defer func() { + if err != nil { + cmd.Process.Kill() + } + }() + // make sure to wait after start + go cmd.Wait() + if err := shim.WritePidFile("shim.pid", cmd.Process.Pid); err != nil { + return "", err + } + if err := shim.WriteAddress("address", address); err != nil { + return "", err + } + if err := shim.SetScore(cmd.Process.Pid); err != nil { + return "", errors.Wrap(err, "failed to set OOM Score on shim") + } + return address, nil +} + +func (s *service) Cleanup(ctx context.Context) (*taskAPI.DeleteResponse, error) { + path, err := os.Getwd() + if err != nil { + return nil, err + } + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + runtime, err := s.readRuntime(path) + if err != nil { + return nil, err + } + r := proc.NewRunsc(s.opts.Root, path, ns, runtime, nil) + if err := r.Delete(ctx, s.id, &runsc.DeleteOpts{ + Force: true, + }); err != nil { + logrus.WithError(err).Warn("failed to remove runc container") + } + if err := mount.UnmountAll(filepath.Join(path, "rootfs"), 0); err != nil { + logrus.WithError(err).Warn("failed to cleanup rootfs mount") + } + return &taskAPI.DeleteResponse{ + ExitedAt: time.Now(), + ExitStatus: 128 + uint32(unix.SIGKILL), + }, 
nil +} + +func (s *service) readRuntime(path string) (string, error) { + data, err := ioutil.ReadFile(filepath.Join(path, "runtime")) + if err != nil { + return "", err + } + return string(data), nil +} + +func (s *service) writeRuntime(path, runtime string) error { + return ioutil.WriteFile(filepath.Join(path, "runtime"), []byte(runtime), 0600) +} + +// Create a new initial process and container with the underlying OCI runtime +func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *taskAPI.CreateTaskResponse, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, errors.Wrap(err, "create namespace") + } + + // Read from root for now. + var opts options.Options + if r.Options != nil { + v, err := typeurl.UnmarshalAny(r.Options) + if err != nil { + return nil, err + } + var path string + switch o := v.(type) { + case *runctypes.CreateOptions: // containerd 1.2.x + opts.IoUid = o.IoUid + opts.IoGid = o.IoGid + opts.ShimCgroup = o.ShimCgroup + case *runctypes.RuncOptions: // containerd 1.2.x + root := proc.RunscRoot + if o.RuntimeRoot != "" { + root = o.RuntimeRoot + } + + opts.BinaryName = o.Runtime + + path = filepath.Join(root, configFile) + if _, err := os.Stat(path); err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrapf(err, "stat config file %q", path) + } + // A config file in runtime root is not required. + path = "" + } + case *runtimeoptions.Options: // containerd 1.3.x+ + if o.ConfigPath == "" { + break + } + if o.TypeUrl != options.OptionType { + return nil, errors.Errorf("unsupported runtimeoptions %q", o.TypeUrl) + } + path = o.ConfigPath + default: + return nil, errors.Errorf("unsupported option type %q", r.Options.TypeUrl) + } + if path != "" { + if _, err = toml.DecodeFile(path, &opts); err != nil { + return nil, errors.Wrapf(err, "decode config file %q", path) + } + } + } + + var mounts []proc.Mount + for _, m := range r.Rootfs { + mounts = append(mounts, proc.Mount{ + Type: m.Type, + Source: m.Source, + Target: m.Target, + Options: m.Options, + }) + } + + rootfs := filepath.Join(r.Bundle, "rootfs") + if err := os.Mkdir(rootfs, 0711); err != nil && !os.IsExist(err) { + return nil, err + } + + config := &proc.CreateConfig{ + ID: r.ID, + Bundle: r.Bundle, + Runtime: opts.BinaryName, + Rootfs: mounts, + Terminal: r.Terminal, + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Options: r.Options, + } + if err := s.writeRuntime(r.Bundle, opts.BinaryName); err != nil { + return nil, err + } + defer func() { + if err != nil { + if err2 := mount.UnmountAll(rootfs, 0); err2 != nil { + logrus.WithError(err2).Warn("failed to cleanup rootfs mount") + } + } + }() + for _, rm := range mounts { + m := &mount.Mount{ + Type: rm.Type, + Source: rm.Source, + Options: rm.Options, + } + if err := m.Mount(rootfs); err != nil { + return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m) + } + } + process, err := newInit( + ctx, + r.Bundle, + filepath.Join(r.Bundle, "work"), + ns, + s.platform, + config, + &opts, + rootfs, + ) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + if err := process.Create(ctx, config); err != nil { + return nil, errdefs.ToGRPC(err) + } + // save the main task id and bundle to the shim for additional requests + s.id = r.ID + s.bundle = r.Bundle + + // Set up OOM notification on the sandbox's cgroup. This is done on sandbox + // create since the sandbox process will be created here. 
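+	// The cgroup is looked up from the sandbox pid and registered with the
+	// shim's OOM epoller (see epoll.go); the pid > 0 guard skips registration
+	// when there is no sandbox process to watch.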
+ pid := process.Pid() + if pid > 0 { + cg, err := cgroups.Load(cgroups.V1, cgroups.PidPath(pid)) + if err != nil { + return nil, errors.Wrapf(err, "loading cgroup for %d", pid) + } + if err := s.oomPoller.add(s.id, cg); err != nil { + return nil, errors.Wrapf(err, "add cg to OOM monitor") + } + } + s.task = process + s.opts = opts + return &taskAPI.CreateTaskResponse{ + Pid: uint32(process.Pid()), + }, nil + +} + +// Start a process +func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (*taskAPI.StartResponse, error) { + p, err := s.getProcess(r.ExecID) + if err != nil { + return nil, err + } + if err := p.Start(ctx); err != nil { + return nil, err + } + // TODO: Set the cgroup and oom notifications on restore. + // https://github.com/google/gvisor-containerd-shim/issues/58 + return &taskAPI.StartResponse{ + Pid: uint32(p.Pid()), + }, nil +} + +// Delete the initial process and container +func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (*taskAPI.DeleteResponse, error) { + p, err := s.getProcess(r.ExecID) + if err != nil { + return nil, err + } + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + if err := p.Delete(ctx); err != nil { + return nil, err + } + isTask := r.ExecID == "" + if !isTask { + s.mu.Lock() + delete(s.processes, r.ExecID) + s.mu.Unlock() + } + if isTask && s.platform != nil { + s.platform.Close() + } + return &taskAPI.DeleteResponse{ + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + Pid: uint32(p.Pid()), + }, nil +} + +// Exec an additional process inside the container +func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (*ptypes.Empty, error) { + s.mu.Lock() + p := s.processes[r.ExecID] + s.mu.Unlock() + if p != nil { + return nil, errdefs.ToGRPCf(errdefs.ErrAlreadyExists, "id %s", r.ExecID) + } + p = s.task + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + process, err := p.(*proc.Init).Exec(ctx, s.bundle, &proc.ExecConfig{ + ID: r.ExecID, + Terminal: r.Terminal, + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Spec: r.Spec, + }) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + s.mu.Lock() + s.processes[r.ExecID] = process + s.mu.Unlock() + return empty, nil +} + +// ResizePty of a process +func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (*ptypes.Empty, error) { + p, err := s.getProcess(r.ExecID) + if err != nil { + return nil, err + } + ws := console.WinSize{ + Width: uint16(r.Width), + Height: uint16(r.Height), + } + if err := p.Resize(ws); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +// State returns runtime state information for a process +func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (*taskAPI.StateResponse, error) { + p, err := s.getProcess(r.ExecID) + if err != nil { + return nil, err + } + st, err := p.Status(ctx) + if err != nil { + return nil, err + } + status := task.StatusUnknown + switch st { + case "created": + status = task.StatusCreated + case "running": + status = task.StatusRunning + case "stopped": + status = task.StatusStopped + } + sio := p.Stdio() + return &taskAPI.StateResponse{ + ID: p.ID(), + Bundle: s.bundle, + Pid: uint32(p.Pid()), + Status: status, + Stdin: sio.Stdin, + Stdout: sio.Stdout, + Stderr: sio.Stderr, + Terminal: sio.Terminal, + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + }, nil +} + +// Pause the container 
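+// (pausing is not supported by this shim; Pause returns ErrNotImplemented).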
+func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (*ptypes.Empty, error) { + return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) +} + +// Resume the container +func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (*ptypes.Empty, error) { + return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) +} + +// Kill a process with the provided signal +func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (*ptypes.Empty, error) { + p, err := s.getProcess(r.ExecID) + if err != nil { + return nil, err + } + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + if err := p.Kill(ctx, r.Signal, r.All); err != nil { + return nil, errdefs.ToGRPC(err) + } + return empty, nil +} + +// Pids returns all pids inside the container +func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.PidsResponse, error) { + pids, err := s.getContainerPids(ctx, r.ID) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + var processes []*task.ProcessInfo + for _, pid := range pids { + pInfo := task.ProcessInfo{ + Pid: pid, + } + for _, p := range s.processes { + if p.Pid() == int(pid) { + d := &runctypes.ProcessDetails{ + ExecID: p.ID(), + } + a, err := typeurl.MarshalAny(d) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) + } + pInfo.Info = a + break + } + } + processes = append(processes, &pInfo) + } + return &taskAPI.PidsResponse{ + Processes: processes, + }, nil +} + +// CloseIO of a process +func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (*ptypes.Empty, error) { + p, err := s.getProcess(r.ExecID) + if err != nil { + return nil, err + } + if stdin := p.Stdin(); stdin != nil { + if err := stdin.Close(); err != nil { + return nil, errors.Wrap(err, "close stdin") + } + } + return empty, nil +} + +// Checkpoint the container +func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (*ptypes.Empty, error) { + return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) +} + +// Connect returns shim information such as the shim's pid +func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (*taskAPI.ConnectResponse, error) { + var pid int + if s.task != nil { + pid = s.task.Pid() + } + return &taskAPI.ConnectResponse{ + ShimPid: uint32(os.Getpid()), + TaskPid: uint32(pid), + }, nil +} + +func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (*ptypes.Empty, error) { + s.cancel() + os.Exit(0) + return empty, nil +} + +func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (*taskAPI.StatsResponse, error) { + path, err := os.Getwd() + if err != nil { + return nil, err + } + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + runtime, err := s.readRuntime(path) + if err != nil { + return nil, err + } + rs := proc.NewRunsc(s.opts.Root, path, ns, runtime, nil) + stats, err := rs.Stats(ctx, s.id) + if err != nil { + return nil, err + } + + // gvisor currently (as of 2020-03-03) only returns the total memory + // usage and current PID value[0]. However, we copy the common fields here + // so that future updates will propagate correct information. We're + // using the cgroups.Metrics structure so we're returning the same type + // as runc. 
+ // + // [0]: https://github.com/google/gvisor/blob/277a0d5a1fbe8272d4729c01ee4c6e374d047ebc/runsc/boot/events.go#L61-L81 + data, err := typeurl.MarshalAny(&cgroups.Metrics{ + CPU: &cgroups.CPUStat{ + Usage: &cgroups.CPUUsage{ + Total: stats.Cpu.Usage.Total, + Kernel: stats.Cpu.Usage.Kernel, + User: stats.Cpu.Usage.User, + PerCPU: stats.Cpu.Usage.Percpu, + }, + Throttling: &cgroups.Throttle{ + Periods: stats.Cpu.Throttling.Periods, + ThrottledPeriods: stats.Cpu.Throttling.ThrottledPeriods, + ThrottledTime: stats.Cpu.Throttling.ThrottledTime, + }, + }, + Memory: &cgroups.MemoryStat{ + Cache: stats.Memory.Cache, + Usage: &cgroups.MemoryEntry{ + Limit: stats.Memory.Usage.Limit, + Usage: stats.Memory.Usage.Usage, + Max: stats.Memory.Usage.Max, + Failcnt: stats.Memory.Usage.Failcnt, + }, + Swap: &cgroups.MemoryEntry{ + Limit: stats.Memory.Swap.Limit, + Usage: stats.Memory.Swap.Usage, + Max: stats.Memory.Swap.Max, + Failcnt: stats.Memory.Swap.Failcnt, + }, + Kernel: &cgroups.MemoryEntry{ + Limit: stats.Memory.Kernel.Limit, + Usage: stats.Memory.Kernel.Usage, + Max: stats.Memory.Kernel.Max, + Failcnt: stats.Memory.Kernel.Failcnt, + }, + KernelTCP: &cgroups.MemoryEntry{ + Limit: stats.Memory.KernelTCP.Limit, + Usage: stats.Memory.KernelTCP.Usage, + Max: stats.Memory.KernelTCP.Max, + Failcnt: stats.Memory.KernelTCP.Failcnt, + }, + }, + Pids: &cgroups.PidsStat{ + Current: stats.Pids.Current, + Limit: stats.Pids.Limit, + }, + }) + if err != nil { + return nil, err + } + return &taskAPI.StatsResponse{ + Stats: data, + }, nil +} + +// Update a running container +func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (*ptypes.Empty, error) { + return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) +} + +// Wait for a process to exit +func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (*taskAPI.WaitResponse, error) { + p, err := s.getProcess(r.ExecID) + if err != nil { + return nil, err + } + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") + } + p.Wait() + + return &taskAPI.WaitResponse{ + ExitStatus: uint32(p.ExitStatus()), + ExitedAt: p.ExitedAt(), + }, nil +} + +func (s *service) processExits() { + for e := range s.ec { + s.checkProcesses(e) + } +} + +func (s *service) checkProcesses(e proc.Exit) { + // TODO(random-liu): Add `shouldKillAll` logic if container pid + // namespace is supported. + for _, p := range s.allProcesses() { + if p.ID() == e.ID { + if ip, ok := p.(*proc.Init); ok { + // Ensure all children are killed + if err := ip.KillAll(s.context); err != nil { + log.G(s.context).WithError(err).WithField("id", ip.ID()). 
+ Error("failed to kill init's children") + } + } + p.SetExited(e.Status) + s.events <- &eventstypes.TaskExit{ + ContainerID: s.id, + ID: p.ID(), + Pid: uint32(p.Pid()), + ExitStatus: uint32(e.Status), + ExitedAt: p.ExitedAt(), + } + return + } + } +} + +func (s *service) allProcesses() (o []rproc.Process) { + s.mu.Lock() + defer s.mu.Unlock() + for _, p := range s.processes { + o = append(o, p) + } + if s.task != nil { + o = append(o, s.task) + } + return o +} + +func (s *service) getContainerPids(ctx context.Context, id string) ([]uint32, error) { + s.mu.Lock() + p := s.task + s.mu.Unlock() + if p == nil { + return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "container must be created") + } + ps, err := p.(*proc.Init).Runtime().Ps(ctx, id) + if err != nil { + return nil, err + } + pids := make([]uint32, 0, len(ps)) + for _, pid := range ps { + pids = append(pids, uint32(pid)) + } + return pids, nil +} + +func (s *service) forward(publisher events.Publisher) { + for e := range s.events { + ctx, cancel := context.WithTimeout(s.context, 5*time.Second) + err := publisher.Publish(ctx, getTopic(e), e) + cancel() + if err != nil { + logrus.WithError(err).Error("post event") + } + } +} + +func (s *service) getProcess(execID string) (rproc.Process, error) { + s.mu.Lock() + defer s.mu.Unlock() + if execID == "" { + return s.task, nil + } + p := s.processes[execID] + if p == nil { + return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process does not exist %s", execID) + } + return p, nil +} + +func getTopic(e interface{}) string { + switch e.(type) { + case *eventstypes.TaskCreate: + return runtime.TaskCreateEventTopic + case *eventstypes.TaskStart: + return runtime.TaskStartEventTopic + case *eventstypes.TaskOOM: + return runtime.TaskOOMEventTopic + case *eventstypes.TaskExit: + return runtime.TaskExitEventTopic + case *eventstypes.TaskDelete: + return runtime.TaskDeleteEventTopic + case *eventstypes.TaskExecAdded: + return runtime.TaskExecAddedEventTopic + case *eventstypes.TaskExecStarted: + return runtime.TaskExecStartedEventTopic + default: + logrus.Warnf("no topic for type %#v", e) + } + return runtime.TaskUnknownTopic +} + +func newInit(ctx context.Context, path, workDir, namespace string, platform rproc.Platform, r *proc.CreateConfig, options *options.Options, rootfs string) (*proc.Init, error) { + spec, err := utils.ReadSpec(r.Bundle) + if err != nil { + return nil, errors.Wrap(err, "read oci spec") + } + if err := utils.UpdateVolumeAnnotations(r.Bundle, spec); err != nil { + return nil, errors.Wrap(err, "update volume annotations") + } + runsc.FormatLogPath(r.ID, options.RunscConfig) + runtime := proc.NewRunsc(options.Root, path, namespace, options.BinaryName, options.RunscConfig) + p := proc.New(r.ID, runtime, rproc.Stdio{ + Stdin: r.Stdin, + Stdout: r.Stdout, + Stderr: r.Stderr, + Terminal: r.Terminal, + }) + p.Bundle = r.Bundle + p.Platform = platform + p.Rootfs = rootfs + p.WorkDir = workDir + p.IoUID = int(options.IoUid) + p.IoGID = int(options.IoGid) + p.Sandbox = utils.IsSandbox(spec) + p.UserLog = utils.UserLogPath(spec) + p.Monitor = shim.Default + return p, nil +} diff --git a/pkg/shim/v2/service_linux.go b/pkg/shim/v2/service_linux.go new file mode 100644 index 000000000..cd259cd44 --- /dev/null +++ b/pkg/shim/v2/service_linux.go @@ -0,0 +1,112 @@ +// Copyright 2018 The containerd Authors. +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package v2 + +import ( + "context" + "io" + "sync" + "syscall" + + "github.com/containerd/console" + "github.com/containerd/fifo" + "github.com/pkg/errors" +) + +type linuxPlatform struct { + epoller *console.Epoller +} + +func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { + if p.epoller == nil { + return nil, errors.New("uninitialized epoller") + } + + epollConsole, err := p.epoller.Add(console) + if err != nil { + return nil, err + } + + if stdin != "" { + in, err := fifo.OpenFifo(context.Background(), stdin, syscall.O_RDONLY|syscall.O_NONBLOCK, 0) + if err != nil { + return nil, err + } + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + io.CopyBuffer(epollConsole, in, *p) + }() + } + + outw, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0) + if err != nil { + return nil, err + } + outr, err := fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY, 0) + if err != nil { + return nil, err + } + wg.Add(1) + cwg.Add(1) + go func() { + cwg.Done() + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + io.CopyBuffer(outw, epollConsole, *p) + epollConsole.Close() + outr.Close() + outw.Close() + wg.Done() + }() + return epollConsole, nil +} + +func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error { + if p.epoller == nil { + return errors.New("uninitialized epoller") + } + epollConsole, ok := cons.(*console.EpollConsole) + if !ok { + return errors.Errorf("expected EpollConsole, got %#v", cons) + } + return epollConsole.Shutdown(p.epoller.CloseConsole) +} + +func (p *linuxPlatform) Close() error { + return p.epoller.Close() +} + +// initialize a single epoll fd to manage our consoles. `initPlatform` should +// only be called once. +func (s *service) initPlatform() error { + if s.platform != nil { + return nil + } + epoller, err := console.NewEpoller() + if err != nil { + return errors.Wrap(err, "failed to initialize epoller") + } + s.platform = &linuxPlatform{ + epoller: epoller, + } + go epoller.Wait() + return nil +} diff --git a/pkg/v1/proc/deleted_state.go b/pkg/v1/proc/deleted_state.go deleted file mode 100644 index b0233841d..000000000 --- a/pkg/v1/proc/deleted_state.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright The containerd Authors. -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package proc - -import ( - "context" - - "github.com/containerd/console" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/runtime/proc" - "github.com/pkg/errors" -) - -type deletedState struct { -} - -func (s *deletedState) Resize(ws console.WinSize) error { - return errors.Errorf("cannot resize a deleted process") -} - -func (s *deletedState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a deleted process") -} - -func (s *deletedState) Delete(ctx context.Context) error { - return errors.Wrap(errdefs.ErrNotFound, "cannot delete a deleted process") -} - -func (s *deletedState) Kill(ctx context.Context, sig uint32, all bool) error { - return errors.Wrap(errdefs.ErrNotFound, "cannot kill a deleted process") -} - -func (s *deletedState) SetExited(status int) { - // no op -} - -func (s *deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { - return nil, errors.Errorf("cannot exec in a deleted state") -} diff --git a/pkg/v1/proc/exec.go b/pkg/v1/proc/exec.go deleted file mode 100644 index f02b73bb2..000000000 --- a/pkg/v1/proc/exec.go +++ /dev/null @@ -1,284 +0,0 @@ -/* -Copyright The containerd Authors. -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package proc - -import ( - "context" - "fmt" - "io" - "os" - "path/filepath" - "sync" - "syscall" - "time" - - "github.com/containerd/console" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/runtime/proc" - "github.com/containerd/fifo" - runc "github.com/containerd/go-runc" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "golang.org/x/sys/unix" - - runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" -) - -type execProcess struct { - wg sync.WaitGroup - - execState execState - - mu sync.Mutex - id string - console console.Console - io runc.IO - status int - exited time.Time - pid int - internalPid int - closers []io.Closer - stdin io.Closer - stdio proc.Stdio - path string - spec specs.Process - - parent *Init - waitBlock chan struct{} -} - -func (e *execProcess) Wait() { - <-e.waitBlock -} - -func (e *execProcess) ID() string { - return e.id -} - -func (e *execProcess) Pid() int { - e.mu.Lock() - defer e.mu.Unlock() - return e.pid -} - -func (e *execProcess) ExitStatus() int { - e.mu.Lock() - defer e.mu.Unlock() - return e.status -} - -func (e *execProcess) ExitedAt() time.Time { - e.mu.Lock() - defer e.mu.Unlock() - return e.exited -} - -func (e *execProcess) SetExited(status int) { - e.mu.Lock() - defer e.mu.Unlock() - - e.execState.SetExited(status) -} - -func (e *execProcess) setExited(status int) { - e.status = status - e.exited = time.Now() - e.parent.Platform.ShutdownConsole(context.Background(), e.console) - close(e.waitBlock) -} - -func (e *execProcess) Delete(ctx context.Context) error { - e.mu.Lock() - defer e.mu.Unlock() - - return e.execState.Delete(ctx) -} - -func (e *execProcess) delete(ctx context.Context) error { - e.wg.Wait() - if e.io != nil { - for _, c := range e.closers { - c.Close() - } - e.io.Close() - } - pidfile := filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id)) - // silently ignore error - os.Remove(pidfile) - internalPidfile := filepath.Join(e.path, fmt.Sprintf("%s-internal.pid", e.id)) - // silently ignore error - os.Remove(internalPidfile) - return nil -} - -func (e *execProcess) Resize(ws console.WinSize) error { - e.mu.Lock() - defer e.mu.Unlock() - - return e.execState.Resize(ws) -} - -func (e *execProcess) resize(ws console.WinSize) error { - if e.console == nil { - return nil - } - return e.console.Resize(ws) -} - -func (e *execProcess) Kill(ctx context.Context, sig uint32, _ bool) error { - e.mu.Lock() - defer e.mu.Unlock() - - return e.execState.Kill(ctx, sig, false) -} - -func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error { - internalPid := e.internalPid - if internalPid != 0 { - if err := e.parent.runtime.Kill(ctx, e.parent.id, int(sig), &runsc.KillOpts{ - Pid: internalPid, - }); err != nil { - // If this returns error, consider the process has already stopped. - // TODO: Fix after signal handling is fixed. 
- return errors.Wrapf(errdefs.ErrNotFound, err.Error()) - } - } - return nil -} - -func (e *execProcess) Stdin() io.Closer { - return e.stdin -} - -func (e *execProcess) Stdio() proc.Stdio { - return e.stdio -} - -func (e *execProcess) Start(ctx context.Context) error { - e.mu.Lock() - defer e.mu.Unlock() - - return e.execState.Start(ctx) -} - -func (e *execProcess) start(ctx context.Context) (err error) { - var ( - socket *runc.Socket - pidfile = filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id)) - internalPidfile = filepath.Join(e.path, fmt.Sprintf("%s-internal.pid", e.id)) - ) - if e.stdio.Terminal { - if socket, err = runc.NewTempConsoleSocket(); err != nil { - return errors.Wrap(err, "failed to create runc console socket") - } - defer socket.Close() - } else if e.stdio.IsNull() { - if e.io, err = runc.NewNullIO(); err != nil { - return errors.Wrap(err, "creating new NULL IO") - } - } else { - if e.io, err = runc.NewPipeIO(e.parent.IoUID, e.parent.IoGID, withConditionalIO(e.stdio)); err != nil { - return errors.Wrap(err, "failed to create runc io pipes") - } - } - opts := &runsc.ExecOpts{ - PidFile: pidfile, - InternalPidFile: internalPidfile, - IO: e.io, - Detach: true, - } - if socket != nil { - opts.ConsoleSocket = socket - } - eventCh := e.parent.Monitor.Subscribe() - defer func() { - // Unsubscribe if an error is returned. - if err != nil { - e.parent.Monitor.Unsubscribe(eventCh) - } - }() - if err := e.parent.runtime.Exec(ctx, e.parent.id, e.spec, opts); err != nil { - close(e.waitBlock) - return e.parent.runtimeError(err, "OCI runtime exec failed") - } - if e.stdio.Stdin != "" { - sc, err := fifo.OpenFifo(context.Background(), e.stdio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) - if err != nil { - return errors.Wrapf(err, "failed to open stdin fifo %s", e.stdio.Stdin) - } - e.closers = append(e.closers, sc) - e.stdin = sc - } - var copyWaitGroup sync.WaitGroup - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - if socket != nil { - console, err := socket.ReceiveMaster() - if err != nil { - return errors.Wrap(err, "failed to retrieve console master") - } - if e.console, err = e.parent.Platform.CopyConsole(ctx, console, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil { - return errors.Wrap(err, "failed to start console copy") - } - } else if !e.stdio.IsNull() { - if err := copyPipes(ctx, e.io, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil { - return errors.Wrap(err, "failed to start io pipe copy") - } - } - copyWaitGroup.Wait() - pid, err := runc.ReadPidFile(opts.PidFile) - if err != nil { - return errors.Wrap(err, "failed to retrieve OCI runtime exec pid") - } - e.pid = pid - internalPid, err := runc.ReadPidFile(opts.InternalPidFile) - if err != nil { - return errors.Wrap(err, "failed to retrieve OCI runtime exec internal pid") - } - e.internalPid = internalPid - go func() { - defer e.parent.Monitor.Unsubscribe(eventCh) - for event := range eventCh { - if event.Pid == e.pid { - ExitCh <- Exit{ - Timestamp: event.Timestamp, - ID: e.id, - Status: event.Status, - } - break - } - } - }() - return nil -} - -func (e *execProcess) Status(ctx context.Context) (string, error) { - e.mu.Lock() - defer e.mu.Unlock() - // if we don't have a pid then the exec process has just been created - if e.pid == 0 { - return "created", nil - } - // if we have a pid and it can be signaled, the process is running - // TODO(random-liu): Use `runsc kill --pid`. 
- if err := unix.Kill(e.pid, 0); err == nil { - return "running", nil - } - // else if we have a pid but it can nolonger be signaled, it has stopped - return "stopped", nil -} diff --git a/pkg/v1/proc/exec_state.go b/pkg/v1/proc/exec_state.go deleted file mode 100644 index e10954670..000000000 --- a/pkg/v1/proc/exec_state.go +++ /dev/null @@ -1,156 +0,0 @@ -/* -Copyright The containerd Authors. -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package proc - -import ( - "context" - - "github.com/containerd/console" - "github.com/pkg/errors" -) - -type execState interface { - Resize(console.WinSize) error - Start(context.Context) error - Delete(context.Context) error - Kill(context.Context, uint32, bool) error - SetExited(int) -} - -type execCreatedState struct { - p *execProcess -} - -func (s *execCreatedState) transition(name string) error { - switch name { - case "running": - s.p.execState = &execRunningState{p: s.p} - case "stopped": - s.p.execState = &execStoppedState{p: s.p} - case "deleted": - s.p.execState = &deletedState{} - default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) - } - return nil -} - -func (s *execCreatedState) Resize(ws console.WinSize) error { - return s.p.resize(ws) -} - -func (s *execCreatedState) Start(ctx context.Context) error { - if err := s.p.start(ctx); err != nil { - return err - } - return s.transition("running") -} - -func (s *execCreatedState) Delete(ctx context.Context) error { - if err := s.p.delete(ctx); err != nil { - return err - } - return s.transition("deleted") -} - -func (s *execCreatedState) Kill(ctx context.Context, sig uint32, all bool) error { - return s.p.kill(ctx, sig, all) -} - -func (s *execCreatedState) SetExited(status int) { - s.p.setExited(status) - - if err := s.transition("stopped"); err != nil { - panic(err) - } -} - -type execRunningState struct { - p *execProcess -} - -func (s *execRunningState) transition(name string) error { - switch name { - case "stopped": - s.p.execState = &execStoppedState{p: s.p} - default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) - } - return nil -} - -func (s *execRunningState) Resize(ws console.WinSize) error { - return s.p.resize(ws) -} - -func (s *execRunningState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a running process") -} - -func (s *execRunningState) Delete(ctx context.Context) error { - return errors.Errorf("cannot delete a running process") -} - -func (s *execRunningState) Kill(ctx context.Context, sig uint32, all bool) error { - return s.p.kill(ctx, sig, all) -} - -func (s *execRunningState) SetExited(status int) { - s.p.setExited(status) - - if err := s.transition("stopped"); err != nil { - panic(err) - } -} - -type execStoppedState struct { - p *execProcess -} - -func (s *execStoppedState) transition(name string) error { - switch name { - case "deleted": - s.p.execState = &deletedState{} - default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), 
name) - } - return nil -} - -func (s *execStoppedState) Resize(ws console.WinSize) error { - return errors.Errorf("cannot resize a stopped container") -} - -func (s *execStoppedState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a stopped process") -} - -func (s *execStoppedState) Delete(ctx context.Context) error { - if err := s.p.delete(ctx); err != nil { - return err - } - return s.transition("deleted") -} - -func (s *execStoppedState) Kill(ctx context.Context, sig uint32, all bool) error { - return s.p.kill(ctx, sig, all) -} - -func (s *execStoppedState) SetExited(status int) { - // no op -} diff --git a/pkg/v1/proc/init.go b/pkg/v1/proc/init.go deleted file mode 100644 index 5dbb1daab..000000000 --- a/pkg/v1/proc/init.go +++ /dev/null @@ -1,464 +0,0 @@ -/* -Copyright The containerd Authors. -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package proc - -import ( - "context" - "encoding/json" - "io" - "path/filepath" - "strings" - "sync" - "syscall" - "time" - - "github.com/containerd/console" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/runtime/proc" - "github.com/containerd/fifo" - runc "github.com/containerd/go-runc" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - - runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" -) - -// InitPidFile name of the file that contains the init pid -const InitPidFile = "init.pid" - -// Init represents an initial process for a container -type Init struct { - wg sync.WaitGroup - initState initState - - // mu is used to ensure that `Start()` and `Exited()` calls return in - // the right order when invoked in separate go routines. - // This is the case within the shim implementation as it makes use of - // the reaper interface. 
- mu sync.Mutex - - waitBlock chan struct{} - - WorkDir string - - id string - Bundle string - console console.Console - Platform proc.Platform - io runc.IO - runtime *runsc.Runsc - status int - exited time.Time - pid int - closers []io.Closer - stdin io.Closer - stdio proc.Stdio - Rootfs string - IoUID int - IoGID int - Sandbox bool - UserLog string - Monitor ProcessMonitor -} - -// NewRunsc returns a new runsc instance for a process -func NewRunsc(root, path, namespace, runtime string, config map[string]string) *runsc.Runsc { - if root == "" { - root = RunscRoot - } - return &runsc.Runsc{ - Command: runtime, - PdeathSignal: syscall.SIGKILL, - Log: filepath.Join(path, "log.json"), - LogFormat: runc.JSON, - Root: filepath.Join(root, namespace), - Config: config, - } -} - -// New returns a new init process -func New(id string, runtime *runsc.Runsc, stdio proc.Stdio) *Init { - p := &Init{ - id: id, - runtime: runtime, - stdio: stdio, - status: 0, - waitBlock: make(chan struct{}), - } - p.initState = &createdState{p: p} - return p -} - -// Create the process with the provided config -func (p *Init) Create(ctx context.Context, r *CreateConfig) (err error) { - var socket *runc.Socket - if r.Terminal { - if socket, err = runc.NewTempConsoleSocket(); err != nil { - return errors.Wrap(err, "failed to create OCI runtime console socket") - } - defer socket.Close() - } else if hasNoIO(r) { - if p.io, err = runc.NewNullIO(); err != nil { - return errors.Wrap(err, "creating new NULL IO") - } - } else { - if p.io, err = runc.NewPipeIO(p.IoUID, p.IoGID, withConditionalIO(p.stdio)); err != nil { - return errors.Wrap(err, "failed to create OCI runtime io pipes") - } - } - pidFile := filepath.Join(p.Bundle, InitPidFile) - opts := &runsc.CreateOpts{ - PidFile: pidFile, - } - if socket != nil { - opts.ConsoleSocket = socket - } - if p.Sandbox { - opts.IO = p.io - // UserLog is only useful for sandbox. 
- opts.UserLog = p.UserLog - } - if err := p.runtime.Create(ctx, r.ID, r.Bundle, opts); err != nil { - return p.runtimeError(err, "OCI runtime create failed") - } - if r.Stdin != "" { - sc, err := fifo.OpenFifo(context.Background(), r.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) - if err != nil { - return errors.Wrapf(err, "failed to open stdin fifo %s", r.Stdin) - } - p.stdin = sc - p.closers = append(p.closers, sc) - } - var copyWaitGroup sync.WaitGroup - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - if socket != nil { - console, err := socket.ReceiveMaster() - if err != nil { - return errors.Wrap(err, "failed to retrieve console master") - } - console, err = p.Platform.CopyConsole(ctx, console, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup) - if err != nil { - return errors.Wrap(err, "failed to start console copy") - } - p.console = console - } else if !hasNoIO(r) { - if err := copyPipes(ctx, p.io, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup); err != nil { - return errors.Wrap(err, "failed to start io pipe copy") - } - } - - copyWaitGroup.Wait() - pid, err := runc.ReadPidFile(pidFile) - if err != nil { - return errors.Wrap(err, "failed to retrieve OCI runtime container pid") - } - p.pid = pid - return nil -} - -// Wait for the process to exit -func (p *Init) Wait() { - <-p.waitBlock -} - -// ID of the process -func (p *Init) ID() string { - return p.id -} - -// Pid of the process -func (p *Init) Pid() int { - return p.pid -} - -// ExitStatus of the process -func (p *Init) ExitStatus() int { - p.mu.Lock() - defer p.mu.Unlock() - return p.status -} - -// ExitedAt at time when the process exited -func (p *Init) ExitedAt() time.Time { - p.mu.Lock() - defer p.mu.Unlock() - return p.exited -} - -// Status of the process -func (p *Init) Status(ctx context.Context) (string, error) { - p.mu.Lock() - defer p.mu.Unlock() - c, err := p.runtime.State(ctx, p.id) - if err != nil { - if strings.Contains(err.Error(), "does not exist") { - return "stopped", nil - } - return "", p.runtimeError(err, "OCI runtime state failed") - } - return p.convertStatus(c.Status), nil -} - -// Start the init process -func (p *Init) Start(ctx context.Context) error { - p.mu.Lock() - defer p.mu.Unlock() - - return p.initState.Start(ctx) -} - -func (p *Init) start(ctx context.Context) error { - var cio runc.IO - if !p.Sandbox { - cio = p.io - } - if err := p.runtime.Start(ctx, p.id, cio); err != nil { - return p.runtimeError(err, "OCI runtime start failed") - } - go func() { - status, err := p.runtime.Wait(context.Background(), p.id) - if err != nil { - log.G(ctx).WithError(err).Errorf("Failed to wait for container %q", p.id) - // TODO(random-liu): Handle runsc kill error. 
- if err := p.killAll(ctx); err != nil { - log.G(ctx).WithError(err).Errorf("Failed to kill container %q", p.id) - } - status = internalErrorCode - } - ExitCh <- Exit{ - Timestamp: time.Now(), - ID: p.id, - Status: status, - } - }() - return nil -} - -// SetExited of the init process with the next status -func (p *Init) SetExited(status int) { - p.mu.Lock() - defer p.mu.Unlock() - - p.initState.SetExited(status) -} - -func (p *Init) setExited(status int) { - p.exited = time.Now() - p.status = status - p.Platform.ShutdownConsole(context.Background(), p.console) - close(p.waitBlock) -} - -// Delete the init process -func (p *Init) Delete(ctx context.Context) error { - p.mu.Lock() - defer p.mu.Unlock() - - return p.initState.Delete(ctx) -} - -func (p *Init) delete(ctx context.Context) error { - p.killAll(ctx) - p.wg.Wait() - err := p.runtime.Delete(ctx, p.id, nil) - // ignore errors if a runtime has already deleted the process - // but we still hold metadata and pipes - // - // this is common during a checkpoint, runc will delete the container state - // after a checkpoint and the container will no longer exist within runc - if err != nil { - if strings.Contains(err.Error(), "does not exist") { - err = nil - } else { - err = p.runtimeError(err, "failed to delete task") - } - } - if p.io != nil { - for _, c := range p.closers { - c.Close() - } - p.io.Close() - } - if err2 := mount.UnmountAll(p.Rootfs, 0); err2 != nil { - log.G(ctx).WithError(err2).Warn("failed to cleanup rootfs mount") - if err == nil { - err = errors.Wrap(err2, "failed rootfs umount") - } - } - return err -} - -// Resize the init processes console -func (p *Init) Resize(ws console.WinSize) error { - p.mu.Lock() - defer p.mu.Unlock() - - if p.console == nil { - return nil - } - return p.console.Resize(ws) -} - -func (p *Init) resize(ws console.WinSize) error { - if p.console == nil { - return nil - } - return p.console.Resize(ws) -} - -// Kill the init process -func (p *Init) Kill(ctx context.Context, signal uint32, all bool) error { - p.mu.Lock() - defer p.mu.Unlock() - - return p.initState.Kill(ctx, signal, all) -} - -func (p *Init) kill(context context.Context, signal uint32, all bool) error { - var ( - killErr error - backoff = 100 * time.Millisecond - ) - timeout := 1 * time.Second - for start := time.Now(); time.Now().Sub(start) < timeout; { - c, err := p.runtime.State(context, p.id) - if err != nil { - if strings.Contains(err.Error(), "does not exist") { - return errors.Wrapf(errdefs.ErrNotFound, "no such process") - } - return p.runtimeError(err, "OCI runtime state failed") - } - // For runsc, signal only works when container is running state. - // If the container is not in running state, directly return - // "no such process" - if p.convertStatus(c.Status) == "stopped" { - return errors.Wrapf(errdefs.ErrNotFound, "no such process") - } - killErr = p.runtime.Kill(context, p.id, int(signal), &runsc.KillOpts{ - All: all, - }) - if killErr == nil { - return nil - } - time.Sleep(backoff) - backoff *= 2 - } - return p.runtimeError(killErr, "kill timeout") -} - -// KillAll processes belonging to the init process -func (p *Init) KillAll(context context.Context) error { - p.mu.Lock() - defer p.mu.Unlock() - return p.killAll(context) -} - -func (p *Init) killAll(context context.Context) error { - p.runtime.Kill(context, p.id, int(syscall.SIGKILL), &runsc.KillOpts{ - All: true, - }) - // Ignore error handling for `runsc kill --all` for now. 
- // * If it doesn't return error, it is good; - // * If it returns error, consider the container has already stopped. - // TODO: Fix `runsc kill --all` error handling. - return nil -} - -// Stdin of the process -func (p *Init) Stdin() io.Closer { - return p.stdin -} - -// Runtime returns the OCI runtime configured for the init process -func (p *Init) Runtime() *runsc.Runsc { - return p.runtime -} - -// Exec returns a new child process -func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { - p.mu.Lock() - defer p.mu.Unlock() - - return p.initState.Exec(ctx, path, r) -} - -// exec returns a new exec'd process -func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { - // process exec request - var spec specs.Process - if err := json.Unmarshal(r.Spec.Value, &spec); err != nil { - return nil, err - } - spec.Terminal = r.Terminal - - e := &execProcess{ - id: r.ID, - path: path, - parent: p, - spec: spec, - stdio: proc.Stdio{ - Stdin: r.Stdin, - Stdout: r.Stdout, - Stderr: r.Stderr, - Terminal: r.Terminal, - }, - waitBlock: make(chan struct{}), - } - e.execState = &execCreatedState{p: e} - return e, nil -} - -// Stdio of the process -func (p *Init) Stdio() proc.Stdio { - return p.stdio -} - -func (p *Init) runtimeError(rErr error, msg string) error { - if rErr == nil { - return nil - } - - rMsg, err := getLastRuntimeError(p.runtime) - switch { - case err != nil: - return errors.Wrapf(rErr, "%s: %s (%s)", msg, "unable to retrieve OCI runtime error", err.Error()) - case rMsg == "": - return errors.Wrap(rErr, msg) - default: - return errors.Errorf("%s: %s", msg, rMsg) - } -} - -func (p *Init) convertStatus(status string) string { - if status == "created" && !p.Sandbox && p.status == internalErrorCode { - // Treat start failure state for non-root container as stopped. - return "stopped" - } - return status -} - -func withConditionalIO(c proc.Stdio) runc.IOOpt { - return func(o *runc.IOOption) { - o.OpenStdin = c.Stdin != "" - o.OpenStdout = c.Stdout != "" - o.OpenStderr = c.Stderr != "" - } -} diff --git a/pkg/v1/proc/init_state.go b/pkg/v1/proc/init_state.go deleted file mode 100644 index f56f6fe28..000000000 --- a/pkg/v1/proc/init_state.go +++ /dev/null @@ -1,184 +0,0 @@ -/* -Copyright The containerd Authors. -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package proc - -import ( - "context" - - "github.com/containerd/console" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/runtime/proc" - "github.com/pkg/errors" -) - -type initState interface { - Resize(console.WinSize) error - Start(context.Context) error - Delete(context.Context) error - Exec(context.Context, string, *ExecConfig) (proc.Process, error) - Kill(context.Context, uint32, bool) error - SetExited(int) -} - -type createdState struct { - p *Init -} - -func (s *createdState) transition(name string) error { - switch name { - case "running": - s.p.initState = &runningState{p: s.p} - case "stopped": - s.p.initState = &stoppedState{p: s.p} - case "deleted": - s.p.initState = &deletedState{} - default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) - } - return nil -} - -func (s *createdState) Resize(ws console.WinSize) error { - return s.p.resize(ws) -} - -func (s *createdState) Start(ctx context.Context) error { - if err := s.p.start(ctx); err != nil { - // Containerd doesn't allow deleting container in created state. - // However, for gvisor, a non-root container in created state can - // only go to running state. If the container can't be started, - // it can only stay in created state, and never be deleted. - // To work around that, we treat non-root container in start failure - // state as stopped. - if !s.p.Sandbox { - s.p.io.Close() - s.p.setExited(internalErrorCode) - if err := s.transition("stopped"); err != nil { - panic(err) - } - } - return err - } - return s.transition("running") -} - -func (s *createdState) Delete(ctx context.Context) error { - if err := s.p.delete(ctx); err != nil { - return err - } - return s.transition("deleted") -} - -func (s *createdState) Kill(ctx context.Context, sig uint32, all bool) error { - return s.p.kill(ctx, sig, all) -} - -func (s *createdState) SetExited(status int) { - s.p.setExited(status) - - if err := s.transition("stopped"); err != nil { - panic(err) - } -} - -func (s *createdState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { - return s.p.exec(ctx, path, r) -} - -type runningState struct { - p *Init -} - -func (s *runningState) transition(name string) error { - switch name { - case "stopped": - s.p.initState = &stoppedState{p: s.p} - default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) - } - return nil -} - -func (s *runningState) Resize(ws console.WinSize) error { - return s.p.resize(ws) -} - -func (s *runningState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a running process") -} - -func (s *runningState) Delete(ctx context.Context) error { - return errors.Errorf("cannot delete a running process") -} - -func (s *runningState) Kill(ctx context.Context, sig uint32, all bool) error { - return s.p.kill(ctx, sig, all) -} - -func (s *runningState) SetExited(status int) { - s.p.setExited(status) - - if err := s.transition("stopped"); err != nil { - panic(err) - } -} - -func (s *runningState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { - return s.p.exec(ctx, path, r) -} - -type stoppedState struct { - p *Init -} - -func (s *stoppedState) transition(name string) error { - switch name { - case "deleted": - s.p.initState = &deletedState{} - default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) - } - return nil -} - -func (s *stoppedState) Resize(ws console.WinSize) error { - return errors.Errorf("cannot 
resize a stopped container") -} - -func (s *stoppedState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a stopped process") -} - -func (s *stoppedState) Delete(ctx context.Context) error { - if err := s.p.delete(ctx); err != nil { - return err - } - return s.transition("deleted") -} - -func (s *stoppedState) Kill(ctx context.Context, sig uint32, all bool) error { - return errdefs.ToGRPCf(errdefs.ErrNotFound, "process %s not found", s.p.id) -} - -func (s *stoppedState) SetExited(status int) { - // no op -} - -func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { - return nil, errors.Errorf("cannot exec in a stopped state") -} diff --git a/pkg/v1/proc/io.go b/pkg/v1/proc/io.go deleted file mode 100644 index 4afa94cf2..000000000 --- a/pkg/v1/proc/io.go +++ /dev/null @@ -1,169 +0,0 @@ -/* -Copyright The containerd Authors. -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package proc - -import ( - "context" - "fmt" - "io" - "os" - "sync" - "sync/atomic" - "syscall" - - "github.com/containerd/containerd/log" - "github.com/containerd/fifo" - runc "github.com/containerd/go-runc" -) - -// TODO(random-liu): This file can be a util. - -var bufPool = sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 32<<10) - return &buffer - }, -} - -func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error { - var sameFile *countingWriteCloser - for _, i := range []struct { - name string - dest func(wc io.WriteCloser, rc io.Closer) - }{ - { - name: stdout, - dest: func(wc io.WriteCloser, rc io.Closer) { - wg.Add(1) - cwg.Add(1) - go func() { - cwg.Done() - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - if _, err := io.CopyBuffer(wc, rio.Stdout(), *p); err != nil { - log.G(ctx).Warn("error copying stdout") - } - wg.Done() - wc.Close() - if rc != nil { - rc.Close() - } - }() - }, - }, { - name: stderr, - dest: func(wc io.WriteCloser, rc io.Closer) { - wg.Add(1) - cwg.Add(1) - go func() { - cwg.Done() - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - if _, err := io.CopyBuffer(wc, rio.Stderr(), *p); err != nil { - log.G(ctx).Warn("error copying stderr") - } - wg.Done() - wc.Close() - if rc != nil { - rc.Close() - } - }() - }, - }, - } { - ok, err := isFifo(i.name) - if err != nil { - return err - } - var ( - fw io.WriteCloser - fr io.Closer - ) - if ok { - if fw, err = fifo.OpenFifo(ctx, i.name, syscall.O_WRONLY, 0); err != nil { - return fmt.Errorf("gvisor-containerd-shim: opening %s failed: %s", i.name, err) - } - if fr, err = fifo.OpenFifo(ctx, i.name, syscall.O_RDONLY, 0); err != nil { - return fmt.Errorf("gvisor-containerd-shim: opening %s failed: %s", i.name, err) - } - } else { - if sameFile != nil { - sameFile.count++ - i.dest(sameFile, nil) - continue - } - if fw, err = os.OpenFile(i.name, syscall.O_WRONLY|syscall.O_APPEND, 0); err != nil { - return fmt.Errorf("gvisor-containerd-shim: opening %s failed: %s", i.name, err) - } - if stdout == stderr { - 
sameFile = &countingWriteCloser{ - WriteCloser: fw, - count: 1, - } - } - } - i.dest(fw, fr) - } - if stdin == "" { - return nil - } - f, err := fifo.OpenFifo(context.Background(), stdin, syscall.O_RDONLY|syscall.O_NONBLOCK, 0) - if err != nil { - return fmt.Errorf("gvisor-containerd-shim: opening %s failed: %s", stdin, err) - } - cwg.Add(1) - go func() { - cwg.Done() - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - - io.CopyBuffer(rio.Stdin(), f, *p) - rio.Stdin().Close() - f.Close() - }() - return nil -} - -// countingWriteCloser masks io.Closer() until close has been invoked a certain number of times. -type countingWriteCloser struct { - io.WriteCloser - count int64 -} - -func (c *countingWriteCloser) Close() error { - if atomic.AddInt64(&c.count, -1) > 0 { - return nil - } - return c.WriteCloser.Close() -} - -// isFifo checks if a file is a fifo -// if the file does not exist then it returns false -func isFifo(path string) (bool, error) { - stat, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - if stat.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return true, nil - } - return false, nil -} diff --git a/pkg/v1/proc/process.go b/pkg/v1/proc/process.go deleted file mode 100644 index 7dbcd823d..000000000 --- a/pkg/v1/proc/process.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The containerd Authors. -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package proc - -import ( - "github.com/pkg/errors" -) - -// RunscRoot is the path to the root runsc state directory -const RunscRoot = "/run/containerd/runsc" - -func stateName(v interface{}) string { - switch v.(type) { - case *runningState, *execRunningState: - return "running" - case *createdState, *execCreatedState: - return "created" - case *deletedState: - return "deleted" - case *stoppedState: - return "stopped" - } - panic(errors.Errorf("invalid state %v", v)) -} diff --git a/pkg/v1/proc/types.go b/pkg/v1/proc/types.go deleted file mode 100644 index 1d7c8ade3..000000000 --- a/pkg/v1/proc/types.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright The containerd Authors. -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package proc - -import ( - "time" - - google_protobuf "github.com/gogo/protobuf/types" - - runc "github.com/containerd/go-runc" -) - -// Mount holds filesystem mount configuration -type Mount struct { - Type string - Source string - Target string - Options []string -} - -// CreateConfig hold task creation configuration -type CreateConfig struct { - ID string - Bundle string - Runtime string - Rootfs []Mount - Terminal bool - Stdin string - Stdout string - Stderr string - Options *google_protobuf.Any -} - -// ExecConfig holds exec creation configuration -type ExecConfig struct { - ID string - Terminal bool - Stdin string - Stdout string - Stderr string - Spec *google_protobuf.Any -} - -// Exit is the type of exit events -type Exit struct { - Timestamp time.Time - ID string - Status int -} - -// ProcessMonitor monitors process exit changes -type ProcessMonitor interface { - // Subscribe to process exit changes - Subscribe() chan runc.Exit - // Unsubscribe to process exit changes - Unsubscribe(c chan runc.Exit) -} diff --git a/pkg/v1/proc/utils.go b/pkg/v1/proc/utils.go deleted file mode 100644 index e770a6810..000000000 --- a/pkg/v1/proc/utils.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The containerd Authors. -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package proc - -import ( - "encoding/json" - "io" - "os" - "strings" - "time" - - runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" -) - -const ( - internalErrorCode = 128 - bufferSize = 32 -) - -// ExitCh is the exit events channel for containers and exec processes -// inside the sandbox. -var ExitCh = make(chan Exit, bufferSize) - -// TODO(random-liu): This can be a utility. - -// TODO(mlaventure): move to runc package? -func getLastRuntimeError(r *runsc.Runsc) (string, error) { - if r.Log == "" { - return "", nil - } - - f, err := os.OpenFile(r.Log, os.O_RDONLY, 0400) - if err != nil { - return "", err - } - - var ( - errMsg string - log struct { - Level string - Msg string - Time time.Time - } - ) - - dec := json.NewDecoder(f) - for err = nil; err == nil; { - if err = dec.Decode(&log); err != nil && err != io.EOF { - return "", err - } - if log.Level == "error" { - errMsg = strings.TrimSpace(log.Msg) - } - } - - return errMsg, nil -} - -func copyFile(to, from string) error { - ff, err := os.Open(from) - if err != nil { - return err - } - defer ff.Close() - tt, err := os.Create(to) - if err != nil { - return err - } - defer tt.Close() - - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - _, err = io.CopyBuffer(tt, ff, *p) - return err -} - -func hasNoIO(r *CreateConfig) bool { - return r.Stdin == "" && r.Stdout == "" && r.Stderr == "" -} diff --git a/pkg/v1/shim/platform.go b/pkg/v1/shim/platform.go deleted file mode 100644 index 10c54958f..000000000 --- a/pkg/v1/shim/platform.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Copyright The containerd Authors. 
-Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package shim - -import ( - "context" - "io" - "sync" - "syscall" - - "github.com/containerd/console" - "github.com/containerd/fifo" - "github.com/pkg/errors" -) - -type linuxPlatform struct { - epoller *console.Epoller -} - -func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { - if p.epoller == nil { - return nil, errors.New("uninitialized epoller") - } - - epollConsole, err := p.epoller.Add(console) - if err != nil { - return nil, err - } - - if stdin != "" { - in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0) - if err != nil { - return nil, err - } - cwg.Add(1) - go func() { - cwg.Done() - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - io.CopyBuffer(epollConsole, in, *p) - }() - } - - outw, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0) - if err != nil { - return nil, err - } - outr, err := fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY, 0) - if err != nil { - return nil, err - } - wg.Add(1) - cwg.Add(1) - go func() { - cwg.Done() - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - io.CopyBuffer(outw, epollConsole, *p) - epollConsole.Close() - outr.Close() - outw.Close() - wg.Done() - }() - return epollConsole, nil -} - -func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error { - if p.epoller == nil { - return errors.New("uninitialized epoller") - } - epollConsole, ok := cons.(*console.EpollConsole) - if !ok { - return errors.Errorf("expected EpollConsole, got %#v", cons) - } - return epollConsole.Shutdown(p.epoller.CloseConsole) -} - -func (p *linuxPlatform) Close() error { - return p.epoller.Close() -} - -// initialize a single epoll fd to manage our consoles. `initPlatform` should -// only be called once. -func (s *Service) initPlatform() error { - if s.platform != nil { - return nil - } - epoller, err := console.NewEpoller() - if err != nil { - return errors.Wrap(err, "failed to initialize epoller") - } - s.platform = &linuxPlatform{ - epoller: epoller, - } - go epoller.Wait() - return nil -} diff --git a/pkg/v1/shim/service.go b/pkg/v1/shim/service.go deleted file mode 100644 index e06a5562c..000000000 --- a/pkg/v1/shim/service.go +++ /dev/null @@ -1,581 +0,0 @@ -/* -Copyright The containerd Authors. -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package shim - -import ( - "context" - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/containerd/console" - eventstypes "github.com/containerd/containerd/api/events" - "github.com/containerd/containerd/api/types/task" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/runtime" - "github.com/containerd/containerd/runtime/linux/runctypes" - rproc "github.com/containerd/containerd/runtime/proc" - "github.com/containerd/containerd/runtime/v1/shim" - shimapi "github.com/containerd/containerd/runtime/v1/shim/v1" - "github.com/containerd/typeurl" - ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" - "github.com/google/gvisor-containerd-shim/pkg/v1/proc" - "github.com/google/gvisor-containerd-shim/pkg/v1/utils" -) - -var ( - empty = &ptypes.Empty{} - bufPool = sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 32<<10) - return &buffer - }, - } -) - -// Config contains shim specific configuration -type Config struct { - Path string - Namespace string - WorkDir string - RuntimeRoot string - RunscConfig map[string]string -} - -// NewService returns a new shim service that can be used via GRPC -func NewService(config Config, publisher events.Publisher) (*Service, error) { - if config.Namespace == "" { - return nil, fmt.Errorf("shim namespace cannot be empty") - } - ctx := namespaces.WithNamespace(context.Background(), config.Namespace) - ctx = log.WithLogger(ctx, logrus.WithFields(logrus.Fields{ - "namespace": config.Namespace, - "path": config.Path, - "pid": os.Getpid(), - })) - s := &Service{ - config: config, - context: ctx, - processes: make(map[string]rproc.Process), - events: make(chan interface{}, 128), - ec: proc.ExitCh, - } - go s.processExits() - if err := s.initPlatform(); err != nil { - return nil, errors.Wrap(err, "failed to initialized platform behavior") - } - go s.forward(publisher) - return s, nil -} - -// Service is the shim implementation of a remote shim over GRPC -type Service struct { - mu sync.Mutex - - config Config - context context.Context - processes map[string]rproc.Process - events chan interface{} - platform rproc.Platform - ec chan proc.Exit - - // Filled by Create() - id string - bundle string -} - -// Create a new initial process and container with the underlying OCI runtime -func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *shimapi.CreateTaskResponse, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - var mounts []proc.Mount - for _, m := range r.Rootfs { - mounts = append(mounts, proc.Mount{ - Type: m.Type, - Source: m.Source, - Target: m.Target, - Options: m.Options, - }) - } - - rootfs := filepath.Join(r.Bundle, "rootfs") - if err := os.Mkdir(rootfs, 0711); err != nil && !os.IsExist(err) { - return nil, err - } - - config := &proc.CreateConfig{ - ID: r.ID, - Bundle: r.Bundle, - Runtime: r.Runtime, - Rootfs: mounts, - Terminal: r.Terminal, - Stdin: r.Stdin, - Stdout: r.Stdout, - Stderr: r.Stderr, - Options: r.Options, - } - defer func() { - if err != nil { - if err2 := mount.UnmountAll(rootfs, 0); err2 != nil { - log.G(ctx).WithError(err2).Warn("Failed to cleanup rootfs mount") - } - } - }() - for _, rm := range mounts { - m := 
&mount.Mount{ - Type: rm.Type, - Source: rm.Source, - Options: rm.Options, - } - if err := m.Mount(rootfs); err != nil { - return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m) - } - } - process, err := newInit( - ctx, - s.config.Path, - s.config.WorkDir, - s.config.RuntimeRoot, - s.config.Namespace, - s.config.RunscConfig, - s.platform, - config, - ) - if err := process.Create(ctx, config); err != nil { - return nil, errdefs.ToGRPC(err) - } - // save the main task id and bundle to the shim for additional requests - s.id = r.ID - s.bundle = r.Bundle - pid := process.Pid() - s.processes[r.ID] = process - return &shimapi.CreateTaskResponse{ - Pid: uint32(pid), - }, nil -} - -// Start a process -func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi.StartResponse, error) { - p, err := s.getExecProcess(r.ID) - if err != nil { - return nil, err - } - if err := p.Start(ctx); err != nil { - return nil, err - } - return &shimapi.StartResponse{ - ID: p.ID(), - Pid: uint32(p.Pid()), - }, nil -} - -// Delete the initial process and container -func (s *Service) Delete(ctx context.Context, r *ptypes.Empty) (*shimapi.DeleteResponse, error) { - p, err := s.getInitProcess() - if err != nil { - return nil, err - } - if err := p.Delete(ctx); err != nil { - return nil, err - } - s.mu.Lock() - delete(s.processes, s.id) - s.mu.Unlock() - s.platform.Close() - return &shimapi.DeleteResponse{ - ExitStatus: uint32(p.ExitStatus()), - ExitedAt: p.ExitedAt(), - Pid: uint32(p.Pid()), - }, nil -} - -// DeleteProcess deletes an exec'd process -func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessRequest) (*shimapi.DeleteResponse, error) { - if r.ID == s.id { - return nil, status.Errorf(codes.InvalidArgument, "cannot delete init process with DeleteProcess") - } - p, err := s.getExecProcess(r.ID) - if err != nil { - return nil, err - } - if err := p.Delete(ctx); err != nil { - return nil, err - } - s.mu.Lock() - delete(s.processes, r.ID) - s.mu.Unlock() - return &shimapi.DeleteResponse{ - ExitStatus: uint32(p.ExitStatus()), - ExitedAt: p.ExitedAt(), - Pid: uint32(p.Pid()), - }, nil -} - -// Exec an additional process inside the container -func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*ptypes.Empty, error) { - s.mu.Lock() - - if p := s.processes[r.ID]; p != nil { - s.mu.Unlock() - return nil, errdefs.ToGRPCf(errdefs.ErrAlreadyExists, "id %s", r.ID) - } - - p := s.processes[s.id] - s.mu.Unlock() - if p == nil { - return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") - } - - process, err := p.(*proc.Init).Exec(ctx, s.config.Path, &proc.ExecConfig{ - ID: r.ID, - Terminal: r.Terminal, - Stdin: r.Stdin, - Stdout: r.Stdout, - Stderr: r.Stderr, - Spec: r.Spec, - }) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - s.mu.Lock() - s.processes[r.ID] = process - s.mu.Unlock() - return empty, nil -} - -// ResizePty of a process -func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*ptypes.Empty, error) { - if r.ID == "" { - return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "id not provided") - } - ws := console.WinSize{ - Width: uint16(r.Width), - Height: uint16(r.Height), - } - p, err := s.getExecProcess(r.ID) - if err != nil { - return nil, err - } - if err := p.Resize(ws); err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil -} - -// State returns runtime state information for a process -func (s *Service) State(ctx context.Context, r 
*shimapi.StateRequest) (*shimapi.StateResponse, error) { - p, err := s.getExecProcess(r.ID) - if err != nil { - return nil, err - } - st, err := p.Status(ctx) - if err != nil { - return nil, err - } - status := task.StatusUnknown - switch st { - case "created": - status = task.StatusCreated - case "running": - status = task.StatusRunning - case "stopped": - status = task.StatusStopped - } - sio := p.Stdio() - return &shimapi.StateResponse{ - ID: p.ID(), - Bundle: s.bundle, - Pid: uint32(p.Pid()), - Status: status, - Stdin: sio.Stdin, - Stdout: sio.Stdout, - Stderr: sio.Stderr, - Terminal: sio.Terminal, - ExitStatus: uint32(p.ExitStatus()), - ExitedAt: p.ExitedAt(), - }, nil -} - -// Pause the container -func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { - return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) -} - -// Resume the container -func (s *Service) Resume(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { - return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) -} - -// Kill a process with the provided signal -func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*ptypes.Empty, error) { - if r.ID == "" { - p, err := s.getInitProcess() - if err != nil { - return nil, err - } - if err := p.Kill(ctx, r.Signal, r.All); err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil - } - - p, err := s.getExecProcess(r.ID) - if err != nil { - return nil, err - } - if err := p.Kill(ctx, r.Signal, r.All); err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil -} - -// ListPids returns all pids inside the container -func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*shimapi.ListPidsResponse, error) { - pids, err := s.getContainerPids(ctx, r.ID) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - var processes []*task.ProcessInfo - for _, pid := range pids { - pInfo := task.ProcessInfo{ - Pid: pid, - } - for _, p := range s.processes { - if p.Pid() == int(pid) { - d := &runctypes.ProcessDetails{ - ExecID: p.ID(), - } - a, err := typeurl.MarshalAny(d) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) - } - pInfo.Info = a - break - } - } - processes = append(processes, &pInfo) - } - return &shimapi.ListPidsResponse{ - Processes: processes, - }, nil -} - -// CloseIO of a process -func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*ptypes.Empty, error) { - p, err := s.getExecProcess(r.ID) - if err != nil { - return nil, err - } - if stdin := p.Stdin(); stdin != nil { - if err := stdin.Close(); err != nil { - return nil, errors.Wrap(err, "close stdin") - } - } - return empty, nil -} - -// Checkpoint the container -func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*ptypes.Empty, error) { - return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) -} - -// ShimInfo returns shim information such as the shim's pid -func (s *Service) ShimInfo(ctx context.Context, r *ptypes.Empty) (*shimapi.ShimInfoResponse, error) { - return &shimapi.ShimInfoResponse{ - ShimPid: uint32(os.Getpid()), - }, nil -} - -// Update a running container -func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*ptypes.Empty, error) { - return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) -} - -// Wait for a process to exit -func (s *Service) Wait(ctx context.Context, r *shimapi.WaitRequest) (*shimapi.WaitResponse, error) { - p, err := s.getExecProcess(r.ID) - if err != nil { - return 
nil, err - } - p.Wait() - - return &shimapi.WaitResponse{ - ExitStatus: uint32(p.ExitStatus()), - ExitedAt: p.ExitedAt(), - }, nil -} - -func (s *Service) processExits() { - for e := range s.ec { - s.checkProcesses(e) - } -} - -func (s *Service) allProcesses() []rproc.Process { - s.mu.Lock() - defer s.mu.Unlock() - - res := make([]rproc.Process, 0, len(s.processes)) - for _, p := range s.processes { - res = append(res, p) - } - return res -} - -func (s *Service) checkProcesses(e proc.Exit) { - for _, p := range s.allProcesses() { - if p.ID() == e.ID { - if ip, ok := p.(*proc.Init); ok { - // Ensure all children are killed - if err := ip.KillAll(s.context); err != nil { - log.G(s.context).WithError(err).WithField("id", ip.ID()). - Error("failed to kill init's children") - } - } - p.SetExited(e.Status) - s.events <- &eventstypes.TaskExit{ - ContainerID: s.id, - ID: p.ID(), - Pid: uint32(p.Pid()), - ExitStatus: uint32(e.Status), - ExitedAt: p.ExitedAt(), - } - return - } - } -} - -func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, error) { - p, err := s.getInitProcess() - if err != nil { - return nil, err - } - - ps, err := p.(*proc.Init).Runtime().Ps(ctx, id) - if err != nil { - return nil, err - } - pids := make([]uint32, 0, len(ps)) - for _, pid := range ps { - pids = append(pids, uint32(pid)) - } - return pids, nil -} - -func (s *Service) forward(publisher events.Publisher) { - for e := range s.events { - if err := publisher.Publish(s.context, getTopic(s.context, e), e); err != nil { - log.G(s.context).WithError(err).Error("post event") - } - } -} - -// getInitProcess returns initial process -func (s *Service) getInitProcess() (rproc.Process, error) { - s.mu.Lock() - defer s.mu.Unlock() - p := s.processes[s.id] - if p == nil { - return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") - } - return p, nil -} - -// getExecProcess returns exec process -func (s *Service) getExecProcess(id string) (rproc.Process, error) { - s.mu.Lock() - defer s.mu.Unlock() - p := s.processes[id] - if p == nil { - return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process %s does not exist", id) - } - return p, nil -} - -func getTopic(ctx context.Context, e interface{}) string { - switch e.(type) { - case *eventstypes.TaskCreate: - return runtime.TaskCreateEventTopic - case *eventstypes.TaskStart: - return runtime.TaskStartEventTopic - case *eventstypes.TaskOOM: - return runtime.TaskOOMEventTopic - case *eventstypes.TaskExit: - return runtime.TaskExitEventTopic - case *eventstypes.TaskDelete: - return runtime.TaskDeleteEventTopic - case *eventstypes.TaskExecAdded: - return runtime.TaskExecAddedEventTopic - case *eventstypes.TaskExecStarted: - return runtime.TaskExecStartedEventTopic - default: - logrus.Warnf("no topic for type %#v", e) - } - return runtime.TaskUnknownTopic -} - -func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace string, config map[string]string, platform rproc.Platform, r *proc.CreateConfig) (*proc.Init, error) { - var options runctypes.CreateOptions - if r.Options != nil { - v, err := typeurl.UnmarshalAny(r.Options) - if err != nil { - return nil, err - } - options = *v.(*runctypes.CreateOptions) - } - - spec, err := utils.ReadSpec(r.Bundle) - if err != nil { - return nil, errors.Wrap(err, "read oci spec") - } - if err := utils.UpdateVolumeAnnotations(r.Bundle, spec); err != nil { - return nil, errors.Wrap(err, "update volume annotations") - } - - runsc.FormatLogPath(r.ID, config) - rootfs := filepath.Join(path, 
"rootfs") - runtime := proc.NewRunsc(runtimeRoot, path, namespace, r.Runtime, config) - p := proc.New(r.ID, runtime, rproc.Stdio{ - Stdin: r.Stdin, - Stdout: r.Stdout, - Stderr: r.Stderr, - Terminal: r.Terminal, - }) - p.Bundle = r.Bundle - p.Platform = platform - p.Rootfs = rootfs - p.WorkDir = workDir - p.IoUID = int(options.IoUid) - p.IoGID = int(options.IoGid) - p.Sandbox = utils.IsSandbox(spec) - p.UserLog = utils.UserLogPath(spec) - p.Monitor = shim.Default - return p, nil -} diff --git a/pkg/v1/utils/utils.go b/pkg/v1/utils/utils.go deleted file mode 100644 index 83840c047..000000000 --- a/pkg/v1/utils/utils.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - - "github.com/containerd/cri/pkg/annotations" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -// ReadSpec reads OCI spec from the bundle directory. -func ReadSpec(bundle string) (*specs.Spec, error) { - f, err := os.Open(filepath.Join(bundle, "config.json")) - if err != nil { - return nil, err - } - b, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - var spec specs.Spec - if err := json.Unmarshal(b, &spec); err != nil { - return nil, err - } - return &spec, nil -} - -// IsSandbox checks whether a container is a sandbox container. -func IsSandbox(spec *specs.Spec) bool { - t, ok := spec.Annotations[annotations.ContainerType] - return !ok || t == annotations.ContainerTypeSandbox -} - -// UserLogPath gets user log path from OCI annotation. -func UserLogPath(spec *specs.Spec) string { - sandboxLogDir := spec.Annotations[annotations.SandboxLogDir] - if sandboxLogDir == "" { - return "" - } - return filepath.Join(sandboxLogDir, "gvisor.log") -} diff --git a/pkg/v1/utils/volumes.go b/pkg/v1/utils/volumes.go deleted file mode 100644 index 61adeaa54..000000000 --- a/pkg/v1/utils/volumes.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright 2019 Google LLC. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "path/filepath" - "strings" - - "github.com/containerd/cri/pkg/annotations" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const volumeKeyPrefix = "dev.gvisor.spec.mount." 
- -var kubeletPodsDir = "/var/lib/kubelet/pods" - -// volumeName gets volume name from volume annotation key, example: -// dev.gvisor.spec.mount.NAME.share -func volumeName(k string) string { - return strings.SplitN(strings.TrimPrefix(k, volumeKeyPrefix), ".", 2)[0] -} - -// volumeFieldName gets volume field name from volume annotation key, example: -// `type` is the field of dev.gvisor.spec.mount.NAME.type -func volumeFieldName(k string) string { - parts := strings.Split(strings.TrimPrefix(k, volumeKeyPrefix), ".") - return parts[len(parts)-1] -} - -// podUID gets pod UID from the pod log path. -func podUID(s *specs.Spec) (string, error) { - sandboxLogDir := s.Annotations[annotations.SandboxLogDir] - if sandboxLogDir == "" { - return "", errors.New("no sandbox log path annotation") - } - fields := strings.Split(filepath.Base(sandboxLogDir), "_") - switch len(fields) { - case 1: // This is the old CRI logging path - return fields[0], nil - case 3: // This is the new CRI logging path - return fields[2], nil - } - return "", errors.Errorf("unexpected sandbox log path %q", sandboxLogDir) -} - -// isVolumeKey checks whether an annotation key is for volume. -func isVolumeKey(k string) bool { - return strings.HasPrefix(k, volumeKeyPrefix) -} - -// volumeSourceKey constructs the annotation key for volume source. -func volumeSourceKey(volume string) string { - return volumeKeyPrefix + volume + ".source" -} - -// volumePath searches the volume path in the kubelet pod directory. -func volumePath(volume, uid string) (string, error) { - // TODO: Support subpath when gvisor supports pod volume bind mount. - volumeSearchPath := fmt.Sprintf("%s/%s/volumes/*/%s", kubeletPodsDir, uid, volume) - dirs, err := filepath.Glob(volumeSearchPath) - if err != nil { - return "", err - } - if len(dirs) != 1 { - return "", errors.Errorf("unexpected matched volume list %v", dirs) - } - return dirs[0], nil -} - -// isVolumePath checks whether a string is the volume path. -func isVolumePath(volume, path string) (bool, error) { - // TODO: Support subpath when gvisor supports pod volume bind mount. - volumeSearchPath := fmt.Sprintf("%s/*/volumes/*/%s", kubeletPodsDir, volume) - return filepath.Match(volumeSearchPath, path) -} - -// UpdateVolumeAnnotations add necessary OCI annotations for gvisor -// volume optimization. -func UpdateVolumeAnnotations(bundle string, s *specs.Spec) error { - var ( - uid string - err error - ) - if IsSandbox(s) { - uid, err = podUID(s) - if err != nil { - // Skip if we can't get pod UID, because this doesn't work - // for containerd 1.1. - logrus.WithError(err).Error("Can't get pod uid") - return nil - } - } - var updated bool - for k, v := range s.Annotations { - if !isVolumeKey(k) { - continue - } - if volumeFieldName(k) != "type" { - continue - } - volume := volumeName(k) - if uid != "" { - // This is a sandbox - path, err := volumePath(volume, uid) - if err != nil { - return errors.Wrapf(err, "get volume path for %q", volume) - } - s.Annotations[volumeSourceKey(volume)] = path - updated = true - } else { - // This is a container - for i := range s.Mounts { - // An error is returned for sandbox if source annotation - // is not successfully applied, so it is guaranteed that - // the source annotation for sandbox has already been - // successfully applied at this point. - // The volume name is unique inside a pod, so matching without - // podUID is fine here. - // TODO: Pass podUID down to shim for containers to do - // more accurate matching. 
- if yes, _ := isVolumePath(volume, s.Mounts[i].Source); yes { - // gVisor requires the container mount type to match - // sandbox mount type. - s.Mounts[i].Type = v - updated = true - } - } - } - } - if !updated { - return nil - } - // Update bundle - b, err := json.Marshal(s) - if err != nil { - return err - } - return ioutil.WriteFile(filepath.Join(bundle, "config.json"), b, 0666) -} diff --git a/pkg/v1/utils/volumes_test.go b/pkg/v1/utils/volumes_test.go deleted file mode 100644 index 472cd6e86..000000000 --- a/pkg/v1/utils/volumes_test.go +++ /dev/null @@ -1,311 +0,0 @@ -/* -Copyright 2019 Google LLC. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "testing" - - "github.com/containerd/cri/pkg/annotations" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func TestUpdateVolumeAnnotations(t *testing.T) { - dir, err := ioutil.TempDir("", "test-update-volume-annotations") - if err != nil { - t.Fatalf("create tempdir: %v", err) - } - defer os.RemoveAll(dir) - kubeletPodsDir = dir - - const ( - testPodUID = "testuid" - testVolumeName = "testvolume" - testLogDirPath = "/var/log/pods/testns_testname_" + testPodUID - testLegacyLogDirPath = "/var/log/pods/" + testPodUID - ) - testVolumePath := fmt.Sprintf("%s/%s/volumes/kubernetes.io~empty-dir/%s", dir, testPodUID, testVolumeName) - - if err := os.MkdirAll(testVolumePath, 0755); err != nil { - t.Fatalf("Create test volume: %v", err) - } - - for _, test := range []struct { - desc string - spec *specs.Spec - expected *specs.Spec - expectErr bool - expectUpdate bool - }{ - { - desc: "volume annotations for sandbox", - spec: &specs.Spec{ - Annotations: map[string]string{ - annotations.SandboxLogDir: testLogDirPath, - annotations.ContainerType: annotations.ContainerTypeSandbox, - "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", - "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", - "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", - }, - }, - expected: &specs.Spec{ - Annotations: map[string]string{ - annotations.SandboxLogDir: testLogDirPath, - annotations.ContainerType: annotations.ContainerTypeSandbox, - "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", - "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", - "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", - "dev.gvisor.spec.mount." + testVolumeName + ".source": testVolumePath, - }, - }, - expectUpdate: true, - }, - { - desc: "volume annotations for sandbox with legacy log path", - spec: &specs.Spec{ - Annotations: map[string]string{ - annotations.SandboxLogDir: testLegacyLogDirPath, - annotations.ContainerType: annotations.ContainerTypeSandbox, - "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", - "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", - "dev.gvisor.spec.mount." 
+ testVolumeName + ".options": "ro", - }, - }, - expected: &specs.Spec{ - Annotations: map[string]string{ - annotations.SandboxLogDir: testLegacyLogDirPath, - annotations.ContainerType: annotations.ContainerTypeSandbox, - "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", - "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", - "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", - "dev.gvisor.spec.mount." + testVolumeName + ".source": testVolumePath, - }, - }, - expectUpdate: true, - }, - { - desc: "tmpfs: volume annotations for container", - spec: &specs.Spec{ - Mounts: []specs.Mount{ - { - Destination: "/test", - Type: "bind", - Source: testVolumePath, - Options: []string{"ro"}, - }, - { - Destination: "/random", - Type: "bind", - Source: "/random", - Options: []string{"ro"}, - }, - }, - Annotations: map[string]string{ - annotations.ContainerType: annotations.ContainerTypeContainer, - "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", - "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", - "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", - }, - }, - expected: &specs.Spec{ - Mounts: []specs.Mount{ - { - Destination: "/test", - Type: "tmpfs", - Source: testVolumePath, - Options: []string{"ro"}, - }, - { - Destination: "/random", - Type: "bind", - Source: "/random", - Options: []string{"ro"}, - }, - }, - Annotations: map[string]string{ - annotations.ContainerType: annotations.ContainerTypeContainer, - "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", - "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", - "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", - }, - }, - expectUpdate: true, - }, - { - desc: "bind: volume annotations for container", - spec: &specs.Spec{ - Mounts: []specs.Mount{ - { - Destination: "/test", - Type: "bind", - Source: testVolumePath, - Options: []string{"ro"}, - }, - }, - Annotations: map[string]string{ - annotations.ContainerType: annotations.ContainerTypeContainer, - "dev.gvisor.spec.mount." + testVolumeName + ".share": "container", - "dev.gvisor.spec.mount." + testVolumeName + ".type": "bind", - "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", - }, - }, - expected: &specs.Spec{ - Mounts: []specs.Mount{ - { - Destination: "/test", - Type: "bind", - Source: testVolumePath, - Options: []string{"ro"}, - }, - }, - Annotations: map[string]string{ - annotations.ContainerType: annotations.ContainerTypeContainer, - "dev.gvisor.spec.mount." + testVolumeName + ".share": "container", - "dev.gvisor.spec.mount." + testVolumeName + ".type": "bind", - "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", - }, - }, - expectUpdate: true, - }, - { - desc: "should not return error without pod log directory", - spec: &specs.Spec{ - Annotations: map[string]string{ - annotations.ContainerType: annotations.ContainerTypeSandbox, - "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", - "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", - "dev.gvisor.spec.mount." + testVolumeName + ".options": "ro", - }, - }, - expected: &specs.Spec{ - Annotations: map[string]string{ - annotations.ContainerType: annotations.ContainerTypeSandbox, - "dev.gvisor.spec.mount." + testVolumeName + ".share": "pod", - "dev.gvisor.spec.mount." + testVolumeName + ".type": "tmpfs", - "dev.gvisor.spec.mount." 
+ testVolumeName + ".options": "ro", - }, - }, - }, - { - desc: "should return error if volume path does not exist", - spec: &specs.Spec{ - Annotations: map[string]string{ - annotations.SandboxLogDir: testLogDirPath, - annotations.ContainerType: annotations.ContainerTypeSandbox, - "dev.gvisor.spec.mount.notexist.share": "pod", - "dev.gvisor.spec.mount.notexist.type": "tmpfs", - "dev.gvisor.spec.mount.notexist.options": "ro", - }, - }, - expectErr: true, - }, - { - desc: "no volume annotations for sandbox", - spec: &specs.Spec{ - Annotations: map[string]string{ - annotations.SandboxLogDir: testLogDirPath, - annotations.ContainerType: annotations.ContainerTypeSandbox, - }, - }, - expected: &specs.Spec{ - Annotations: map[string]string{ - annotations.SandboxLogDir: testLogDirPath, - annotations.ContainerType: annotations.ContainerTypeSandbox, - }, - }, - }, - { - desc: "no volume annotations for container", - spec: &specs.Spec{ - Mounts: []specs.Mount{ - { - Destination: "/test", - Type: "bind", - Source: "/test", - Options: []string{"ro"}, - }, - { - Destination: "/random", - Type: "bind", - Source: "/random", - Options: []string{"ro"}, - }, - }, - Annotations: map[string]string{ - annotations.ContainerType: annotations.ContainerTypeContainer, - }, - }, - expected: &specs.Spec{ - Mounts: []specs.Mount{ - { - Destination: "/test", - Type: "bind", - Source: "/test", - Options: []string{"ro"}, - }, - { - Destination: "/random", - Type: "bind", - Source: "/random", - Options: []string{"ro"}, - }, - }, - Annotations: map[string]string{ - annotations.ContainerType: annotations.ContainerTypeContainer, - }, - }, - }, - } { - t.Run(test.desc, func(t *testing.T) { - bundle, err := ioutil.TempDir(dir, "test-bundle") - if err != nil { - t.Fatalf("Create test bundle: %v", err) - } - err = UpdateVolumeAnnotations(bundle, test.spec) - if test.expectErr { - if err == nil { - t.Fatal("Expected error, but got nil") - } - return - } - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if !reflect.DeepEqual(test.expected, test.spec) { - t.Fatalf("Expected %+v, got %+v", test.expected, test.spec) - } - if test.expectUpdate { - b, err := ioutil.ReadFile(filepath.Join(bundle, "config.json")) - if err != nil { - t.Fatalf("Read spec from bundle: %v", err) - } - var spec specs.Spec - if err := json.Unmarshal(b, &spec); err != nil { - t.Fatalf("Unmarshal spec: %v", err) - } - if !reflect.DeepEqual(test.expected, &spec) { - t.Fatalf("Expected %+v, got %+v", test.expected, &spec) - } - } - }) - } -} diff --git a/pkg/v2/epoll.go b/pkg/v2/epoll.go deleted file mode 100644 index 76c7b54d6..000000000 --- a/pkg/v2/epoll.go +++ /dev/null @@ -1,129 +0,0 @@ -// +build linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package v2 - -import ( - "context" - "sync" - - "github.com/containerd/cgroups" - eventstypes "github.com/containerd/containerd/api/events" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/runtime" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -func newOOMEpoller(publisher events.Publisher) (*epoller, error) { - fd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if err != nil { - return nil, err - } - return &epoller{ - fd: fd, - publisher: publisher, - set: make(map[uintptr]*item), - }, nil -} - -type epoller struct { - mu sync.Mutex - - fd int - publisher events.Publisher - set map[uintptr]*item -} - -type item struct { - id string - cg cgroups.Cgroup -} - -func (e *epoller) Close() error { - return unix.Close(e.fd) -} - -func (e *epoller) run(ctx context.Context) { - var events [128]unix.EpollEvent - for { - select { - case <-ctx.Done(): - e.Close() - return - default: - n, err := unix.EpollWait(e.fd, events[:], -1) - if err != nil { - if err == unix.EINTR { - continue - } - logrus.WithError(err).Error("cgroups: epoll wait") - } - for i := 0; i < n; i++ { - e.process(ctx, uintptr(events[i].Fd)) - } - } - } -} - -func (e *epoller) add(id string, cg cgroups.Cgroup) error { - e.mu.Lock() - defer e.mu.Unlock() - fd, err := cg.OOMEventFD() - if err != nil { - return err - } - e.set[fd] = &item{ - id: id, - cg: cg, - } - event := unix.EpollEvent{ - Fd: int32(fd), - Events: unix.EPOLLHUP | unix.EPOLLIN | unix.EPOLLERR, - } - return unix.EpollCtl(e.fd, unix.EPOLL_CTL_ADD, int(fd), &event) -} - -func (e *epoller) process(ctx context.Context, fd uintptr) { - flush(fd) - e.mu.Lock() - i, ok := e.set[fd] - if !ok { - e.mu.Unlock() - return - } - e.mu.Unlock() - if i.cg.State() == cgroups.Deleted { - e.mu.Lock() - delete(e.set, fd) - e.mu.Unlock() - unix.Close(int(fd)) - return - } - if err := e.publisher.Publish(ctx, runtime.TaskOOMEventTopic, &eventstypes.TaskOOM{ - ContainerID: i.id, - }); err != nil { - logrus.WithError(err).Error("publish OOM event") - } -} - -func flush(fd uintptr) error { - var buf [8]byte - _, err := unix.Read(int(fd), buf[:]) - return err -} diff --git a/pkg/v2/options/options.go b/pkg/v2/options/options.go deleted file mode 100644 index 34b002ac4..000000000 --- a/pkg/v2/options/options.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package options - -const OptionType = "io.containerd.runsc.v1.options" - -// Options is runtime options for io.containerd.runsc.v1. -type Options struct { - // ShimCgroup is the cgroup the shim should be in. - ShimCgroup string `toml:"shim_cgroup"` - // IoUid is the I/O's pipes uid. - IoUid uint32 `toml:"io_uid"` - // IoUid is the I/O's pipes gid. - IoGid uint32 `toml:"io_gid"` - // BinaryName is the binary name of the runsc binary. - BinaryName string `toml:"binary_name"` - // Root is the runsc root directory. - Root string `toml:"root"` - // RunscConfig is a key/value map of all runsc flags. 
- RunscConfig map[string]string `toml:"runsc_config"` -} diff --git a/pkg/v2/service.go b/pkg/v2/service.go deleted file mode 100644 index f99456f63..000000000 --- a/pkg/v2/service.go +++ /dev/null @@ -1,826 +0,0 @@ -// +build linux - -/* - Copyright The containerd Authors. - Copyright 2018 Google LLC - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v2 - -import ( - "context" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "sync" - "syscall" - "time" - - "github.com/BurntSushi/toml" - "github.com/containerd/cgroups" - "github.com/containerd/console" - eventstypes "github.com/containerd/containerd/api/events" - "github.com/containerd/containerd/api/types/task" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/runtime" - "github.com/containerd/containerd/runtime/linux/runctypes" - rproc "github.com/containerd/containerd/runtime/proc" - "github.com/containerd/containerd/runtime/v2/shim" - taskAPI "github.com/containerd/containerd/runtime/v2/task" - runtimeoptions "github.com/containerd/cri/pkg/api/runtimeoptions/v1" - "github.com/containerd/typeurl" - ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" - - runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" - "github.com/google/gvisor-containerd-shim/pkg/v1/proc" - "github.com/google/gvisor-containerd-shim/pkg/v1/utils" - "github.com/google/gvisor-containerd-shim/pkg/v2/options" -) - -var ( - empty = &ptypes.Empty{} - bufPool = sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 32<<10) - return &buffer - }, - } -) - -var _ = (taskAPI.TaskService)(&service{}) - -// configFile is the default config file name. For containerd 1.2, -// we assume that a config.toml should exist in the runtime root. 
-const configFile = "config.toml" - -// New returns a new shim service that can be used via GRPC -func New(ctx context.Context, id string, publisher events.Publisher) (shim.Shim, error) { - ep, err := newOOMEpoller(publisher) - if err != nil { - return nil, err - } - ctx, cancel := context.WithCancel(ctx) - go ep.run(ctx) - s := &service{ - id: id, - context: ctx, - processes: make(map[string]rproc.Process), - events: make(chan interface{}, 128), - ec: proc.ExitCh, - oomPoller: ep, - cancel: cancel, - } - go s.processExits() - runsc.Monitor = shim.Default - if err := s.initPlatform(); err != nil { - cancel() - return nil, errors.Wrap(err, "failed to initialized platform behavior") - } - go s.forward(publisher) - return s, nil -} - -// service is the shim implementation of a remote shim over GRPC -type service struct { - mu sync.Mutex - - context context.Context - task rproc.Process - processes map[string]rproc.Process - events chan interface{} - platform rproc.Platform - opts options.Options - ec chan proc.Exit - oomPoller *epoller - - id string - bundle string - cancel func() -} - -func newCommand(ctx context.Context, containerdBinary, containerdAddress string) (*exec.Cmd, error) { - ns, err := namespaces.NamespaceRequired(ctx) - if err != nil { - return nil, err - } - self, err := os.Executable() - if err != nil { - return nil, err - } - cwd, err := os.Getwd() - if err != nil { - return nil, err - } - args := []string{ - "-namespace", ns, - "-address", containerdAddress, - "-publish-binary", containerdBinary, - } - cmd := exec.Command(self, args...) - cmd.Dir = cwd - cmd.Env = append(os.Environ(), "GOMAXPROCS=2") - cmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: true, - } - return cmd, nil -} - -func (s *service) StartShim(ctx context.Context, id, containerdBinary, containerdAddress string) (string, error) { - cmd, err := newCommand(ctx, containerdBinary, containerdAddress) - if err != nil { - return "", err - } - address, err := shim.SocketAddress(ctx, id) - if err != nil { - return "", err - } - socket, err := shim.NewSocket(address) - if err != nil { - return "", err - } - defer socket.Close() - f, err := socket.File() - if err != nil { - return "", err - } - defer f.Close() - - cmd.ExtraFiles = append(cmd.ExtraFiles, f) - - if err := cmd.Start(); err != nil { - return "", err - } - defer func() { - if err != nil { - cmd.Process.Kill() - } - }() - // make sure to wait after start - go cmd.Wait() - if err := shim.WritePidFile("shim.pid", cmd.Process.Pid); err != nil { - return "", err - } - if err := shim.WriteAddress("address", address); err != nil { - return "", err - } - if err := shim.SetScore(cmd.Process.Pid); err != nil { - return "", errors.Wrap(err, "failed to set OOM Score on shim") - } - return address, nil -} - -func (s *service) Cleanup(ctx context.Context) (*taskAPI.DeleteResponse, error) { - path, err := os.Getwd() - if err != nil { - return nil, err - } - ns, err := namespaces.NamespaceRequired(ctx) - if err != nil { - return nil, err - } - runtime, err := s.readRuntime(path) - if err != nil { - return nil, err - } - r := proc.NewRunsc(s.opts.Root, path, ns, runtime, nil) - if err := r.Delete(ctx, s.id, &runsc.DeleteOpts{ - Force: true, - }); err != nil { - logrus.WithError(err).Warn("failed to remove runc container") - } - if err := mount.UnmountAll(filepath.Join(path, "rootfs"), 0); err != nil { - logrus.WithError(err).Warn("failed to cleanup rootfs mount") - } - return &taskAPI.DeleteResponse{ - ExitedAt: time.Now(), - ExitStatus: 128 + uint32(unix.SIGKILL), - }, 
nil -} - -func (s *service) readRuntime(path string) (string, error) { - data, err := ioutil.ReadFile(filepath.Join(path, "runtime")) - if err != nil { - return "", err - } - return string(data), nil -} - -func (s *service) writeRuntime(path, runtime string) error { - return ioutil.WriteFile(filepath.Join(path, "runtime"), []byte(runtime), 0600) -} - -// Create a new initial process and container with the underlying OCI runtime -func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *taskAPI.CreateTaskResponse, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - ns, err := namespaces.NamespaceRequired(ctx) - if err != nil { - return nil, errors.Wrap(err, "create namespace") - } - - // Read from root for now. - var opts options.Options - if r.Options != nil { - v, err := typeurl.UnmarshalAny(r.Options) - if err != nil { - return nil, err - } - var path string - switch o := v.(type) { - case *runctypes.CreateOptions: // containerd 1.2.x - opts.IoUid = o.IoUid - opts.IoGid = o.IoGid - opts.ShimCgroup = o.ShimCgroup - case *runctypes.RuncOptions: // containerd 1.2.x - root := proc.RunscRoot - if o.RuntimeRoot != "" { - root = o.RuntimeRoot - } - - opts.BinaryName = o.Runtime - - path = filepath.Join(root, configFile) - if _, err := os.Stat(path); err != nil { - if !os.IsNotExist(err) { - return nil, errors.Wrapf(err, "stat config file %q", path) - } - // A config file in runtime root is not required. - path = "" - } - case *runtimeoptions.Options: // containerd 1.3.x+ - if o.ConfigPath == "" { - break - } - if o.TypeUrl != options.OptionType { - return nil, errors.Errorf("unsupported runtimeoptions %q", o.TypeUrl) - } - path = o.ConfigPath - default: - return nil, errors.Errorf("unsupported option type %q", r.Options.TypeUrl) - } - if path != "" { - if _, err = toml.DecodeFile(path, &opts); err != nil { - return nil, errors.Wrapf(err, "decode config file %q", path) - } - } - } - - var mounts []proc.Mount - for _, m := range r.Rootfs { - mounts = append(mounts, proc.Mount{ - Type: m.Type, - Source: m.Source, - Target: m.Target, - Options: m.Options, - }) - } - - rootfs := filepath.Join(r.Bundle, "rootfs") - if err := os.Mkdir(rootfs, 0711); err != nil && !os.IsExist(err) { - return nil, err - } - - config := &proc.CreateConfig{ - ID: r.ID, - Bundle: r.Bundle, - Runtime: opts.BinaryName, - Rootfs: mounts, - Terminal: r.Terminal, - Stdin: r.Stdin, - Stdout: r.Stdout, - Stderr: r.Stderr, - Options: r.Options, - } - if err := s.writeRuntime(r.Bundle, opts.BinaryName); err != nil { - return nil, err - } - defer func() { - if err != nil { - if err2 := mount.UnmountAll(rootfs, 0); err2 != nil { - logrus.WithError(err2).Warn("failed to cleanup rootfs mount") - } - } - }() - for _, rm := range mounts { - m := &mount.Mount{ - Type: rm.Type, - Source: rm.Source, - Options: rm.Options, - } - if err := m.Mount(rootfs); err != nil { - return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m) - } - } - process, err := newInit( - ctx, - r.Bundle, - filepath.Join(r.Bundle, "work"), - ns, - s.platform, - config, - &opts, - rootfs, - ) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - if err := process.Create(ctx, config); err != nil { - return nil, errdefs.ToGRPC(err) - } - // save the main task id and bundle to the shim for additional requests - s.id = r.ID - s.bundle = r.Bundle - - // Set up OOM notification on the sandbox's cgroup. This is done on sandbox - // create since the sandbox process will be created here. 
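When a config path is found, the Create handler above decodes a TOML file from the runtime root into the options.Options struct shown earlier. A minimal sketch of that decoding step, with the field names and toml tags copied from that struct; the file contents are purely illustrative values, and the sketch assumes the BurntSushi/toml package this file already imports:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Options mirrors the (deleted) options.Options struct above; the toml tags
// are taken from that struct, the values below are made up.
type Options struct {
	ShimCgroup  string            `toml:"shim_cgroup"`
	IoUid       uint32            `toml:"io_uid"`
	IoGid       uint32            `toml:"io_gid"`
	BinaryName  string            `toml:"binary_name"`
	Root        string            `toml:"root"`
	RunscConfig map[string]string `toml:"runsc_config"`
}

func main() {
	// Example of what a config.toml in the runtime root might look like.
	blob := `
binary_name = "runsc"
root = "/run/containerd/runsc"
[runsc_config]
  debug = "true"
`
	var opts Options
	if _, err := toml.Decode(blob, &opts); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", opts)
}
```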
- pid := process.Pid() - if pid > 0 { - cg, err := cgroups.Load(cgroups.V1, cgroups.PidPath(pid)) - if err != nil { - return nil, errors.Wrapf(err, "loading cgroup for %d", pid) - } - if err := s.oomPoller.add(s.id, cg); err != nil { - return nil, errors.Wrapf(err, "add cg to OOM monitor") - } - } - s.task = process - s.opts = opts - return &taskAPI.CreateTaskResponse{ - Pid: uint32(process.Pid()), - }, nil - -} - -// Start a process -func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (*taskAPI.StartResponse, error) { - p, err := s.getProcess(r.ExecID) - if err != nil { - return nil, err - } - if err := p.Start(ctx); err != nil { - return nil, err - } - // TODO: Set the cgroup and oom notifications on restore. - // https://github.com/google/gvisor-containerd-shim/issues/58 - return &taskAPI.StartResponse{ - Pid: uint32(p.Pid()), - }, nil -} - -// Delete the initial process and container -func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (*taskAPI.DeleteResponse, error) { - p, err := s.getProcess(r.ExecID) - if err != nil { - return nil, err - } - if p == nil { - return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") - } - if err := p.Delete(ctx); err != nil { - return nil, err - } - isTask := r.ExecID == "" - if !isTask { - s.mu.Lock() - delete(s.processes, r.ExecID) - s.mu.Unlock() - } - if isTask && s.platform != nil { - s.platform.Close() - } - return &taskAPI.DeleteResponse{ - ExitStatus: uint32(p.ExitStatus()), - ExitedAt: p.ExitedAt(), - Pid: uint32(p.Pid()), - }, nil -} - -// Exec an additional process inside the container -func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (*ptypes.Empty, error) { - s.mu.Lock() - p := s.processes[r.ExecID] - s.mu.Unlock() - if p != nil { - return nil, errdefs.ToGRPCf(errdefs.ErrAlreadyExists, "id %s", r.ExecID) - } - p = s.task - if p == nil { - return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") - } - process, err := p.(*proc.Init).Exec(ctx, s.bundle, &proc.ExecConfig{ - ID: r.ExecID, - Terminal: r.Terminal, - Stdin: r.Stdin, - Stdout: r.Stdout, - Stderr: r.Stderr, - Spec: r.Spec, - }) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - s.mu.Lock() - s.processes[r.ExecID] = process - s.mu.Unlock() - return empty, nil -} - -// ResizePty of a process -func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (*ptypes.Empty, error) { - p, err := s.getProcess(r.ExecID) - if err != nil { - return nil, err - } - ws := console.WinSize{ - Width: uint16(r.Width), - Height: uint16(r.Height), - } - if err := p.Resize(ws); err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil -} - -// State returns runtime state information for a process -func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (*taskAPI.StateResponse, error) { - p, err := s.getProcess(r.ExecID) - if err != nil { - return nil, err - } - st, err := p.Status(ctx) - if err != nil { - return nil, err - } - status := task.StatusUnknown - switch st { - case "created": - status = task.StatusCreated - case "running": - status = task.StatusRunning - case "stopped": - status = task.StatusStopped - } - sio := p.Stdio() - return &taskAPI.StateResponse{ - ID: p.ID(), - Bundle: s.bundle, - Pid: uint32(p.Pid()), - Status: status, - Stdin: sio.Stdin, - Stdout: sio.Stdout, - Stderr: sio.Stderr, - Terminal: sio.Terminal, - ExitStatus: uint32(p.ExitStatus()), - ExitedAt: p.ExitedAt(), - }, nil -} - -// Pause the container 
-func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (*ptypes.Empty, error) { - return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) -} - -// Resume the container -func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (*ptypes.Empty, error) { - return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) -} - -// Kill a process with the provided signal -func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (*ptypes.Empty, error) { - p, err := s.getProcess(r.ExecID) - if err != nil { - return nil, err - } - if p == nil { - return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") - } - if err := p.Kill(ctx, r.Signal, r.All); err != nil { - return nil, errdefs.ToGRPC(err) - } - return empty, nil -} - -// Pids returns all pids inside the container -func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.PidsResponse, error) { - pids, err := s.getContainerPids(ctx, r.ID) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - var processes []*task.ProcessInfo - for _, pid := range pids { - pInfo := task.ProcessInfo{ - Pid: pid, - } - for _, p := range s.processes { - if p.Pid() == int(pid) { - d := &runctypes.ProcessDetails{ - ExecID: p.ID(), - } - a, err := typeurl.MarshalAny(d) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) - } - pInfo.Info = a - break - } - } - processes = append(processes, &pInfo) - } - return &taskAPI.PidsResponse{ - Processes: processes, - }, nil -} - -// CloseIO of a process -func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (*ptypes.Empty, error) { - p, err := s.getProcess(r.ExecID) - if err != nil { - return nil, err - } - if stdin := p.Stdin(); stdin != nil { - if err := stdin.Close(); err != nil { - return nil, errors.Wrap(err, "close stdin") - } - } - return empty, nil -} - -// Checkpoint the container -func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (*ptypes.Empty, error) { - return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) -} - -// Connect returns shim information such as the shim's pid -func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (*taskAPI.ConnectResponse, error) { - var pid int - if s.task != nil { - pid = s.task.Pid() - } - return &taskAPI.ConnectResponse{ - ShimPid: uint32(os.Getpid()), - TaskPid: uint32(pid), - }, nil -} - -func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (*ptypes.Empty, error) { - s.cancel() - os.Exit(0) - return empty, nil -} - -func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (*taskAPI.StatsResponse, error) { - path, err := os.Getwd() - if err != nil { - return nil, err - } - ns, err := namespaces.NamespaceRequired(ctx) - if err != nil { - return nil, err - } - runtime, err := s.readRuntime(path) - if err != nil { - return nil, err - } - rs := proc.NewRunsc(s.opts.Root, path, ns, runtime, nil) - stats, err := rs.Stats(ctx, s.id) - if err != nil { - return nil, err - } - - // gvisor currently (as of 2020-03-03) only returns the total memory - // usage and current PID value[0]. However, we copy the common fields here - // so that future updates will propagate correct information. We're - // using the cgroups.Metrics structure so we're returning the same type - // as runc. 
- // - // [0]: https://github.com/google/gvisor/blob/277a0d5a1fbe8272d4729c01ee4c6e374d047ebc/runsc/boot/events.go#L61-L81 - data, err := typeurl.MarshalAny(&cgroups.Metrics{ - CPU: &cgroups.CPUStat{ - Usage: &cgroups.CPUUsage{ - Total: stats.Cpu.Usage.Total, - Kernel: stats.Cpu.Usage.Kernel, - User: stats.Cpu.Usage.User, - PerCPU: stats.Cpu.Usage.Percpu, - }, - Throttling: &cgroups.Throttle{ - Periods: stats.Cpu.Throttling.Periods, - ThrottledPeriods: stats.Cpu.Throttling.ThrottledPeriods, - ThrottledTime: stats.Cpu.Throttling.ThrottledTime, - }, - }, - Memory: &cgroups.MemoryStat{ - Cache: stats.Memory.Cache, - Usage: &cgroups.MemoryEntry{ - Limit: stats.Memory.Usage.Limit, - Usage: stats.Memory.Usage.Usage, - Max: stats.Memory.Usage.Max, - Failcnt: stats.Memory.Usage.Failcnt, - }, - Swap: &cgroups.MemoryEntry{ - Limit: stats.Memory.Swap.Limit, - Usage: stats.Memory.Swap.Usage, - Max: stats.Memory.Swap.Max, - Failcnt: stats.Memory.Swap.Failcnt, - }, - Kernel: &cgroups.MemoryEntry{ - Limit: stats.Memory.Kernel.Limit, - Usage: stats.Memory.Kernel.Usage, - Max: stats.Memory.Kernel.Max, - Failcnt: stats.Memory.Kernel.Failcnt, - }, - KernelTCP: &cgroups.MemoryEntry{ - Limit: stats.Memory.KernelTCP.Limit, - Usage: stats.Memory.KernelTCP.Usage, - Max: stats.Memory.KernelTCP.Max, - Failcnt: stats.Memory.KernelTCP.Failcnt, - }, - }, - Pids: &cgroups.PidsStat{ - Current: stats.Pids.Current, - Limit: stats.Pids.Limit, - }, - }) - if err != nil { - return nil, err - } - return &taskAPI.StatsResponse{ - Stats: data, - }, nil -} - -// Update a running container -func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (*ptypes.Empty, error) { - return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) -} - -// Wait for a process to exit -func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (*taskAPI.WaitResponse, error) { - p, err := s.getProcess(r.ExecID) - if err != nil { - return nil, err - } - if p == nil { - return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created") - } - p.Wait() - - return &taskAPI.WaitResponse{ - ExitStatus: uint32(p.ExitStatus()), - ExitedAt: p.ExitedAt(), - }, nil -} - -func (s *service) processExits() { - for e := range s.ec { - s.checkProcesses(e) - } -} - -func (s *service) checkProcesses(e proc.Exit) { - // TODO(random-liu): Add `shouldKillAll` logic if container pid - // namespace is supported. - for _, p := range s.allProcesses() { - if p.ID() == e.ID { - if ip, ok := p.(*proc.Init); ok { - // Ensure all children are killed - if err := ip.KillAll(s.context); err != nil { - log.G(s.context).WithError(err).WithField("id", ip.ID()). 
- Error("failed to kill init's children") - } - } - p.SetExited(e.Status) - s.events <- &eventstypes.TaskExit{ - ContainerID: s.id, - ID: p.ID(), - Pid: uint32(p.Pid()), - ExitStatus: uint32(e.Status), - ExitedAt: p.ExitedAt(), - } - return - } - } -} - -func (s *service) allProcesses() (o []rproc.Process) { - s.mu.Lock() - defer s.mu.Unlock() - for _, p := range s.processes { - o = append(o, p) - } - if s.task != nil { - o = append(o, s.task) - } - return o -} - -func (s *service) getContainerPids(ctx context.Context, id string) ([]uint32, error) { - s.mu.Lock() - p := s.task - s.mu.Unlock() - if p == nil { - return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "container must be created") - } - ps, err := p.(*proc.Init).Runtime().Ps(ctx, id) - if err != nil { - return nil, err - } - pids := make([]uint32, 0, len(ps)) - for _, pid := range ps { - pids = append(pids, uint32(pid)) - } - return pids, nil -} - -func (s *service) forward(publisher events.Publisher) { - for e := range s.events { - ctx, cancel := context.WithTimeout(s.context, 5*time.Second) - err := publisher.Publish(ctx, getTopic(e), e) - cancel() - if err != nil { - logrus.WithError(err).Error("post event") - } - } -} - -func (s *service) getProcess(execID string) (rproc.Process, error) { - s.mu.Lock() - defer s.mu.Unlock() - if execID == "" { - return s.task, nil - } - p := s.processes[execID] - if p == nil { - return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process does not exist %s", execID) - } - return p, nil -} - -func getTopic(e interface{}) string { - switch e.(type) { - case *eventstypes.TaskCreate: - return runtime.TaskCreateEventTopic - case *eventstypes.TaskStart: - return runtime.TaskStartEventTopic - case *eventstypes.TaskOOM: - return runtime.TaskOOMEventTopic - case *eventstypes.TaskExit: - return runtime.TaskExitEventTopic - case *eventstypes.TaskDelete: - return runtime.TaskDeleteEventTopic - case *eventstypes.TaskExecAdded: - return runtime.TaskExecAddedEventTopic - case *eventstypes.TaskExecStarted: - return runtime.TaskExecStartedEventTopic - default: - logrus.Warnf("no topic for type %#v", e) - } - return runtime.TaskUnknownTopic -} - -func newInit(ctx context.Context, path, workDir, namespace string, platform rproc.Platform, r *proc.CreateConfig, options *options.Options, rootfs string) (*proc.Init, error) { - spec, err := utils.ReadSpec(r.Bundle) - if err != nil { - return nil, errors.Wrap(err, "read oci spec") - } - if err := utils.UpdateVolumeAnnotations(r.Bundle, spec); err != nil { - return nil, errors.Wrap(err, "update volume annotations") - } - runsc.FormatLogPath(r.ID, options.RunscConfig) - runtime := proc.NewRunsc(options.Root, path, namespace, options.BinaryName, options.RunscConfig) - p := proc.New(r.ID, runtime, rproc.Stdio{ - Stdin: r.Stdin, - Stdout: r.Stdout, - Stderr: r.Stderr, - Terminal: r.Terminal, - }) - p.Bundle = r.Bundle - p.Platform = platform - p.Rootfs = rootfs - p.WorkDir = workDir - p.IoUID = int(options.IoUid) - p.IoGID = int(options.IoGid) - p.Sandbox = utils.IsSandbox(spec) - p.UserLog = utils.UserLogPath(spec) - p.Monitor = shim.Default - return p, nil -} diff --git a/pkg/v2/service_linux.go b/pkg/v2/service_linux.go deleted file mode 100644 index cbd431537..000000000 --- a/pkg/v2/service_linux.go +++ /dev/null @@ -1,111 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v2 - -import ( - "context" - "io" - "sync" - "syscall" - - "github.com/containerd/console" - "github.com/containerd/fifo" - "github.com/pkg/errors" -) - -type linuxPlatform struct { - epoller *console.Epoller -} - -func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { - if p.epoller == nil { - return nil, errors.New("uninitialized epoller") - } - - epollConsole, err := p.epoller.Add(console) - if err != nil { - return nil, err - } - - if stdin != "" { - in, err := fifo.OpenFifo(context.Background(), stdin, syscall.O_RDONLY|syscall.O_NONBLOCK, 0) - if err != nil { - return nil, err - } - cwg.Add(1) - go func() { - cwg.Done() - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - io.CopyBuffer(epollConsole, in, *p) - }() - } - - outw, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0) - if err != nil { - return nil, err - } - outr, err := fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY, 0) - if err != nil { - return nil, err - } - wg.Add(1) - cwg.Add(1) - go func() { - cwg.Done() - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - io.CopyBuffer(outw, epollConsole, *p) - epollConsole.Close() - outr.Close() - outw.Close() - wg.Done() - }() - return epollConsole, nil -} - -func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error { - if p.epoller == nil { - return errors.New("uninitialized epoller") - } - epollConsole, ok := cons.(*console.EpollConsole) - if !ok { - return errors.Errorf("expected EpollConsole, got %#v", cons) - } - return epollConsole.Shutdown(p.epoller.CloseConsole) -} - -func (p *linuxPlatform) Close() error { - return p.epoller.Close() -} - -// initialize a single epoll fd to manage our consoles. `initPlatform` should -// only be called once. -func (s *service) initPlatform() error { - if s.platform != nil { - return nil - } - epoller, err := console.NewEpoller() - if err != nil { - return errors.Wrap(err, "failed to initialize epoller") - } - s.platform = &linuxPlatform{ - epoller: epoller, - } - go epoller.Wait() - return nil -} diff --git a/shim/README.md b/shim/README.md new file mode 100644 index 000000000..e446ec970 --- /dev/null +++ b/shim/README.md @@ -0,0 +1,16 @@ +# gvisor-containerd-shim + +gvisor-containerd-shim is a containerd shim. It implements the containerd v1 +shim API. It can be used as a drop-in replacement for +[containerd-shim][containerd-shim] +(though containerd-shim must still be installed). It allows the use of both +gVisor (runsc) and normal containers in the same containerd installation by +deferring to the runc shim if the desired runtime engine is not runsc. 
+ +- [Untrusted Workload Quick Start (containerd >=1.1)](docs/untrusted-workload-quickstart.md) +- [Runtime Handler/RuntimeClass Quick Start (containerd >=1.2)](docs/runtime-handler-quickstart.md) +- [Runtime Handler/RuntimeClass Quick Start (shim v2) (containerd >=1.2)](docs/runtime-handler-shim-v2-quickstart.md) +- [Configure containerd-shim-runsc-v1 (shim v2) (containerd >= 1.3)](docs/configure-containerd-shim-runsc-v1.md) +- [Configure gvisor-containerd-shim (shim v1) (containerd <= 1.2)](docs/configure-gvisor-containerd-shim.md) + +[containerd-shim]: https://github.com/containerd/containerd/tree/master/cmd/containerd-shim diff --git a/shim/configure-containerd-shim-runsc-v1.md b/shim/configure-containerd-shim-runsc-v1.md new file mode 100644 index 000000000..977ceacbd --- /dev/null +++ b/shim/configure-containerd-shim-runsc-v1.md @@ -0,0 +1,72 @@ +# Configure containerd-shim-runsc-v1 (Shim V2) + +This document describes how to configure runtime options for +`containerd-shim-runsc-v1`. This is follows on to the instructions of +[Runtime Handler Quick Start (shim v2) (containerd >=1.2)](runtime-handler-shim-v2-quickstart.md) +and requires containerd 1.3 or later. + +### Update `/etc/containerd/config.toml` to point to a configuration file for `containerd-shim-runsc-v1`. + +`containerd-shim-runsc-v1` supports a few different configuration options based +on the version of containerd that is used. For versions >= 1.3, it supports a +configurable config path in the containerd runtime configuration. + +```shell +{ # Step 1: Update runtime options for runsc in containerd config.toml +cat < +[embedmd]:# (../test/e2e/shim-install.sh shell /{ # Step 1\(dev\)/ /^}/) +```shell +{ # Step 1(dev): Build and install gvisor-containerd-shim and containerd-shim-runsc-v1 + make + sudo make install +} +``` + +### Configure containerd + +1. Update `/etc/containerd/config.toml`. Make sure `containerd-shim-runsc-v1` is + in `${PATH}`. + +[embedmd]:# (../test/e2e/runtime-handler-shim-v2/install.sh shell /{ # Step 1/ /^}/) +```shell +{ # Step 1: Create containerd config.toml +cat < 106 { + return errors.Errorf("%q: unix socket path too long (> 106)", path) + } + l, err = net.Listen("unix", "\x00"+path) + } + if err != nil { + return err + } + logrus.WithField("socket", path).Debug("serving api on unix socket") + go func() { + defer l.Close() + if err := server.Serve(context.Background(), l); err != nil && + !strings.Contains(err.Error(), "use of closed network connection") { + logrus.WithError(err).Fatal("gvisor-containerd-shim: ttrpc server failure") + } + }() + return nil +} + +// setupSignals creates a new signal handler for all signals and sets the shim as a +// sub-reaper so that the container processes are reparented +func setupSignals() (chan os.Signal, error) { + signals := make(chan os.Signal, 32) + signal.Notify(signals, unix.SIGTERM, unix.SIGINT, unix.SIGCHLD, unix.SIGPIPE) + // make sure runc is setup to use the monitor + // for waiting on processes + // TODO(random-liu): Move shim/reaper.go to a separate package. 
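The serve helper above falls back to an abstract unix domain socket by prefixing the path with a NUL byte, after rejecting paths longer than the 106-byte limit it enforces. A minimal sketch of opening such an abstract-namespace listener on Linux; the socket name here is made up:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Abstract unix socket: prefix the name with a NUL byte, as serve() does
	// above. The name is illustrative only.
	const path = "example/gvisor-containerd-shim.sock"
	if len(path) > 106 { // same length guard as serve() above
		panic("unix socket path too long (> 106)")
	}
	l, err := net.Listen("unix", "\x00"+path)
	if err != nil {
		panic(err)
	}
	defer l.Close()
	fmt.Println("listening on abstract socket:", l.Addr())
}
```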
+ runsc.Monitor = containerdshim.Default + // set the shim as the subreaper for all orphaned processes created by the container + if err := system.SetSubreaper(1); err != nil { + return nil, err + } + return signals, nil +} + +func handleSignals(logger *logrus.Entry, signals chan os.Signal, server *ttrpc.Server, sv *shim.Service) error { + var ( + termOnce sync.Once + done = make(chan struct{}) + ) + + for { + select { + case <-done: + return nil + case s := <-signals: + switch s { + case unix.SIGCHLD: + if err := containerdshim.Reap(); err != nil { + logger.WithError(err).Error("reap exit status") + } + case unix.SIGTERM, unix.SIGINT: + go termOnce.Do(func() { + ctx := context.TODO() + if err := server.Shutdown(ctx); err != nil { + logger.WithError(err).Error("failed to shutdown server") + } + // Ensure our child is dead if any + sv.Kill(ctx, &shimapi.KillRequest{ + Signal: uint32(syscall.SIGKILL), + All: true, + }) + sv.Delete(context.Background(), &ptypes.Empty{}) + close(done) + }) + case unix.SIGPIPE: + } + } + } +} + +func dumpStacks(logger *logrus.Entry) { + var ( + buf []byte + stackSize int + ) + bufferLen := 16384 + for stackSize == len(buf) { + buf = make([]byte, bufferLen) + stackSize = runtime.Stack(buf, true) + bufferLen *= 2 + } + buf = buf[:stackSize] + logger.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) +} + +type remoteEventsPublisher struct { + address string +} + +func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error { + ns, _ := namespaces.Namespace(ctx) + encoded, err := typeurl.MarshalAny(event) + if err != nil { + return err + } + data, err := encoded.Marshal() + if err != nil { + return err + } + cmd := exec.CommandContext(ctx, containerdBinaryFlag, "--address", l.address, "publish", "--topic", topic, "--namespace", ns) + cmd.Stdin = bytes.NewReader(data) + c, err := containerdshim.Default.Start(cmd) + if err != nil { + return err + } + status, err := containerdshim.Default.Wait(cmd, c) + if err != nil { + return err + } + if status != 0 { + return errors.New("failed to publish event") + } + return nil +} diff --git a/test/e2e/containerd-install.sh b/test/e2e/containerd-install.sh deleted file mode 100755 index 400819245..000000000 --- a/test/e2e/containerd-install.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# A script to install containerd and CNI plugins for e2e testing - -wget -q --https-only \ - https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz \ - https://github.com/containernetworking/plugins/releases/download/v0.7.0/cni-plugins-amd64-v0.7.0.tgz - -sudo mkdir -p /etc/containerd /etc/cni/net.d /opt/cni/bin -sudo tar -xvf cni-plugins-amd64-v0.7.0.tgz -C /opt/cni/bin/ -sudo tar -xvf containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz -C / - -cat </tmp/containerd-cri.log & diff --git a/test/e2e/crictl-install.sh b/test/e2e/crictl-install.sh deleted file mode 100755 index 1d63c889b..000000000 --- a/test/e2e/crictl-install.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -# A sample script for installing crictl. 
- -set -ex - -{ # Step 1: Download crictl -wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz -tar xf crictl-v1.13.0-linux-amd64.tar.gz -sudo mv crictl /usr/local/bin -} - -{ # Step 2: Configure crictl -cat < /tmp/containerd-cri.log & -} diff --git a/test/e2e/runtime-handler-shim-v2/test.sh b/test/e2e/runtime-handler-shim-v2/test.sh deleted file mode 100755 index e33655ec1..000000000 --- a/test/e2e/runtime-handler-shim-v2/test.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -# Runs end-to-end tests for gvisor-containerd-shim to test the use of runtime -# handler. This should work on containerd 1.2+ - -# This is meant to be run in a VM as it makes a fairly invasive install of -# containerd. - -set -ex - -# Install containerd -. ./test/e2e/containerd-install.sh - -# Install gVisor -. ./test/e2e/runsc-install.sh - -# Install gvisor-containerd-shim -. ./test/e2e/shim-install.sh - -# Test installation/configuration -. ./test/e2e/runtime-handler-shim-v2/install.sh - -# Install crictl -. ./test/e2e/crictl-install.sh - -# Test usage (the same with runtime-handler) -. ./test/e2e/runtime-handler/usage.sh - -# Run a container in the sandbox -. ./test/e2e/run-container.sh - -# Validate the pod and container -. ./test/e2e/validate.sh -. ./test/e2e/runtime-handler-shim-v2/validate.sh diff --git a/test/e2e/runtime-handler-shim-v2/validate.sh b/test/e2e/runtime-handler-shim-v2/validate.sh deleted file mode 100755 index b74a059ef..000000000 --- a/test/e2e/runtime-handler-shim-v2/validate.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -# A sample script to validating the running containerd-shim-runsc-v1. - -set -ex - -ps aux | grep [c]ontainerd-shim-runsc-v1 diff --git a/test/e2e/runtime-handler/install.sh b/test/e2e/runtime-handler/install.sh deleted file mode 100755 index ebe9d3580..000000000 --- a/test/e2e/runtime-handler/install.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -# A sample script for installing and configuring the gvisor-containerd-shim to -# use the containerd runtime handler. - -set -ex - -{ # Step 1: Create containerd config.toml -cat < /tmp/containerd-cri.log & -} diff --git a/test/e2e/runtime-handler/test.sh b/test/e2e/runtime-handler/test.sh deleted file mode 100755 index 99f3565b6..000000000 --- a/test/e2e/runtime-handler/test.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Runs end-to-end tests for gvisor-containerd-shim to test the use of runtime -# handler. This should work on containerd 1.2+ - -# This is meant to be run in a VM as it makes a fairly invasive install of -# containerd. - -set -ex - -# Install containerd -. ./test/e2e/containerd-install.sh - -# Install gVisor -. ./test/e2e/runsc-install.sh - -# Install gvisor-containerd-shim -. ./test/e2e/shim-install.sh - -# Test installation/configuration -. ./test/e2e/runtime-handler/install.sh - -# Install crictl -. ./test/e2e/crictl-install.sh - -# Test usage -. ./test/e2e/runtime-handler/usage.sh - -# Run a container in the sandbox -. ./test/e2e/run-container.sh - -# Validate the pod and container -. ./test/e2e/validate.sh diff --git a/test/e2e/runtime-handler/usage.sh b/test/e2e/runtime-handler/usage.sh deleted file mode 100755 index 350c720c2..000000000 --- a/test/e2e/runtime-handler/usage.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# A sample script for testing the gvisor-containerd-shim -# using runtime handler. 
- -set -ex - -{ # Step 1: Pull the nginx image -sudo crictl pull nginx -} - -{ # Step 2: Create sandbox.json -cat </tmp/containerd-cri.log & -} diff --git a/test/e2e/untrusted-workload/test.sh b/test/e2e/untrusted-workload/test.sh deleted file mode 100755 index 6e312cf6d..000000000 --- a/test/e2e/untrusted-workload/test.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Runs end-to-end tests for gvisor-containerd-shim to test using the -# untrusted workload extension. This should work on containerd 1.1+ - -# This is meant to be run in a VM as it makes a fairly invasive install of -# containerd. - -set -ex - -# Install containerd -. ./test/e2e/containerd-install.sh - -# Install gVisor -. ./test/e2e/runsc-install.sh - -# Install gvisor-containerd-shim -. ./test/e2e/shim-install.sh - -# Test installation/configuration -. ./test/e2e/untrusted-workload/install.sh - -# Install crictl -. ./test/e2e/crictl-install.sh - -# Test usage -. ./test/e2e/untrusted-workload/usage.sh - -# Run a container in the sandbox -. ./test/e2e/run-container.sh - -# Validate the pod and container -. ./test/e2e/validate.sh diff --git a/test/e2e/untrusted-workload/usage.sh b/test/e2e/untrusted-workload/usage.sh deleted file mode 100755 index db8206964..000000000 --- a/test/e2e/untrusted-workload/usage.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# A sample script for testing the gvisor-containerd-shim # using untrusted -# workload extension. - -set -ex - -{ # Step 1: Pull the nginx image -sudo crictl pull nginx -} - -{ # Step 2: Create sandbox.json -cat </tmp/containerd-cri.log & diff --git a/test/shim/crictl-install.sh b/test/shim/crictl-install.sh new file mode 100755 index 000000000..1d63c889b --- /dev/null +++ b/test/shim/crictl-install.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# A sample script for installing crictl. + +set -ex + +{ # Step 1: Download crictl +wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz +tar xf crictl-v1.13.0-linux-amd64.tar.gz +sudo mv crictl /usr/local/bin +} + +{ # Step 2: Configure crictl +cat < /tmp/containerd-cri.log & +} diff --git a/test/shim/runtime-handler-shim-v2/test.sh b/test/shim/runtime-handler-shim-v2/test.sh new file mode 100755 index 000000000..e33655ec1 --- /dev/null +++ b/test/shim/runtime-handler-shim-v2/test.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# Runs end-to-end tests for gvisor-containerd-shim to test the use of runtime +# handler. This should work on containerd 1.2+ + +# This is meant to be run in a VM as it makes a fairly invasive install of +# containerd. + +set -ex + +# Install containerd +. ./test/e2e/containerd-install.sh + +# Install gVisor +. ./test/e2e/runsc-install.sh + +# Install gvisor-containerd-shim +. ./test/e2e/shim-install.sh + +# Test installation/configuration +. ./test/e2e/runtime-handler-shim-v2/install.sh + +# Install crictl +. ./test/e2e/crictl-install.sh + +# Test usage (the same with runtime-handler) +. ./test/e2e/runtime-handler/usage.sh + +# Run a container in the sandbox +. ./test/e2e/run-container.sh + +# Validate the pod and container +. ./test/e2e/validate.sh +. ./test/e2e/runtime-handler-shim-v2/validate.sh diff --git a/test/shim/runtime-handler-shim-v2/validate.sh b/test/shim/runtime-handler-shim-v2/validate.sh new file mode 100755 index 000000000..b74a059ef --- /dev/null +++ b/test/shim/runtime-handler-shim-v2/validate.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# A sample script to validating the running containerd-shim-runsc-v1. 
+ +set -ex + +ps aux | grep [c]ontainerd-shim-runsc-v1 diff --git a/test/shim/runtime-handler/install.sh b/test/shim/runtime-handler/install.sh new file mode 100755 index 000000000..ebe9d3580 --- /dev/null +++ b/test/shim/runtime-handler/install.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# A sample script for installing and configuring the gvisor-containerd-shim to +# use the containerd runtime handler. + +set -ex + +{ # Step 1: Create containerd config.toml +cat < /tmp/containerd-cri.log & +} diff --git a/test/shim/runtime-handler/test.sh b/test/shim/runtime-handler/test.sh new file mode 100755 index 000000000..99f3565b6 --- /dev/null +++ b/test/shim/runtime-handler/test.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Runs end-to-end tests for gvisor-containerd-shim to test the use of runtime +# handler. This should work on containerd 1.2+ + +# This is meant to be run in a VM as it makes a fairly invasive install of +# containerd. + +set -ex + +# Install containerd +. ./test/e2e/containerd-install.sh + +# Install gVisor +. ./test/e2e/runsc-install.sh + +# Install gvisor-containerd-shim +. ./test/e2e/shim-install.sh + +# Test installation/configuration +. ./test/e2e/runtime-handler/install.sh + +# Install crictl +. ./test/e2e/crictl-install.sh + +# Test usage +. ./test/e2e/runtime-handler/usage.sh + +# Run a container in the sandbox +. ./test/e2e/run-container.sh + +# Validate the pod and container +. ./test/e2e/validate.sh diff --git a/test/shim/runtime-handler/usage.sh b/test/shim/runtime-handler/usage.sh new file mode 100755 index 000000000..350c720c2 --- /dev/null +++ b/test/shim/runtime-handler/usage.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# A sample script for testing the gvisor-containerd-shim +# using runtime handler. + +set -ex + +{ # Step 1: Pull the nginx image +sudo crictl pull nginx +} + +{ # Step 2: Create sandbox.json +cat </tmp/containerd-cri.log & +} diff --git a/test/shim/untrusted-workload/test.sh b/test/shim/untrusted-workload/test.sh new file mode 100755 index 000000000..6e312cf6d --- /dev/null +++ b/test/shim/untrusted-workload/test.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Runs end-to-end tests for gvisor-containerd-shim to test using the +# untrusted workload extension. This should work on containerd 1.1+ + +# This is meant to be run in a VM as it makes a fairly invasive install of +# containerd. + +set -ex + +# Install containerd +. ./test/e2e/containerd-install.sh + +# Install gVisor +. ./test/e2e/runsc-install.sh + +# Install gvisor-containerd-shim +. ./test/e2e/shim-install.sh + +# Test installation/configuration +. ./test/e2e/untrusted-workload/install.sh + +# Install crictl +. ./test/e2e/crictl-install.sh + +# Test usage +. ./test/e2e/untrusted-workload/usage.sh + +# Run a container in the sandbox +. ./test/e2e/run-container.sh + +# Validate the pod and container +. ./test/e2e/validate.sh diff --git a/test/shim/untrusted-workload/usage.sh b/test/shim/untrusted-workload/usage.sh new file mode 100755 index 000000000..db8206964 --- /dev/null +++ b/test/shim/untrusted-workload/usage.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# A sample script for testing the gvisor-containerd-shim # using untrusted +# workload extension. + +set -ex + +{ # Step 1: Pull the nginx image +sudo crictl pull nginx +} + +{ # Step 2: Create sandbox.json +cat < Date: Tue, 5 May 2020 18:11:56 -0700 Subject: Update canonical paths. 
--- pkg/shim/v1/proc/exec.go | 2 +- pkg/shim/v1/proc/init.go | 2 +- pkg/shim/v1/proc/utils.go | 2 +- pkg/shim/v1/shim/service.go | 6 +++--- pkg/shim/v2/service.go | 8 ++++---- shim/v1/main.go | 2 +- shim/v2/main.go | 4 ++-- 7 files changed, 13 insertions(+), 13 deletions(-) (limited to 'pkg/shim') diff --git a/pkg/shim/v1/proc/exec.go b/pkg/shim/v1/proc/exec.go index 17199960e..6821e09cd 100644 --- a/pkg/shim/v1/proc/exec.go +++ b/pkg/shim/v1/proc/exec.go @@ -34,7 +34,7 @@ import ( "github.com/pkg/errors" "golang.org/x/sys/unix" - runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" + "gvisor.dev/gvisor/pkg/shim/runsc" ) type execProcess struct { diff --git a/pkg/shim/v1/proc/init.go b/pkg/shim/v1/proc/init.go index fadfc9146..0771540a9 100644 --- a/pkg/shim/v1/proc/init.go +++ b/pkg/shim/v1/proc/init.go @@ -35,7 +35,7 @@ import ( specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" + "gvisor.dev/gvisor/pkg/shim/runsc" ) // InitPidFile name of the file that contains the init pid diff --git a/pkg/shim/v1/proc/utils.go b/pkg/shim/v1/proc/utils.go index 84bd702ab..6e7dbdad7 100644 --- a/pkg/shim/v1/proc/utils.go +++ b/pkg/shim/v1/proc/utils.go @@ -22,7 +22,7 @@ import ( "strings" "time" - runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" + "gvisor.dev/gvisor/pkg/shim/runsc" ) const ( diff --git a/pkg/shim/v1/shim/service.go b/pkg/shim/v1/shim/service.go index 27e915c14..aac172801 100644 --- a/pkg/shim/v1/shim/service.go +++ b/pkg/shim/v1/shim/service.go @@ -42,9 +42,9 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" - "github.com/google/gvisor-containerd-shim/pkg/v1/proc" - "github.com/google/gvisor-containerd-shim/pkg/v1/utils" + "gvisor.dev/gvisor/pkg/shim/runsc" + "gvisor.dev/gvisor/pkg/shim/v1/proc" + "gvisor.dev/gvisor/pkg/shim/v1/utils" ) var ( diff --git a/pkg/shim/v2/service.go b/pkg/shim/v2/service.go index 3d226e29c..0cff82a89 100644 --- a/pkg/shim/v2/service.go +++ b/pkg/shim/v2/service.go @@ -46,10 +46,10 @@ import ( "github.com/sirupsen/logrus" "golang.org/x/sys/unix" - runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" - "github.com/google/gvisor-containerd-shim/pkg/v1/proc" - "github.com/google/gvisor-containerd-shim/pkg/v1/utils" - "github.com/google/gvisor-containerd-shim/pkg/v2/options" + "gvisor.dev/gvisor/pkg/shim/runsc" + "gvisor.dev/gvisor/pkg/shim/v1/proc" + "gvisor.dev/gvisor/pkg/shim/v1/utils" + "gvisor.dev/gvisor/pkg/shim/v2/options" ) var ( diff --git a/shim/v1/main.go b/shim/v1/main.go index bdcf4e6d4..41c77394a 100644 --- a/shim/v1/main.go +++ b/shim/v1/main.go @@ -18,7 +18,7 @@ package main import ( "github.com/containerd/containerd/runtime/v2/shim" - runsc "github.com/google/gvisor-containerd-shim/pkg/v2" + runsc "gvisor.dev/gvisor/pkg/shim/v2" ) func main() { diff --git a/shim/v2/main.go b/shim/v2/main.go index 500bdddee..118b6f6e5 100644 --- a/shim/v2/main.go +++ b/shim/v2/main.go @@ -45,8 +45,8 @@ import ( "github.com/sirupsen/logrus" "golang.org/x/sys/unix" - runsc "github.com/google/gvisor-containerd-shim/pkg/go-runsc" - "github.com/google/gvisor-containerd-shim/pkg/v1/shim" + "gvisor.dev/gvisor/pkg/shim/runsc" + "gvisor.dev/gvisor/pkg/shim/v1/shim" ) var ( -- cgit v1.2.3 From 2afff44403e046078301de39f0252bb57fc018c7 Mon Sep 17 00:00:00 2001 From: Adin Scannell Date: Tue, 5 May 2020 22:00:14 -0700 Subject: Update shim to build using bazel. 
The go.mod dependency tree for the shim was somehow contradictory. After resolving these issues (e.g. explicitly imported k8s 1.14, pulling a specific dbus version), and adding all dependencies, the shim can now be build as part of the regular bazel tree. As part of this process, minor cleanup was done in all the source files: headers were standardized (and include "The gVisor Authors" in addition to the "The containerd Authors" if originally derived from containerd sources), and comments were cleaned up to meet coding standards. This change makes the containerd installation dynamic, so that multiple versions can be tested, and drops the static installer for the VM image itself. This change also updates test/root/crictl_test.go and related utilities, so that the containerd tests can be run on any version (and in cases where it applies, they can be run on both v1 and v2 as parameterized tests). --- BUILD | 3 + Makefile | 23 +- WORKSPACE | 597 +++++++++++++++++++++++--- g3doc/user_guide/BUILD | 9 + g3doc/user_guide/runtimeclass.md | 44 ++ go.mod | 22 +- go.sum | 123 +++++- pkg/shim/runsc/BUILD | 16 + pkg/shim/runsc/runsc.go | 49 ++- pkg/shim/v1/proc/BUILD | 35 ++ pkg/shim/v1/proc/deleted_state.go | 12 +- pkg/shim/v1/proc/exec.go | 25 +- pkg/shim/v1/proc/exec_state.go | 16 +- pkg/shim/v1/proc/init.go | 80 ++-- pkg/shim/v1/proc/init_state.go | 18 +- pkg/shim/v1/proc/io.go | 5 +- pkg/shim/v1/proc/process.go | 6 +- pkg/shim/v1/proc/types.go | 10 +- pkg/shim/v1/proc/utils.go | 2 - pkg/shim/v1/shim/BUILD | 38 ++ pkg/shim/v1/shim/platform.go | 10 +- pkg/shim/v1/shim/service.go | 68 ++- pkg/shim/v1/utils/BUILD | 26 ++ pkg/shim/v1/utils/volumes.go | 41 +- pkg/shim/v2/BUILD | 41 ++ pkg/shim/v2/epoll.go | 6 +- pkg/shim/v2/options/BUILD | 11 + pkg/shim/v2/service.go | 115 ++--- pkg/shim/v2/service_linux.go | 10 +- pkg/test/criutil/criutil.go | 66 ++- runsc/BUILD | 12 +- runsc/debian/postinst.sh | 9 +- shim/BUILD | 15 + shim/README.md | 21 +- shim/configure-containerd-shim-runsc-v1.md | 72 ---- shim/configure-gvisor-containerd-shim.md | 42 -- shim/runsc.toml | 6 + shim/runtime-handler-quickstart.md | 251 ----------- shim/runtime-handler-shim-v2-quickstart.md | 232 ---------- shim/untrusted-workload-quickstart.md | 212 --------- shim/v1/BUILD | 43 ++ shim/v1/README.md | 50 +++ shim/v1/config.go | 40 ++ shim/v1/main.go | 247 ++++++++++- shim/v2/BUILD | 32 ++ shim/v2/README.md | 90 ++++ shim/v2/config.go | 40 -- shim/v2/main.go | 298 +------------ shim/v2/runtime-handler-shim-v2-quickstart.md | 232 ++++++++++ test/root/BUILD | 5 +- test/root/crictl_test.go | 452 ++++++++++++------- test/shim/containerd-install.sh | 44 -- test/shim/crictl-install.sh | 17 - test/shim/run-container.sh | 30 -- test/shim/runsc-install.sh | 8 - test/shim/runtime-handler-shim-v2/install.sh | 21 - test/shim/runtime-handler-shim-v2/test.sh | 34 -- test/shim/runtime-handler-shim-v2/validate.sh | 7 - test/shim/runtime-handler/install.sh | 24 -- test/shim/runtime-handler/test.sh | 33 -- test/shim/runtime-handler/usage.sh | 30 -- test/shim/runtimeclass-install.sh | 33 -- test/shim/shim-install.sh | 28 -- test/shim/untrusted-workload/install.sh | 27 -- test/shim/untrusted-workload/test.sh | 33 -- test/shim/untrusted-workload/usage.sh | 33 -- test/shim/validate.sh | 17 - tools/bazel.mk | 2 +- tools/installers/BUILD | 10 +- tools/installers/containerd.sh | 114 +++++ tools/installers/head.sh | 8 +- tools/vm/ubuntu1604/25_docker.sh | 65 --- tools/vm/ubuntu1604/30_containerd.sh | 86 ---- tools/vm/ubuntu1604/30_docker.sh | 65 +++ website/BUILD 
| 3 + 75 files changed, 2503 insertions(+), 2197 deletions(-) create mode 100644 g3doc/user_guide/runtimeclass.md create mode 100644 pkg/shim/runsc/BUILD create mode 100644 pkg/shim/v1/proc/BUILD create mode 100644 pkg/shim/v1/shim/BUILD create mode 100644 pkg/shim/v1/utils/BUILD create mode 100644 pkg/shim/v2/BUILD create mode 100644 pkg/shim/v2/options/BUILD create mode 100644 shim/BUILD delete mode 100644 shim/configure-containerd-shim-runsc-v1.md delete mode 100644 shim/configure-gvisor-containerd-shim.md create mode 100644 shim/runsc.toml delete mode 100644 shim/runtime-handler-quickstart.md delete mode 100644 shim/runtime-handler-shim-v2-quickstart.md delete mode 100644 shim/untrusted-workload-quickstart.md create mode 100644 shim/v1/BUILD create mode 100644 shim/v1/README.md create mode 100644 shim/v1/config.go create mode 100644 shim/v2/BUILD create mode 100644 shim/v2/README.md delete mode 100644 shim/v2/config.go create mode 100644 shim/v2/runtime-handler-shim-v2-quickstart.md delete mode 100755 test/shim/containerd-install.sh delete mode 100755 test/shim/crictl-install.sh delete mode 100755 test/shim/run-container.sh delete mode 100755 test/shim/runsc-install.sh delete mode 100755 test/shim/runtime-handler-shim-v2/install.sh delete mode 100755 test/shim/runtime-handler-shim-v2/test.sh delete mode 100755 test/shim/runtime-handler-shim-v2/validate.sh delete mode 100755 test/shim/runtime-handler/install.sh delete mode 100755 test/shim/runtime-handler/test.sh delete mode 100755 test/shim/runtime-handler/usage.sh delete mode 100755 test/shim/runtimeclass-install.sh delete mode 100755 test/shim/shim-install.sh delete mode 100755 test/shim/untrusted-workload/install.sh delete mode 100755 test/shim/untrusted-workload/test.sh delete mode 100755 test/shim/untrusted-workload/usage.sh delete mode 100755 test/shim/validate.sh create mode 100755 tools/installers/containerd.sh delete mode 100755 tools/vm/ubuntu1604/25_docker.sh delete mode 100755 tools/vm/ubuntu1604/30_containerd.sh create mode 100755 tools/vm/ubuntu1604/30_docker.sh (limited to 'pkg/shim') diff --git a/BUILD b/BUILD index 962d54821..5d0dbde4c 100644 --- a/BUILD +++ b/BUILD @@ -69,7 +69,10 @@ go_path( name = "gopath", mode = "link", deps = [ + # Main binary. "//runsc", + "//shim/v1:gvisor-containerd-shim", + "//shim/v2:containerd-shim-runsc-v1", # Packages that are not dependencies of //runsc. "//pkg/sentry/kernel/memevent", diff --git a/Makefile b/Makefile index 85818ebea..58648f5d2 100644 --- a/Makefile +++ b/Makefile @@ -121,6 +121,22 @@ tests: ## Runs all local ptrace system call tests. @$(MAKE) test OPTIONS="--test_tag_filters runsc_ptrace test/syscalls/..." .PHONY: tests +containerd-test-%: ## Runs a local containerd test. +containerd-test-%: load-basic_alpine load-basic_python load-basic_busybox load-basic_resolv load-basic_httpd +containerd-test-%: install-test-runtime + @CONTAINERD_VERSION=$* $(MAKE) sudo TARGETS="tools/installers:containerd" + @$(MAKE) sudo TARGETS="test/root:root_test" ARGS="-test.v" + +# Note that we can't run containerd-test-1.1.8 tests here. +# +# Containerd 1.1.8 should work, but because of a bug in loading images locally +# (https://github.com/kubernetes-sigs/cri-tools/issues/421), we are unable to +# actually drive the tests. The v1 API is tested exclusively through 1.2.13. +containerd-tests: ## Runs all supported containerd version tests. 
+containerd-tests: containerd-test-1.2.13 +containerd-tests: containerd-test-1.3.4 +containerd-tests: containerd-test-1.4.0-beta.0 + ## ## Website & documentation helpers. ## @@ -233,11 +249,14 @@ dev: ## Installs a set of local runtimes. Requires sudo. refresh: ## Refreshes the runtime binary (for development only). Must have called 'dev' or 'test-install' first. @mkdir -p "$(RUNTIME_DIR)" - @$(MAKE) copy TARGETS=runsc DESTINATION="$(RUNTIME_BIN)" && chmod 0755 "$(RUNTIME_BIN)" + @$(MAKE) copy TARGETS=runsc DESTINATION="$(RUNTIME_BIN)" + @$(MAKE) copy TARGETS=shim/v1:gvisor-containerd-shim DESTINATION="$(RUNTIME_DIR)" + @$(MAKE) copy TARGETS=shim/v2:containerd-shim-runsc-v1 DESTINATION="$(RUNTIME_DIR)" .PHONY: install -test-install: ## Installs the runtime for testing. Requires sudo. +install-test-runtime: ## Installs the runtime for testing. Requires sudo. @$(MAKE) refresh ARGS="--net-raw --TESTONLY-test-name-env=RUNSC_TEST_NAME --debug --strace --log-packets $(ARGS)" + @$(MAKE) configure RUNTIME=runsc @$(MAKE) configure @sudo systemctl restart docker .PHONY: install-test diff --git a/WORKSPACE b/WORKSPACE index 417ec6100..b3e97b0cc 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -4,11 +4,11 @@ load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") # Bazel/starlark utilities. http_archive( name = "bazel_skylib", + sha256 = "97e70364e9249702246c0e9444bccdc4b847bed1eb03c5a3ece4f83dfe6abc44", urls = [ "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz", "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz", ], - sha256 = "97e70364e9249702246c0e9444bccdc4b847bed1eb03c5a3ece4f83dfe6abc44", ) load("@bazel_skylib//:workspace.bzl", "bazel_skylib_workspace") @@ -159,7 +159,38 @@ load("@com_github_grpc_grpc//bazel:grpc_extra_deps.bzl", "grpc_extra_deps") grpc_extra_deps() -# External repositories, in sorted order. +# System Call test dependencies. +http_archive( + name = "com_google_absl", + sha256 = "56775f1283a59e6274c28d99981a9717ff4e0b1161e9129fdb2fcf22531d8d93", + strip_prefix = "abseil-cpp-a0d1e098c2f99694fa399b175a7ccf920762030e", + urls = [ + "https://mirror.bazel.build/github.com/abseil/abseil-cpp/archive/a0d1e098c2f99694fa399b175a7ccf920762030e.tar.gz", + "https://github.com/abseil/abseil-cpp/archive/a0d1e098c2f99694fa399b175a7ccf920762030e.tar.gz", + ], +) + +http_archive( + name = "com_google_googletest", + sha256 = "0a10bea96d8670e5eef948d79d824162b1577bb7889539e49ec786bfc3e48912", + strip_prefix = "googletest-565f1b848215b77c3732bca345fe76a0431d8b34", + urls = [ + "https://mirror.bazel.build/github.com/google/googletest/archive/565f1b848215b77c3732bca345fe76a0431d8b34.tar.gz", + "https://github.com/google/googletest/archive/565f1b848215b77c3732bca345fe76a0431d8b34.tar.gz", + ], +) + +http_archive( + name = "com_google_benchmark", + sha256 = "3c6a165b6ecc948967a1ead710d4a181d7b0fbcaa183ef7ea84604994966221a", + strip_prefix = "benchmark-1.5.0", + urls = [ + "https://mirror.bazel.build/github.com/google/benchmark/archive/v1.5.0.tar.gz", + "https://github.com/google/benchmark/archive/v1.5.0.tar.gz", + ], +) + +# Go repositories, in gazelle order. 
go_repository( name = "com_github_cenkalti_backoff", importpath = "github.com/cenkalti/backoff", @@ -177,15 +208,15 @@ go_repository( go_repository( name = "com_github_golang_mock", importpath = "github.com/golang/mock", - sum = "h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=", - version = "v1.3.1", + sum = "h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=", + version = "v1.1.1", ) go_repository( name = "com_github_google_go-cmp", importpath = "github.com/google/go-cmp", - sum = "h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=", - version = "v0.2.0", + sum = "h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=", + version = "v0.3.1", ) go_repository( @@ -232,8 +263,8 @@ go_repository( go_repository( name = "com_github_opencontainers_runtime-spec", importpath = "github.com/opencontainers/runtime-spec", - sum = "h1:d9F+LNYwMyi3BDN4GzZdaSiq4otb8duVEWyZjeUtOQI=", - version = "v0.1.2-0.20171211145439-b2d941ef6a78", + sum = "h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=", + version = "v1.0.2", ) go_repository( @@ -261,8 +292,8 @@ go_repository( name = "org_golang_google_grpc", build_file_proto_mode = "disable", importpath = "google.golang.org/grpc", - sum = "h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=", - version = "v1.27.1", + sum = "h1:Mm8atZtkT+P6R43n/dqNDWkPPu5BwRVu/1rJnJCeZH8=", + version = "v1.12.0", ) go_repository( @@ -289,8 +320,8 @@ go_repository( go_repository( name = "org_golang_x_net", importpath = "golang.org/x/net", - sum = "h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=", - version = "v0.0.0-20190620200207-3b0461eec859", + sum = "h1:FCqk7JXVeupwwnGVopQCC0a0xRK0Rj7SL5AyjjWo4pk=", + version = "v0.0.0-20170716174642-b3756b4b77d7", ) go_repository( @@ -303,8 +334,8 @@ go_repository( go_repository( name = "org_golang_x_text", importpath = "golang.org/x/text", - sum = "h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=", - version = "v0.3.0", + sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=", + version = "v0.3.2", ) go_repository( @@ -317,8 +348,8 @@ go_repository( go_repository( name = "org_golang_x_tools", importpath = "golang.org/x/tools", - sum = "h1:Uglradbb4KfUWaYasZhlsDsGRwHHvRsHoNAEONef0W8=", - version = "v0.0.0-20200131233409-575de47986ce", + sum = "h1:NIou6eNFigscvKJmsbyez16S2cIS6idossORlFtSt2E=", + version = "v0.0.0-20181030221726-6c7e314b6563", ) go_repository( @@ -352,8 +383,8 @@ go_repository( go_repository( name = "org_golang_x_oauth2", importpath = "golang.org/x/oauth2", - sum = "h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=", - version = "v0.0.0-20191202225959-858c2ad4c8b6", + sum = "h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=", + version = "v0.0.0-20180821212333-d2e6202438be", ) go_repository( @@ -497,12 +528,11 @@ go_repository( version = "v1.5.0", ) -# BigQuery Dependencies for Benchmarks go_repository( name = "com_google_cloud_go", importpath = "cloud.google.com/go", - sum = "h1:eoz/lYxKSL4CNAiaUJ0ZfD1J3bfMYbU5B3rwM1C1EIU=", - version = "v0.55.0", + sum = "h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=", + version = "v0.26.0", ) go_repository( @@ -515,8 +545,8 @@ go_repository( go_repository( name = "io_opencensus_go", importpath = "go.opencensus.io", - sum = "h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=", - version = "v0.22.3", + sum = "h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=", + version = "v0.22.0", ) go_repository( @@ -526,34 +556,505 @@ go_repository( version = "v0.0.0-20200121045136-8c9f03a8e57e", ) -# System Call test dependencies. 
-http_archive( - name = "com_google_absl", - sha256 = "56775f1283a59e6274c28d99981a9717ff4e0b1161e9129fdb2fcf22531d8d93", - strip_prefix = "abseil-cpp-a0d1e098c2f99694fa399b175a7ccf920762030e", - urls = [ - "https://mirror.bazel.build/github.com/abseil/abseil-cpp/archive/a0d1e098c2f99694fa399b175a7ccf920762030e.tar.gz", - "https://github.com/abseil/abseil-cpp/archive/a0d1e098c2f99694fa399b175a7ccf920762030e.tar.gz", - ], +go_repository( + name = "com_github_census_instrumentation_opencensus_proto", + importpath = "github.com/census-instrumentation/opencensus-proto", + sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=", + version = "v0.2.1", ) -http_archive( - name = "com_google_googletest", - sha256 = "0a10bea96d8670e5eef948d79d824162b1577bb7889539e49ec786bfc3e48912", - strip_prefix = "googletest-565f1b848215b77c3732bca345fe76a0431d8b34", - urls = [ - "https://mirror.bazel.build/github.com/google/googletest/archive/565f1b848215b77c3732bca345fe76a0431d8b34.tar.gz", - "https://github.com/google/googletest/archive/565f1b848215b77c3732bca345fe76a0431d8b34.tar.gz", - ], +go_repository( + name = "com_github_client9_misspell", + importpath = "github.com/client9/misspell", + sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=", + version = "v0.3.4", ) -http_archive( - name = "com_google_benchmark", - sha256 = "3c6a165b6ecc948967a1ead710d4a181d7b0fbcaa183ef7ea84604994966221a", - strip_prefix = "benchmark-1.5.0", - urls = [ - "https://mirror.bazel.build/github.com/google/benchmark/archive/v1.5.0.tar.gz", - "https://github.com/google/benchmark/archive/v1.5.0.tar.gz", - ], +go_repository( + name = "com_github_cncf_udpa_go", + importpath = "github.com/cncf/udpa/go", + sum = "h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU=", + version = "v0.0.0-20191209042840-269d4d468f6f", +) + +# K8s must be pinned to 1.14, prior to APIs being split into separate +# repositories. The containerd versions below assume the existence of the cri +# API within the kubelet package. 
+go_repository( + name = "io_k8s_kubernetes", + importpath = "k8s.io/kubernetes", + sum = "h1:6T2iAEoOYQnzQb3WvPlUkcczEEXZ7+YPlAO8olwujRw=", + version = "v1.14.0", +) + +go_repository( + name = "com_github_containerd_cgroups", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/cgroups", + sum = "h1:5/YbYIVboEqSpFSC/iMO9FOIP/q8RIuKfYS2TA1M8WE=", + version = "v0.0.0-20200520162414-666f4a009ffb", +) + +go_repository( + name = "com_github_containerd_console", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/console", + sum = "h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg=", + version = "v0.0.0-20180822173158-c12b1e7919c1", +) + +go_repository( + name = "com_github_containerd_containerd", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/containerd", + sum = "h1:BmZa1bGjKctYrIbyjbhZJlGvHceJASpdW5pIDSQcw1E=", + version = "v0.0.0-20190510190154-d0319ec44af6", +) + +go_repository( + name = "com_github_containerd_continuity", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/continuity", + sum = "h1:tN9D97v5A5QuKdcKHKt+UMKrkQ5YXUnD8iM7IAAjEfI=", + version = "v0.0.0-20190815185530-f2a389ac0a02", +) + +go_repository( + name = "com_github_containerd_cri", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/cri", + sum = "h1:+bW7GQb2q32/Liy0ZiR6pkpRXdDHShUXRoWg8OGVWZs=", + version = "v0.0.0-20190308093238-8a0bd84b9a4c", +) + +go_repository( + name = "com_github_containerd_fifo", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/fifo", + sum = "h1:XGyg7oTtD0DoRFhbpV6x1WfV0flKC4UxXU7ab1zC08U=", + version = "v0.0.0-20180307165137-3d5202aec260", +) + +go_repository( + name = "com_github_containerd_go_runc", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/go-runc", + sum = "h1:esQOJREg8nw8aXj6uCN5dfW5cKUBiEJ/+nni1Q/D/sw=", + version = "v0.0.0-20180907222934-5a6d9f37cfa3", +) + +go_repository( + name = "com_github_containerd_ttrpc", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/ttrpc", + sum = "h1:SKDlsIhYxNE1LO0xwuOR+3QWj3zRibVQu5jWIMQmOfU=", + version = "v0.0.0-20190411181408-699c4e40d1e7", +) + +go_repository( + name = "com_github_containerd_typeurl", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/typeurl", + sum = "h1:JNn81o/xG+8NEo3bC/vx9pbi/g2WI8mtP2/nXzu297Y=", + version = "v0.0.0-20180627222232-a93fcdb778cd", +) + +go_repository( + name = "com_github_coreos_go_systemd", + importpath = "github.com/coreos/go-systemd", + sum = "h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=", + version = "v0.0.0-20191104093116-d3cd4ed1dbcf", +) + +go_repository( + name = "com_github_davecgh_go_spew", + importpath = "github.com/davecgh/go-spew", + sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=", + version = "v1.1.1", +) + +go_repository( + name = "com_github_docker_distribution", + importpath = "github.com/docker/distribution", + sum = "h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=", + version = "v2.7.1+incompatible", +) + +go_repository( + name = "com_github_docker_go_events", + importpath = "github.com/docker/go-events", + sum = "h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=", + version = "v0.0.0-20190806004212-e31b211e4f1c", +) + +go_repository( + name = "com_github_docker_go_units", + importpath = "github.com/docker/go-units", + sum = "h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=", + version = "v0.4.0", +) + +go_repository( + name = "com_github_dustin_go_humanize", + 
importpath = "github.com/dustin/go-humanize", + sum = "h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs=", + version = "v0.0.0-20171111073723-bb3d318650d4", +) + +go_repository( + name = "com_github_envoyproxy_go_control_plane", + importpath = "github.com/envoyproxy/go-control-plane", + sum = "h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E=", + version = "v0.9.4", +) + +go_repository( + name = "com_github_envoyproxy_protoc_gen_validate", + importpath = "github.com/envoyproxy/protoc-gen-validate", + sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=", + version = "v0.1.0", +) + +go_repository( + name = "com_github_fsnotify_fsnotify", + importpath = "github.com/fsnotify/fsnotify", + sum = "h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=", + version = "v1.4.7", +) + +go_repository( + name = "com_github_godbus_dbus", + importpath = "github.com/godbus/dbus", + sum = "h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=", + version = "v0.0.0-20190726142602-4481cbc300e2", +) + +go_repository( + name = "com_github_gogo_googleapis", + importpath = "github.com/gogo/googleapis", + sum = "h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI=", + version = "v1.4.0", +) + +go_repository( + name = "com_github_gogo_protobuf", + importpath = "github.com/gogo/protobuf", + sum = "h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=", + version = "v1.3.1", +) + +go_repository( + name = "com_github_golang_glog", + importpath = "github.com/golang/glog", + sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=", + version = "v0.0.0-20160126235308-23def4e6c14b", +) + +go_repository( + name = "com_github_hashicorp_golang_lru", + importpath = "github.com/hashicorp/golang-lru", + sum = "h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=", + version = "v0.5.1", +) + +go_repository( + name = "com_github_hpcloud_tail", + importpath = "github.com/hpcloud/tail", + sum = "h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=", + version = "v1.0.0", +) + +go_repository( + name = "com_github_inconshreveable_mousetrap", + importpath = "github.com/inconshreveable/mousetrap", + sum = "h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=", + version = "v1.0.0", +) + +go_repository( + name = "com_github_kisielk_errcheck", + importpath = "github.com/kisielk/errcheck", + sum = "h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E=", + version = "v1.2.0", +) + +go_repository( + name = "com_github_kisielk_gotool", + importpath = "github.com/kisielk/gotool", + sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=", + version = "v1.0.0", +) + +go_repository( + name = "com_github_konsorten_go_windows_terminal_sequences", + importpath = "github.com/konsorten/go-windows-terminal-sequences", + sum = "h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=", + version = "v1.0.2", +) + +go_repository( + name = "com_github_microsoft_go_winio", + importpath = "github.com/Microsoft/go-winio", + sum = "h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=", + version = "v0.4.14", +) + +go_repository( + name = "com_github_microsoft_hcsshim", + importpath = "github.com/Microsoft/hcsshim", + sum = "h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=", + version = "v0.8.6", +) + +go_repository( + name = "com_github_onsi_ginkgo", + importpath = "github.com/onsi/ginkgo", + sum = "h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=", + version = "v1.10.1", +) + +go_repository( + name = "com_github_onsi_gomega", + importpath = "github.com/onsi/gomega", + sum = "h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=", + version = "v1.7.0", +) + +go_repository( + name = "com_github_opencontainers_go_digest", + 
importpath = "github.com/opencontainers/go-digest", + sum = "h1:QhPf3A2AZW3tTGvHPg0TA+CR3oHbVLlXUhlghqISp1I=", + version = "v0.0.0-20180430190053-c9281466c8b2", +) + +go_repository( + name = "com_github_opencontainers_image_spec", + importpath = "github.com/opencontainers/image-spec", + sum = "h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=", + version = "v1.0.1", +) + +go_repository( + name = "com_github_opencontainers_runc", + importpath = "github.com/opencontainers/runc", + sum = "h1:dDCFes8Hj1r/i5qnypONo5jdOme/8HWZC/aNDyhECt0=", + version = "v1.0.0-rc8", +) + +go_repository( + name = "com_github_pkg_errors", + importpath = "github.com/pkg/errors", + sum = "h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=", + version = "v0.9.1", +) + +go_repository( + name = "com_github_pmezard_go_difflib", + importpath = "github.com/pmezard/go-difflib", + sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=", + version = "v1.0.0", +) + +go_repository( + name = "com_github_prometheus_client_model", + importpath = "github.com/prometheus/client_model", + sum = "h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=", + version = "v0.0.0-20190812154241-14fe0d1b01d4", +) + +go_repository( + name = "com_github_prometheus_procfs", + importpath = "github.com/prometheus/procfs", + sum = "h1:hhvfGDVThBnd4kYisSFmYuHYeUhglxcwag7FhVPH9zM=", + version = "v0.0.0-20180125133057-cb4147076ac7", +) + +go_repository( + name = "com_github_sirupsen_logrus", + importpath = "github.com/sirupsen/logrus", + sum = "h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=", + version = "v1.4.2", +) + +go_repository( + name = "com_github_spf13_cobra", + importpath = "github.com/spf13/cobra", + sum = "h1:GQkkv3XSnxhAMjdq2wLfEnptEVr+2BNvmHizILHn+d4=", + version = "v0.0.2-0.20171109065643-2da4a54c5cee", +) + +go_repository( + name = "com_github_spf13_pflag", + importpath = "github.com/spf13/pflag", + sum = "h1:j8jxLbQ0+T1DFggy6XoGvyUnrJWPR/JybflPvu5rwS4=", + version = "v1.0.1-0.20171106142849-4c012f6dcd95", +) + +go_repository( + name = "com_github_stretchr_objx", + importpath = "github.com/stretchr/objx", + sum = "h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=", + version = "v0.1.1", +) + +go_repository( + name = "com_github_stretchr_testify", + importpath = "github.com/stretchr/testify", + sum = "h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=", + version = "v1.2.2", +) + +go_repository( + name = "com_github_urfave_cli", + importpath = "github.com/urfave/cli", + sum = "h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=", + version = "v1.22.2", +) + +go_repository( + name = "in_gopkg_airbrake_gobrake_v2", + importpath = "gopkg.in/airbrake/gobrake.v2", + sum = "h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=", + version = "v2.0.9", +) + +go_repository( + name = "in_gopkg_fsnotify_v1", + importpath = "gopkg.in/fsnotify.v1", + sum = "h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=", + version = "v1.4.7", +) + +go_repository( + name = "in_gopkg_gemnasium_logrus_airbrake_hook_v2", + importpath = "gopkg.in/gemnasium/logrus-airbrake-hook.v2", + sum = "h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=", + version = "v2.1.2", +) + +go_repository( + name = "in_gopkg_tomb_v1", + importpath = "gopkg.in/tomb.v1", + sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=", + version = "v1.0.0-20141024135613-dd632973f1e7", +) + +go_repository( + name = "in_gopkg_yaml_v2", + importpath = "gopkg.in/yaml.v2", + sum = "h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=", + version = "v2.2.2", +) + +go_repository( + name = "org_bazil_fuse", + importpath = "bazil.org/fuse", + 
sum = "h1:SC+c6A1qTFstO9qmB86mPV2IpYme/2ZoEQ0hrP+wo+Q=", + version = "v0.0.0-20160811212531-371fbbdaa898", +) + +go_repository( + name = "org_golang_google_appengine", + importpath = "google.golang.org/appengine", + sum = "h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=", + version = "v1.4.0", +) + +go_repository( + name = "org_golang_google_genproto", + importpath = "google.golang.org/genproto", + sum = "h1:wVJP1pATLVPNxCz4R2mTO6HUJgfGE0PmIu2E10RuhCw=", + version = "v0.0.0-20170523043604-d80a6e20e776", +) + +go_repository( + name = "org_golang_x_exp", + importpath = "golang.org/x/exp", + sum = "h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=", + version = "v0.0.0-20190121172915-509febef88a4", +) + +go_repository( + name = "org_golang_x_lint", + importpath = "golang.org/x/lint", + sum = "h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=", + version = "v0.0.0-20190313153728-d0100b6bd8b3", +) + +go_repository( + name = "tools_gotest", + importpath = "gotest.tools", + sum = "h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=", + version = "v2.2.0+incompatible", +) + +go_repository( + name = "co_honnef_go_tools", + importpath = "honnef.co/go/tools", + sum = "h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=", + version = "v0.0.0-20190523083050-ea95bdfd59fc", +) + +go_repository( + name = "com_github_burntsushi_toml", + importpath = "github.com/BurntSushi/toml", + sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=", + version = "v0.3.1", +) + +go_repository( + name = "io_k8s_cri_api", + importpath = "k8s.io/cri-api", + sum = "h1:bykYbClh5Bnjo2EMjlYbYQ3ksxHjjLcbriKPm831hVk=", + version = "v0.18.2", +) + +go_repository( + name = "com_github_docker_docker", + importpath = "github.com/docker/docker", + sum = "h1:Bh3QS4GYuVi8QeNskrV3ivn8p0bupmk0PfY4xmVulo4=", + version = "v17.12.0-ce-rc1.0.20200514230353-811a247d06e8+incompatible", +) + +go_repository( + name = "com_github_cilium_ebpf", + importpath = "github.com/cilium/ebpf", + sum = "h1:i8+1fuPLjSgAYXUyBlHNhFwjcfAsP4ufiuH1+PWkyDU=", + version = "v0.0.0-20200110133405-4032b1d8aae3", +) + +go_repository( + name = "com_github_coreos_go_systemd_v22", + importpath = "github.com/coreos/go-systemd/v22", + sum = "h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28=", + version = "v22.0.0", +) + +go_repository( + name = "com_github_cpuguy83_go_md2man_v2", + importpath = "github.com/cpuguy83/go-md2man/v2", + sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=", + version = "v2.0.0", +) + +go_repository( + name = "com_github_godbus_dbus_v5", + importpath = "github.com/godbus/dbus/v5", + sum = "h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=", + version = "v5.0.3", +) + +go_repository( + name = "com_github_russross_blackfriday_v2", + importpath = "github.com/russross/blackfriday/v2", + sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=", + version = "v2.0.1", +) + +go_repository( + name = "com_github_shurcool_sanitized_anchor_name", + importpath = "github.com/shurcooL/sanitized_anchor_name", + sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=", + version = "v1.0.0", ) diff --git a/g3doc/user_guide/BUILD b/g3doc/user_guide/BUILD index b69aee12c..355dd49b3 100644 --- a/g3doc/user_guide/BUILD +++ b/g3doc/user_guide/BUILD @@ -68,3 +68,12 @@ doc( permalink = "/docs/user_guide/platforms/", weight = "30", ) + +doc( + name = "runtimeclass", + src = "runtimeclass.md", + category = "User Guide", + permalink = "/docs/user_guide/runtimeclass/", + subcategory = "Advanced", + weight = "91", +) diff --git a/g3doc/user_guide/runtimeclass.md 
b/g3doc/user_guide/runtimeclass.md new file mode 100644 index 000000000..9f2d794c3 --- /dev/null +++ b/g3doc/user_guide/runtimeclass.md @@ -0,0 +1,44 @@ +# RuntimeClass + +First, follow the appropriate installation instructions for your version of +containerd. + +* For 1.1 or lower, use `gvisor-containerd-shim`. +* For 1.2 or higher, use `containerd-shim-runsc-v1`. + +# Set up the Kubernetes RuntimeClass + +Creating the RuntimeClass in kubernetes is simple once the runtime is available +for containerd: + +```shell +cat <: -// +// runOrError will run the provided command. +// +// If an error is encountered and neither Stdout or Stderr was set the error +// will be returned in the format of : . func (r *Runsc) runOrError(cmd *exec.Cmd) error { if cmd.Stdout != nil || cmd.Stderr != nil { ec, err := Monitor.Start(cmd) diff --git a/pkg/shim/v1/proc/BUILD b/pkg/shim/v1/proc/BUILD new file mode 100644 index 000000000..a59bf198d --- /dev/null +++ b/pkg/shim/v1/proc/BUILD @@ -0,0 +1,35 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "proc", + srcs = [ + "deleted_state.go", + "exec.go", + "exec_state.go", + "init.go", + "init_state.go", + "io.go", + "process.go", + "types.go", + "utils.go", + ], + visibility = [ + "//pkg/shim:__subpackages__", + "//shim:__subpackages__", + ], + deps = [ + "//pkg/shim/runsc", + "@com_github_containerd_console//:go_default_library", + "@com_github_containerd_containerd//errdefs:go_default_library", + "@com_github_containerd_containerd//log:go_default_library", + "@com_github_containerd_containerd//mount:go_default_library", + "@com_github_containerd_containerd//runtime/proc:go_default_library", + "@com_github_containerd_fifo//:go_default_library", + "@com_github_containerd_go_runc//:go_default_library", + "@com_github_gogo_protobuf//types:go_default_library", + "@com_github_opencontainers_runtime-spec//specs-go:go_default_library", + "@org_golang_x_sys//unix:go_default_library", + ], +) diff --git a/pkg/shim/v1/proc/deleted_state.go b/pkg/shim/v1/proc/deleted_state.go index 0196c96dd..52aad40ac 100644 --- a/pkg/shim/v1/proc/deleted_state.go +++ b/pkg/shim/v1/proc/deleted_state.go @@ -17,33 +17,33 @@ package proc import ( "context" + "fmt" "github.com/containerd/console" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/runtime/proc" - "github.com/pkg/errors" ) type deletedState struct{} func (*deletedState) Resize(ws console.WinSize) error { - return errors.Errorf("cannot resize a deleted process") + return fmt.Errorf("cannot resize a deleted process") } func (*deletedState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a deleted process") + return fmt.Errorf("cannot start a deleted process") } func (*deletedState) Delete(ctx context.Context) error { - return errors.Wrap(errdefs.ErrNotFound, "cannot delete a deleted process") + return fmt.Errorf("cannot delete a deleted process: %w", errdefs.ErrNotFound) } func (*deletedState) Kill(ctx context.Context, sig uint32, all bool) error { - return errors.Wrap(errdefs.ErrNotFound, "cannot kill a deleted process") + return fmt.Errorf("cannot kill a deleted process: %w", errdefs.ErrNotFound) } func (*deletedState) SetExited(status int) {} func (*deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { - return nil, errors.Errorf("cannot exec in a deleted state") + return nil, fmt.Errorf("cannot exec in a deleted state") } diff --git a/pkg/shim/v1/proc/exec.go b/pkg/shim/v1/proc/exec.go 
index 6821e09cd..4ef9cf6cf 100644 --- a/pkg/shim/v1/proc/exec.go +++ b/pkg/shim/v1/proc/exec.go @@ -31,7 +31,6 @@ import ( "github.com/containerd/fifo" runc "github.com/containerd/go-runc" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/shim/runsc" @@ -151,9 +150,11 @@ func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error { if err := e.parent.runtime.Kill(ctx, e.parent.id, int(sig), &runsc.KillOpts{ Pid: internalPid, }); err != nil { - // If this returns error, consider the process has already stopped. + // If this returns error, consider the process has + // already stopped. + // // TODO: Fix after signal handling is fixed. - return errors.Wrapf(errdefs.ErrNotFound, err.Error()) + return fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) } } return nil @@ -182,16 +183,16 @@ func (e *execProcess) start(ctx context.Context) (err error) { ) if e.stdio.Terminal { if socket, err = runc.NewTempConsoleSocket(); err != nil { - return errors.Wrap(err, "failed to create runc console socket") + return fmt.Errorf("failed to create runc console socket: %w", err) } defer socket.Close() } else if e.stdio.IsNull() { if e.io, err = runc.NewNullIO(); err != nil { - return errors.Wrap(err, "creating new NULL IO") + return fmt.Errorf("creating new NULL IO: %w", err) } } else { if e.io, err = runc.NewPipeIO(e.parent.IoUID, e.parent.IoGID, withConditionalIO(e.stdio)); err != nil { - return errors.Wrap(err, "failed to create runc io pipes") + return fmt.Errorf("failed to create runc io pipes: %w", err) } } opts := &runsc.ExecOpts{ @@ -217,7 +218,7 @@ func (e *execProcess) start(ctx context.Context) (err error) { if e.stdio.Stdin != "" { sc, err := fifo.OpenFifo(context.Background(), e.stdio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) if err != nil { - return errors.Wrapf(err, "failed to open stdin fifo %s", e.stdio.Stdin) + return fmt.Errorf("failed to open stdin fifo %s: %w", e.stdio.Stdin, err) } e.closers = append(e.closers, sc) e.stdin = sc @@ -228,25 +229,25 @@ func (e *execProcess) start(ctx context.Context) (err error) { if socket != nil { console, err := socket.ReceiveMaster() if err != nil { - return errors.Wrap(err, "failed to retrieve console master") + return fmt.Errorf("failed to retrieve console master: %w", err) } if e.console, err = e.parent.Platform.CopyConsole(ctx, console, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil { - return errors.Wrap(err, "failed to start console copy") + return fmt.Errorf("failed to start console copy: %w", err) } } else if !e.stdio.IsNull() { if err := copyPipes(ctx, e.io, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil { - return errors.Wrap(err, "failed to start io pipe copy") + return fmt.Errorf("failed to start io pipe copy: %w", err) } } copyWaitGroup.Wait() pid, err := runc.ReadPidFile(opts.PidFile) if err != nil { - return errors.Wrap(err, "failed to retrieve OCI runtime exec pid") + return fmt.Errorf("failed to retrieve OCI runtime exec pid: %w", err) } e.pid = pid internalPid, err := runc.ReadPidFile(opts.InternalPidFile) if err != nil { - return errors.Wrap(err, "failed to retrieve OCI runtime exec internal pid") + return fmt.Errorf("failed to retrieve OCI runtime exec internal pid: %w", err) } e.internalPid = internalPid go func() { diff --git a/pkg/shim/v1/proc/exec_state.go b/pkg/shim/v1/proc/exec_state.go index 5416cb601..4dcda8b44 100644 --- a/pkg/shim/v1/proc/exec_state.go +++ 
b/pkg/shim/v1/proc/exec_state.go @@ -17,9 +17,9 @@ package proc import ( "context" + "fmt" "github.com/containerd/console" - "github.com/pkg/errors" ) type execState interface { @@ -43,7 +43,7 @@ func (s *execCreatedState) transition(name string) error { case "deleted": s.p.execState = &deletedState{} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } @@ -87,7 +87,7 @@ func (s *execRunningState) transition(name string) error { case "stopped": s.p.execState = &execStoppedState{p: s.p} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } @@ -97,11 +97,11 @@ func (s *execRunningState) Resize(ws console.WinSize) error { } func (s *execRunningState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a running process") + return fmt.Errorf("cannot start a running process") } func (s *execRunningState) Delete(ctx context.Context) error { - return errors.Errorf("cannot delete a running process") + return fmt.Errorf("cannot delete a running process") } func (s *execRunningState) Kill(ctx context.Context, sig uint32, all bool) error { @@ -125,17 +125,17 @@ func (s *execStoppedState) transition(name string) error { case "deleted": s.p.execState = &deletedState{} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } func (s *execStoppedState) Resize(ws console.WinSize) error { - return errors.Errorf("cannot resize a stopped container") + return fmt.Errorf("cannot resize a stopped container") } func (s *execStoppedState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a stopped process") + return fmt.Errorf("cannot start a stopped process") } func (s *execStoppedState) Delete(ctx context.Context) error { diff --git a/pkg/shim/v1/proc/init.go b/pkg/shim/v1/proc/init.go index 0771540a9..b429cb94f 100644 --- a/pkg/shim/v1/proc/init.go +++ b/pkg/shim/v1/proc/init.go @@ -18,6 +18,7 @@ package proc import ( "context" "encoding/json" + "fmt" "io" "path/filepath" "strings" @@ -33,23 +34,22 @@ import ( "github.com/containerd/fifo" runc "github.com/containerd/go-runc" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "gvisor.dev/gvisor/pkg/shim/runsc" ) -// InitPidFile name of the file that contains the init pid +// InitPidFile name of the file that contains the init pid. const InitPidFile = "init.pid" -// Init represents an initial process for a container +// Init represents an initial process for a container. type Init struct { wg sync.WaitGroup initState initState // mu is used to ensure that `Start()` and `Exited()` calls return in - // the right order when invoked in separate go routines. - // This is the case within the shim implementation as it makes use of - // the reaper interface. + // the right order when invoked in separate go routines. This is the + // case within the shim implementation as it makes use of the reaper + // interface. mu sync.Mutex waitBlock chan struct{} @@ -76,7 +76,7 @@ type Init struct { Monitor ProcessMonitor } -// NewRunsc returns a new runsc instance for a process +// NewRunsc returns a new runsc instance for a process. 
func NewRunsc(root, path, namespace, runtime string, config map[string]string) *runsc.Runsc { if root == "" { root = RunscRoot @@ -91,7 +91,7 @@ func NewRunsc(root, path, namespace, runtime string, config map[string]string) * } } -// New returns a new init process +// New returns a new init process. func New(id string, runtime *runsc.Runsc, stdio proc.Stdio) *Init { p := &Init{ id: id, @@ -104,21 +104,21 @@ func New(id string, runtime *runsc.Runsc, stdio proc.Stdio) *Init { return p } -// Create the process with the provided config +// Create the process with the provided config. func (p *Init) Create(ctx context.Context, r *CreateConfig) (err error) { var socket *runc.Socket if r.Terminal { if socket, err = runc.NewTempConsoleSocket(); err != nil { - return errors.Wrap(err, "failed to create OCI runtime console socket") + return fmt.Errorf("failed to create OCI runtime console socket: %w", err) } defer socket.Close() } else if hasNoIO(r) { if p.io, err = runc.NewNullIO(); err != nil { - return errors.Wrap(err, "creating new NULL IO") + return fmt.Errorf("creating new NULL IO: %w", err) } } else { if p.io, err = runc.NewPipeIO(p.IoUID, p.IoGID, withConditionalIO(p.stdio)); err != nil { - return errors.Wrap(err, "failed to create OCI runtime io pipes") + return fmt.Errorf("failed to create OCI runtime io pipes: %w", err) } } pidFile := filepath.Join(p.Bundle, InitPidFile) @@ -139,7 +139,7 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) (err error) { if r.Stdin != "" { sc, err := fifo.OpenFifo(context.Background(), r.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) if err != nil { - return errors.Wrapf(err, "failed to open stdin fifo %s", r.Stdin) + return fmt.Errorf("failed to open stdin fifo %s: %w", r.Stdin, err) } p.stdin = sc p.closers = append(p.closers, sc) @@ -150,58 +150,58 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) (err error) { if socket != nil { console, err := socket.ReceiveMaster() if err != nil { - return errors.Wrap(err, "failed to retrieve console master") + return fmt.Errorf("failed to retrieve console master: %w", err) } console, err = p.Platform.CopyConsole(ctx, console, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup) if err != nil { - return errors.Wrap(err, "failed to start console copy") + return fmt.Errorf("failed to start console copy: %w", err) } p.console = console } else if !hasNoIO(r) { if err := copyPipes(ctx, p.io, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup); err != nil { - return errors.Wrap(err, "failed to start io pipe copy") + return fmt.Errorf("failed to start io pipe copy: %w", err) } } copyWaitGroup.Wait() pid, err := runc.ReadPidFile(pidFile) if err != nil { - return errors.Wrap(err, "failed to retrieve OCI runtime container pid") + return fmt.Errorf("failed to retrieve OCI runtime container pid: %w", err) } p.pid = pid return nil } -// Wait for the process to exit +// Wait waits for the process to exit. func (p *Init) Wait() { <-p.waitBlock } -// ID of the process +// ID returns the ID of the process. func (p *Init) ID() string { return p.id } -// Pid of the process +// Pid returns the PID of the process. func (p *Init) Pid() int { return p.pid } -// ExitStatus of the process +// ExitStatus returns the exit status of the process. func (p *Init) ExitStatus() int { p.mu.Lock() defer p.mu.Unlock() return p.status } -// ExitedAt at time when the process exited +// ExitedAt returns the time when the process exited. 
func (p *Init) ExitedAt() time.Time { p.mu.Lock() defer p.mu.Unlock() return p.exited } -// Status of the process +// Status returns the status of the process. func (p *Init) Status(ctx context.Context) (string, error) { p.mu.Lock() defer p.mu.Unlock() @@ -215,7 +215,7 @@ func (p *Init) Status(ctx context.Context) (string, error) { return p.convertStatus(c.Status), nil } -// Start the init process +// Start starts the init process. func (p *Init) Start(ctx context.Context) error { p.mu.Lock() defer p.mu.Unlock() @@ -250,7 +250,7 @@ func (p *Init) start(ctx context.Context) error { return nil } -// SetExited of the init process with the next status +// SetExited set the exit stauts of the init process. func (p *Init) SetExited(status int) { p.mu.Lock() defer p.mu.Unlock() @@ -265,7 +265,7 @@ func (p *Init) setExited(status int) { close(p.waitBlock) } -// Delete the init process +// Delete deletes the init process. func (p *Init) Delete(ctx context.Context) error { p.mu.Lock() defer p.mu.Unlock() @@ -298,13 +298,13 @@ func (p *Init) delete(ctx context.Context) error { if err2 := mount.UnmountAll(p.Rootfs, 0); err2 != nil { log.G(ctx).WithError(err2).Warn("failed to cleanup rootfs mount") if err == nil { - err = errors.Wrap(err2, "failed rootfs umount") + err = fmt.Errorf("failed rootfs umount: %w", err2) } } return err } -// Resize the init processes console +// Resize resizes the init processes console. func (p *Init) Resize(ws console.WinSize) error { p.mu.Lock() defer p.mu.Unlock() @@ -322,7 +322,7 @@ func (p *Init) resize(ws console.WinSize) error { return p.console.Resize(ws) } -// Kill the init process +// Kill kills the init process. func (p *Init) Kill(ctx context.Context, signal uint32, all bool) error { p.mu.Lock() defer p.mu.Unlock() @@ -340,7 +340,7 @@ func (p *Init) kill(context context.Context, signal uint32, all bool) error { c, err := p.runtime.State(context, p.id) if err != nil { if strings.Contains(err.Error(), "does not exist") { - return errors.Wrapf(errdefs.ErrNotFound, "no such process") + return fmt.Errorf("no such process: %w", errdefs.ErrNotFound) } return p.runtimeError(err, "OCI runtime state failed") } @@ -348,7 +348,7 @@ func (p *Init) kill(context context.Context, signal uint32, all bool) error { // If the container is not in running state, directly return // "no such process" if p.convertStatus(c.Status) == "stopped" { - return errors.Wrapf(errdefs.ErrNotFound, "no such process") + return fmt.Errorf("no such process: %w", errdefs.ErrNotFound) } killErr = p.runtime.Kill(context, p.id, int(signal), &runsc.KillOpts{ All: all, @@ -362,7 +362,7 @@ func (p *Init) kill(context context.Context, signal uint32, all bool) error { return p.runtimeError(killErr, "kill timeout") } -// KillAll processes belonging to the init process +// KillAll kills all processes belonging to the init process. func (p *Init) KillAll(context context.Context) error { p.mu.Lock() defer p.mu.Unlock() @@ -380,17 +380,17 @@ func (p *Init) killAll(context context.Context) error { return nil } -// Stdin of the process +// Stdin returns the stdin of the process. func (p *Init) Stdin() io.Closer { return p.stdin } -// Runtime returns the OCI runtime configured for the init process +// Runtime returns the OCI runtime configured for the init process. func (p *Init) Runtime() *runsc.Runsc { return p.runtime } -// Exec returns a new child process +// Exec returns a new child process. 
func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { p.mu.Lock() defer p.mu.Unlock() @@ -398,7 +398,7 @@ func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce return p.initState.Exec(ctx, path, r) } -// exec returns a new exec'd process +// exec returns a new exec'd process. func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { // process exec request var spec specs.Process @@ -424,7 +424,7 @@ func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce return e, nil } -// Stdio of the process +// Stdio returns the stdio of the process. func (p *Init) Stdio() proc.Stdio { return p.stdio } @@ -437,11 +437,11 @@ func (p *Init) runtimeError(rErr error, msg string) error { rMsg, err := getLastRuntimeError(p.runtime) switch { case err != nil: - return errors.Wrapf(rErr, "%s: %s (%s)", msg, "unable to retrieve OCI runtime error", err.Error()) + return fmt.Errorf("%s: %w (unable to retrieve OCI runtime error: %v)", msg, rErr, err) case rMsg == "": - return errors.Wrap(rErr, msg) + return fmt.Errorf("%s: %w", msg, rErr) default: - return errors.Errorf("%s: %s", msg, rMsg) + return fmt.Errorf("%s: %s", msg, rMsg) } } diff --git a/pkg/shim/v1/proc/init_state.go b/pkg/shim/v1/proc/init_state.go index 868646b6c..509f27762 100644 --- a/pkg/shim/v1/proc/init_state.go +++ b/pkg/shim/v1/proc/init_state.go @@ -17,11 +17,11 @@ package proc import ( "context" + "fmt" "github.com/containerd/console" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/runtime/proc" - "github.com/pkg/errors" ) type initState interface { @@ -46,7 +46,7 @@ func (s *createdState) transition(name string) error { case "deleted": s.p.initState = &deletedState{} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } @@ -107,7 +107,7 @@ func (s *runningState) transition(name string) error { case "stopped": s.p.initState = &stoppedState{p: s.p} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } @@ -117,11 +117,11 @@ func (s *runningState) Resize(ws console.WinSize) error { } func (s *runningState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a running process") + return fmt.Errorf("cannot start a running process") } func (s *runningState) Delete(ctx context.Context) error { - return errors.Errorf("cannot delete a running process") + return fmt.Errorf("cannot delete a running process") } func (s *runningState) Kill(ctx context.Context, sig uint32, all bool) error { @@ -149,17 +149,17 @@ func (s *stoppedState) transition(name string) error { case "deleted": s.p.initState = &deletedState{} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } func (s *stoppedState) Resize(ws console.WinSize) error { - return errors.Errorf("cannot resize a stopped container") + return fmt.Errorf("cannot resize a stopped container") } func (s *stoppedState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a stopped process") + return fmt.Errorf("cannot start a stopped process") } func (s *stoppedState) Delete(ctx context.Context) error { @@ -178,5 +178,5 @@ func (s *stoppedState) 
SetExited(status int) { } func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { - return nil, errors.Errorf("cannot exec in a stopped state") + return nil, fmt.Errorf("cannot exec in a stopped state") } diff --git a/pkg/shim/v1/proc/io.go b/pkg/shim/v1/proc/io.go index 2677b4e54..5313c7a50 100644 --- a/pkg/shim/v1/proc/io.go +++ b/pkg/shim/v1/proc/io.go @@ -150,8 +150,9 @@ func (c *countingWriteCloser) Close() error { return c.WriteCloser.Close() } -// isFifo checks if a file is a fifo -// if the file does not exist then it returns false +// isFifo checks if a file is a fifo. +// +// If the file does not exist then it returns false. func isFifo(path string) (bool, error) { stat, err := os.Stat(path) if err != nil { diff --git a/pkg/shim/v1/proc/process.go b/pkg/shim/v1/proc/process.go index 1bfa99f4c..d462c3eef 100644 --- a/pkg/shim/v1/proc/process.go +++ b/pkg/shim/v1/proc/process.go @@ -16,10 +16,10 @@ package proc import ( - "github.com/pkg/errors" + "fmt" ) -// RunscRoot is the path to the root runsc state directory +// RunscRoot is the path to the root runsc state directory. const RunscRoot = "/run/containerd/runsc" func stateName(v interface{}) string { @@ -33,5 +33,5 @@ func stateName(v interface{}) string { case *stoppedState: return "stopped" } - panic(errors.Errorf("invalid state %v", v)) + panic(fmt.Errorf("invalid state %v", v)) } diff --git a/pkg/shim/v1/proc/types.go b/pkg/shim/v1/proc/types.go index dcd43bcca..5c215de5f 100644 --- a/pkg/shim/v1/proc/types.go +++ b/pkg/shim/v1/proc/types.go @@ -23,7 +23,7 @@ import ( runc "github.com/containerd/go-runc" ) -// Mount holds filesystem mount configuration +// Mount holds filesystem mount configuration. type Mount struct { Type string Source string @@ -31,7 +31,7 @@ type Mount struct { Options []string } -// CreateConfig hold task creation configuration +// CreateConfig hold task creation configuration. type CreateConfig struct { ID string Bundle string @@ -44,7 +44,7 @@ type CreateConfig struct { Options *google_protobuf.Any } -// ExecConfig holds exec creation configuration +// ExecConfig holds exec creation configuration. type ExecConfig struct { ID string Terminal bool @@ -54,14 +54,14 @@ type ExecConfig struct { Spec *google_protobuf.Any } -// Exit is the type of exit events +// Exit is the type of exit events. type Exit struct { Timestamp time.Time ID string Status int } -// ProcessMonitor monitors process exit changes +// ProcessMonitor monitors process exit changes. type ProcessMonitor interface { // Subscribe to process exit changes Subscribe() chan runc.Exit diff --git a/pkg/shim/v1/proc/utils.go b/pkg/shim/v1/proc/utils.go index 6e7dbdad7..716de2f59 100644 --- a/pkg/shim/v1/proc/utils.go +++ b/pkg/shim/v1/proc/utils.go @@ -34,8 +34,6 @@ const ( // inside the sandbox. var ExitCh = make(chan Exit, bufferSize) -// TODO(random-liu): This can be a utility. - // TODO(mlaventure): move to runc package? 
func getLastRuntimeError(r *runsc.Runsc) (string, error) { if r.Log == "" { diff --git a/pkg/shim/v1/shim/BUILD b/pkg/shim/v1/shim/BUILD new file mode 100644 index 000000000..02cffbb01 --- /dev/null +++ b/pkg/shim/v1/shim/BUILD @@ -0,0 +1,38 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "shim", + srcs = [ + "platform.go", + "service.go", + ], + visibility = [ + "//pkg/shim:__subpackages__", + "//shim:__subpackages__", + ], + deps = [ + "//pkg/shim/runsc", + "//pkg/shim/v1/proc", + "//pkg/shim/v1/utils", + "@com_github_containerd_console//:go_default_library", + "@com_github_containerd_containerd//api/events:go_default_library", + "@com_github_containerd_containerd//api/types/task:go_default_library", + "@com_github_containerd_containerd//errdefs:go_default_library", + "@com_github_containerd_containerd//events:go_default_library", + "@com_github_containerd_containerd//log:go_default_library", + "@com_github_containerd_containerd//mount:go_default_library", + "@com_github_containerd_containerd//namespaces:go_default_library", + "@com_github_containerd_containerd//runtime:go_default_library", + "@com_github_containerd_containerd//runtime/linux/runctypes:go_default_library", + "@com_github_containerd_containerd//runtime/proc:go_default_library", + "@com_github_containerd_containerd//runtime/v1/shim:go_default_library", + "@com_github_containerd_containerd//runtime/v1/shim/v1:go_default_library", + "@com_github_containerd_fifo//:go_default_library", + "@com_github_containerd_typeurl//:go_default_library", + "@com_github_gogo_protobuf//types:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/pkg/shim/v1/shim/platform.go b/pkg/shim/v1/shim/platform.go index 86252c3f5..f6795563c 100644 --- a/pkg/shim/v1/shim/platform.go +++ b/pkg/shim/v1/shim/platform.go @@ -17,13 +17,13 @@ package shim import ( "context" + "fmt" "io" "sync" "syscall" "github.com/containerd/console" "github.com/containerd/fifo" - "github.com/pkg/errors" ) type linuxPlatform struct { @@ -32,7 +32,7 @@ type linuxPlatform struct { func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { if p.epoller == nil { - return nil, errors.New("uninitialized epoller") + return nil, fmt.Errorf("uninitialized epoller") } epollConsole, err := p.epoller.Add(console) @@ -79,11 +79,11 @@ func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error { if p.epoller == nil { - return errors.New("uninitialized epoller") + return fmt.Errorf("uninitialized epoller") } epollConsole, ok := cons.(*console.EpollConsole) if !ok { - return errors.Errorf("expected EpollConsole, got %#v", cons) + return fmt.Errorf("expected EpollConsole, got %#v", cons) } return epollConsole.Shutdown(p.epoller.CloseConsole) } @@ -100,7 +100,7 @@ func (s *Service) initPlatform() error { } epoller, err := console.NewEpoller() if err != nil { - return errors.Wrap(err, "failed to initialize epoller") + return fmt.Errorf("failed to initialize epoller: %w", err) } s.platform = &linuxPlatform{ epoller: epoller, diff --git a/pkg/shim/v1/shim/service.go b/pkg/shim/v1/shim/service.go index aac172801..7b0280ed2 100644 --- a/pkg/shim/v1/shim/service.go +++ b/pkg/shim/v1/shim/service.go @@ -37,8 +37,6 @@ import ( shimapi 
"github.com/containerd/containerd/runtime/v1/shim/v1" "github.com/containerd/typeurl" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -57,7 +55,7 @@ var ( } ) -// Config contains shim specific configuration +// Config contains shim specific configuration. type Config struct { Path string Namespace string @@ -66,17 +64,12 @@ type Config struct { RunscConfig map[string]string } -// NewService returns a new shim service that can be used via GRPC +// NewService returns a new shim service that can be used via GRPC. func NewService(config Config, publisher events.Publisher) (*Service, error) { if config.Namespace == "" { return nil, fmt.Errorf("shim namespace cannot be empty") } ctx := namespaces.WithNamespace(context.Background(), config.Namespace) - ctx = log.WithLogger(ctx, logrus.WithFields(logrus.Fields{ - "namespace": config.Namespace, - "path": config.Path, - "pid": os.Getpid(), - })) s := &Service{ config: config, context: ctx, @@ -86,13 +79,13 @@ func NewService(config Config, publisher events.Publisher) (*Service, error) { } go s.processExits() if err := s.initPlatform(); err != nil { - return nil, errors.Wrap(err, "failed to initialized platform behavior") + return nil, fmt.Errorf("failed to initialized platform behavior: %w", err) } go s.forward(publisher) return s, nil } -// Service is the shim implementation of a remote shim over GRPC +// Service is the shim implementation of a remote shim over GRPC. type Service struct { mu sync.Mutex @@ -108,7 +101,7 @@ type Service struct { bundle string } -// Create a new initial process and container with the underlying OCI runtime +// Create creates a new initial process and container with the underlying OCI runtime. func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *shimapi.CreateTaskResponse, err error) { s.mu.Lock() defer s.mu.Unlock() @@ -153,7 +146,7 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ * Options: rm.Options, } if err := m.Mount(rootfs); err != nil { - return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m) + return nil, fmt.Errorf("failed to mount rootfs component %v: %w", m, err) } } process, err := newInit( @@ -169,7 +162,8 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ * if err := process.Create(ctx, config); err != nil { return nil, errdefs.ToGRPC(err) } - // save the main task id and bundle to the shim for additional requests + // Save the main task id and bundle to the shim for additional + // requests. s.id = r.ID s.bundle = r.Bundle pid := process.Pid() @@ -179,7 +173,7 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ * }, nil } -// Start a process +// Start starts a process. func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi.StartResponse, error) { p, err := s.getExecProcess(r.ID) if err != nil { @@ -194,7 +188,7 @@ func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi. }, nil } -// Delete the initial process and container +// Delete deletes the initial process and container. 
func (s *Service) Delete(ctx context.Context, r *ptypes.Empty) (*shimapi.DeleteResponse, error) { p, err := s.getInitProcess() if err != nil { @@ -214,7 +208,7 @@ func (s *Service) Delete(ctx context.Context, r *ptypes.Empty) (*shimapi.DeleteR }, nil } -// DeleteProcess deletes an exec'd process +// DeleteProcess deletes an exec'd process. func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessRequest) (*shimapi.DeleteResponse, error) { if r.ID == s.id { return nil, status.Errorf(codes.InvalidArgument, "cannot delete init process with DeleteProcess") @@ -236,7 +230,7 @@ func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessReq }, nil } -// Exec an additional process inside the container +// Exec spawns an additional process inside the container. func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*ptypes.Empty, error) { s.mu.Lock() @@ -268,7 +262,7 @@ func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*pty return empty, nil } -// ResizePty of a process +// ResizePty resises the terminal of a process. func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*ptypes.Empty, error) { if r.ID == "" { return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "id not provided") @@ -287,7 +281,7 @@ func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (* return empty, nil } -// State returns runtime state information for a process +// State returns runtime state information for a process. func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi.StateResponse, error) { p, err := s.getExecProcess(r.ID) if err != nil { @@ -321,17 +315,17 @@ func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi. }, nil } -// Pause the container +// Pause pauses the container. func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Resume the container +// Resume resumes the container. func (s *Service) Resume(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Kill a process with the provided signal +// Kill kills a process with the provided signal. func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*ptypes.Empty, error) { if r.ID == "" { p, err := s.getInitProcess() @@ -354,7 +348,7 @@ func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*ptypes.Emp return empty, nil } -// ListPids returns all pids inside the container +// ListPids returns all pids inside the container. func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*shimapi.ListPidsResponse, error) { pids, err := s.getContainerPids(ctx, r.ID) if err != nil { @@ -372,7 +366,7 @@ func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*sh } a, err := typeurl.MarshalAny(d) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) + return nil, fmt.Errorf("failed to marshal process %d info: %w", pid, err) } pInfo.Info = a break @@ -385,7 +379,7 @@ func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*sh }, nil } -// CloseIO of a process +// CloseIO closes the I/O context of a process. 
func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*ptypes.Empty, error) { p, err := s.getExecProcess(r.ID) if err != nil { @@ -393,30 +387,30 @@ func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*ptyp } if stdin := p.Stdin(); stdin != nil { if err := stdin.Close(); err != nil { - return nil, errors.Wrap(err, "close stdin") + return nil, fmt.Errorf("close stdin: %w", err) } } return empty, nil } -// Checkpoint the container +// Checkpoint checkpoints the container. func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// ShimInfo returns shim information such as the shim's pid +// ShimInfo returns shim information such as the shim's pid. func (s *Service) ShimInfo(ctx context.Context, r *ptypes.Empty) (*shimapi.ShimInfoResponse, error) { return &shimapi.ShimInfoResponse{ ShimPid: uint32(os.Getpid()), }, nil } -// Update a running container +// Update updates a running container. func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Wait for a process to exit +// Wait waits for a process to exit. func (s *Service) Wait(ctx context.Context, r *shimapi.WaitRequest) (*shimapi.WaitResponse, error) { p, err := s.getExecProcess(r.ID) if err != nil { @@ -451,7 +445,7 @@ func (s *Service) checkProcesses(e proc.Exit) { for _, p := range s.allProcesses() { if p.ID() == e.ID { if ip, ok := p.(*proc.Init); ok { - // Ensure all children are killed + // Ensure all children are killed. if err := ip.KillAll(s.context); err != nil { log.G(s.context).WithError(err).WithField("id", ip.ID()). Error("failed to kill init's children") @@ -495,7 +489,7 @@ func (s *Service) forward(publisher events.Publisher) { } } -// getInitProcess returns initial process +// getInitProcess returns the init process. func (s *Service) getInitProcess() (rproc.Process, error) { s.mu.Lock() defer s.mu.Unlock() @@ -506,7 +500,7 @@ func (s *Service) getInitProcess() (rproc.Process, error) { return p, nil } -// getExecProcess returns exec process +// getExecProcess returns the given exec process. 
func (s *Service) getExecProcess(id string) (rproc.Process, error) { s.mu.Lock() defer s.mu.Unlock() @@ -534,7 +528,7 @@ func getTopic(ctx context.Context, e interface{}) string { case *eventstypes.TaskExecStarted: return runtime.TaskExecStartedEventTopic default: - logrus.Warnf("no topic for type %#v", e) + log.L.Printf("no topic for type %#v", e) } return runtime.TaskUnknownTopic } @@ -551,10 +545,10 @@ func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace string, spec, err := utils.ReadSpec(r.Bundle) if err != nil { - return nil, errors.Wrap(err, "read oci spec") + return nil, fmt.Errorf("read oci spec: %w", err) } if err := utils.UpdateVolumeAnnotations(r.Bundle, spec); err != nil { - return nil, errors.Wrap(err, "update volume annotations") + return nil, fmt.Errorf("update volume annotations: %w", err) } runsc.FormatLogPath(r.ID, config) diff --git a/pkg/shim/v1/utils/BUILD b/pkg/shim/v1/utils/BUILD new file mode 100644 index 000000000..9045781e1 --- /dev/null +++ b/pkg/shim/v1/utils/BUILD @@ -0,0 +1,26 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "utils", + srcs = [ + "utils.go", + "volumes.go", + ], + visibility = [ + "//pkg/shim:__subpackages__", + "//shim:__subpackages__", + ], + deps = [ + "@com_github_containerd_cri//pkg/annotations:go_default_library", + "@com_github_opencontainers_runtime-spec//specs-go:go_default_library", + ], +) + +go_test( + name = "utils_test", + size = "small", + srcs = ["volumes_test.go"], + library = ":utils", +) diff --git a/pkg/shim/v1/utils/volumes.go b/pkg/shim/v1/utils/volumes.go index 7323e7245..e4e9bf9b1 100644 --- a/pkg/shim/v1/utils/volumes.go +++ b/pkg/shim/v1/utils/volumes.go @@ -23,8 +23,6 @@ import ( "github.com/containerd/cri/pkg/annotations" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const volumeKeyPrefix = "dev.gvisor.spec.mount." @@ -32,13 +30,13 @@ const volumeKeyPrefix = "dev.gvisor.spec.mount." var kubeletPodsDir = "/var/lib/kubelet/pods" // volumeName gets volume name from volume annotation key, example: -// dev.gvisor.spec.mount.NAME.share +// dev.gvisor.spec.mount.NAME.share func volumeName(k string) string { return strings.SplitN(strings.TrimPrefix(k, volumeKeyPrefix), ".", 2)[0] } // volumeFieldName gets volume field name from volume annotation key, example: -// `type` is the field of dev.gvisor.spec.mount.NAME.type +// `type` is the field of dev.gvisor.spec.mount.NAME.type func volumeFieldName(k string) string { parts := strings.Split(strings.TrimPrefix(k, volumeKeyPrefix), ".") return parts[len(parts)-1] @@ -48,16 +46,16 @@ func volumeFieldName(k string) string { func podUID(s *specs.Spec) (string, error) { sandboxLogDir := s.Annotations[annotations.SandboxLogDir] if sandboxLogDir == "" { - return "", errors.New("no sandbox log path annotation") + return "", fmt.Errorf("no sandbox log path annotation") } fields := strings.Split(filepath.Base(sandboxLogDir), "_") switch len(fields) { - case 1: // This is the old CRI logging path + case 1: // This is the old CRI logging path. return fields[0], nil - case 3: // This is the new CRI logging path + case 3: // This is the new CRI logging path. return fields[2], nil } - return "", errors.Errorf("unexpected sandbox log path %q", sandboxLogDir) + return "", fmt.Errorf("unexpected sandbox log path %q", sandboxLogDir) } // isVolumeKey checks whether an annotation key is for volume. 
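The annotation keys handled in the volumes.go hunks above follow the pattern `dev.gvisor.spec.mount.<NAME>.<field>`. A minimal standalone sketch of the same key parsing, mirroring the `volumeName` and `volumeFieldName` helpers shown in the diff (the `main` wrapper and the example key are illustrative only, not part of the package):

```go
package main

import (
	"fmt"
	"strings"
)

const volumeKeyPrefix = "dev.gvisor.spec.mount."

// volumeName mirrors the helper above: the segment immediately after the prefix.
func volumeName(k string) string {
	return strings.SplitN(strings.TrimPrefix(k, volumeKeyPrefix), ".", 2)[0]
}

// volumeFieldName mirrors the helper above: the last dot-separated segment.
func volumeFieldName(k string) string {
	parts := strings.Split(strings.TrimPrefix(k, volumeKeyPrefix), ".")
	return parts[len(parts)-1]
}

func main() {
	k := "dev.gvisor.spec.mount.logs.share" // hypothetical annotation key
	fmt.Println(volumeName(k))      // prints "logs"
	fmt.Println(volumeFieldName(k)) // prints "share"
}
```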
@@ -79,7 +77,7 @@ func volumePath(volume, uid string) (string, error) { return "", err } if len(dirs) != 1 { - return "", errors.Errorf("unexpected matched volume list %v", dirs) + return "", fmt.Errorf("unexpected matched volume list %v", dirs) } return dirs[0], nil } @@ -103,7 +101,7 @@ func UpdateVolumeAnnotations(bundle string, s *specs.Spec) error { if err != nil { // Skip if we can't get pod UID, because this doesn't work // for containerd 1.1. - logrus.WithError(err).Error("Can't get pod uid") + fmt.Errorf("Can't get pod uid: %w", err) return nil } } @@ -117,22 +115,25 @@ func UpdateVolumeAnnotations(bundle string, s *specs.Spec) error { } volume := volumeName(k) if uid != "" { - // This is a sandbox + // This is a sandbox. path, err := volumePath(volume, uid) if err != nil { - return errors.Wrapf(err, "get volume path for %q", volume) + return fmt.Errorf("get volume path for %q: %w", volume, err) } s.Annotations[volumeSourceKey(volume)] = path updated = true } else { - // This is a container + // This is a container. for i := range s.Mounts { - // An error is returned for sandbox if source annotation - // is not successfully applied, so it is guaranteed that - // the source annotation for sandbox has already been - // successfully applied at this point. - // The volume name is unique inside a pod, so matching without - // podUID is fine here. + // An error is returned for sandbox if source + // annotation is not successfully applied, so + // it is guaranteed that the source annotation + // for sandbox has already been successfully + // applied at this point. + // + // The volume name is unique inside a pod, so + // matching without podUID is fine here. + // // TODO: Pass podUID down to shim for containers to do // more accurate matching. if yes, _ := isVolumePath(volume, s.Mounts[i].Source); yes { @@ -147,7 +148,7 @@ func UpdateVolumeAnnotations(bundle string, s *specs.Spec) error { if !updated { return nil } - // Update bundle + // Update bundle. 
b, err := json.Marshal(s) if err != nil { return err diff --git a/pkg/shim/v2/BUILD b/pkg/shim/v2/BUILD new file mode 100644 index 000000000..450f62979 --- /dev/null +++ b/pkg/shim/v2/BUILD @@ -0,0 +1,41 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "v2", + srcs = [ + "epoll.go", + "service.go", + "service_linux.go", + ], + visibility = ["//shim:__subpackages__"], + deps = [ + "//pkg/shim/runsc", + "//pkg/shim/v1/proc", + "//pkg/shim/v1/utils", + "//pkg/shim/v2/options", + "//runsc/specutils", + "@com_github_burntsushi_toml//:go_default_library", + "@com_github_containerd_cgroups//:go_default_library", + "@com_github_containerd_cgroups//stats/v1:go_default_library", + "@com_github_containerd_console//:go_default_library", + "@com_github_containerd_containerd//api/events:go_default_library", + "@com_github_containerd_containerd//api/types/task:go_default_library", + "@com_github_containerd_containerd//errdefs:go_default_library", + "@com_github_containerd_containerd//events:go_default_library", + "@com_github_containerd_containerd//log:go_default_library", + "@com_github_containerd_containerd//mount:go_default_library", + "@com_github_containerd_containerd//namespaces:go_default_library", + "@com_github_containerd_containerd//runtime:go_default_library", + "@com_github_containerd_containerd//runtime/linux/runctypes:go_default_library", + "@com_github_containerd_containerd//runtime/proc:go_default_library", + "@com_github_containerd_containerd//runtime/v2/shim:go_default_library", + "@com_github_containerd_containerd//runtime/v2/task:go_default_library", + "@com_github_containerd_cri//pkg/api/runtimeoptions/v1:go_default_library", + "@com_github_containerd_fifo//:go_default_library", + "@com_github_containerd_typeurl//:go_default_library", + "@com_github_gogo_protobuf//types:go_default_library", + "@org_golang_x_sys//unix:go_default_library", + ], +) diff --git a/pkg/shim/v2/epoll.go b/pkg/shim/v2/epoll.go index 57a2c5452..45cc38c2a 100644 --- a/pkg/shim/v2/epoll.go +++ b/pkg/shim/v2/epoll.go @@ -19,13 +19,13 @@ package v2 import ( "context" + "fmt" "sync" "github.com/containerd/cgroups" eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/events" "github.com/containerd/containerd/runtime" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -71,7 +71,7 @@ func (e *epoller) run(ctx context.Context) { if err == unix.EINTR { continue } - logrus.WithError(err).Error("cgroups: epoll wait") + fmt.Errorf("cgroups: epoll wait: %w", err) } for i := 0; i < n; i++ { e.process(ctx, uintptr(events[i].Fd)) @@ -117,7 +117,7 @@ func (e *epoller) process(ctx context.Context, fd uintptr) { if err := e.publisher.Publish(ctx, runtime.TaskOOMEventTopic, &eventstypes.TaskOOM{ ContainerID: i.id, }); err != nil { - logrus.WithError(err).Error("publish OOM event") + fmt.Errorf("publish OOM event: %w", err) } } diff --git a/pkg/shim/v2/options/BUILD b/pkg/shim/v2/options/BUILD new file mode 100644 index 000000000..ca212e874 --- /dev/null +++ b/pkg/shim/v2/options/BUILD @@ -0,0 +1,11 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "options", + srcs = [ + "options.go", + ], + visibility = ["//:sandbox"], +) diff --git a/pkg/shim/v2/service.go b/pkg/shim/v2/service.go index 0cff82a89..c67b1beba 100644 --- a/pkg/shim/v2/service.go +++ b/pkg/shim/v2/service.go @@ -16,6 +16,7 @@ package v2 import ( "context" + "fmt" "io/ioutil" "os" "os/exec" @@ -26,6 +27,7 @@ import ( 
"github.com/BurntSushi/toml" "github.com/containerd/cgroups" + metrics "github.com/containerd/cgroups/stats/v1" "github.com/containerd/console" eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/api/types/task" @@ -42,14 +44,13 @@ import ( runtimeoptions "github.com/containerd/cri/pkg/api/runtimeoptions/v1" "github.com/containerd/typeurl" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/shim/runsc" "gvisor.dev/gvisor/pkg/shim/v1/proc" "gvisor.dev/gvisor/pkg/shim/v1/utils" "gvisor.dev/gvisor/pkg/shim/v2/options" + "gvisor.dev/gvisor/runsc/specutils" ) var ( @@ -68,7 +69,7 @@ var _ = (taskAPI.TaskService)(&service{}) // we assume that a config.toml should exist in the runtime root. const configFile = "config.toml" -// New returns a new shim service that can be used via GRPC +// New returns a new shim service that can be used via GRPC. func New(ctx context.Context, id string, publisher events.Publisher) (shim.Shim, error) { ep, err := newOOMEpoller(publisher) if err != nil { @@ -89,13 +90,13 @@ func New(ctx context.Context, id string, publisher events.Publisher) (shim.Shim, runsc.Monitor = shim.Default if err := s.initPlatform(); err != nil { cancel() - return nil, errors.Wrap(err, "failed to initialized platform behavior") + return nil, fmt.Errorf("failed to initialized platform behavior: %w", err) } go s.forward(publisher) return s, nil } -// service is the shim implementation of a remote shim over GRPC +// service is the shim implementation of a remote shim over GRPC. type service struct { mu sync.Mutex @@ -179,7 +180,7 @@ func (s *service) StartShim(ctx context.Context, id, containerdBinary, container return "", err } if err := shim.SetScore(cmd.Process.Pid); err != nil { - return "", errors.Wrap(err, "failed to set OOM Score on shim") + return "", fmt.Errorf("failed to set OOM Score on shim: %w", err) } return address, nil } @@ -201,10 +202,10 @@ func (s *service) Cleanup(ctx context.Context) (*taskAPI.DeleteResponse, error) if err := r.Delete(ctx, s.id, &runsc.DeleteOpts{ Force: true, }); err != nil { - logrus.WithError(err).Warn("failed to remove runc container") + log.L.Printf("failed to remove runc container: %v", err) } if err := mount.UnmountAll(filepath.Join(path, "rootfs"), 0); err != nil { - logrus.WithError(err).Warn("failed to cleanup rootfs mount") + log.L.Printf("failed to cleanup rootfs mount: %v", err) } return &taskAPI.DeleteResponse{ ExitedAt: time.Now(), @@ -224,14 +225,15 @@ func (s *service) writeRuntime(path, runtime string) error { return ioutil.WriteFile(filepath.Join(path, "runtime"), []byte(runtime), 0600) } -// Create a new initial process and container with the underlying OCI runtime +// Create creates a new initial process and container with the underlying OCI +// runtime. func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *taskAPI.CreateTaskResponse, err error) { s.mu.Lock() defer s.mu.Unlock() ns, err := namespaces.NamespaceRequired(ctx) if err != nil { - return nil, errors.Wrap(err, "create namespace") + return nil, fmt.Errorf("create namespace: %w", err) } // Read from root for now. 
@@ -258,7 +260,7 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * path = filepath.Join(root, configFile) if _, err := os.Stat(path); err != nil { if !os.IsNotExist(err) { - return nil, errors.Wrapf(err, "stat config file %q", path) + return nil, fmt.Errorf("stat config file %q: %w", path, err) } // A config file in runtime root is not required. path = "" @@ -268,15 +270,15 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * break } if o.TypeUrl != options.OptionType { - return nil, errors.Errorf("unsupported runtimeoptions %q", o.TypeUrl) + return nil, fmt.Errorf("unsupported runtimeoptions %q", o.TypeUrl) } path = o.ConfigPath default: - return nil, errors.Errorf("unsupported option type %q", r.Options.TypeUrl) + return nil, fmt.Errorf("unsupported option type %q", r.Options.TypeUrl) } if path != "" { if _, err = toml.DecodeFile(path, &opts); err != nil { - return nil, errors.Wrapf(err, "decode config file %q", path) + return nil, fmt.Errorf("decode config file %q: %w", path, err) } } } @@ -312,8 +314,8 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * } defer func() { if err != nil { - if err2 := mount.UnmountAll(rootfs, 0); err2 != nil { - logrus.WithError(err2).Warn("failed to cleanup rootfs mount") + if err := mount.UnmountAll(rootfs, 0); err != nil { + log.L.Printf("failed to cleanup rootfs mount: %v", err) } } }() @@ -324,7 +326,7 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * Options: rm.Options, } if err := m.Mount(rootfs); err != nil { - return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m) + return nil, fmt.Errorf("failed to mount rootfs component %v: %w", m, err) } } process, err := newInit( @@ -343,20 +345,21 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * if err := process.Create(ctx, config); err != nil { return nil, errdefs.ToGRPC(err) } - // save the main task id and bundle to the shim for additional requests + // Save the main task id and bundle to the shim for additional + // requests. s.id = r.ID s.bundle = r.Bundle - // Set up OOM notification on the sandbox's cgroup. This is done on sandbox - // create since the sandbox process will be created here. + // Set up OOM notification on the sandbox's cgroup. This is done on + // sandbox create since the sandbox process will be created here. pid := process.Pid() if pid > 0 { cg, err := cgroups.Load(cgroups.V1, cgroups.PidPath(pid)) if err != nil { - return nil, errors.Wrapf(err, "loading cgroup for %d", pid) + return nil, fmt.Errorf("loading cgroup for %d: %w", pid, err) } if err := s.oomPoller.add(s.id, cg); err != nil { - return nil, errors.Wrapf(err, "add cg to OOM monitor") + return nil, fmt.Errorf("add cg to OOM monitor: %w", err) } } s.task = process @@ -367,7 +370,7 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * } -// Start a process +// Start starts a process. func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (*taskAPI.StartResponse, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -383,7 +386,7 @@ func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (*taskAPI. }, nil } -// Delete the initial process and container +// Delete deletes the initial process and container. 
func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (*taskAPI.DeleteResponse, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -411,7 +414,7 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (*taskAP }, nil } -// Exec an additional process inside the container +// Exec spawns an additional process inside the container. func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (*ptypes.Empty, error) { s.mu.Lock() p := s.processes[r.ExecID] @@ -440,7 +443,7 @@ func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (*pty return empty, nil } -// ResizePty of a process +// ResizePty resizes the terminal of a process. func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (*ptypes.Empty, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -456,7 +459,7 @@ func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (* return empty, nil } -// State returns runtime state information for a process +// State returns runtime state information for a process. func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (*taskAPI.StateResponse, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -490,17 +493,17 @@ func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (*taskAPI. }, nil } -// Pause the container +// Pause the container. func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Resume the container +// Resume the container. func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Kill a process with the provided signal +// Kill a process with the provided signal. func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (*ptypes.Empty, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -515,7 +518,7 @@ func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (*ptypes.Emp return empty, nil } -// Pids returns all pids inside the container +// Pids returns all pids inside the container. func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.PidsResponse, error) { pids, err := s.getContainerPids(ctx, r.ID) if err != nil { @@ -533,7 +536,7 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.Pi } a, err := typeurl.MarshalAny(d) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) + return nil, fmt.Errorf("failed to marshal process %d info: %w", pid, err) } pInfo.Info = a break @@ -546,7 +549,7 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.Pi }, nil } -// CloseIO of a process +// CloseIO closes the I/O context of a process. func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (*ptypes.Empty, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -554,18 +557,18 @@ func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (*ptyp } if stdin := p.Stdin(); stdin != nil { if err := stdin.Close(); err != nil { - return nil, errors.Wrap(err, "close stdin") + return nil, fmt.Errorf("close stdin: %w", err) } } return empty, nil } -// Checkpoint the container +// Checkpoint checkpoints the container. 
func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Connect returns shim information such as the shim's pid +// Connect returns shim information such as the shim's pid. func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (*taskAPI.ConnectResponse, error) { var pid int if s.task != nil { @@ -605,52 +608,52 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (*taskAPI. // gvisor currently (as of 2020-03-03) only returns the total memory // usage and current PID value[0]. However, we copy the common fields here // so that future updates will propagate correct information. We're - // using the cgroups.Metrics structure so we're returning the same type + // using the metrics.Metrics structure so we're returning the same type // as runc. // // [0]: https://github.com/google/gvisor/blob/277a0d5a1fbe8272d4729c01ee4c6e374d047ebc/runsc/boot/events.go#L61-L81 - data, err := typeurl.MarshalAny(&cgroups.Metrics{ - CPU: &cgroups.CPUStat{ - Usage: &cgroups.CPUUsage{ + data, err := typeurl.MarshalAny(&metrics.Metrics{ + CPU: &metrics.CPUStat{ + Usage: &metrics.CPUUsage{ Total: stats.Cpu.Usage.Total, Kernel: stats.Cpu.Usage.Kernel, User: stats.Cpu.Usage.User, PerCPU: stats.Cpu.Usage.Percpu, }, - Throttling: &cgroups.Throttle{ + Throttling: &metrics.Throttle{ Periods: stats.Cpu.Throttling.Periods, ThrottledPeriods: stats.Cpu.Throttling.ThrottledPeriods, ThrottledTime: stats.Cpu.Throttling.ThrottledTime, }, }, - Memory: &cgroups.MemoryStat{ + Memory: &metrics.MemoryStat{ Cache: stats.Memory.Cache, - Usage: &cgroups.MemoryEntry{ + Usage: &metrics.MemoryEntry{ Limit: stats.Memory.Usage.Limit, Usage: stats.Memory.Usage.Usage, Max: stats.Memory.Usage.Max, Failcnt: stats.Memory.Usage.Failcnt, }, - Swap: &cgroups.MemoryEntry{ + Swap: &metrics.MemoryEntry{ Limit: stats.Memory.Swap.Limit, Usage: stats.Memory.Swap.Usage, Max: stats.Memory.Swap.Max, Failcnt: stats.Memory.Swap.Failcnt, }, - Kernel: &cgroups.MemoryEntry{ + Kernel: &metrics.MemoryEntry{ Limit: stats.Memory.Kernel.Limit, Usage: stats.Memory.Kernel.Usage, Max: stats.Memory.Kernel.Max, Failcnt: stats.Memory.Kernel.Failcnt, }, - KernelTCP: &cgroups.MemoryEntry{ + KernelTCP: &metrics.MemoryEntry{ Limit: stats.Memory.KernelTCP.Limit, Usage: stats.Memory.KernelTCP.Usage, Max: stats.Memory.KernelTCP.Max, Failcnt: stats.Memory.KernelTCP.Failcnt, }, }, - Pids: &cgroups.PidsStat{ + Pids: &metrics.PidsStat{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, }, @@ -663,12 +666,12 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (*taskAPI. }, nil } -// Update a running container +// Update updates a running container. func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Wait for a process to exit +// Wait waits for a process to exit. func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (*taskAPI.WaitResponse, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -697,7 +700,7 @@ func (s *service) checkProcesses(e proc.Exit) { for _, p := range s.allProcesses() { if p.ID() == e.ID { if ip, ok := p.(*proc.Init); ok { - // Ensure all children are killed + // Ensure all children are killed. if err := ip.KillAll(s.context); err != nil { log.G(s.context).WithError(err).WithField("id", ip.ID()). 
Error("failed to kill init's children") @@ -733,7 +736,7 @@ func (s *service) getContainerPids(ctx context.Context, id string) ([]uint32, er p := s.task s.mu.Unlock() if p == nil { - return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "container must be created") + return nil, fmt.Errorf("container must be created: %w", errdefs.ErrFailedPrecondition) } ps, err := p.(*proc.Init).Runtime().Ps(ctx, id) if err != nil { @@ -752,7 +755,7 @@ func (s *service) forward(publisher events.Publisher) { err := publisher.Publish(ctx, getTopic(e), e) cancel() if err != nil { - logrus.WithError(err).Error("post event") + fmt.Errorf("post event: %w", err) } } } @@ -787,7 +790,7 @@ func getTopic(e interface{}) string { case *eventstypes.TaskExecStarted: return runtime.TaskExecStartedEventTopic default: - logrus.Warnf("no topic for type %#v", e) + log.L.Printf("no topic for type %#v", e) } return runtime.TaskUnknownTopic } @@ -795,10 +798,10 @@ func getTopic(e interface{}) string { func newInit(ctx context.Context, path, workDir, namespace string, platform rproc.Platform, r *proc.CreateConfig, options *options.Options, rootfs string) (*proc.Init, error) { spec, err := utils.ReadSpec(r.Bundle) if err != nil { - return nil, errors.Wrap(err, "read oci spec") + return nil, fmt.Errorf("read oci spec: %w", err) } if err := utils.UpdateVolumeAnnotations(r.Bundle, spec); err != nil { - return nil, errors.Wrap(err, "update volume annotations") + return nil, fmt.Errorf("update volume annotations: %w", err) } runsc.FormatLogPath(r.ID, options.RunscConfig) runtime := proc.NewRunsc(options.Root, path, namespace, options.BinaryName, options.RunscConfig) @@ -814,7 +817,7 @@ func newInit(ctx context.Context, path, workDir, namespace string, platform rpro p.WorkDir = workDir p.IoUID = int(options.IoUid) p.IoGID = int(options.IoGid) - p.Sandbox = utils.IsSandbox(spec) + p.Sandbox = specutils.SpecContainerType(spec) == specutils.ContainerTypeSandbox p.UserLog = utils.UserLogPath(spec) p.Monitor = shim.Default return p, nil diff --git a/pkg/shim/v2/service_linux.go b/pkg/shim/v2/service_linux.go index cd259cd44..257c58812 100644 --- a/pkg/shim/v2/service_linux.go +++ b/pkg/shim/v2/service_linux.go @@ -19,13 +19,13 @@ package v2 import ( "context" + "fmt" "io" "sync" "syscall" "github.com/containerd/console" "github.com/containerd/fifo" - "github.com/pkg/errors" ) type linuxPlatform struct { @@ -34,7 +34,7 @@ type linuxPlatform struct { func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { if p.epoller == nil { - return nil, errors.New("uninitialized epoller") + return nil, fmt.Errorf("uninitialized epoller") } epollConsole, err := p.epoller.Add(console) @@ -81,11 +81,11 @@ func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error { if p.epoller == nil { - return errors.New("uninitialized epoller") + return fmt.Errorf("uninitialized epoller") } epollConsole, ok := cons.(*console.EpollConsole) if !ok { - return errors.Errorf("expected EpollConsole, got %#v", cons) + return fmt.Errorf("expected EpollConsole, got %#v", cons) } return epollConsole.Shutdown(p.epoller.CloseConsole) } @@ -102,7 +102,7 @@ func (s *service) initPlatform() error { } epoller, err := console.NewEpoller() if err != nil { - return errors.Wrap(err, "failed to initialize epoller") + return fmt.Errorf("failed to initialize epoller: %w", err) 
} s.platform = &linuxPlatform{ epoller: epoller, diff --git a/pkg/test/criutil/criutil.go b/pkg/test/criutil/criutil.go index 8fed29ff5..4c63d669a 100644 --- a/pkg/test/criutil/criutil.go +++ b/pkg/test/criutil/criutil.go @@ -22,6 +22,9 @@ import ( "fmt" "os" "os/exec" + "path" + "regexp" + "strconv" "strings" "time" @@ -33,28 +36,44 @@ import ( type Crictl struct { logger testutil.Logger endpoint string + runpArgs []string cleanup []func() } // resolvePath attempts to find binary paths. It may set the path to invalid, // which will cause the execution to fail with a sensible error. func resolvePath(executable string) string { + runtime, err := dockerutil.RuntimePath() + if err == nil { + // Check first the directory of the runtime itself. + if dir := path.Dir(runtime); dir != "" && dir != "." { + guess := path.Join(dir, executable) + if fi, err := os.Stat(guess); err == nil && (fi.Mode()&0111) != 0 { + return guess + } + } + } + + // Try to find via the path. guess, err := exec.LookPath(executable) - if err != nil { - guess = fmt.Sprintf("/usr/local/bin/%s", executable) + if err == nil { + return guess } - return guess + + // Return a default path. + return fmt.Sprintf("/usr/local/bin/%s", executable) } // NewCrictl returns a Crictl configured with a timeout and an endpoint over // which it will talk to containerd. -func NewCrictl(logger testutil.Logger, endpoint string) *Crictl { +func NewCrictl(logger testutil.Logger, endpoint string, runpArgs []string) *Crictl { // Attempt to find the executable, but don't bother propagating the // error at this point. The first command executed will return with a // binary not found error. return &Crictl{ logger: logger, endpoint: endpoint, + runpArgs: runpArgs, } } @@ -67,8 +86,8 @@ func (cc *Crictl) CleanUp() { } // RunPod creates a sandbox. It corresponds to `crictl runp`. -func (cc *Crictl) RunPod(sbSpecFile string) (string, error) { - podID, err := cc.run("runp", sbSpecFile) +func (cc *Crictl) RunPod(runtime, sbSpecFile string) (string, error) { + podID, err := cc.run("runp", "--runtime", runtime, sbSpecFile) if err != nil { return "", fmt.Errorf("runp failed: %v", err) } @@ -79,10 +98,39 @@ func (cc *Crictl) RunPod(sbSpecFile string) (string, error) { // Create creates a container within a sandbox. It corresponds to `crictl // create`. func (cc *Crictl) Create(podID, contSpecFile, sbSpecFile string) (string, error) { - podID, err := cc.run("create", podID, contSpecFile, sbSpecFile) + // In version 1.16.0, crictl annoying starting attempting to pull the + // container, even if it was already available locally. We therefore + // need to parse the version and add an appropriate --no-pull argument + // since the image has already been loaded locally. + out, err := cc.run("-v") + r := regexp.MustCompile("crictl version ([0-9]+)\\.([0-9]+)\\.([0-9+])") + vs := r.FindStringSubmatch(out) + if len(vs) != 4 { + return "", fmt.Errorf("crictl -v had unexpected output: %s", out) + } + major, err := strconv.ParseUint(vs[1], 10, 64) + if err != nil { + return "", fmt.Errorf("crictl had invalid version: %v (%s)", err, out) + } + minor, err := strconv.ParseUint(vs[2], 10, 64) if err != nil { + return "", fmt.Errorf("crictl had invalid version: %v (%s)", err, out) + } + + args := []string{"create"} + if (major == 1 && minor >= 16) || major > 1 { + args = append(args, "--no-pull") + } + args = append(args, podID) + args = append(args, contSpecFile) + args = append(args, sbSpecFile) + + podID, err = cc.run(args...) 
+ if err != nil { + time.Sleep(10 * time.Minute) // XXX return "", fmt.Errorf("create failed: %v", err) } + // Strip the trailing newline from crictl output. return strings.TrimSpace(podID), nil } @@ -260,7 +308,7 @@ func (cc *Crictl) StopContainer(contID string) error { // StartPodAndContainer starts a sandbox and container in that sandbox. It // returns the pod ID and container ID. -func (cc *Crictl) StartPodAndContainer(image, sbSpec, contSpec string) (string, string, error) { +func (cc *Crictl) StartPodAndContainer(runtime, image, sbSpec, contSpec string) (string, string, error) { if err := cc.Import(image); err != nil { return "", "", err } @@ -277,7 +325,7 @@ func (cc *Crictl) StartPodAndContainer(image, sbSpec, contSpec string) (string, } cc.cleanup = append(cc.cleanup, cleanup) - podID, err := cc.RunPod(sbSpecFile) + podID, err := cc.RunPod(runtime, sbSpecFile) if err != nil { return "", "", err } diff --git a/runsc/BUILD b/runsc/BUILD index 757f6d44c..96f697a5f 100644 --- a/runsc/BUILD +++ b/runsc/BUILD @@ -62,18 +62,22 @@ go_binary( ) pkg_tar( - name = "runsc-bin", - srcs = [":runsc"], + name = "debian-bin", + srcs = [ + ":runsc", + "//shim/v1:gvisor-containerd-shim", + "//shim/v2:containerd-shim-runsc-v1", + ], mode = "0755", package_dir = "/usr/bin", - strip_prefix = "/runsc/linux_amd64_pure_stripped", ) pkg_tar( name = "debian-data", extension = "tar.gz", deps = [ - ":runsc-bin", + ":debian-bin", + "//shim:config", ], ) diff --git a/runsc/debian/postinst.sh b/runsc/debian/postinst.sh index dc7aeee87..d1e28e17b 100755 --- a/runsc/debian/postinst.sh +++ b/runsc/debian/postinst.sh @@ -18,7 +18,14 @@ if [ "$1" != configure ]; then exit 0 fi +# Update docker configuration. if [ -f /etc/docker/daemon.json ]; then runsc install - systemctl restart docker || echo "unable to restart docker; you must do so manually." >&2 + if systemctl status docker 2>/dev/null; then + systemctl restart docker || echo "unable to restart docker; you must do so manually." >&2 + fi fi + +# For containerd-based installers, we don't automatically update the +# configuration. If it uses a v2 shim, then it will find the package binaries +# automatically when provided the appropriate annotation. diff --git a/shim/BUILD b/shim/BUILD new file mode 100644 index 000000000..e581618b2 --- /dev/null +++ b/shim/BUILD @@ -0,0 +1,15 @@ +load("//tools:defs.bzl", "pkg_tar") + +package(licenses = ["notice"]) + +pkg_tar( + name = "config", + srcs = [ + "runsc.toml", + ], + mode = "0644", + package_dir = "/etc/containerd", + visibility = [ + "//runsc:__pkg__", + ], +) diff --git a/shim/README.md b/shim/README.md index e446ec970..c6824ebdc 100644 --- a/shim/README.md +++ b/shim/README.md @@ -1,16 +1,11 @@ -# gvisor-containerd-shim +# gVisor Containerd shims -gvisor-containerd-shim is a containerd shim. It implements the containerd v1 -shim API. It can be used as a drop-in replacement for -[containerd-shim][containerd-shim] -(though containerd-shim must still be installed). It allows the use of both -gVisor (runsc) and normal containers in the same containerd installation by -deferring to the runc shim if the desired runtime engine is not runsc. +There are various shims supported for differt versions of +[containerd][containerd]. 
-- [Untrusted Workload Quick Start (containerd >=1.1)](docs/untrusted-workload-quickstart.md) -- [Runtime Handler/RuntimeClass Quick Start (containerd >=1.2)](docs/runtime-handler-quickstart.md) -- [Runtime Handler/RuntimeClass Quick Start (shim v2) (containerd >=1.2)](docs/runtime-handler-shim-v2-quickstart.md) -- [Configure containerd-shim-runsc-v1 (shim v2) (containerd >= 1.3)](docs/configure-containerd-shim-runsc-v1.md) -- [Configure gvisor-containerd-shim (shim v1) (containerd <= 1.2)](docs/configure-gvisor-containerd-shim.md) +- [Configure gvisor-containerd-shim (shim v1) (containerd ≤ 1.2)](v1/configure-gvisor-containerd-shim.md) +- [Runtime Handler/RuntimeClass Quick Start (containerd >= 1.2)](v2/runtime-handler-quickstart.md) +- [Runtime Handler/RuntimeClass Quick Start (shim v2) (containerd >= 1.2)](v2/runtime-handler-shim-v2-quickstart.md) +- [Configure containerd-shim-runsc-v1 (shim v2) (containerd >= 1.3)](v2/configure-containerd-shim-runsc-v1.md) -[containerd-shim]: https://github.com/containerd/containerd/tree/master/cmd/containerd-shim +[containerd]: https://github.com/containerd/containerd diff --git a/shim/configure-containerd-shim-runsc-v1.md b/shim/configure-containerd-shim-runsc-v1.md deleted file mode 100644 index 977ceacbd..000000000 --- a/shim/configure-containerd-shim-runsc-v1.md +++ /dev/null @@ -1,72 +0,0 @@ -# Configure containerd-shim-runsc-v1 (Shim V2) - -This document describes how to configure runtime options for -`containerd-shim-runsc-v1`. This is follows on to the instructions of -[Runtime Handler Quick Start (shim v2) (containerd >=1.2)](runtime-handler-shim-v2-quickstart.md) -and requires containerd 1.3 or later. - -### Update `/etc/containerd/config.toml` to point to a configuration file for `containerd-shim-runsc-v1`. - -`containerd-shim-runsc-v1` supports a few different configuration options based -on the version of containerd that is used. For versions >= 1.3, it supports a -configurable config path in the containerd runtime configuration. - -```shell -{ # Step 1: Update runtime options for runsc in containerd config.toml -cat < -[embedmd]:# (../test/e2e/shim-install.sh shell /{ # Step 1\(dev\)/ /^}/) -```shell -{ # Step 1(dev): Build and install gvisor-containerd-shim and containerd-shim-runsc-v1 - make - sudo make install -} -``` - -### Configure containerd - -1. Update `/etc/containerd/config.toml`. Make sure `containerd-shim-runsc-v1` is - in `${PATH}`. - -[embedmd]:# (../test/e2e/runtime-handler-shim-v2/install.sh shell /{ # Step 1/ /^}/) -```shell -{ # Step 1: Create containerd config.toml -cat < Note: This shim version is supported only for containerd versions less than +> 1.2. If you are using a containerd version greater than or equal to 1.2, then +> please use `containerd-shim-runsc-v1` (Shim API v1). +> +> This containerd shim is supported only in a best-effort capacity. + +This document describes how to configure and use `gvisor-containerd-shim`. + +## Containerd Configuration + +To use this shim, you must configure `/etc/containerd/config.toml` as follows: + +``` +[plugins.linux] + shim = "/usr/bin/gvisor-containerd-shim" +[plugins.cri.containerd.runtimes.gvisor] + runtime_type = "io.containerd.runtime.v1.linux" + runtime_engine = "/usr/bin/runsc" + runtime_root = "/run/containerd/runsc" +``` + +In order to pick-up the new configuration, you may need to restart containerd: + +```shell +sudo systemctl restart containerd +``` + +## Shim Confguration + +The shim configuration is stored in `/etc/containerd/runsc.toml`. 
The +configuration file supports two values. + +* `runc_shim`: The path to the runc shim. This is used by + `gvisor-containerd-shim` to run standard containers. + +* `runsc_config`: This is a set of key/value pairs that are converted into + `runsc` command line flags. You can learn more about which flags are available + by running `runsc flags`. + +For example, a configuration might look as follows: + +``` +runc_shim = "/usr/local/bin/containerd-shim" +[runsc_config] +platform = "kvm" +debug = true +debug-log = /var/log/%ID%/gvisor.log +``` diff --git a/shim/v1/config.go b/shim/v1/config.go new file mode 100644 index 000000000..a72cc7754 --- /dev/null +++ b/shim/v1/config.go @@ -0,0 +1,40 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import "github.com/BurntSushi/toml" + +// config is the configuration for gvisor containerd shim. +type config struct { + // RuncShim is the shim binary path for standard containerd-shim for runc. + // When the runtime is `runc`, gvisor containerd shim will exec current + // process to standard containerd-shim. This is a work around for containerd + // 1.1. In containerd 1.2, containerd will choose different containerd-shims + // based on runtime. + RuncShim string `toml:"runc_shim"` + // RunscConfig is configuration for runsc. The key value will be converted + // to runsc flags --key=value directly. + RunscConfig map[string]string `toml:"runsc_config"` +} + +// loadConfig load gvisor containerd shim config from config file. 
+func loadConfig(path string) (*config, error) { + var c config + _, err := toml.DecodeFile(path, &c) + if err != nil { + return &c, err + } + return &c, nil +} diff --git a/shim/v1/main.go b/shim/v1/main.go index 41c77394a..43deee858 100644 --- a/shim/v1/main.go +++ b/shim/v1/main.go @@ -16,11 +16,252 @@ package main import ( - "github.com/containerd/containerd/runtime/v2/shim" + "bytes" + "context" + "flag" + "fmt" + "log" + "net" + "os" + "os/exec" + "os/signal" + "path/filepath" + "strings" + "sync" + "syscall" - runsc "gvisor.dev/gvisor/pkg/shim/v2" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/runtime/v1/linux/proc" + containerdshim "github.com/containerd/containerd/runtime/v1/shim" + shimapi "github.com/containerd/containerd/runtime/v1/shim/v1" + "github.com/containerd/ttrpc" + "github.com/containerd/typeurl" + ptypes "github.com/gogo/protobuf/types" + "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" + + "gvisor.dev/gvisor/pkg/shim/runsc" + "gvisor.dev/gvisor/pkg/shim/v1/shim" +) + +var ( + debugFlag bool + namespaceFlag string + socketFlag string + addressFlag string + workdirFlag string + runtimeRootFlag string + containerdBinaryFlag string + shimConfigFlag string ) +func init() { + flag.BoolVar(&debugFlag, "debug", false, "enable debug output in logs") + flag.StringVar(&namespaceFlag, "namespace", "", "namespace that owns the shim") + flag.StringVar(&socketFlag, "socket", "", "abstract socket path to serve") + flag.StringVar(&addressFlag, "address", "", "grpc address back to main containerd") + flag.StringVar(&workdirFlag, "workdir", "", "path used to storge large temporary data") + + // Containerd default to runc, unless another runtime is explicitly + // specified. We keep the same default to make the default behavior + // consistent. + flag.StringVar(&runtimeRootFlag, "runtime-root", proc.RuncRoot, "root directory for the runtime") + + // Currently, the `containerd publish` utility is embedded in the + // daemon binary. The daemon invokes `containerd-shim + // -containerd-binary ...` with its own os.Executable() path. + flag.StringVar(&containerdBinaryFlag, "containerd-binary", "containerd", "path to containerd binary (used for `containerd publish`)") + flag.StringVar(&shimConfigFlag, "config", "/etc/containerd/runsc.toml", "path to the shim configuration file") +} + func main() { - shim.Run("io.containerd.runsc.v1", runsc.New) + flag.Parse() + + // This is a hack. Exec current process to run standard containerd-shim + // if runtime root is not `runsc`. We don't need this for shim v2 api. + if filepath.Base(runtimeRootFlag) != "runsc" { + if err := executeRuncShim(); err != nil { + fmt.Fprintf(os.Stderr, "gvisor-containerd-shim: %s\n", err) + os.Exit(1) + } + } + + // Run regular shim if needed. + if err := executeShim(); err != nil { + fmt.Fprintf(os.Stderr, "gvisor-containerd-shim: %s\n", err) + os.Exit(1) + } +} + +// executeRuncShim execs current process to a containerd-shim process and +// retains all flags and envs. +func executeRuncShim() error { + c, err := loadConfig(shimConfigFlag) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to load shim config: %w", err) + } + shimPath := c.RuncShim + if shimPath == "" { + shimPath, err = exec.LookPath("containerd-shim") + if err != nil { + return fmt.Errorf("lookup containerd-shim failed: %w", err) + } + } + + args := append([]string{shimPath}, os.Args[1:]...) 
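+	// Note: on success, syscall.Exec replaces the current process image with
+	// the runc containerd-shim and never returns; execution continues past
+	// this call only if the exec itself fails.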
+ if err := syscall.Exec(shimPath, args, os.Environ()); err != nil { + return fmt.Errorf("exec containerd-shim @ %q failed: %w", shimPath, err) + } + return nil +} + +func executeShim() error { + // start handling signals as soon as possible so that things are + // properly reaped or if runtime exits before we hit the handler. + signals, err := setupSignals() + if err != nil { + return err + } + path, err := os.Getwd() + if err != nil { + return err + } + server, err := ttrpc.NewServer(ttrpc.WithServerHandshaker(ttrpc.UnixSocketRequireSameUser())) + if err != nil { + return fmt.Errorf("failed creating server: %w", err) + } + c, err := loadConfig(shimConfigFlag) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to load shim config: %w", err) + } + sv, err := shim.NewService( + shim.Config{ + Path: path, + Namespace: namespaceFlag, + WorkDir: workdirFlag, + RuntimeRoot: runtimeRootFlag, + RunscConfig: c.RunscConfig, + }, + &remoteEventsPublisher{address: addressFlag}, + ) + if err != nil { + return err + } + shimapi.RegisterShimService(server, sv) + if err := serve(server, socketFlag); err != nil { + return err + } + return handleSignals(signals, server, sv) +} + +// serve serves the ttrpc API over a unix socket at the provided path this +// function does not block. +func serve(server *ttrpc.Server, path string) error { + var ( + l net.Listener + err error + ) + if path == "" { + l, err = net.FileListener(os.NewFile(3, "socket")) + path = "[inherited from parent]" + } else { + if len(path) > 106 { + return fmt.Errorf("%q: unix socket path too long (> 106)", path) + } + l, err = net.Listen("unix", "\x00"+path) + } + if err != nil { + return err + } + go func() { + defer l.Close() + err := server.Serve(context.Background(), l) + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + log.Fatalf("ttrpc server failure: %v", err) + } + }() + return nil +} + +// setupSignals creates a new signal handler for all signals and sets the shim +// as a sub-reaper so that the container processes are reparented. +func setupSignals() (chan os.Signal, error) { + signals := make(chan os.Signal, 32) + signal.Notify(signals, unix.SIGTERM, unix.SIGINT, unix.SIGCHLD, unix.SIGPIPE) + // make sure runc is setup to use the monitor for waiting on processes. + // TODO(random-liu): Move shim/reaper.go to a separate package. + runsc.Monitor = containerdshim.Default + // Set the shim as the subreaper for all orphaned processes created by + // the container. + if err := system.SetSubreaper(1); err != nil { + return nil, err + } + return signals, nil +} + +func handleSignals(signals chan os.Signal, server *ttrpc.Server, sv *shim.Service) error { + var ( + termOnce sync.Once + done = make(chan struct{}) + ) + + for { + select { + case <-done: + return nil + case s := <-signals: + switch s { + case unix.SIGCHLD: + if err := containerdshim.Reap(); err != nil { + log.Printf("reap exit status: %v") + } + case unix.SIGTERM, unix.SIGINT: + go termOnce.Do(func() { + ctx := context.TODO() + if err := server.Shutdown(ctx); err != nil { + log.Printf("failed to shutdown server: %v") + } + // Ensure our child is dead if any. 
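+					// SIGKILL with All set tells the service to kill every
+					// process in the container, not just the init process.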
+ sv.Kill(ctx, &shimapi.KillRequest{ + Signal: uint32(syscall.SIGKILL), + All: true, + }) + sv.Delete(context.Background(), &ptypes.Empty{}) + close(done) + }) + case unix.SIGPIPE: + } + } + } +} + +type remoteEventsPublisher struct { + address string +} + +func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error { + ns, _ := namespaces.Namespace(ctx) + encoded, err := typeurl.MarshalAny(event) + if err != nil { + return err + } + data, err := encoded.Marshal() + if err != nil { + return err + } + cmd := exec.CommandContext(ctx, containerdBinaryFlag, "--address", l.address, "publish", "--topic", topic, "--namespace", ns) + cmd.Stdin = bytes.NewReader(data) + c, err := containerdshim.Default.Start(cmd) + if err != nil { + return err + } + status, err := containerdshim.Default.Wait(cmd, c) + if err != nil { + return fmt.Errorf("failed to publish event: %w", err) + } + if status != 0 { + return fmt.Errorf("failed to publish event: status %d", status) + } + return nil } diff --git a/shim/v2/BUILD b/shim/v2/BUILD new file mode 100644 index 000000000..1e1947dab --- /dev/null +++ b/shim/v2/BUILD @@ -0,0 +1,32 @@ +load("//tools:defs.bzl", "go_binary", "pkg_tar") +load("//website:defs.bzl", "doc") + +package(licenses = ["notice"]) + +go_binary( + name = "containerd-shim-runsc-v1", + srcs = [ + "main.go", + ], + pure = True, + visibility = [ + "//visibility:public", + ], + deps = [ + "//pkg/shim/runsc", + "//pkg/shim/v1/shim", + "//pkg/shim/v2", + "@com_github_burntsushi_toml//:go_default_library", + "@com_github_containerd_containerd//runtime/v2/shim:go_default_library", + ], +) + +doc( + name = "doc", + src = "README.md", + category = "User Guide", + permalink = "/docs/user_guide/containerd-shim-runsc-v1/", + subcategory = "Advanced", + visibility = ["//website:__pkg__"], + weight = "92", +) diff --git a/shim/v2/README.md b/shim/v2/README.md new file mode 100644 index 000000000..2fd625415 --- /dev/null +++ b/shim/v2/README.md @@ -0,0 +1,90 @@ +# containerd-shim-runsc-v1 + +> Note: This shim version is the recommended shim for containerd versions +> greater than or equal to 1.2. For older versions of containerd, use +> `gvisor-containerd-shim`. + +This document describes how to configure and use `containerd-shim-runsc-v1`. + +## Configuring Containerd 1.2 + +To configure containerd 1.2 to use this shim, add the runtime to +`/etc/containerd/config.toml` as follows: + +``` +[plugins.cri.containerd.runtimes.runsc] + runtime_type = "io.containerd.runsc.v1" + runtime_root = "/run/containerd/runsc" +[plugins.cri.containerd.runtimes.runsc.options] + TypeUrl = "io.containerd.runsc.v1.options" +``` + +The configuration will optionally loaded from a file named `config.toml` in the +`runtime_root` configured above. + +In order to pick up the new configuration, you may need to restart containerd: + +```shell +sudo systemctl restart containerd +``` + +## Configuring Containerd 1.3 and above + +To configure containerd 1.3 to use this shim, add the runtime to +`/etc/containerd/config.toml` as follows: + +``` +[plugins.cri.containerd.runtimes.runsc] + runtime_type = "io.containerd.runsc.v1" +[plugins.cri.containerd.runtimes.runsc.options] + TypeUrl = "io.containerd.runsc.v1.options" + ConfigPath = "/etc/containerd/runsc.toml" +``` + +The `ConfigPath` above will be used to provide a pointer to the configuration +file to be loaded. + +> Note that there will be configuration file loaded if `ConfigPath` is not set. 
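+As a rough illustration of the lookup order described above, a shim could
+resolve its options file as sketched below. This is only a sketch; the
+function and variable names are hypothetical and do not come from the actual
+`containerd-shim-runsc-v1` sources.
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+// resolveConfigPath prefers an explicit ConfigPath (containerd 1.3 and
+// later) and otherwise falls back to an optional config.toml under the
+// configured runtime_root (containerd 1.2). It returns "" when no
+// configuration file is available.
+func resolveConfigPath(configPath, runtimeRoot string) string {
+	if configPath != "" {
+		return configPath
+	}
+	fallback := filepath.Join(runtimeRoot, "config.toml")
+	if _, err := os.Stat(fallback); err == nil {
+		return fallback
+	}
+	return ""
+}
+
+func main() {
+	// Example: an explicit ConfigPath always wins.
+	fmt.Println(resolveConfigPath("/etc/containerd/runsc.toml", "/run/containerd/runsc"))
+}
+```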
+ +In order to pick up the new configuration, you may need to restart containerd: + +```shell +sudo systemctl restart containerd +``` +## Shim Confguration + +The shim configuration may carry the following options: + +* `shim_cgroup`: The cgroup to use for the shim itself. +* `io_uid`: The UID to use for pipes. +* `ui_gid`: The GID to use for pipes. +* `binary_name`: The runtime binary name (defaults to `runsc`). +* `root`: The root directory for the runtime. +* `runsc_config`: A dictionary of key-value pairs that will be passed to the + runtime as arguments. + +### Example: Enable the KVM platform + +gVisor enables the use of a number of platforms. This example shows how to +configure `containerd-shim-runsc-v1` to use gVisor with the KVM platform: + +```shell +cat < 106 { - return errors.Errorf("%q: unix socket path too long (> 106)", path) - } - l, err = net.Listen("unix", "\x00"+path) - } - if err != nil { - return err - } - logrus.WithField("socket", path).Debug("serving api on unix socket") - go func() { - defer l.Close() - if err := server.Serve(context.Background(), l); err != nil && - !strings.Contains(err.Error(), "use of closed network connection") { - logrus.WithError(err).Fatal("gvisor-containerd-shim: ttrpc server failure") - } - }() - return nil -} - -// setupSignals creates a new signal handler for all signals and sets the shim as a -// sub-reaper so that the container processes are reparented -func setupSignals() (chan os.Signal, error) { - signals := make(chan os.Signal, 32) - signal.Notify(signals, unix.SIGTERM, unix.SIGINT, unix.SIGCHLD, unix.SIGPIPE) - // make sure runc is setup to use the monitor - // for waiting on processes - // TODO(random-liu): Move shim/reaper.go to a separate package. - runsc.Monitor = containerdshim.Default - // set the shim as the subreaper for all orphaned processes created by the container - if err := system.SetSubreaper(1); err != nil { - return nil, err - } - return signals, nil -} - -func handleSignals(logger *logrus.Entry, signals chan os.Signal, server *ttrpc.Server, sv *shim.Service) error { - var ( - termOnce sync.Once - done = make(chan struct{}) - ) - - for { - select { - case <-done: - return nil - case s := <-signals: - switch s { - case unix.SIGCHLD: - if err := containerdshim.Reap(); err != nil { - logger.WithError(err).Error("reap exit status") - } - case unix.SIGTERM, unix.SIGINT: - go termOnce.Do(func() { - ctx := context.TODO() - if err := server.Shutdown(ctx); err != nil { - logger.WithError(err).Error("failed to shutdown server") - } - // Ensure our child is dead if any - sv.Kill(ctx, &shimapi.KillRequest{ - Signal: uint32(syscall.SIGKILL), - All: true, - }) - sv.Delete(context.Background(), &ptypes.Empty{}) - close(done) - }) - case unix.SIGPIPE: - } - } - } -} - -func dumpStacks(logger *logrus.Entry) { - var ( - buf []byte - stackSize int - ) - bufferLen := 16384 - for stackSize == len(buf) { - buf = make([]byte, bufferLen) - stackSize = runtime.Stack(buf, true) - bufferLen *= 2 - } - buf = buf[:stackSize] - logger.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) -} - -type remoteEventsPublisher struct { - address string -} - -func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error { - ns, _ := namespaces.Namespace(ctx) - encoded, err := typeurl.MarshalAny(event) - if err != nil { - return err - } - data, err := encoded.Marshal() - if err != nil { - return err - } - cmd := exec.CommandContext(ctx, containerdBinaryFlag, "--address", 
l.address, "publish", "--topic", topic, "--namespace", ns) - cmd.Stdin = bytes.NewReader(data) - c, err := containerdshim.Default.Start(cmd) - if err != nil { - return err - } - status, err := containerdshim.Default.Wait(cmd, c) - if err != nil { - return err - } - if status != 0 { - return errors.New("failed to publish event") - } - return nil + shim.Run("io.containerd.runsc.v1", runsc.New) } diff --git a/shim/v2/runtime-handler-shim-v2-quickstart.md b/shim/v2/runtime-handler-shim-v2-quickstart.md new file mode 100644 index 000000000..ca8336089 --- /dev/null +++ b/shim/v2/runtime-handler-shim-v2-quickstart.md @@ -0,0 +1,232 @@ +# Runtime Handler Quickstart (Shim V2) + +This document describes how to install and run `containerd-shim-runsc-v1` using +the containerd runtime handler support. This requires containerd 1.2 or later. + +## Requirements + +- **runsc**: See the [gVisor documentation](https://github.com/google/gvisor) for information on how to install runsc. +- **containerd**: See the [containerd website](https://containerd.io/) for information on how to install containerd. + +## Install + +### Install containerd-shim-runsc-v1 + +1. Build and install `containerd-shim-runsc-v1`. + + +[embedmd]:# (../test/e2e/shim-install.sh shell /{ # Step 1\(dev\)/ /^}/) +```shell +{ # Step 1(dev): Build and install gvisor-containerd-shim and containerd-shim-runsc-v1 + make + sudo make install +} +``` + +### Configure containerd + +1. Update `/etc/containerd/config.toml`. Make sure `containerd-shim-runsc-v1` is + in `${PATH}`. + +[embedmd]:# (../test/e2e/runtime-handler-shim-v2/install.sh shell /{ # Step 1/ /^}/) +```shell +{ # Step 1: Create containerd config.toml +cat < 0 { // Omit if empty. s["command"] = cmd @@ -95,25 +98,29 @@ var Httpd = SimpleSpec("httpd", "basic/httpd", nil, nil) // TestCrictlSanity refers to b/112433158. func TestCrictlSanity(t *testing.T) { - // Setup containerd and crictl. - crictl, cleanup, err := setup(t) - if err != nil { - t.Fatalf("failed to setup crictl: %v", err) - } - defer cleanup() - podID, contID, err := crictl.StartPodAndContainer("basic/httpd", Sandbox("default"), Httpd) - if err != nil { - t.Fatalf("start failed: %v", err) - } - - // Look for the httpd page. - if err = httpGet(crictl, podID, "index.html"); err != nil { - t.Fatalf("failed to get page: %v", err) - } - - // Stop everything. - if err := crictl.StopPodAndContainer(podID, contID); err != nil { - t.Fatalf("stop failed: %v", err) + for _, version := range allVersions { + t.Run(version, func(t *testing.T) { + // Setup containerd and crictl. + crictl, cleanup, err := setup(t, version) + if err != nil { + t.Fatalf("failed to setup crictl: %v", err) + } + defer cleanup() + podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/httpd", Sandbox("default"), Httpd) + if err != nil { + t.Fatalf("start failed: %v", err) + } + + // Look for the httpd page. + if err = httpGet(crictl, podID, "index.html"); err != nil { + t.Fatalf("failed to get page: %v", err) + } + + // Stop everything. + if err := crictl.StopPodAndContainer(podID, contID); err != nil { + t.Fatalf("stop failed: %v", err) + } + }) } } @@ -147,146 +154,179 @@ var HttpdMountPaths = SimpleSpec("httpd", "basic/httpd", nil, map[string]interfa // TestMountPaths refers to b/117635704. func TestMountPaths(t *testing.T) { - // Setup containerd and crictl. 
- crictl, cleanup, err := setup(t) - if err != nil { - t.Fatalf("failed to setup crictl: %v", err) - } - defer cleanup() - podID, contID, err := crictl.StartPodAndContainer("basic/httpd", Sandbox("default"), HttpdMountPaths) - if err != nil { - t.Fatalf("start failed: %v", err) - } - - // Look for the directory available at /test. - if err = httpGet(crictl, podID, "test"); err != nil { - t.Fatalf("failed to get page: %v", err) - } - - // Stop everything. - if err := crictl.StopPodAndContainer(podID, contID); err != nil { - t.Fatalf("stop failed: %v", err) + for _, version := range allVersions { + t.Run(version, func(t *testing.T) { + // Setup containerd and crictl. + crictl, cleanup, err := setup(t, version) + if err != nil { + t.Fatalf("failed to setup crictl: %v", err) + } + defer cleanup() + podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/httpd", Sandbox("default"), HttpdMountPaths) + if err != nil { + t.Fatalf("start failed: %v", err) + } + + // Look for the directory available at /test. + if err = httpGet(crictl, podID, "test"); err != nil { + t.Fatalf("failed to get page: %v", err) + } + + // Stop everything. + if err := crictl.StopPodAndContainer(podID, contID); err != nil { + t.Fatalf("stop failed: %v", err) + } + }) } } // TestMountPaths refers to b/118728671. func TestMountOverSymlinks(t *testing.T) { - // Setup containerd and crictl. - crictl, cleanup, err := setup(t) - if err != nil { - t.Fatalf("failed to setup crictl: %v", err) - } - defer cleanup() - - spec := SimpleSpec("busybox", "basic/resolv", []string{"sleep", "1000"}, nil) - podID, contID, err := crictl.StartPodAndContainer("basic/resolv", Sandbox("default"), spec) - if err != nil { - t.Fatalf("start failed: %v", err) - } - - out, err := crictl.Exec(contID, "readlink", "/etc/resolv.conf") - if err != nil { - t.Fatalf("readlink failed: %v, out: %s", err, out) - } - if want := "/tmp/resolv.conf"; !strings.Contains(string(out), want) { - t.Fatalf("/etc/resolv.conf is not pointing to %q: %q", want, string(out)) - } - - etc, err := crictl.Exec(contID, "cat", "/etc/resolv.conf") - if err != nil { - t.Fatalf("cat failed: %v, out: %s", err, etc) - } - tmp, err := crictl.Exec(contID, "cat", "/tmp/resolv.conf") - if err != nil { - t.Fatalf("cat failed: %v, out: %s", err, out) - } - if tmp != etc { - t.Fatalf("file content doesn't match:\n\t/etc/resolv.conf: %s\n\t/tmp/resolv.conf: %s", string(etc), string(tmp)) - } - - // Stop everything. - if err := crictl.StopPodAndContainer(podID, contID); err != nil { - t.Fatalf("stop failed: %v", err) + for _, version := range allVersions { + t.Run(version, func(t *testing.T) { + // Setup containerd and crictl. 
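+			// Each version subtest runs against its own containerd
+			// instance, configured for either the v1 or v2 shim API.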
+ crictl, cleanup, err := setup(t, version) + if err != nil { + t.Fatalf("failed to setup crictl: %v", err) + } + defer cleanup() + + spec := SimpleSpec("busybox", "basic/resolv", []string{"sleep", "1000"}, nil) + podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/resolv", Sandbox("default"), spec) + if err != nil { + t.Fatalf("start failed: %v", err) + } + + out, err := crictl.Exec(contID, "readlink", "/etc/resolv.conf") + if err != nil { + t.Fatalf("readlink failed: %v, out: %s", err, out) + } + if want := "/tmp/resolv.conf"; !strings.Contains(string(out), want) { + t.Fatalf("/etc/resolv.conf is not pointing to %q: %q", want, string(out)) + } + + etc, err := crictl.Exec(contID, "cat", "/etc/resolv.conf") + if err != nil { + t.Fatalf("cat failed: %v, out: %s", err, etc) + } + tmp, err := crictl.Exec(contID, "cat", "/tmp/resolv.conf") + if err != nil { + t.Fatalf("cat failed: %v, out: %s", err, out) + } + if tmp != etc { + t.Fatalf("file content doesn't match:\n\t/etc/resolv.conf: %s\n\t/tmp/resolv.conf: %s", string(etc), string(tmp)) + } + + // Stop everything. + if err := crictl.StopPodAndContainer(podID, contID); err != nil { + t.Fatalf("stop failed: %v", err) + } + }) } } // TestHomeDir tests that the HOME environment variable is set for // Pod containers. func TestHomeDir(t *testing.T) { - // Setup containerd and crictl. - crictl, cleanup, err := setup(t) - if err != nil { - t.Fatalf("failed to setup crictl: %v", err) + for _, version := range allVersions { + t.Run(version, func(t *testing.T) { + // Setup containerd and crictl. + crictl, cleanup, err := setup(t, version) + if err != nil { + t.Fatalf("failed to setup crictl: %v", err) + } + defer cleanup() + + // Note that container ID returned here is a sub-container. All Pod + // containers are sub-containers. The root container of the sandbox is the + // pause container. + t.Run("sub-container", func(t *testing.T) { + contSpec := SimpleSpec("subcontainer", "basic/busybox", []string{"sh", "-c", "echo $HOME"}, nil) + podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/busybox", Sandbox("subcont-sandbox"), contSpec) + if err != nil { + t.Fatalf("start failed: %v", err) + } + + out, err := crictl.Logs(contID) + if err != nil { + t.Fatalf("failed retrieving container logs: %v, out: %s", err, out) + } + if got, want := strings.TrimSpace(string(out)), "/root"; got != want { + t.Fatalf("Home directory invalid. Got %q, Want : %q", got, want) + } + + // Stop everything; note that the pod may have already stopped. + crictl.StopPodAndContainer(podID, contID) + }) + + // Tests that HOME is set for the exec process. + t.Run("exec", func(t *testing.T) { + contSpec := SimpleSpec("exec", "basic/busybox", []string{"sleep", "1000"}, nil) + podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/busybox", Sandbox("exec-sandbox"), contSpec) + if err != nil { + t.Fatalf("start failed: %v", err) + } + + out, err := crictl.Exec(contID, "sh", "-c", "echo $HOME") + if err != nil { + t.Fatalf("failed retrieving container logs: %v, out: %s", err, out) + } + if got, want := strings.TrimSpace(string(out)), "/root"; got != want { + t.Fatalf("Home directory invalid. Got %q, Want : %q", got, want) + } + + // Stop everything. + if err := crictl.StopPodAndContainer(podID, contID); err != nil { + t.Fatalf("stop failed: %v", err) + } + }) + }) } - defer cleanup() - - // Note that container ID returned here is a sub-container. All Pod - // containers are sub-containers. 
The root container of the sandbox is the - // pause container. - t.Run("sub-container", func(t *testing.T) { - contSpec := SimpleSpec("subcontainer", "basic/busybox", []string{"sh", "-c", "echo $HOME"}, nil) - podID, contID, err := crictl.StartPodAndContainer("basic/busybox", Sandbox("subcont-sandbox"), contSpec) - if err != nil { - t.Fatalf("start failed: %v", err) - } - - out, err := crictl.Logs(contID) - if err != nil { - t.Fatalf("failed retrieving container logs: %v, out: %s", err, out) - } - if got, want := strings.TrimSpace(string(out)), "/root"; got != want { - t.Fatalf("Home directory invalid. Got %q, Want : %q", got, want) - } - - // Stop everything. - if err := crictl.StopPodAndContainer(podID, contID); err != nil { - t.Fatalf("stop failed: %v", err) - } - }) - - // Tests that HOME is set for the exec process. - t.Run("exec", func(t *testing.T) { - contSpec := SimpleSpec("exec", "basic/busybox", []string{"sleep", "1000"}, nil) - podID, contID, err := crictl.StartPodAndContainer("basic/busybox", Sandbox("exec-sandbox"), contSpec) - if err != nil { - t.Fatalf("start failed: %v", err) - } - - out, err := crictl.Exec(contID, "sh", "-c", "echo $HOME") - if err != nil { - t.Fatalf("failed retrieving container logs: %v, out: %s", err, out) - } - if got, want := strings.TrimSpace(string(out)), "/root"; got != want { - t.Fatalf("Home directory invalid. Got %q, Want : %q", got, want) - } - - // Stop everything. - if err := crictl.StopPodAndContainer(podID, contID); err != nil { - t.Fatalf("stop failed: %v", err) - } - }) } -// containerdConfigTemplate is a .toml config for containerd. It contains a -// formatting verb so the runtime field can be set via fmt.Sprintf. -const containerdConfigTemplate = ` +const containerdRuntime = "runsc" + +const v1Template = ` disabled_plugins = ["restart"] +[plugins.cri] + disable_tcp_service = true [plugins.linux] - runtime = "%s" - runtime_root = "/tmp/test-containerd/runsc" - shim = "/usr/local/bin/gvisor-containerd-shim" + shim = "%s" shim_debug = true - -[plugins.cri.containerd.runtimes.runsc] +[plugins.cri.containerd.runtimes.` + containerdRuntime + `] runtime_type = "io.containerd.runtime.v1.linux" runtime_engine = "%s" + runtime_root = "%s/root/runsc" ` +const v2Template = ` +disabled_plugins = ["restart"] +[plugins.cri] + disable_tcp_service = true +[plugins.linux] + shim_debug = true +[plugins.cri.containerd.runtimes.` + containerdRuntime + `] + runtime_type = "io.containerd.` + containerdRuntime + `.v1" +[plugins.cri.containerd.runtimes.` + containerdRuntime + `.options] + TypeUrl = "io.containerd.` + containerdRuntime + `.v1.options" +` + +const ( + // v1 is the containerd API v1. + v1 string = "v1" + + // v1 is the containerd API v21. + v2 string = "v2" +) + +// allVersions is the set of known versions. +var allVersions = []string{v1, v2} + // setup sets up before a test. Specifically it: // * Creates directories and a socket for containerd to utilize. // * Runs containerd and waits for it to reach a "ready" state for testing. // * Returns a cleanup function that should be called at the end of the test. -func setup(t *testing.T) (*criutil.Crictl, func(), error) { +func setup(t *testing.T, version string) (*criutil.Crictl, func(), error) { // Create temporary containerd root and state directories, and a socket // via which crictl and containerd communicate. 
containerdRoot, err := ioutil.TempDir(testutil.TmpDir(), "containerd-root") @@ -295,13 +335,43 @@ func setup(t *testing.T) (*criutil.Crictl, func(), error) { } cu := cleanup.Make(func() { os.RemoveAll(containerdRoot) }) defer cu.Clean() + t.Logf("Using containerd root: %s", containerdRoot) containerdState, err := ioutil.TempDir(testutil.TmpDir(), "containerd-state") if err != nil { t.Fatalf("failed to create containerd state: %v", err) } cu.Add(func() { os.RemoveAll(containerdState) }) - sockAddr := filepath.Join(testutil.TmpDir(), "containerd-test.sock") + t.Logf("Using containerd state: %s", containerdState) + + sockDir, err := ioutil.TempDir(testutil.TmpDir(), "containerd-sock") + if err != nil { + t.Fatalf("failed to create containerd socket directory: %v", err) + } + cu.Add(func() { os.RemoveAll(sockDir) }) + sockAddr := path.Join(sockDir, "test.sock") + t.Logf("Using containerd socket: %s", sockAddr) + + // Extract the containerd version. + versionCmd := exec.Command(getContainerd(), "-v") + out, err := versionCmd.CombinedOutput() + if err != nil { + t.Fatalf("error extracting containerd version: %v (%s)", err, string(out)) + } + r := regexp.MustCompile(" v([0-9]+)\\.([0-9]+)\\.([0-9+])") + vs := r.FindStringSubmatch(string(out)) + if len(vs) != 4 { + t.Fatalf("error unexpected version string: %s", string(out)) + } + major, err := strconv.ParseUint(vs[1], 10, 64) + if err != nil { + t.Fatalf("error parsing containerd major version: %v (%s)", err, string(out)) + } + minor, err := strconv.ParseUint(vs[2], 10, 64) + if err != nil { + t.Fatalf("error parsing containerd minor version: %v (%s)", err, string(out)) + } + t.Logf("Using containerd version: %d.%d", major, minor) // We rewrite a configuration. This is based on the current docker // configuration for the runtime under test. @@ -309,28 +379,100 @@ func setup(t *testing.T) (*criutil.Crictl, func(), error) { if err != nil { t.Fatalf("error discovering runtime path: %v", err) } - config, configCleanup, err := testutil.WriteTmpFile("containerd-config", fmt.Sprintf(containerdConfigTemplate, runtime, runtime)) + t.Logf("Using runtime: %v", runtime) + + // Construct a PATH that includes the runtime directory. This is + // because the shims will be installed there, and containerd may infer + // the binary name and search the PATH. + runtimeDir := path.Dir(runtime) + modifiedPath := os.Getenv("PATH") + if modifiedPath != "" { + modifiedPath = ":" + modifiedPath // We prepend below. + } + modifiedPath = path.Dir(getContainerd()) + modifiedPath + modifiedPath = runtimeDir + ":" + modifiedPath + t.Logf("Using PATH: %v", modifiedPath) + + var ( + config string + runpArgs []string + ) + switch version { + case v1: + // This is only supported less than 1.3. + if major > 1 || (major == 1 && minor >= 3) { + t.Skipf("skipping unsupported containerd (want less than 1.3, got %d.%d)", major, minor) + } + + // We provide the shim, followed by the runtime, and then a + // temporary root directory. Note that we can safely assume + // that the shim has been installed in the same directory as + // the runtime (for test installs and for normal installs). + // Since this is v1, the binary name will be fixed. + config = fmt.Sprintf(v1Template, path.Join(runtimeDir, "gvisor-containerd-shim"), runtime, runtimeDir) + case v2: + // This is only supported past 1.2. + if major < 1 || (major == 1 && minor <= 1) { + t.Skipf("skipping incompatible containerd (want at least 1.2, got %d.%d)", major, minor) + } + + // The runtime is provided via parameter. 
Note that the v2 shim + // binary name is always containerd-shim-* so we don't actually + // care about the docker runtime name. + config = v2Template + default: + t.Fatalf("unknown version: %d", version) + } + t.Logf("Using config: %s", config) + + // Generate the configuration for the test. + configFile, configCleanup, err := testutil.WriteTmpFile("containerd-config", config) if err != nil { t.Fatalf("failed to write containerd config") } cu.Add(configCleanup) // Start containerd. - cmd := exec.Command(getContainerd(), - "--config", config, + args := []string{ + getContainerd(), + "--config", configFile, "--log-level", "debug", "--root", containerdRoot, "--state", containerdState, - "--address", sockAddr) + "--address", sockAddr, + } + t.Logf("Using args: %s", strings.Join(args, " ")) + cmd := exec.Command(args[0], args[1:]...) + cmd.Env = append(os.Environ(), "PATH="+modifiedPath) + + // Include output in logs. + stderrPipe, err := cmd.StderrPipe() + if err != nil { + t.Fatalf("failed to create stderr pipe: %v", err) + } + cu.Add(func() { stderrPipe.Close() }) + stdoutPipe, err := cmd.StdoutPipe() + if err != nil { + t.Fatalf("failed to create stdout pipe: %v", err) + } + cu.Add(func() { stdoutPipe.Close() }) + var ( + wg sync.WaitGroup + stderr bytes.Buffer + stdout bytes.Buffer + ) startupR, startupW := io.Pipe() - defer startupR.Close() - defer startupW.Close() - stderr := &bytes.Buffer{} - stdout := &bytes.Buffer{} - cmd.Stderr = io.MultiWriter(startupW, stderr) - cmd.Stdout = io.MultiWriter(startupW, stdout) + wg.Add(2) + go func() { + defer wg.Done() + io.Copy(io.MultiWriter(startupW, &stderr), stderrPipe) + }() + go func() { + defer wg.Done() + io.Copy(io.MultiWriter(startupW, &stdout), stdoutPipe) + }() cu.Add(func() { - // Log output in case of failure. + wg.Wait() t.Logf("containerd stdout: %s", stdout.String()) t.Logf("containerd stderr: %s", stderr.String()) }) @@ -345,13 +487,17 @@ func setup(t *testing.T) (*criutil.Crictl, func(), error) { t.Fatalf("failed to start containerd: %v", err) } + // Discard all subsequent data. + go io.Copy(ioutil.Discard, startupR) + + // Create the crictl interface. + cc := criutil.NewCrictl(t, sockAddr, runpArgs) + cu.Add(cc.CleanUp) + // Kill must be the last cleanup (as it will be executed first). - cc := criutil.NewCrictl(t, sockAddr) cu.Add(func() { - cc.CleanUp() // Remove tmp files, etc. - if err := testutil.KillCommand(cmd); err != nil { - log.Printf("error killing containerd: %v", err) - } + // Best effort: ignore errors. 
+ testutil.KillCommand(cmd) }) return cc, cu.Release(), nil diff --git a/test/shim/containerd-install.sh b/test/shim/containerd-install.sh deleted file mode 100755 index 400819245..000000000 --- a/test/shim/containerd-install.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# A script to install containerd and CNI plugins for e2e testing - -wget -q --https-only \ - https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz \ - https://github.com/containernetworking/plugins/releases/download/v0.7.0/cni-plugins-amd64-v0.7.0.tgz - -sudo mkdir -p /etc/containerd /etc/cni/net.d /opt/cni/bin -sudo tar -xvf cni-plugins-amd64-v0.7.0.tgz -C /opt/cni/bin/ -sudo tar -xvf containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz -C / - -cat </tmp/containerd-cri.log & diff --git a/test/shim/crictl-install.sh b/test/shim/crictl-install.sh deleted file mode 100755 index 1d63c889b..000000000 --- a/test/shim/crictl-install.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -# A sample script for installing crictl. - -set -ex - -{ # Step 1: Download crictl -wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz -tar xf crictl-v1.13.0-linux-amd64.tar.gz -sudo mv crictl /usr/local/bin -} - -{ # Step 2: Configure crictl -cat < /tmp/containerd-cri.log & -} diff --git a/test/shim/runtime-handler-shim-v2/test.sh b/test/shim/runtime-handler-shim-v2/test.sh deleted file mode 100755 index e33655ec1..000000000 --- a/test/shim/runtime-handler-shim-v2/test.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -# Runs end-to-end tests for gvisor-containerd-shim to test the use of runtime -# handler. This should work on containerd 1.2+ - -# This is meant to be run in a VM as it makes a fairly invasive install of -# containerd. - -set -ex - -# Install containerd -. ./test/e2e/containerd-install.sh - -# Install gVisor -. ./test/e2e/runsc-install.sh - -# Install gvisor-containerd-shim -. ./test/e2e/shim-install.sh - -# Test installation/configuration -. ./test/e2e/runtime-handler-shim-v2/install.sh - -# Install crictl -. ./test/e2e/crictl-install.sh - -# Test usage (the same with runtime-handler) -. ./test/e2e/runtime-handler/usage.sh - -# Run a container in the sandbox -. ./test/e2e/run-container.sh - -# Validate the pod and container -. ./test/e2e/validate.sh -. ./test/e2e/runtime-handler-shim-v2/validate.sh diff --git a/test/shim/runtime-handler-shim-v2/validate.sh b/test/shim/runtime-handler-shim-v2/validate.sh deleted file mode 100755 index b74a059ef..000000000 --- a/test/shim/runtime-handler-shim-v2/validate.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -# A sample script to validating the running containerd-shim-runsc-v1. - -set -ex - -ps aux | grep [c]ontainerd-shim-runsc-v1 diff --git a/test/shim/runtime-handler/install.sh b/test/shim/runtime-handler/install.sh deleted file mode 100755 index ebe9d3580..000000000 --- a/test/shim/runtime-handler/install.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -# A sample script for installing and configuring the gvisor-containerd-shim to -# use the containerd runtime handler. 
- -set -ex - -{ # Step 1: Create containerd config.toml -cat < /tmp/containerd-cri.log & -} diff --git a/test/shim/runtime-handler/test.sh b/test/shim/runtime-handler/test.sh deleted file mode 100755 index 99f3565b6..000000000 --- a/test/shim/runtime-handler/test.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Runs end-to-end tests for gvisor-containerd-shim to test the use of runtime -# handler. This should work on containerd 1.2+ - -# This is meant to be run in a VM as it makes a fairly invasive install of -# containerd. - -set -ex - -# Install containerd -. ./test/e2e/containerd-install.sh - -# Install gVisor -. ./test/e2e/runsc-install.sh - -# Install gvisor-containerd-shim -. ./test/e2e/shim-install.sh - -# Test installation/configuration -. ./test/e2e/runtime-handler/install.sh - -# Install crictl -. ./test/e2e/crictl-install.sh - -# Test usage -. ./test/e2e/runtime-handler/usage.sh - -# Run a container in the sandbox -. ./test/e2e/run-container.sh - -# Validate the pod and container -. ./test/e2e/validate.sh diff --git a/test/shim/runtime-handler/usage.sh b/test/shim/runtime-handler/usage.sh deleted file mode 100755 index 350c720c2..000000000 --- a/test/shim/runtime-handler/usage.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# A sample script for testing the gvisor-containerd-shim -# using runtime handler. - -set -ex - -{ # Step 1: Pull the nginx image -sudo crictl pull nginx -} - -{ # Step 2: Create sandbox.json -cat </tmp/containerd-cri.log & -} diff --git a/test/shim/untrusted-workload/test.sh b/test/shim/untrusted-workload/test.sh deleted file mode 100755 index 6e312cf6d..000000000 --- a/test/shim/untrusted-workload/test.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Runs end-to-end tests for gvisor-containerd-shim to test using the -# untrusted workload extension. This should work on containerd 1.1+ - -# This is meant to be run in a VM as it makes a fairly invasive install of -# containerd. - -set -ex - -# Install containerd -. ./test/e2e/containerd-install.sh - -# Install gVisor -. ./test/e2e/runsc-install.sh - -# Install gvisor-containerd-shim -. ./test/e2e/shim-install.sh - -# Test installation/configuration -. ./test/e2e/untrusted-workload/install.sh - -# Install crictl -. ./test/e2e/crictl-install.sh - -# Test usage -. ./test/e2e/untrusted-workload/usage.sh - -# Run a container in the sandbox -. ./test/e2e/run-container.sh - -# Validate the pod and container -. ./test/e2e/validate.sh diff --git a/test/shim/untrusted-workload/usage.sh b/test/shim/untrusted-workload/usage.sh deleted file mode 100755 index db8206964..000000000 --- a/test/shim/untrusted-workload/usage.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# A sample script for testing the gvisor-containerd-shim # using untrusted -# workload extension. - -set -ex - -{ # Step 1: Pull the nginx image -sudo crictl pull nginx -} - -{ # Step 2: Create sandbox.json -cat < ${shim_config_path} <<-EOF + runc_shim = "/usr/bin/containerd-shim" + +[runsc_config] + debug = "true" + debug-log = "/tmp/runsc-logs/" + strace = "true" + file-access = "shared" +EOF +fi + +# Configure CNI. +(cd "${GOPATH}" && src/github.com/containerd/containerd/script/setup/install-cni) +cat </dev/null; then + service docker restart +fi diff --git a/tools/vm/ubuntu1604/25_docker.sh b/tools/vm/ubuntu1604/25_docker.sh deleted file mode 100755 index 53d8ca588..000000000 --- a/tools/vm/ubuntu1604/25_docker.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The gVisor Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Add dependencies. -while true; do - if (apt-get update && apt-get install -y \ - apt-transport-https \ - ca-certificates \ - curl \ - gnupg-agent \ - software-properties-common); then - break - fi - result=$? - if [[ $result -ne 100 ]]; then - exit $result - fi -done - -# Install the key. -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - -# Add the repository. -add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) \ - stable" - -# Install docker. -while true; do - if (apt-get update && apt-get install -y \ - docker-ce \ - docker-ce-cli \ - containerd.io); then - break - fi - result=$? - if [[ $result -ne 100 ]]; then - exit $result - fi -done - -# Enable Docker IPv6. -cat > /etc/docker/daemon.json < ${shim_config_tmp_path} <<-EOF - runc_shim = "/usr/local/bin/containerd-shim" - -[runsc_config] - debug = "true" - debug-log = "/tmp/runsc-logs/" - strace = "true" - file-access = "shared" -EOF -mv ${shim_config_tmp_path} ${shim_config_path} - -# Configure CNI. -(cd "${GOPATH}" && GOPATH="${GOPATH}" \ - src/github.com/containerd/containerd/script/setup/install-cni) - -# Cleanup the above. -rm -rf "${GOPATH}" -rm -rf "${latest}" -rm -rf "${shim_path}" -rm -rf "${shim_config_tmp_path}" diff --git a/tools/vm/ubuntu1604/30_docker.sh b/tools/vm/ubuntu1604/30_docker.sh new file mode 100755 index 000000000..53d8ca588 --- /dev/null +++ b/tools/vm/ubuntu1604/30_docker.sh @@ -0,0 +1,65 @@ +#!/bin/bash + +# Copyright 2019 The gVisor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Add dependencies. +while true; do + if (apt-get update && apt-get install -y \ + apt-transport-https \ + ca-certificates \ + curl \ + gnupg-agent \ + software-properties-common); then + break + fi + result=$? + if [[ $result -ne 100 ]]; then + exit $result + fi +done + +# Install the key. +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - + +# Add the repository. +add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) \ + stable" + +# Install docker. +while true; do + if (apt-get update && apt-get install -y \ + docker-ce \ + docker-ce-cli \ + containerd.io); then + break + fi + result=$? + if [[ $result -ne 100 ]]; then + exit $result + fi +done + +# Enable Docker IPv6. +cat > /etc/docker/daemon.json <