author    Michael Pratt <mpratt@google.com>    2019-04-03 16:21:38 -0700
committer Shentubot <shentubot@google.com>    2019-04-03 16:22:43 -0700
commit    4968dd1341a04e93557bdd9f4b4b83eb508e026d (patch)
tree      50ef3c28ec24fad937f029f257cbe3338222445f /pkg/sentry/kernel/sessions.go
parent    82529becaee6f5050cb3ebb4aaa7a798357c1cf1 (diff)
Cache ThreadGroups in PIDNamespace
If there are thousands of threads, ThreadGroupsAppend becomes very expensive
as it must iterate over all Tasks to find the ThreadGroup leaders. Reduce the
cost by maintaining a map of ThreadGroups which can be used to grab them all
directly.

The one somewhat visible change is to convert PID namespace init children
zapping to a group-directed SIGKILL, as Linux did in 82058d668465 "signal:
Use group_send_sig_info to kill all processes in a pid namespace".

In a benchmark that creates N threads which sleep for two minutes, we see
approximately this much CPU time in ThreadGroupsAppend:

Before:
1 thread:      0ms
1024 threads:  30ms - 9130ms
4096 threads:  50ms - 2000ms
8192 threads:  18160ms
16384 threads: 17210ms

After:
1 thread:      0ms
1024 threads:  0ms
4096 threads:  0ms
8192 threads:  0ms
16384 threads: 0ms

The profiling is actually extremely noisy (likely due to cache effects), as
some runs show almost no samples at 1024, 4096 threads, but obviously this
does not scale to lots of threads.

PiperOrigin-RevId: 241828039
Change-Id: I17827c90045df4b3c49b3174f3a05bca3026a72c
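ThreadGroupsAppend itself lives in pkg/sentry/kernel/threads.go, so the hot
path is not in the hunks below. As a rough, simplified sketch (not the
verbatim code from that file), the shape of the change is:

Before (sketch): iterate every Task in the namespace and keep the leaders.

func (ns *PIDNamespace) ThreadGroupsAppend(tgs []*ThreadGroup) []*ThreadGroup {
	ns.owner.mu.RLock()
	defer ns.owner.mu.RUnlock()
	for t := range ns.tids { // one entry per task: O(threads)
		if t == t.tg.leader {
			tgs = append(tgs, t.tg)
		}
	}
	return tgs
}

After (sketch): walk the cached per-namespace map of thread groups directly.

func (ns *PIDNamespace) ThreadGroupsAppend(tgs []*ThreadGroup) []*ThreadGroup {
	ns.owner.mu.RLock()
	defer ns.owner.mu.RUnlock()
	for tg := range ns.tgids { // one entry per thread group: O(thread groups)
		tgs = append(tgs, tg)
	}
	return tgs
}

The tgids map is the cache named in the subject line: it is maintained
alongside tids and, as the hunks below show, also replaces the
ns.tids[tg.leader] lookups used for session and process group IDs.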
Diffstat (limited to 'pkg/sentry/kernel/sessions.go')
-rw-r--r--  pkg/sentry/kernel/sessions.go  18
1 file changed, 9 insertions, 9 deletions
diff --git a/pkg/sentry/kernel/sessions.go b/pkg/sentry/kernel/sessions.go
index ae6daac60..65e2b73c4 100644
--- a/pkg/sentry/kernel/sessions.go
+++ b/pkg/sentry/kernel/sessions.go
@@ -240,14 +240,14 @@ func (pg *ProcessGroup) SendSignal(info *arch.SignalInfo) error {
 	defer tasks.mu.RUnlock()
 
 	var lastErr error
-	for t := range tasks.Root.tids {
-		if t == t.tg.leader && t.tg.ProcessGroup() == pg {
-			t.tg.signalHandlers.mu.Lock()
-			defer t.tg.signalHandlers.mu.Unlock()
+	for tg := range tasks.Root.tgids {
+		if tg.ProcessGroup() == pg {
+			tg.signalHandlers.mu.Lock()
 			infoCopy := *info
-			if err := t.sendSignalLocked(&infoCopy, true /*group*/); err != nil {
+			if err := tg.leader.sendSignalLocked(&infoCopy, true /*group*/); err != nil {
 				lastErr = err
 			}
+			tg.signalHandlers.mu.Unlock()
 		}
 	}
 	return lastErr
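Besides switching the iteration from tasks.Root.tids to tasks.Root.tgids, the
hunk above also stops deferring the signalHandlers unlock: with defer inside
the loop, every matching group's lock stayed held until SendSignal returned,
while the new code releases each lock before moving to the next group. A
generic illustration of that Go pattern (hypothetical names, not gVisor code):

package main

import "sync"

// lockAllUntilReturn defers each unlock inside the loop, so every mutex
// acquired so far remains held until the function returns.
func lockAllUntilReturn(mus []*sync.Mutex) {
	for _, mu := range mus {
		mu.Lock()
		defer mu.Unlock()
	}
	// All mutexes are still held at this point.
}

// lockOnePerIteration releases each mutex before acquiring the next one.
func lockOnePerIteration(mus []*sync.Mutex) {
	for _, mu := range mus {
		mu.Lock()
		// ... per-element work ...
		mu.Unlock()
	}
}

func main() {
	mus := []*sync.Mutex{new(sync.Mutex), new(sync.Mutex)}
	lockAllUntilReturn(mus)
	lockOnePerIteration(mus)
}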
@@ -268,7 +268,7 @@ func (tg *ThreadGroup) CreateSession() error {
 // Precondition: callers must hold TaskSet.mu for writing.
 func (tg *ThreadGroup) createSession() error {
 	// Get the ID for this thread in the current namespace.
-	id := tg.pidns.tids[tg.leader]
+	id := tg.pidns.tgids[tg]
 
 	// Check if this ThreadGroup already leads a Session, or
 	// if the proposed group is already taken.
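This hunk and the three that follow are the same mechanical substitution: a
ThreadGroup's ID in a namespace was previously looked up via its leader task
(ns.tids[tg.leader]) and is now read from the cached map (ns.tgids[tg]). Both
lookups yield the same ThreadID, since a thread group's ID in a namespace is
its leader's thread ID there; the new form simply avoids going through the
leader Task.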
@@ -337,7 +337,7 @@ func (tg *ThreadGroup) createSession() error {
 
 	// Ensure a translation is added to all namespaces.
 	for ns := tg.pidns; ns != nil; ns = ns.parent {
-		local := ns.tids[tg.leader]
+		local := ns.tgids[tg]
 		ns.sids[s] = SessionID(local)
 		ns.sessions[SessionID(local)] = s
 		ns.pgids[pg] = ProcessGroupID(local)
@@ -356,7 +356,7 @@ func (tg *ThreadGroup) CreateProcessGroup() error {
 	defer tg.pidns.owner.mu.Unlock()
 
 	// Get the ID for this thread in the current namespace.
-	id := tg.pidns.tids[tg.leader]
+	id := tg.pidns.tgids[tg]
 
 	// Per above, check for a Session leader or existing group.
 	for s := tg.pidns.owner.sessions.Front(); s != nil; s = s.Next() {
@@ -401,7 +401,7 @@ func (tg *ThreadGroup) CreateProcessGroup() error {
 
 	// Ensure this translation is added to all namespaces.
 	for ns := tg.pidns; ns != nil; ns = ns.parent {
-		local := ns.tids[tg.leader]
+		local := ns.tgids[tg]
 		ns.pgids[pg] = ProcessGroupID(local)
 		ns.processGroups[ProcessGroupID(local)] = pg
 	}