From: "Eric W. Biederman" <ebiederm@xmission.com>
Date: 2022-01-03
Subject: [PATCH 15/17] signal: Add JOBCTL_WILL_EXIT to mark exiting tasks

Mark tasks that need to exit with JOBCTL_WILL_EXIT instead of reusing
the per thread SIGKILL.

This removes the double meaning of the per thread SIGKILL and makes
it possible to detect when a task has already been scheduled to exit,
so that unnecessary work can be skipped.
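
Concretely, the new test is a plain flag check; a minimal sketch (not
code from this patch), assuming siglock is held so jobctl reads are
stable:

	/* Sketch: bail out early if an exit has already been scheduled. */
	if (task->jobctl & JOBCTL_WILL_EXIT)
		return;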

A jobctl flag was chosen for this purpose because jobctl changes are
protected by siglock, and updates are already careful not to change
or clear other bits in jobctl. Protection by a lock when changing the
value is necessary because JOBCTL_WILL_EXIT will not only be set by
the current task. That task->jobctl is protected by siglock is
convenient, as siglock is already held everywhere I want to set or
clear JOBCTL_WILL_EXIT.
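
To illustrate the locking rule, setting the flag on another task would
look roughly like the sketch below; schedule_task_exit_locked in the
diff is the in-tree helper and expects siglock to already be held, so
the explicit lock/unlock here is an assumption for a bare caller:

	/* Sketch: mark another task for exit; jobctl is protected by siglock. */
	spin_lock_irq(&task->sighand->siglock);
	if (!(task->jobctl & JOBCTL_WILL_EXIT)) {
		task->jobctl |= JOBCTL_WILL_EXIT;
		signal_wake_up(task, 1);	/* make the task notice the exit */
	}
	spin_unlock_irq(&task->sighand->siglock);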

Teach wants_signal and retarget_shared_pending to use
JOBCTL_WILL_EXIT to detect threads that have an exit pending and so
will not be processing any more signals.
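
The test both helpers gain follows the pattern sketched here; the loop
is illustrative only, and the real call sites are in the
kernel/signal.c hunks below:

	/* Sketch: skip threads that will never dequeue another signal. */
	t = tsk;
	while_each_thread(tsk, t) {
		if (t->jobctl & JOBCTL_WILL_EXIT)
			continue;
		/* ... otherwise consider t as a delivery target ... */
	}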

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
---
 fs/coredump.c                | 6 ++++--
 include/linux/sched/jobctl.h | 2 ++
 include/linux/sched/signal.h | 2 +-
 kernel/exit.c                | 4 ++--
 kernel/signal.c              | 19 +++++++++----------
 5 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/fs/coredump.c b/fs/coredump.c
index 9559e29daada..4e82ee51633d 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -352,7 +352,6 @@ static int zap_process(struct task_struct *start, int exit_code)
 	struct task_struct *t;
 	int nr = 0;
 
-	/* Allow SIGKILL, see prepare_signal() */
 	start->signal->flags = SIGNAL_GROUP_EXIT;
 	start->signal->group_exit_code = exit_code;
 	start->signal->group_stop_count = 0;
@@ -376,9 +375,11 @@ static int zap_threads(struct task_struct *tsk,
 	if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) {
 		signal->core_state = core_state;
 		nr = zap_process(tsk, exit_code);
+		atomic_set(&core_state->nr_threads, nr);
+		/* Allow SIGKILL, see prepare_signal() */
 		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
 		tsk->flags |= PF_DUMPCORE;
-		atomic_set(&core_state->nr_threads, nr);
+		tsk->jobctl &= ~JOBCTL_WILL_EXIT;
 	}
 	spin_unlock_irq(&tsk->sighand->siglock);
 	return nr;
@@ -425,6 +426,7 @@ static void coredump_finish(bool core_dumped)
 		current->signal->group_exit_code |= 0x80;
 	next = current->signal->core_state->dumper.next;
 	current->signal->core_state = NULL;
+	current->jobctl |= JOBCTL_WILL_EXIT;
 	spin_unlock_irq(&current->sighand->siglock);
 
 	while ((curr = next) != NULL) {
diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
index fa067de9f1a9..9887d737ccfb 100644
--- a/include/linux/sched/jobctl.h
+++ b/include/linux/sched/jobctl.h
@@ -19,6 +19,7 @@ struct task_struct;
 #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
 #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
 #define JOBCTL_TRAP_FREEZE_BIT	23	/* trap for cgroup freezer */
+#define JOBCTL_WILL_EXIT_BIT	31	/* task will exit */
 
 #define JOBCTL_STOP_DEQUEUED	(1UL << JOBCTL_STOP_DEQUEUED_BIT)
 #define JOBCTL_STOP_PENDING	(1UL << JOBCTL_STOP_PENDING_BIT)
@@ -28,6 +29,7 @@ struct task_struct;
 #define JOBCTL_TRAPPING		(1UL << JOBCTL_TRAPPING_BIT)
 #define JOBCTL_LISTENING	(1UL << JOBCTL_LISTENING_BIT)
 #define JOBCTL_TRAP_FREEZE	(1UL << JOBCTL_TRAP_FREEZE_BIT)
+#define JOBCTL_WILL_EXIT	(1UL << JOBCTL_WILL_EXIT_BIT)
 
 #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
 #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index eed54f9ea2fc..989bb665f107 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -373,7 +373,7 @@ static inline int signal_pending(struct task_struct *p)
 
 static inline int __fatal_signal_pending(struct task_struct *p)
 {
-	return unlikely(sigismember(&p->pending.signal, SIGKILL));
+	return unlikely(p->jobctl & JOBCTL_WILL_EXIT);
 }
 
 static inline int fatal_signal_pending(struct task_struct *p)
diff --git a/kernel/exit.c b/kernel/exit.c
index 27bc0ccfea78..7a7a0cbac28e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -906,7 +906,7 @@ do_group_exit(int exit_code)
 
 	if (sig->flags & SIGNAL_GROUP_EXIT)
 		exit_code = sig->group_exit_code;
-	else if (sig->group_exec_task)
+	else if (current->jobctl & JOBCTL_WILL_EXIT)
 		exit_code = 0;
 	else if (!thread_group_empty(current)) {
 		struct sighand_struct *const sighand = current->sighand;
@@ -915,7 +915,7 @@ do_group_exit(int exit_code)
 		if (sig->flags & SIGNAL_GROUP_EXIT)
 			/* Another thread got here before we took the lock. */
 			exit_code = sig->group_exit_code;
-		else if (sig->group_exec_task)
+		else if (current->jobctl & JOBCTL_WILL_EXIT)
 			exit_code = 0;
 		else {
 			struct task_struct *t;
diff --git a/kernel/signal.c b/kernel/signal.c
index b0201e05be40..6179e34ce666 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -153,7 +153,8 @@ static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
 
 static bool recalc_sigpending_tsk(struct task_struct *t)
 {
-	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
+	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE |
+			  JOBCTL_WILL_EXIT)) ||
 	    PENDING(&t->pending, &t->blocked) ||
 	    PENDING(&t->signal->shared_pending, &t->blocked) ||
 	    cgroup_task_frozen(t)) {
@@ -911,7 +912,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
 		if (core_state) {
 			if (sig == SIGKILL) {
 				struct task_struct *dumper = core_state->dumper.task;
-				sigaddset(&dumper->pending.signal, SIGKILL);
+				dumper->jobctl |= JOBCTL_WILL_EXIT;
 				signal_wake_up(dumper, 1);
 			}
 		}
@@ -985,7 +986,7 @@ static inline bool wants_signal(int sig, struct task_struct *p)
 	if (sigismember(&p->blocked, sig))
 		return false;
 
-	if (p->flags & PF_EXITING)
+	if (p->jobctl & JOBCTL_WILL_EXIT)
 		return false;
 
 	if (sig == SIGKILL)
@@ -1363,10 +1364,9 @@ int force_sig_info(struct kernel_siginfo *info)
 
 void schedule_task_exit_locked(struct task_struct *task)
 {
-	task_clear_jobctl_pending(task, JOBCTL_PENDING_MASK);
-	/* Only bother with threads that might be alive */
-	if (!(task->flags & PF_POSTCOREDUMP)) {
-		sigaddset(&task->pending.signal, SIGKILL);
+	if (!(task->jobctl & JOBCTL_WILL_EXIT)) {
+		task_clear_jobctl_pending(task, JOBCTL_PENDING_MASK);
+		task->jobctl |= JOBCTL_WILL_EXIT;
 		signal_wake_up(task, 1);
 	}
 }
@@ -2695,9 +2695,8 @@ bool get_signal(struct ksignal *ksig)
 		int exit_code;
 
 		/* Has this task already been marked for death? */
-		if (__fatal_signal_pending(current)) {
+		if (current->jobctl & JOBCTL_WILL_EXIT) {
 			ksig->info.si_signo = signr = SIGKILL;
-			sigdelset(&current->pending.signal, SIGKILL);
 			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
 					     &sighand->action[SIGKILL - 1]);
 			recalc_sigpending();
@@ -2935,7 +2934,7 @@ static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
 
 	t = tsk;
 	while_each_thread(tsk, t) {
-		if (t->flags & PF_EXITING)
+		if (t->jobctl & JOBCTL_WILL_EXIT)
 			continue;
 
 		if (!has_pending_signals(&retarget, &t->blocked))
--
2.29.2