Subject: [PATCH v2] kthread_worker: re-set CPU affinity when a CPU comes online
From: Zqiang <qiang.zhang@windriver.com>

When a CPU goes offline, a 'kthread_worker' that was bound to that CPU
is allowed to run on any CPU. When the CPU comes back online, restore
the 'kthread_worker' affinity via a cpuhp notifier.

Signed-off-by: Zqiang <qiang.zhang@windriver.com>
---
v1->v2:
rename variable kworker_online to kthread_worker_online.
add 'cpuhp_node' and 'bind_cpu' initialization to KTHREAD_WORKER_INIT.
add a comment explaining the WARN_ON_ONCE.
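
For reviewers, here is a minimal, hypothetical sketch (not part of this
patch) of the kind of user this change affects: a kthread_worker created
with kthread_create_worker_on_cpu() is bound to one CPU, loses that
binding if the CPU goes offline, and with this patch is re-bound by the
cpuhp callback when the CPU comes back online. The module, the names and
the hard-coded CPU 1 below are illustrative only.

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/smp.h>

static struct kthread_worker *demo_worker;
static struct kthread_work demo_work;

static void demo_work_fn(struct kthread_work *work)
{
        /*
         * Normally runs on CPU 1; after an offline/online cycle of CPU 1,
         * the cpuhp callback added by this patch restores that binding.
         */
        pr_info("demo work on CPU %d\n", raw_smp_processor_id());
}

static int __init demo_init(void)
{
        /* Create a worker thread bound to CPU 1 (assumes CPU 1 exists). */
        demo_worker = kthread_create_worker_on_cpu(1, 0, "demo_kworker/1");
        if (IS_ERR(demo_worker))
                return PTR_ERR(demo_worker);

        kthread_init_work(&demo_work, demo_work_fn);
        kthread_queue_work(demo_worker, &demo_work);
        return 0;
}

static void __exit demo_exit(void)
{
        kthread_destroy_worker(demo_worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With such a worker, offlining and re-onlining CPU 1 via
/sys/devices/system/cpu/cpu1/online exercises the new callback path.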

 include/linux/kthread.h |  4 ++++
 kernel/kthread.c        | 36 +++++++++++++++++++++++++++++++++++-
 2 files changed, 39 insertions(+), 1 deletion(-)

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 65b81e0c494d..c28963e87b18 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -93,6 +93,8 @@ struct kthread_worker {
 	struct list_head delayed_work_list;
 	struct task_struct *task;
 	struct kthread_work *current_work;
+	struct hlist_node cpuhp_node;
+	int bind_cpu;
 };
 
 struct kthread_work {
@@ -112,6 +114,8 @@ struct kthread_delayed_work {
 	.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \
 	.work_list = LIST_HEAD_INIT((worker).work_list), \
 	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
+	.cpuhp_node = {.next = NULL, .pprev = NULL}, \
+	.bind_cpu = -1, \
 	}

#define KTHREAD_WORK_INIT(work, fn) { \
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 34516b0a6eb7..6c66df585225 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -28,8 +28,10 @@
 #include <linux/uaccess.h>
 #include <linux/numa.h>
 #include <linux/sched/isolation.h>
+#include <linux/cpu.h>
 #include <trace/events/sched.h>
 
+static enum cpuhp_state kthread_worker_online;

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
@@ -649,6 +651,8 @@ void __kthread_init_worker(struct kthread_worker *worker,
 	lockdep_set_class_and_name(&worker->lock, key, name);
 	INIT_LIST_HEAD(&worker->work_list);
 	INIT_LIST_HEAD(&worker->delayed_work_list);
+	worker->bind_cpu = -1;
+	INIT_HLIST_NODE(&worker->cpuhp_node);
 }
 EXPORT_SYMBOL_GPL(__kthread_init_worker);

@@ -744,8 +748,11 @@ __kthread_create_worker(int cpu, unsigned int flags,
 	if (IS_ERR(task))
 		goto fail_task;
 
-	if (cpu >= 0)
+	if (cpu >= 0) {
+		cpuhp_state_add_instance_nocalls(kthread_worker_online, &worker->cpuhp_node);
 		kthread_bind(task, cpu);
+		worker->bind_cpu = cpu;
+	}
 
 	worker->flags = flags;
 	worker->task = task;
@@ -1230,6 +1237,9 @@ void kthread_destroy_worker(struct kthread_worker *worker)
 	if (WARN_ON(!task))
 		return;
 
+	if (worker->bind_cpu >= 0)
+		cpuhp_state_remove_instance_nocalls(kthread_worker_online, &worker->cpuhp_node);
+
 	kthread_flush_worker(worker);
 	kthread_stop(task);
 	WARN_ON(!list_empty(&worker->work_list));
@@ -1237,6 +1247,30 @@ void kthread_destroy_worker(struct kthread_worker *worker)
 }
 EXPORT_SYMBOL(kthread_destroy_worker);
 
+static int kthread_worker_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+	struct kthread_worker *worker = hlist_entry(node, struct kthread_worker, cpuhp_node);
+	struct task_struct *task = worker->task;
+
+	/* as we're called from CPU_ONLINE, the following shouldn't fail */
+	if (cpu == worker->bind_cpu)
+		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpumask_of(cpu)) < 0);
+	return 0;
+}
+
+static __init int kthread_worker_hotplug_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "kthread-worker/online",
+				      kthread_worker_cpu_online, NULL);
+	if (ret < 0)
+		return ret;
+	kthread_worker_online = ret;
+	return 0;
+}
+core_initcall(kthread_worker_hotplug_init);
+
 /**
  * kthread_use_mm - make the calling kthread operate on an address space
  * @mm: address space to operate on
--
2.17.1