From: Lai Jiangshan <>
Subject: [PATCH 09/10] workqueue: reorganize workqueue_offline_cpu() unbind_workers()
Date: Mon, 14 Dec 2020 23:54:56 +0800
From: Lai Jiangshan <laijs@linux.alibaba.com>
Just move the code around; no functionality is changed. Only the wq_pool_attach_mutex protected region becomes a little larger.

It prepares for a later patch that protects wq_online_cpumask under wq_pool_attach_mutex.
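To illustrate the shape of the change (a simplified sketch reconstructed from the diff below, not the patch itself): the per-pool loop and the attach-mutex locking move out of unbind_workers() into its caller, so unbind_workers() now handles a single pool with the mutex already held:

	/* Before: unbind_workers() looped over the pools and locked internally. */
	static void unbind_workers(int cpu)
	{
		struct worker_pool *pool;

		for_each_cpu_worker_pool(pool, cpu) {
			mutex_lock(&wq_pool_attach_mutex);
			/* ... unbind this pool's workers ... */
			mutex_unlock(&wq_pool_attach_mutex);
		}
	}

	/* After: the caller in workqueue_offline_cpu() locks per pool. */
	for_each_cpu_worker_pool(pool, cpu) {
		mutex_lock(&wq_pool_attach_mutex);
		unbind_workers(pool);	/* asserts wq_pool_attach_mutex is held */
		mutex_unlock(&wq_pool_attach_mutex);
	}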
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
---
 kernel/workqueue.c | 92 +++++++++++++++++++++++-----------------------
 1 file changed, 46 insertions(+), 46 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index fa29b7a083a6..5ef41c567c2b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4901,62 +4901,58 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
  * cpu comes back online.
  */
-static void unbind_workers(int cpu)
+static void unbind_workers(struct worker_pool *pool)
 {
-	struct worker_pool *pool;
 	struct worker *worker;
 
-	for_each_cpu_worker_pool(pool, cpu) {
-		mutex_lock(&wq_pool_attach_mutex);
-		raw_spin_lock_irq(&pool->lock);
+	lockdep_assert_held(&wq_pool_attach_mutex);
 
-		/*
-		 * We've blocked all attach/detach operations. Make all workers
-		 * unbound and set DISASSOCIATED. Before this, all workers
-		 * except for the ones which are still executing works from
-		 * before the last CPU down must be on the cpu. After
-		 * this, they may become diasporas.
-		 */
-		for_each_pool_worker(worker, pool)
-			worker->flags |= WORKER_UNBOUND;
+	raw_spin_lock_irq(&pool->lock);
 
-		pool->flags |= POOL_DISASSOCIATED;
+	/*
+	 * We've blocked all attach/detach operations. Make all workers
+	 * unbound and set DISASSOCIATED. Before this, all workers
+	 * except for the ones which are still executing works from
+	 * before the last CPU down must be on the cpu. After
+	 * this, they may become diasporas.
+	 */
+	for_each_pool_worker(worker, pool)
+		worker->flags |= WORKER_UNBOUND;
 
-		raw_spin_unlock_irq(&pool->lock);
+	pool->flags |= POOL_DISASSOCIATED;
 
-		/* don't rely on the scheduler to force break affinity for us. */
-		for_each_pool_worker(worker, pool)
-			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+	raw_spin_unlock_irq(&pool->lock);
 
-		mutex_unlock(&wq_pool_attach_mutex);
+	/* don't rely on the scheduler to force break affinity for us. */
+	for_each_pool_worker(worker, pool)
+		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
 
-		/*
-		 * Call schedule() so that we cross rq->lock and thus can
-		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
-		 * This is necessary as scheduler callbacks may be invoked
-		 * from other cpus.
-		 */
-		schedule();
+	/*
+	 * Call schedule() so that we cross rq->lock and thus can
+	 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
+	 * This is necessary as scheduler callbacks may be invoked
+	 * from other cpus.
+	 */
+	schedule();
 
-		/*
-		 * Sched callbacks are disabled now. Zap nr_running.
-		 * After this, nr_running stays zero and need_more_worker()
-		 * and keep_working() are always true as long as the
-		 * worklist is not empty. This pool now behaves as an
-		 * unbound (in terms of concurrency management) pool which
-		 * are served by workers tied to the pool.
-		 */
-		atomic_set(&pool->nr_running, 0);
+	/*
+	 * Sched callbacks are disabled now. Zap nr_running.
+	 * After this, nr_running stays zero and need_more_worker()
+	 * and keep_working() are always true as long as the
+	 * worklist is not empty. This pool now behaves as an
+	 * unbound (in terms of concurrency management) pool which
+	 * are served by workers tied to the pool.
+	 */
+	atomic_set(&pool->nr_running, 0);
 
-		/*
-		 * With concurrency management just turned off, a busy
-		 * worker blocking could lead to lengthy stalls. Kick off
-		 * unbound chain execution of currently pending work items.
-		 */
-		raw_spin_lock_irq(&pool->lock);
-		wake_up_worker(pool);
-		raw_spin_unlock_irq(&pool->lock);
-	}
+	/*
+	 * With concurrency management just turned off, a busy
+	 * worker blocking could lead to lengthy stalls. Kick off
+	 * unbound chain execution of currently pending work items.
+	 */
+	raw_spin_lock_irq(&pool->lock);
+	wake_up_worker(pool);
+	raw_spin_unlock_irq(&pool->lock);
 }
 
 /**
@@ -5119,7 +5115,11 @@ int workqueue_offline_cpu(unsigned int cpu)
 	if (WARN_ON(cpu != smp_processor_id()))
 		return -1;
 
-	unbind_workers(cpu);
+	for_each_cpu_worker_pool(pool, cpu) {
+		mutex_lock(&wq_pool_attach_mutex);
+		unbind_workers(pool);
+		mutex_unlock(&wq_pool_attach_mutex);
+	}
 
 	mutex_lock(&wq_pool_mutex);
 	cpumask_clear_cpu(cpu, wq_online_cpumask);
-- 
2.19.1.6.gb485710b