Date: Thu, 23 Jun 2022 11:11:02 +0200
Subject: Re: [PATCH v11 4/7] sched/fair: Rename select_idle_mask to select_rq_mask
From: Dietmar Eggemann <>
On 21/06/2022 11:04, Vincent Donnefort wrote:
> From: Dietmar Eggemann <dietmar.eggemann@arm.com>
The kernel test robot report in https://lkml.kernel.org/r/202206221253.ZVyGQvPX-lkp@intel.com shows that this patch no longer builds (on tip sched/core or linux-next): commit f5b2eeb499910 ("sched/fair: Consider CPU affinity when allowing NUMA imbalance in find_idlest_group()") added another user of select_idle_mask in find_idlest_group(), which the rename now has to cover as well.
New version of [PATCH v11 4/7] sched/fair: Rename select_idle_mask to select_rq_mask below.
-- >8 --
Decouple the name of the per-cpu cpumask select_idle_mask from its usage in select_idle_[cpu/capacity]() of the CFS run-queue selection (select_task_rq_fair()).
This is to support the reuse of this cpumask in the Energy Aware Scheduling (EAS) path (find_energy_efficient_cpu()) of the CFS run-queue selection.
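For illustration only (not part of this patch): a minimal sketch of how the EAS path could pick up the renamed mask, assuming the follow-up patch in this series keeps the usual per-cpu scratch-mask pattern. The feec_example() helper name is made up:

	static int feec_example(struct task_struct *p, struct perf_domain *pd)
	{
		/* Reuse the shared per-cpu scratch mask; no extra cpumask needed. */
		struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);

		/* Narrow the performance domain to CPUs that are online. */
		cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);

		/* First candidate that is also in the task's affinity mask. */
		return cpumask_first_and(cpus, p->cpus_ptr);
	}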
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Tested-by: Lukasz Luba <lukasz.luba@arm.com>
---
 kernel/sched/core.c | 4 ++--
 kernel/sched/fair.c | 10 +++++-----
 2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 68bd9cd7aeb3..618cdb38ba11 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9508,7 +9508,7 @@ static struct kmem_cache *task_group_cache __read_mostly;
 #endif
 
 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
-DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
+DECLARE_PER_CPU(cpumask_var_t, select_rq_mask);
 
 void __init sched_init(void)
 {
@@ -9557,7 +9557,7 @@ void __init sched_init(void)
 	for_each_possible_cpu(i) {
 		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
 			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
-		per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
+		per_cpu(select_rq_mask, i) = (cpumask_var_t)kzalloc_node(
 			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
 	}
 #endif /* CONFIG_CPUMASK_OFFSTACK */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f81bad489a7e..e0af847320b0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5894,7 +5894,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 /* Working cpumask for: load_balance, load_balance_newidle. */
 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
-DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
+DEFINE_PER_CPU(cpumask_var_t, select_rq_mask);
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -6384,7 +6384,7 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
  */
 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
 {
-	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
 	int i, cpu, idle_cpu = -1, nr = INT_MAX;
 	struct rq *this_rq = this_rq();
 	int this = smp_processor_id();
@@ -6470,7 +6470,7 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 	int cpu, best_cpu = -1;
 	struct cpumask *cpus;
 
-	cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+	cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
 	task_util = uclamp_task_util(p);
@@ -6520,7 +6520,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	}
 
 	/*
-	 * per-cpu select_idle_mask usage
+	 * per-cpu select_rq_mask usage
 	 */
 	lockdep_assert_irqs_disabled();
 
@@ -9243,7 +9243,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 			 * take care of it.
 			 */
 			if (p->nr_cpus_allowed != NR_CPUS) {
-				struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+				struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
 
 				cpumask_and(cpus, sched_group_span(local), p->cpus_ptr);
 				imb_numa_nr = min(cpumask_weight(cpus), sd->imb_numa_nr);
-- 
2.25.1
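Side note on the design: a per-cpu scratch mask like select_rq_mask needs no lock, but its user must run with interrupts disabled so the mask cannot be clobbered by a wakeup performed from IRQ context on the same CPU; that is what the lockdep_assert_irqs_disabled() in the select_idle_sibling() hunk above asserts. A minimal sketch of the pattern, with a made-up example_pick_cpu() helper:

	static int example_pick_cpu(struct task_struct *p, struct sched_domain *sd)
	{
		struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);

		/* The scratch mask is only safe to use while IRQs stay off. */
		lockdep_assert_irqs_disabled();

		/* Same scratch usage as in select_idle_capacity() above. */
		cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);

		/* Returns nr_cpu_ids if the intersection is empty. */
		return cpumask_first(cpus);
	}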