Date: Fri, 16 Feb 2024 22:55:47 +0100
Subject: [PATCH 2/2] sched: add for_each_sched_group() and use everywhere
From: Michał Mirosław <>
Make code iterating over all sched_groups more straightforward.
Note: there's no point in the WARN_ON() in init_sched_groups_capacity() if we're going to unconditionally dereference the (possibly NULL) pointer just a few instructions later anyway.
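For illustration only (a sketch of the conversion applied below, not new behaviour), an open-coded walk over a domain's circular group list:

	sg = sd->groups;
	do {
		/* per-group work */
		sg = sg->next;
	} while (sg != sd->groups);

becomes:

	for_each_sched_group(sd, sg) {
		/* per-group work */
	}

The iterator starts at sd->groups, follows the circular ->next chain, and BUG()s if a ->next pointer is unexpectedly NULL.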
Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
---
 kernel/sched/fair.c     | 13 +++++--------
 kernel/sched/sched.h    |  5 +++++
 kernel/sched/topology.c | 19 ++++++-------------
 3 files changed, 16 insertions(+), 21 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7ac9f4b1d955..a8a011f24a6d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9566,15 +9566,13 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 		 * span the current group.
 		 */
 
-		group = child->groups;
-		do {
+		for_each_sched_group(child, group) {
 			struct sched_group_capacity *sgc = group->sgc;
 
 			capacity += sgc->capacity;
 			min_capacity = min(sgc->min_capacity, min_capacity);
 			max_capacity = max(sgc->max_capacity, max_capacity);
-			group = group->next;
-		} while (group != child->groups);
+		}
 	}
 
 	sdg->sgc->capacity = capacity;
@@ -10549,13 +10547,13 @@ static void update_idle_cpu_scan(struct lb_env *env,
 
 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 {
-	struct sched_group *sg = env->sd->groups;
 	struct sg_lb_stats *local = &sds->local_stat;
 	struct sg_lb_stats tmp_sgs;
 	unsigned long sum_util = 0;
+	struct sched_group *sg;
 	int sg_status = 0;
 
-	do {
+	for_each_sched_group(env->sd, sg) {
 		struct sg_lb_stats *sgs = &tmp_sgs;
 		int local_group;
 
@@ -10586,8 +10584,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 		sds->total_capacity += sgs->group_capacity;
 
 		sum_util += sgs->group_util;
-		sg = sg->next;
-	} while (sg != env->sd->groups);
+	}
 
 	/*
 	 * Indicate that the child domain of the busiest group prefers tasks
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 2e5a95486a42..88f3cba60b1e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1902,6 +1902,11 @@ struct sched_group {
 	unsigned long		cpumask[];
 };
 
+#define for_each_sched_group(sd, sg) \
+	for ((sg) = NULL; \
+	     likely((sg)) ? (sg) != (sd)->groups : ((((sg) = (sd)->groups)), 1); \
+	     (void)((((sg) = (sg)->next)) || ({ BUG(); 0; })))
+
 static inline struct cpumask *sched_group_span(struct sched_group *sg)
 {
 	return to_cpumask(sg->cpumask);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 75b1a18783c2..dce29cf28f32 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -758,17 +758,15 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 			sd = sd->parent;
 			destroy_sched_domain(tmp);
 			if (sd) {
-				struct sched_group *sg = sd->groups;
+				struct sched_group *sg;
 
 				/*
 				 * sched groups hold the flags of the child sched
 				 * domain for convenience. Clear such flags since
 				 * the child is being destroyed.
 				 */
-				do {
+				for_each_sched_group(sd, sg)
 					sg->flags = 0;
-					sg = sg->next;
-				} while (sg != sd->groups);
 
 				sd->child = NULL;
 			}
@@ -1290,12 +1288,10 @@ build_sched_groups(struct sched_domain *sd, int cpu)
  */
 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 {
-	struct sched_group *sg = sd->groups;
 	struct cpumask *mask = sched_domains_tmpmask2;
+	struct sched_group *sg;
 
-	WARN_ON(!sg);
-
-	do {
+	for_each_sched_group(sd, sg) {
 		int cpu, cores = 0, max_cpu = -1;
 
 		sg->group_weight = cpumask_weight(sched_group_span(sg));
@@ -1310,7 +1306,7 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 		sg->cores = cores;
 
 		if (!(sd->flags & SD_ASYM_PACKING))
-			goto next;
+			continue;
 
 		for_each_cpu(cpu, sched_group_span(sg)) {
 			if (max_cpu < 0)
@@ -1319,10 +1315,7 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 				max_cpu = cpu;
 		}
 		sg->asym_prefer_cpu = max_cpu;
-
-next:
-		sg = sg->next;
-	} while (sg != sd->groups);
+	}
 
 	if (cpu != group_balance_cpu(sg))
 		return;
-- 
2.39.2