Date:	Wed, 18 May 2022 11:31:56 +0200
From:	Peter Zijlstra <>
Subject: Re: [PATCH 3/4] sched/numa: Apply imbalance limitations consistently
On Wed, May 11, 2022 at 03:30:37PM +0100, Mel Gorman wrote:
> @@ -9108,6 +9108,24 @@ static inline bool allow_numa_imbalance(int running, int imb_numa_nr)
>  	return running <= imb_numa_nr;
>  }
>  
> +#define NUMA_IMBALANCE_MIN 2
> +
> +static inline long adjust_numa_imbalance(int imbalance,
> +				int dst_running, int imb_numa_nr)
> +{
> +	if (!allow_numa_imbalance(dst_running, imb_numa_nr))
> +		return imbalance;
> +
> +	/*
> +	 * Allow a small imbalance based on a simple pair of communicating
> +	 * tasks that remain local when the destination is lightly loaded.
> +	 */
> +	if (imbalance <= NUMA_IMBALANCE_MIN)
> +		return 0;
> +
> +	return imbalance;
> +}
> @@ -9334,24 +9356,6 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
>  	}
>  }
>  
> -#define NUMA_IMBALANCE_MIN 2
> -
> -static inline long adjust_numa_imbalance(int imbalance,
> -				int dst_running, int imb_numa_nr)
> -{
> -	if (!allow_numa_imbalance(dst_running, imb_numa_nr))
> -		return imbalance;
> -
> -	/*
> -	 * Allow a small imbalance based on a simple pair of communicating
> -	 * tasks that remain local when the destination is lightly loaded.
> -	 */
> -	if (imbalance <= NUMA_IMBALANCE_MIN)
> -		return 0;
> -
> -	return imbalance;
> -}
If we're going to move that one up and remove the only other caller of allow_numa_imbalance() we might as well move it up further still and fold the functions.
Hmm?
(Although I do wonder about that 25% figure in the comment; that doesn't seem to relate to any actual code anymore)
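For reference, and going from memory so treat this as a sketch rather than
the actual history: the 25% wording dates back to the old form of the
helper, before imb_numa_nr existed, when the check was against a quarter
of the domain weight, something like:

	/* old, pre-imb_numa_nr form -- for illustration only */
	static inline bool allow_numa_imbalance(int dst_running, int dst_weight)
	{
		/* allow an imbalance while fewer than 25% of the CPUs are busy */
		return (dst_running < (dst_weight >> 2));
	}

Once that became "running <= imb_numa_nr" the comment stopped matching the
code it sits above.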
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1536,8 +1536,29 @@ struct task_numa_env {
 static unsigned long cpu_load(struct rq *rq);
 static unsigned long cpu_runnable(struct rq *rq);
-static inline long adjust_numa_imbalance(int imbalance,
-					int dst_running, int imb_numa_nr);
+
+#define NUMA_IMBALANCE_MIN 2
+
+static inline long
+adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
+{
+	/*
+	 * Allow a NUMA imbalance if busy CPUs is less than 25% of the domain.
+	 * This is an approximation as the number of running tasks may not be
+	 * related to the number of busy CPUs due to sched_setaffinity.
+	 */
+	if (dst_running > imb_numa_nr)
+		return imbalance;
+
+	/*
+	 * Allow a small imbalance based on a simple pair of communicating
+	 * tasks that remain local when the destination is lightly loaded.
+	 */
+	if (imbalance <= NUMA_IMBALANCE_MIN)
+		return 0;
+
+	return imbalance;
+}
 
 static inline enum numa_type numa_classify(unsigned int imbalance_pct,
@@ -9099,16 +9120,6 @@ static bool update_pick_idlest(struct sc
 }
 
 /*
- * Allow a NUMA imbalance if busy CPUs is less than 25% of the domain.
- * This is an approximation as the number of running tasks may not be
- * related to the number of busy CPUs due to sched_setaffinity.
- */
-static inline bool allow_numa_imbalance(int running, int imb_numa_nr)
-{
-	return running <= imb_numa_nr;
-}
-
-/*
  * find_idlest_group() finds and returns the least busy CPU group within the
  * domain.
  *
@@ -9245,8 +9256,12 @@ find_idlest_group(struct sched_domain *s
 			 * allowed. If there is a real need of migration,
 			 * periodic load balance will take care of it.
 			 */
-			if (allow_numa_imbalance(local_sgs.sum_nr_running + 1, sd->imb_numa_nr))
+			imbalance = abs(local_sgs.idle_cpus - idlest_sgs.idle_cpus);
+			if (!adjust_numa_imbalance(imbalance,
+						   local_sgs.sum_nr_running + 1,
+						   sd->imb_numa_nr)) {
 				return NULL;
+			}
 		}
 
 		/*
@@ -9334,24 +9349,6 @@ static inline void update_sd_lb_stats(st
 	}
 }
 
-#define NUMA_IMBALANCE_MIN 2
-
-static inline long adjust_numa_imbalance(int imbalance,
-				int dst_running, int imb_numa_nr)
-{
-	if (!allow_numa_imbalance(dst_running, imb_numa_nr))
-		return imbalance;
-
-	/*
-	 * Allow a small imbalance based on a simple pair of communicating
-	 * tasks that remain local when the destination is lightly loaded.
-	 */
-	if (imbalance <= NUMA_IMBALANCE_MIN)
-		return 0;
-
-	return imbalance;
-}
-
 /**
  * calculate_imbalance - Calculate the amount of imbalance present within the
  * groups of a given sched_domain during load balance.
@@ -9436,7 +9433,7 @@ static inline void calculate_imbalance(s
 			 */
 			env->migration_type = migrate_task;
 			lsub_positive(&nr_diff, local->sum_nr_running);
-			env->imbalance = nr_diff >> 1;
+			env->imbalance = nr_diff;
 		} else {
 
 			/*
@@ -9444,16 +9441,20 @@ static inline void calculate_imbalance(s
 			 * idle cpus.
 			 */
 			env->migration_type = migrate_task;
-			env->imbalance = max_t(long, 0, (local->idle_cpus -
-						 busiest->idle_cpus) >> 1);
+			env->imbalance = max_t(long, 0,
+					(local->idle_cpus - busiest->idle_cpus));
 		}
 
 		/* Consider allowing a small imbalance between NUMA groups */
 		if (env->sd->flags & SD_NUMA) {
 			env->imbalance = adjust_numa_imbalance(env->imbalance,
-				local->sum_nr_running + 1, env->sd->imb_numa_nr);
+						local->sum_nr_running + 1,
+						env->sd->imb_numa_nr);
 		}
 
+		/* Number of tasks to move to restore balance */
+		env->imbalance >>= 1;
+
 		return;
 	}