Subject: Re: [PATCH v4] sched/fair: combine detach into dequeue when migrating task
On Mon, 20 Jun 2022 at 15:36, Chengming Zhou
<zhouchengming@bytedance.com> wrote:
>
> When we are migrating task out of the CPU, we can combine detach and
> propagation into dequeue_entity() to save the detach_entity_cfs_rq()
> -> propagate_entity_cfs_rq() call in detach_entity_cfs_rq() in
> migrate_task_rq_fair().
>
> This optimization is like combining DO_ATTACH in the enqueue_entity()
> when migrating task to the CPU.
>
> So we don't have to traverse the CFS tree extra time to do the
> detach_entity_cfs_rq() -> propagate_entity_cfs_rq() call, which
> wouldn't be called anymore with this patch's change.
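
For reference, the existing DO_ATTACH handling in update_load_avg() that
this change mirrors looks roughly like the below (slightly simplified
from the current fair.c):

	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
		/*
		 * DO_ATTACH means we're here from enqueue_entity();
		 * !last_update_time means we've passed through
		 * migrate_task_rq_fair(), i.e. the task migrated in.
		 */
		attach_entity_load_avg(cfs_rq, se);
		update_tg_load_avg(cfs_rq);
	}

enqueue_entity() passes UPDATE_TG | DO_ATTACH, so the attach is folded
into the load update that enqueue already does; the patch does the
symmetric thing on the dequeue side with DO_DETACH.
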
>
> Copied from Dietmar's much clearer comment:

Although it's nice to keep the author of a comment, it should not be
part of the commit message.

> detach_task()
>   deactivate_task()
>     dequeue_task_fair()
>       for_each_sched_entity(se)
>         dequeue_entity()
>           update_load_avg() /* (1) */
              detach_entity_load_avg()
>
>   set_task_cpu()
>     migrate_task_rq_fair()
>       detach_entity_cfs_rq() /* (2) */
          update_load_avg();
          detach_entity_load_avg();
          propagate_entity_cfs_rq();
            for_each_sched_entity()
              update_load_avg()

Could you add the full call stack so we can more easily see the extra
loop that is saved?
>
> This patch saves the propagate_entity_cfs_rq(&p->se) call from (2)
> by doing the detach_entity_load_avg() and update_tg_load_avg() for
> a migrating task inside (1) (the task being the first se in the loop).
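
With the change below, the migrate-out path should collapse to roughly:

detach_task()
  deactivate_task()
    dequeue_task_fair()
      for_each_sched_entity(se)
        dequeue_entity()
          update_load_avg() /* DO_DETACH */
            detach_entity_load_avg()
            update_tg_load_avg()

  set_task_cpu()
    migrate_task_rq_fair() /* no detach_entity_cfs_rq() -> propagate_entity_cfs_rq() walk anymore */
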
>
> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>

Apart from the small nit above:

Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>

> ---
> v4:
> - change the commit message a little.
> - remove the forward declaration of detach_entity_cfs_rq()
> - remove verbose comments in code.
>
> v3:
> - change to use task_on_rq_migrating() and put Dietmar's much clearer
> description in the commit message. Thanks!
>
> v2:
> - fix !CONFIG_SMP build error
> ---
> kernel/sched/fair.c | 30 +++++++++++++++++-------------
> 1 file changed, 17 insertions(+), 13 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 8bed75757e65..31d53c11e244 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -3931,6 +3931,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
> #define UPDATE_TG 0x1
> #define SKIP_AGE_LOAD 0x2
> #define DO_ATTACH 0x4
> +#define DO_DETACH 0x8
>
> /* Update task and its cfs_rq load average */
> static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> @@ -3948,7 +3949,14 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
> decayed = update_cfs_rq_load_avg(now, cfs_rq);
> decayed |= propagate_entity_load_avg(se);
>
> - if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
> + if (flags & DO_DETACH) {
> + /*
> + * DO_DETACH means we're here from dequeue_entity()
> + * and we are migrating task out of the CPU.
> + */
> + detach_entity_load_avg(cfs_rq, se);
> + update_tg_load_avg(cfs_rq);
> + } else if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
>
> /*
> * DO_ATTACH means we're here from enqueue_entity().
> @@ -4241,6 +4249,7 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
> #define UPDATE_TG 0x0
> #define SKIP_AGE_LOAD 0x0
> #define DO_ATTACH 0x0
> +#define DO_DETACH 0x0
>
> static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
> {
> @@ -4460,6 +4469,11 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
> static void
> dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> {
> + int action = UPDATE_TG;
> +
> + if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
> + action |= DO_DETACH;
> +
> /*
> * Update run-time statistics of the 'current'.
> */
> @@ -4473,7 +4487,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> * - For group entity, update its weight to reflect the new share
> * of its group cfs_rq.
> */
> - update_load_avg(cfs_rq, se, UPDATE_TG);
> + update_load_avg(cfs_rq, se, action);
> se_update_runnable(se);
>
> update_stats_dequeue_fair(cfs_rq, se, flags);
> @@ -6938,8 +6952,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
> return new_cpu;
> }
>
> -static void detach_entity_cfs_rq(struct sched_entity *se);
> -
> /*
> * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
> * cfs_rq_of(p) references at time of call are still valid and identify the
> @@ -6973,15 +6985,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
> se->vruntime -= min_vruntime;
> }
>
> - if (p->on_rq == TASK_ON_RQ_MIGRATING) {
> - /*
> - * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
> - * rq->lock and can modify state directly.
> - */
> - lockdep_assert_rq_held(task_rq(p));
> - detach_entity_cfs_rq(&p->se);
> -
> - } else {
> + if (!task_on_rq_migrating(p)) {
> /*
> * We are supposed to update the task to "current" time, then
> * its up to date and ready to go to new CPU/cfs_rq. But we
> --
> 2.36.1
>
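
For completeness, task_on_rq_migrating() used in the hunks above is the
existing helper from kernel/sched/sched.h; it just wraps the
p->on_rq == TASK_ON_RQ_MIGRATING test that the removed branch open-coded,
roughly:

static inline int task_on_rq_migrating(struct task_struct *p)
{
	/* true while the task is being moved between runqueues */
	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
}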
