Subject: Re: [PATCH v2 1/1] sched/fair: Track current se's EEVDF parameters
On 11/2/23 10:20 PM, Yiwei Lin wrote:
> After dequeuing the currently-picked scheduling entity with
> `__dequeue_entity`, its contributions to the EEVDF parameters
> cfs_rq->avg_vruntime and cfs_rq->avg_load are removed as well.
> Because these contributions should in fact still be considered by
> the EEVDF algorithm, we took curr as a special case and added its
> contributions back whenever cfs_rq->avg_vruntime or
> cfs_rq->avg_load was requested.
>
> Functions like `entity_eligible`, which may be called inside a loop,
> therefore recalculate these statistics repeatedly and require more
> effort.
> Instead, we could simply avoid removing curr's contributions
> from cfs_rq->avg_vruntime and cfs_rq->avg_load in the first place.
>
> Signed-off-by: Yiwei Lin <s921975628@gmail.com>
> ---
> kernel/sched/fair.c | 39 +++++++++++----------------------------
> 1 file changed, 11 insertions(+), 28 deletions(-)
>
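For readers following along, my shorthand for the quantities involved
(cf. the comment block above the avg_vruntime() helpers in fair.c):

	avg_vruntime = \Sum w_i * (v_i - v0)		/* cfs_rq->avg_vruntime */
	avg_load     = \Sum w_i				/* cfs_rq->avg_load     */
	V            = v0 + avg_vruntime / avg_load	/* zero-lag point       */

with v0 = cfs_rq->min_vruntime and the sums taken over the queued
entities; the patch is about whether curr stays inside those sums.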
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 876798824..a10a73603 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -655,17 +655,9 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
> */
> u64 avg_vruntime(struct cfs_rq *cfs_rq)
> {
> - struct sched_entity *curr = cfs_rq->curr;
> s64 avg = cfs_rq->avg_vruntime;
> long load = cfs_rq->avg_load;
>
> - if (curr && curr->on_rq) {
> - unsigned long weight = scale_load_down(curr->load.weight);
> -
> - avg += entity_key(cfs_rq, curr) * weight;
> - load += weight;
> - }
> -
> if (load) {
> /* sign flips effective floor / ceil */
> if (avg < 0)
> @@ -722,17 +714,9 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
> */
> int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
> {
> - struct sched_entity *curr = cfs_rq->curr;
> s64 avg = cfs_rq->avg_vruntime;
> long load = cfs_rq->avg_load;
>
> - if (curr && curr->on_rq) {
> - unsigned long weight = scale_load_down(curr->load.weight);
> -
> - avg += entity_key(cfs_rq, curr) * weight;
> - load += weight;
> - }
> -
> return avg >= entity_key(cfs_rq, se) * load;
> }
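(With the shorthand above: eligibility is v_i <= V; multiplying both
sides by \Sum w_j avoids the division, giving

	(v_i - v0) * \Sum w_j  <=  \Sum w_j * (v_j - v0)

which is exactly the `avg >= entity_key(cfs_rq, se) * load` test here.)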
>
> @@ -821,11 +805,12 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
> __entity_less, &min_deadline_cb);
> }
>
> -static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
> +static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, bool on_rq)

Please don't add such complexity. Since avg_vruntime includes the
whole cfs_rq after this change, you can simply call avg_vruntime_add()
in enqueue_entity() and avg_vruntime_sub() in dequeue_entity().
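Something like this, for illustration (untested sketch; min_deadline
handling kept as in current fair.c):

	/* tree helpers no longer touch the averages at all */
	static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
	{
		se->min_deadline = se->deadline;
		rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
					__entity_less, &min_deadline_cb);
	}

	static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
	{
		rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
					  &min_deadline_cb);
	}

Then enqueue_entity() does avg_vruntime_add(cfs_rq, se) next to its
__enqueue_entity() call, and dequeue_entity() does avg_vruntime_sub()
unconditionally (curr included), so set_next_entity() and
put_prev_entity() need no extra flag.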

> {
> rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
> &min_deadline_cb);
> - avg_vruntime_sub(cfs_rq, se);
> + if (!on_rq)
> + avg_vruntime_sub(cfs_rq, se);
> }
>
> struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
> @@ -1137,6 +1122,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
> struct sched_entity *curr = cfs_rq->curr;
> u64 now = rq_clock_task(rq_of(cfs_rq));
> u64 delta_exec;
> + u64 delta_fair;
>
> if (unlikely(!curr))
> return;
> @@ -1158,7 +1144,9 @@ static void update_curr(struct cfs_rq *cfs_rq)
> curr->sum_exec_runtime += delta_exec;
> schedstat_add(cfs_rq->exec_clock, delta_exec);
>
> - curr->vruntime += calc_delta_fair(delta_exec, curr);
> + delta_fair = calc_delta_fair(delta_exec, curr);
> + curr->vruntime += delta_fair;
> + cfs_rq->avg_vruntime += delta_fair * scale_load_down(curr->load.weight);

What if curr->load.weight changes in this time slice?
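(To spell out the concern: the increment only preserves

	cfs_rq->avg_vruntime == \Sum scale_load_down(w_i) * (v_i - v0)

if every change to curr->load.weight is preceded by an update_curr()
that commits the runtime accrued under the old weight.
reweight_entity() does that today, but this line turns it into an
easy-to-break invariant.)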

> update_deadline(cfs_rq, curr);
> update_min_vruntime(cfs_rq);
>
> @@ -3675,8 +3663,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
> /* commit outstanding execution time */
> if (cfs_rq->curr == se)
> update_curr(cfs_rq);
> - else
> - avg_vruntime_sub(cfs_rq, se);
> + avg_vruntime_sub(cfs_rq, se);
> update_load_sub(&cfs_rq->load, se->load.weight);
> }
> dequeue_load_avg(cfs_rq, se);
> @@ -3712,8 +3699,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
> enqueue_load_avg(cfs_rq, se);
> if (se->on_rq) {
> update_load_add(&cfs_rq->load, se->load.weight);
> - if (cfs_rq->curr != se)
> - avg_vruntime_add(cfs_rq, se);
> + avg_vruntime_add(cfs_rq, se);
> }
> }
>
> @@ -5023,7 +5009,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> * EEVDF: placement strategy #1 / #2
> */
> if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
> - struct sched_entity *curr = cfs_rq->curr;
> unsigned long load;
>
> lag = se->vlag;
> @@ -5081,8 +5066,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> * vl_i = (W + w_i)*vl'_i / W
> */
> load = cfs_rq->avg_load;
> - if (curr && curr->on_rq)
> - load += scale_load_down(curr->load.weight);
>
> lag *= load + scale_load_down(se->load.weight);
> if (WARN_ON_ONCE(!load))
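(This part looks consistent to me, since W in the comment above is
cfs_rq->avg_load, which after this patch already includes curr:

	vl_i = (W + w_i) * vl'_i / W,	W = cfs_rq->avg_load

assuming I read the intent correctly.)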
> @@ -5229,7 +5212,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
>
> update_entity_lag(cfs_rq, se);
> if (se != cfs_rq->curr)
> - __dequeue_entity(cfs_rq, se);
> + __dequeue_entity(cfs_rq, se, 0);
> se->on_rq = 0;
> account_entity_dequeue(cfs_rq, se);
>
> @@ -5264,7 +5247,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
> * runqueue.
> */
> update_stats_wait_end_fair(cfs_rq, se);
> - __dequeue_entity(cfs_rq, se);
> + __dequeue_entity(cfs_rq, se, 1);
> update_load_avg(cfs_rq, se, UPDATE_TG);
> /*
> * HACK, stash a copy of deadline at the point of pick in vlag,
