Date:	Thu, 14 May 2020 15:02:48 +0200
From:	Peter Zijlstra <>
Subject:	Re: [PATCH updated v2] sched/fair: core wide cfs task priority comparison
On Fri, May 08, 2020 at 08:34:57PM +0800, Aaron Lu wrote:
> With this said, I realized a workaround for the issue described above:
> when the core went from 'compatible mode'(step 1-3) to 'incompatible
> mode'(step 4), reset all root level sched entities' vruntime to be the
> same as the core wide min_vruntime. After all, the core is transforming
> from two runqueue mode to single runqueue mode... I think this can solve
> the issue to some extent but I may miss other scenarios.
A little something like so; this syncs min_vruntime when we switch to single
queue mode. This is very much SMT2 only, I got my head in a twist when
thinking about more siblings, I'll have to try again later.
This very much retains the horrible approximation of S we always do.
Also, it is _completely_ untested...
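(Editor's note, for anyone skimming the diff: the fair-class comparison below
boils down to rebasing each task's vruntime against a per-cfs_rq snapshot,
core_vruntime, taken when the core last collapsed into single-runqueue mode;
the rebased values are then comparable across the two siblings. Below is a
stand-alone toy sketch of that arithmetic only; the toy_* structures and the
numbers are made up purely for illustration and are not part of the patch.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

/* Toy stand-ins for the kernel structures; illustration only. */
struct toy_cfs_rq {
	u64 core_vruntime;	/* min_vruntime snapshot taken at the last sync */
};

struct toy_se {
	u64 vruntime;
	struct toy_cfs_rq *cfs_rq;
};

/*
 * Same comparison shape as cfs_prio_less() in the patch: rebase each
 * vruntime against its own runqueue's snapshot, then compare the deltas
 * as s64.  Returns true when 'a' has consumed more rebased vruntime than
 * 'b', i.e. 'a' is the less deserving of the two.
 */
static bool toy_cfs_prio_less(const struct toy_se *a, const struct toy_se *b)
{
	u64 va = a->vruntime - a->cfs_rq->core_vruntime;
	u64 vb = b->vruntime - b->cfs_rq->core_vruntime;

	return !((s64)(va - vb) <= 0);
}

int main(void)
{
	/* Two sibling runqueues whose absolute vruntime bases differ. */
	struct toy_cfs_rq rq0 = { .core_vruntime = 1000 };
	struct toy_cfs_rq rq1 = { .core_vruntime = 5000 };

	struct toy_se a = { .vruntime = 1300, .cfs_rq = &rq0 };	/* 300 past its snapshot */
	struct toy_se b = { .vruntime = 5100, .cfs_rq = &rq1 };	/* 100 past its snapshot */

	/*
	 * Raw vruntimes (1300 vs 5100) would wrongly favour 'a'; the rebased
	 * values (300 vs 100) show 'a' has actually run more since the sync.
	 */
	printf("a less deserving than b: %d\n", toy_cfs_prio_less(&a, &b));	/* prints 1 */
	return 0;
}

(Comparing the raw vruntimes would go the other way; cancelling that
cross-runqueue skew is exactly what the per-cfs_rq snapshot is for.)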
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -102,7 +102,6 @@ static inline int __task_prio(struct tas
 /* real prio, less is less */
 static inline bool prio_less(struct task_struct *a, struct task_struct *b)
 {
-
 	int pa = __task_prio(a), pb = __task_prio(b);
 
 	if (-pa < -pb)
@@ -114,19 +113,8 @@ static inline bool prio_less(struct task
 	if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
 		return !dl_time_before(a->dl.deadline, b->dl.deadline);
 
-	if (pa == MAX_RT_PRIO + MAX_NICE) { /* fair */
-		u64 vruntime = b->se.vruntime;
-
-		/*
-		 * Normalize the vruntime if tasks are in different cpus.
-		 */
-		if (task_cpu(a) != task_cpu(b)) {
-			vruntime -= task_cfs_rq(b)->min_vruntime;
-			vruntime += task_cfs_rq(a)->min_vruntime;
-		}
-
-		return !((s64)(a->se.vruntime - vruntime) <= 0);
-	}
+	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
+		return cfs_prio_less(a, b);
 
 	return false;
 }
@@ -4293,10 +4281,11 @@ static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct task_struct *next, *max = NULL;
+	int old_active = 0, new_active = 0;
 	const struct sched_class *class;
 	const struct cpumask *smt_mask;
-	int i, j, cpu;
 	bool need_sync = false;
+	int i, j, cpu;
 
 	cpu = cpu_of(rq);
 	if (cpu_is_offline(cpu))
@@ -4349,10 +4338,14 @@ pick_next_task(struct rq *rq, struct tas
 		rq_i->core_pick = NULL;
 
 		if (rq_i->core_forceidle) {
+			// XXX is_idle_task(rq_i->curr) && rq_i->nr_running ??
 			need_sync = true;
 			rq_i->core_forceidle = false;
 		}
 
+		if (!is_idle_task(rq_i->curr))
+			old_active++;
+
 		if (i != cpu)
 			update_rq_clock(rq_i);
 	}
@@ -4463,8 +4456,12 @@ next_class:;
 		WARN_ON_ONCE(!rq_i->core_pick);
 
-		if (is_idle_task(rq_i->core_pick) && rq_i->nr_running)
-			rq_i->core_forceidle = true;
+		if (is_idle_task(rq_i->core_pick)) {
+			if (rq_i->nr_running)
+				rq_i->core_forceidle = true;
+		} else {
+			new_active++;
+		}
 
 		if (i == cpu)
 			continue;
 
@@ -4476,6 +4473,16 @@ next_class:;
 		WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
 	}
 
+	/* XXX SMT2 only */
+	if (new_active == 1 && old_active > 1) {
+		/*
+		 * We just dropped into single-rq mode, increment the sequence
+		 * count to trigger the vruntime sync.
+		 */
+		rq->core->core_sync_seq++;
+	}
+	rq->core->core_active = new_active;
+
 done:
 	set_next_task(rq, next);
 	return next;
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -386,6 +386,12 @@ is_same_group(struct sched_entity *se, s
 	return NULL;
 }
 
+static inline bool
+is_same_tg(struct sched_entity *se, struct sched_entity *pse)
+{
+	return se->cfs_rq->tg == pse->cfs_rq->tg;
+}
+
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
 {
 	return se->parent;
@@ -394,8 +400,6 @@ static inline struct sched_entity *paren
 static void
 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 {
-	int se_depth, pse_depth;
-
 	/*
 	 * preemption test can be made between sibling entities who are in the
 	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
@@ -403,23 +407,16 @@ find_matching_se(struct sched_entity **s
 	 * parent.
 	 */
 
-	/* First walk up until both entities are at same depth */
-	se_depth = (*se)->depth;
-	pse_depth = (*pse)->depth;
-
-	while (se_depth > pse_depth) {
-		se_depth--;
-		*se = parent_entity(*se);
-	}
-
-	while (pse_depth > se_depth) {
-		pse_depth--;
-		*pse = parent_entity(*pse);
-	}
+	/* XXX we now have 3 of these loops, C stinks */
 
 	while (!is_same_group(*se, *pse)) {
-		*se = parent_entity(*se);
-		*pse = parent_entity(*pse);
+		int se_depth = (*se)->depth;
+		int pse_depth = (*pse)->depth;
+
+		if (se_depth <= pse_depth)
+			*pse = parent_entity(*pse);
+		if (se_depth >= pse_depth)
+			*se = parent_entity(*se);
 	}
 }
 
@@ -455,6 +452,12 @@ static inline struct sched_entity *paren
 	return NULL;
 }
 
+static inline bool
+is_same_tg(struct sched_entity *se, struct sched_entity *pse)
+{
+	return true;
+}
+
 static inline void
 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 {
@@ -462,6 +465,31 @@ find_matching_se(struct sched_entity **s
 }
 
 #endif	/* CONFIG_FAIR_GROUP_SCHED */
 
+bool cfs_prio_less(struct task_struct *a, struct task_struct *b)
+{
+	struct sched_entity *se_a = &a->se, *se_b = &b->se;
+	struct cfs_rq *cfs_rq_a, *cfs_rq_b;
+	u64 vruntime_a, vruntime_b;
+
+	while (!is_same_tg(se_a, se_b)) {
+		int se_a_depth = se_a->depth;
+		int se_b_depth = se_b->depth;
+
+		if (se_a_depth <= se_b_depth)
+			se_b = parent_entity(se_b);
+		if (se_a_depth >= se_b_depth)
+			se_a = parent_entity(se_a);
+	}
+
+	cfs_rq_a = cfs_rq_of(se_a);
+	cfs_rq_b = cfs_rq_of(se_b);
+
+	vruntime_a = se_a->vruntime - cfs_rq_a->core_vruntime;
+	vruntime_b = se_b->vruntime - cfs_rq_b->core_vruntime;
+
+	return !((s64)(vruntime_a - vruntime_b) <= 0);
+}
+
 static __always_inline
 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
 
@@ -6891,6 +6919,18 @@ static void check_preempt_wakeup(struct
 		set_last_buddy(se);
 }
 
+static void core_sync_entity(struct rq *rq, struct cfs_rq *cfs_rq)
+{
+	if (!sched_core_enabled())
+		return;
+
+	if (rq->core->core_sync_seq == cfs_rq->core_sync_seq)
+		return;
+
+	cfs_rq->core_sync_seq = rq->core->core_sync_seq;
+	cfs_rq->core_vruntime = cfs_rq->min_vruntime;
+}
+
 static struct task_struct *pick_task_fair(struct rq *rq)
 {
 	struct cfs_rq *cfs_rq = &rq->cfs;
@@ -6902,6 +6942,14 @@ static struct task_struct *pick_task_fai
 	do {
 		struct sched_entity *curr = cfs_rq->curr;
 
+		/*
+		 * Propagate the sync state down to whatever cfs_rq we need,
+		 * the active cfs_rq's will have been done by
+		 * set_next_task_fair(), the rest is inactive and will not have
+		 * changed due to the current running task.
+		 */
+		core_sync_entity(rq, cfs_rq);
+
 		se = pick_next_entity(cfs_rq, NULL);
 
 		if (curr) {
@@ -10825,7 +10873,8 @@ static void switched_to_fair(struct rq *
 	}
 }
 
-/* Account for a task changing its policy or group.
+/*
+ * Account for a task changing its policy or group.
  *
  * This routine is mostly called to set cfs_rq->curr field when a task
  * migrates between groups/classes.
@@ -10847,6 +10896,9 @@ static void set_next_task_fair(struct rq
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
+		/* snapshot vruntime before using it */
+		core_sync_entity(rq, cfs_rq);
+
 		set_next_entity(cfs_rq, se);
 		/* ensure bandwidth has been allocated on our new cfs_rq */
 		account_cfs_rq_runtime(cfs_rq, 0);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -503,6 +503,10 @@ struct cfs_rq {
 	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
 	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
 
+#ifdef CONFIG_SCHED_CORE
+	unsigned int		core_sync_seq;
+	u64			core_vruntime;
+#endif
 	u64			exec_clock;
 	u64			min_vruntime;
#ifndef CONFIG_64BIT
@@ -1035,12 +1039,15 @@ struct rq {
 	unsigned int		core_enabled;
 	unsigned int		core_sched_seq;
 	struct rb_root		core_tree;
-	bool			core_forceidle;
+	unsigned int		core_forceidle;
 
 	/* shared state */
 	unsigned int		core_task_seq;
 	unsigned int		core_pick_seq;
 	unsigned long		core_cookie;
+	unsigned int		core_sync_seq;
+	unsigned int		core_active;
+
 #endif
 };
 
@@ -2592,6 +2599,8 @@ static inline bool sched_energy_enabled(
 
 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
 
+extern bool cfs_prio_less(struct task_struct *a, struct task_struct *b);
+
 #ifdef CONFIG_MEMBARRIER
 /*
  * The scheduler provides memory barriers required by membarrier between:
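(Editor's note on the sync mechanism: core_sync_seq is only bumped in
pick_next_task(), and each cfs_rq catches up lazily the next time
core_sync_entity() touches it, so nothing has to walk every runqueue at the
transition. Below is a stand-alone toy model of that pattern; the field names
mirror the patch, but the toy_* structures and values are made up for
illustration and are not kernel code.)

#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of the lazy snapshot scheme: one shared generation counter is
 * bumped when the core drops into single-runqueue mode, and each cfs_rq
 * refreshes its snapshot the next time it is actually used.
 */
struct toy_core {
	unsigned int core_sync_seq;
};

struct toy_cfs_rq {
	unsigned int core_sync_seq;
	uint64_t min_vruntime;
	uint64_t core_vruntime;
};

static void toy_core_sync_entity(struct toy_core *core, struct toy_cfs_rq *cfs_rq)
{
	if (cfs_rq->core_sync_seq == core->core_sync_seq)
		return;					/* already synced: O(1) fast path */

	cfs_rq->core_sync_seq = core->core_sync_seq;
	cfs_rq->core_vruntime = cfs_rq->min_vruntime;	/* take a fresh snapshot */
}

int main(void)
{
	struct toy_core core = { .core_sync_seq = 0 };
	struct toy_cfs_rq cfs_rq = { .core_sync_seq = 0, .min_vruntime = 4200 };

	core.core_sync_seq++;			/* core dropped into single-rq mode */
	toy_core_sync_entity(&core, &cfs_rq);	/* first touch afterwards re-snapshots */

	printf("core_vruntime = %llu\n", (unsigned long long)cfs_rq.core_vruntime);
	return 0;
}

(Because the refresh happens at first use, the snapshot reflects min_vruntime
at that point rather than at the exact instant of the mode switch.)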