    From: vpillai <vpillai@digitalocean.com>
    Subject: [RFC PATCH 07/16] sched/fair: Fix forced idle sibling starvation corner case

    If the local runqueue has only one long-running task and the sibling is
    forced idle, the sibling might not get a chance to run until a schedule
    event happens on some cpu in the core.

    So we check for this condition during the tick and, if a sibling is
    starved, trigger a resched on it to give it a chance to schedule.
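
    To make the trigger concrete: the slice check compares how much runtime
    the entity has consumed since it was last picked against its fair-share
    slice. Below is a minimal userspace sketch of that test; mock_se and the
    fixed SLICE_NS are illustrative stand-ins, since the kernel computes the
    slice dynamically via sched_slice():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Stand-in for sched_slice(): the kernel derives the slice from cfs_rq
     * load; a fixed 4ms value is assumed here purely for illustration.
     */
    #define SLICE_NS 4000000ULL

    struct mock_se {
    	uint64_t sum_exec_runtime;	/* total runtime consumed so far */
    	uint64_t prev_sum_exec_runtime;	/* snapshot taken when last picked */
    };

    /* Mirrors the __entity_slice_used() test introduced below. */
    static bool entity_slice_used(const struct mock_se *se)
    {
    	return (se->sum_exec_runtime - se->prev_sum_exec_runtime) > SLICE_NS;
    }

    int main(void)
    {
    	/* 5ms consumed since last pick, measured against a 4ms slice. */
    	struct mock_se se = {
    		.sum_exec_runtime	= 9000000ULL,
    		.prev_sum_exec_runtime	= 4000000ULL,
    	};

    	printf("slice used: %s\n", entity_slice_used(&se) ? "yes" : "no");
    	return 0;
    }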

    Signed-off-by: Vineeth Remanan Pillai <vpillai@digitalocean.com>
    Signed-off-by: Julien Desfossez <jdesfossez@digitalocean.com>
    ---
    kernel/sched/fair.c | 39 +++++++++++++++++++++++++++++++++++++++
    1 file changed, 39 insertions(+)

    diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
    index ae17507533a0..49fb93296e35 100644
    --- a/kernel/sched/fair.c
    +++ b/kernel/sched/fair.c
    @@ -10613,6 +10613,40 @@ static void rq_offline_fair(struct rq *rq)
     
     #endif /* CONFIG_SMP */
     
    +#ifdef CONFIG_SCHED_CORE
    +static inline bool
    +__entity_slice_used(struct sched_entity *se)
    +{
    +	return (se->sum_exec_runtime - se->prev_sum_exec_runtime) >
    +		sched_slice(cfs_rq_of(se), se);
    +}
    +
    +/*
    + * If the runqueue has only one task which used up its slice and the sibling
    + * is forced idle, trigger a schedule to give the forced idle task a chance.
    + */
    +static void resched_forceidle_sibling(struct rq *rq, struct sched_entity *se)
    +{
    +	int cpu = cpu_of(rq), sibling_cpu;
    +
    +	if (rq->cfs.nr_running > 1 || !__entity_slice_used(se))
    +		return;
    +
    +	for_each_cpu(sibling_cpu, cpu_smt_mask(cpu)) {
    +		struct rq *sibling_rq;
    +		if (sibling_cpu == cpu)
    +			continue;
    +		if (cpu_is_offline(sibling_cpu))
    +			continue;
    +
    +		sibling_rq = cpu_rq(sibling_cpu);
    +		if (sibling_rq->core_forceidle) {
    +			resched_curr(sibling_rq);
    +		}
    +	}
    +}
    +#endif
    +
     /*
      * scheduler tick hitting a task of our scheduling class.
      *
    @@ -10636,6 +10670,11 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
     
     	update_misfit_status(curr, rq);
     	update_overutilized_status(task_rq(curr));
    +
    +#ifdef CONFIG_SCHED_CORE
    +	if (sched_core_enabled(rq))
    +		resched_forceidle_sibling(rq, &curr->se);
    +#endif
     }
     
     /*
    --
    2.17.1
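
    For intuition, the sibling walk added above can be modeled in plain
    userspace C. Everything below is a mock invented for illustration
    (mock_rq, NR_SIBLINGS, the rqs array); a printf stands in for
    resched_curr() so the decision logic can be traced:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_SIBLINGS 2	/* assume a 2-way SMT core */

    /* Mock of the few struct rq fields the patch inspects. */
    struct mock_rq {
    	int  nr_running;	/* tasks on this cfs runqueue */
    	bool core_forceidle;	/* forced idle by core scheduling */
    };

    static struct mock_rq rqs[NR_SIBLINGS];

    /*
     * Mirrors resched_forceidle_sibling(): if the local cpu runs a single
     * task that has used up its slice, kick any forced-idle SMT sibling.
     */
    static void check_forceidle_sibling(int cpu, bool slice_used)
    {
    	int sibling;

    	if (rqs[cpu].nr_running > 1 || !slice_used)
    		return;

    	for (sibling = 0; sibling < NR_SIBLINGS; sibling++) {
    		if (sibling == cpu)
    			continue;
    		if (rqs[sibling].core_forceidle)
    			printf("resched_curr() on cpu %d\n", sibling);
    	}
    }

    int main(void)
    {
    	/* cpu 0: one long-running task; cpu 1: forced idle. */
    	rqs[0] = (struct mock_rq){ .nr_running = 1, .core_forceidle = false };
    	rqs[1] = (struct mock_rq){ .nr_running = 0, .core_forceidle = true };

    	check_forceidle_sibling(0, true);	/* tick after slice expiry */
    	return 0;
    }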