    Subject: [RFC][PATCH v5 13/14] sched: differentiate idle cpu
    The cost of waking up a core varies according to its current idle state.
    This covers both the C-state itself and the intermediate states entered
    when some synchronization between cores is required to reach a deep
    C-state.
    Waking up a CPU that sits in a deep C-state to run a short task is not
    efficient from either a power or a performance point of view. We should
    take the wake-up latency of an idle CPU into account when the scheduler
    looks for the best CPU to use for a waking task.
    The wake-up latency of a CPU is converted into a load that can be
    directly compared with the task load and the load of other CPUs.
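
    For illustration (not part of the patch): the conversion below scales
    the latency, expressed in microseconds, by 21/1024 (roughly 1/49). A
    CPU reporting a 10ms (10000us) wakeup latency is therefore seen as an
    idle load of (10000 * 21) >> 10 = 205 on the scheduler's load scale,
    directly comparable with a task's load_avg_contrib.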

    Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
    ---
    kernel/sched/fair.c | 22 ++++++++++++++++++++++
    1 file changed, 22 insertions(+)

    diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
    index ad8b99a..4863dad 100644
    --- a/kernel/sched/fair.c
    +++ b/kernel/sched/fair.c
    @@ -394,6 +394,20 @@ static int check_nohz_packing(int cpu)

    	return false;
    }
    +
    +int sched_get_idle_load(int cpu)
    +{
    +	struct sched_pm *stat = &per_cpu(sched_stat, cpu);
    +	int latency = atomic_read(&(stat->wake_latency));
    +	/*
    +	 * Transform the current wakeup latency (us) into an idle load that
    +	 * will be compared with the task load to decide whether it is
    +	 * worth waking up the CPU. The current formula is quite simple but
    +	 * gives a good approximation in the range [0:10ms].
    +	 */
    +	return (latency * 21) >> 10;
    +}
    +
    #else /* CONFIG_SCHED_PACKING_TASKS */

    static inline bool is_packing_cpu(int cpu)
    @@ -416,6 +430,10 @@ static inline int check_nohz_packing(int cpu)
    	return false;
    }

    +static inline int sched_get_idle_load(int cpu)
    +{
    +	return 0;
    +}

    #endif /* CONFIG_SCHED_PACKING_TASKS */
    #endif /* CONFIG_SMP */
    @@ -3207,6 +3225,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
    /* Used instead of source_load when we know the type == 0 */
    static unsigned long weighted_cpuload(const int cpu)
    {
    +	if (idle_cpu(cpu))
    +		return sched_get_idle_load(cpu);
    	return cpu_rq(cpu)->cfs.runnable_load_avg;
    }

    @@ -3655,6 +3675,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
    				if (i == target || !idle_cpu(i)
    				    || !is_packing_cpu(i))
    					goto next;
    +			if (weighted_cpuload(i) > p->se.avg.load_avg_contrib)
    +				goto next;
    		}

    		target = cpumask_first_and(sched_group_cpus(sg),
    --
    1.7.9.5
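
    For reference, a small self-contained harness (not part of the patch;
    the latency values and the idle_load() helper name are purely
    illustrative) showing how the conversion above maps C-state exit
    latencies onto the load scale:

        #include <stdio.h>

        /* Same conversion as sched_get_idle_load() above: a wakeup latency
         * in microseconds is scaled by 21/1024 (~1/49) onto the load scale.
         */
        static int idle_load(int latency_us)
        {
        	return (latency_us * 21) >> 10;
        }

        int main(void)
        {
        	/* Illustrative C-state exit latencies in microseconds. */
        	int latencies[] = { 1, 100, 1500, 10000 };
        	int i;

        	for (i = 0; i < 4; i++)
        		printf("latency %5dus -> idle load %d\n",
        		       latencies[i], idle_load(latencies[i]));
        	return 0;
        }

    With the weighted_cpuload() change, an idle CPU whose converted load
    exceeds the waking task's load_avg_contrib is skipped by
    select_idle_sibling(), so short tasks avoid CPUs in deep C-states.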

