From: Juri Lelli <juri.lelli@arm.com>
Subject: [RFCv5 PATCH 46/46] sched/fair: cpufreq_sched triggers for load balancing
Date: 2015-07-07

As we don't trigger freq changes from {en,de}queue_task_fair() during load
balancing, we need to do so explicitly on the load balancing paths.
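
The request made on each path below is the same arithmetic the series uses
at enqueue time: scale the CPU's current usage (get_cpu_usage()) by
capacity_margin and shift down by SCHED_CAPACITY_SHIFT. A minimal
user-space sketch of that arithmetic, assuming capacity_margin = 1280 and
SCHED_CAPACITY_SCALE = 1024 as elsewhere in this series (the names below
are illustrative, not part of the patch):

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)	/* 1024 */
#define CAPACITY_MARGIN		1280	/* assumed value of capacity_margin */

/* Illustrative stand-in for the req_cap computation in the hunks below. */
static unsigned long requested_capacity(unsigned long usage)
{
	return usage * CAPACITY_MARGIN >> SCHED_CAPACITY_SHIFT;
}

int main(void)
{
	/* A CPU at half capacity (512 of 1024) requests 640, i.e. 512 * 1.25. */
	printf("%lu\n", requested_capacity(SCHED_CAPACITY_SCALE / 2));
	return 0;
}

A margin of 1280/1024 asks for ~1.25x the current usage, which is the same
thing as keeping usage at ~80% of the delivered capacity: the ~20%
tipping-point head room the comments below refer to.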

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>

Signed-off-by: Juri Lelli <juri.lelli@arm.com>
---
 kernel/sched/fair.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 62 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c2d6de4..c513b19 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7741,6 +7741,20 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		 * ld_moved - cumulative load moved across iterations
 		 */
 		cur_ld_moved = detach_tasks(&env);
+		/*
+		 * We want to potentially update env.src_cpu's OPP.
+		 *
+		 * Add a margin (same ~20% used for the tipping point)
+		 * to our request to provide some head room for the remaining
+		 * tasks.
+		 */
+		if (sched_energy_freq() && cur_ld_moved) {
+			unsigned long req_cap = get_cpu_usage(env.src_cpu);
+
+			req_cap = req_cap * capacity_margin
+					>> SCHED_CAPACITY_SHIFT;
+			cpufreq_sched_set_cap(env.src_cpu, req_cap);
+		}
 
 		/*
 		 * We've detached some tasks from busiest_rq. Every
@@ -7755,6 +7769,21 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		if (cur_ld_moved) {
 			attach_tasks(&env);
 			ld_moved += cur_ld_moved;
+			/*
+			 * We want to potentially update env.dst_cpu's OPP.
+			 *
+			 * Add a margin (same ~20% used for the tipping point)
+			 * to our request to provide some head room if p's
+			 * utilization further increases.
+			 */
+			if (sched_energy_freq()) {
+				unsigned long req_cap =
+					get_cpu_usage(env.dst_cpu);
+
+				req_cap = req_cap * capacity_margin
+						>> SCHED_CAPACITY_SHIFT;
+				cpufreq_sched_set_cap(env.dst_cpu, req_cap);
+			}
 		}
 
 		local_irq_restore(flags);
@@ -8114,8 +8143,24 @@ static int active_load_balance_cpu_stop(void *data)
 		schedstat_inc(sd, alb_count);
 
 		p = detach_one_task(&env);
-		if (p)
+		if (p) {
 			schedstat_inc(sd, alb_pushed);
+			/*
+			 * We want to potentially update env.src_cpu's OPP.
+			 *
+			 * Add a margin (same ~20% used for the tipping point)
+			 * to our request to provide some head room for the
+			 * remaining task.
+			 */
+			if (sched_energy_freq()) {
+				unsigned long req_cap =
+					get_cpu_usage(env.src_cpu);
+
+				req_cap = req_cap * capacity_margin
+						>> SCHED_CAPACITY_SHIFT;
+				cpufreq_sched_set_cap(env.src_cpu, req_cap);
+			}
+		}
 		else
 			schedstat_inc(sd, alb_failed);
 	}
@@ -8124,8 +8169,23 @@ static int active_load_balance_cpu_stop(void *data)
 	busiest_rq->active_balance = 0;
 	raw_spin_unlock(&busiest_rq->lock);
 
-	if (p)
+	if (p) {
 		attach_one_task(target_rq, p);
+		/*
+		 * We want to potentially update target_cpu's OPP.
+		 *
+		 * Add a margin (same ~20% used for the tipping point)
+		 * to our request to provide some head room if p's utilization
+		 * further increases.
+		 */
+		if (sched_energy_freq()) {
+			unsigned long req_cap = get_cpu_usage(target_cpu);
+
+			req_cap = req_cap * capacity_margin
+					>> SCHED_CAPACITY_SHIFT;
+			cpufreq_sched_set_cap(target_cpu, req_cap);
+		}
+	}
 
 	local_irq_enable();
 
--
1.9.1
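
cpufreq_sched_set_cap() is introduced earlier in this series and records a
per-CPU capacity request for the cpufreq_sched governor to act on. As a
rough sketch of what a governor can do with such requests, assuming it
simply scales the policy's maximum frequency by the largest outstanding
request (an illustrative shape, not the series' actual code):

#define SCHED_CAPACITY_SHIFT	10

/*
 * Illustrative only: pick a target frequency from per-CPU capacity
 * requests by scaling the maximum frequency with the largest request.
 */
static unsigned int freq_for_requests(unsigned int max_freq_khz,
				      const unsigned long *req_cap,
				      int nr_cpus)
{
	unsigned long max_cap = 0;
	int i;

	for (i = 0; i < nr_cpus; i++)
		if (req_cap[i] > max_cap)
			max_cap = req_cap[i];

	/* e.g. max_freq_khz = 1800000, max_cap = 640 -> 1125000 kHz */
	return max_freq_khz * max_cap >> SCHED_CAPACITY_SHIFT;
}

In the series itself the actual OPP change is deferred to a dedicated
kthread, since switching frequency may sleep while these call sites run
with interrupts disabled or runqueue locks held.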


    \
     
     \ /
      Last update: 2015-07-07 21:01    [W:4.341 / U:0.044 seconds]
    ©2003-2020 Jasper Spaans|hosted at Digital Ocean and TransIP|Read the blog|Advertise on this site