    Subject: [tip:sched/core] sched: Compute load contribution by a group entity
    Commit-ID:  8165e145ceb62fc338e099c9b12b3239c83d2f8e
    Gitweb: http://git.kernel.org/tip/8165e145ceb62fc338e099c9b12b3239c83d2f8e
    Author: Paul Turner <pjt@google.com>
    AuthorDate: Thu, 4 Oct 2012 13:18:31 +0200
    Committer: Ingo Molnar <mingo@kernel.org>
    CommitDate: Wed, 24 Oct 2012 10:27:25 +0200

    sched: Compute load contribution by a group entity

    Unlike task entities, which have a fixed weight, group entities own a
    fraction of their parent task_group's shares as their contributed weight.

    Compute this fraction so that we can correctly account for hierarchies and
    shared entity nodes.
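
    For illustration only, a minimal userspace sketch (hypothetical names,
    not kernel code) of the fraction being computed: a per-CPU group entity
    contributes roughly tg->shares scaled by the portion of the group's
    total load that its cfs_rq carries, with a +1 in the divisor to guard
    against dividing by zero.

	#include <stdint.h>
	#include <stdio.h>

	/* contrib = shares * (this cfs_rq's load / whole group's load) */
	static uint64_t group_entity_contrib(uint64_t cfs_rq_contrib,
					     uint64_t tg_shares,
					     uint64_t tg_load_avg)
	{
		return cfs_rq_contrib * tg_shares / (tg_load_avg + 1);
	}

	int main(void)
	{
		/* this CPU carries 512 of the group's 2048 total load and
		 * the group owns 1024 shares: the entity weighs in at ~256 */
		printf("%llu\n",
		       (unsigned long long)group_entity_contrib(512, 1024, 2048));
		return 0;
	}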

    Signed-off-by: Paul Turner <pjt@google.com>
    Reviewed-by: Ben Segall <bsegall@google.com>
    Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Link: http://lkml.kernel.org/r/20120823141506.855074415@google.com
    Signed-off-by: Ingo Molnar <mingo@kernel.org>
    ---
    kernel/sched/fair.c | 33 +++++++++++++++++++++++++++------
    1 file changed, 27 insertions(+), 6 deletions(-)

    diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
    index db78822..e20cb26 100644
    --- a/kernel/sched/fair.c
    +++ b/kernel/sched/fair.c
    @@ -1117,22 +1117,43 @@ static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
     		cfs_rq->tg_load_contrib += tg_contrib;
     	}
     }
    +
    +static inline void __update_group_entity_contrib(struct sched_entity *se)
    +{
    +	struct cfs_rq *cfs_rq = group_cfs_rq(se);
    +	struct task_group *tg = cfs_rq->tg;
    +	u64 contrib;
    +
    +	contrib = cfs_rq->tg_load_contrib * tg->shares;
    +	se->avg.load_avg_contrib = div64_u64(contrib,
    +				     atomic64_read(&tg->load_avg) + 1);
    +}
     #else
     static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
     						 int force_update) {}
    +static inline void __update_group_entity_contrib(struct sched_entity *se) {}
     #endif
     
    +static inline void __update_task_entity_contrib(struct sched_entity *se)
    +{
    +	u32 contrib;
    +
    +	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
    +	contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
    +	contrib /= (se->avg.runnable_avg_period + 1);
    +	se->avg.load_avg_contrib = scale_load(contrib);
    +}
    +
     /* Compute the current contribution to load_avg by se, return any delta */
     static long __update_entity_load_avg_contrib(struct sched_entity *se)
     {
     	long old_contrib = se->avg.load_avg_contrib;
     
    -	if (!entity_is_task(se))
    -		return 0;
    -
    -	se->avg.load_avg_contrib = div64_u64(se->avg.runnable_avg_sum *
    -					     se->load.weight,
    -					     se->avg.runnable_avg_period + 1);
    +	if (entity_is_task(se)) {
    +		__update_task_entity_contrib(se);
    +	} else {
    +		__update_group_entity_contrib(se);
    +	}
     
     	return se->avg.load_avg_contrib - old_contrib;
     }
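
    As a standalone illustration of the overflow note in
    __update_task_entity_contrib() above (hypothetical names; the shift
    value is an assumption matching what scale_load_down() uses when extra
    load resolution is enabled): the running sum is bounded by the PELT
    series maximum, so scaling the weight down before the 32-bit multiply
    keeps the product in range, and the lost resolution is shifted back in
    after the divide.

	#include <stdint.h>
	#include <stdio.h>

	#define RES_SHIFT 10	/* assumed scale_load_down()/scale_load() shift */

	static uint64_t task_contrib(uint32_t runnable_sum,
				     uint32_t runnable_period, uint64_t weight)
	{
		uint32_t contrib;

		/* scale the weight down so the 32-bit multiply stays in range
		 * for the bounded runnable sum */
		contrib = runnable_sum * (uint32_t)(weight >> RES_SHIFT);
		contrib /= runnable_period + 1;
		/* restore the resolution that was shifted out */
		return (uint64_t)contrib << RES_SHIFT;
	}

	int main(void)
	{
		/* a task runnable ~half its window at default weight 1024 */
		printf("%llu\n", (unsigned long long)
		       task_contrib(23871, 47742, 1024ULL << RES_SHIFT));
		return 0;
	}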
