 
    Subject: [RFC 14/60] sched: Refactor sync_throttle() to accept a CFS runqueue as argument
    In preparation for future changes, refactor sync_throttle() to take the
    affected CFS runqueue directly instead of a task group and CPU number.
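    Not part of the patch, just a sketch of why the single cfs_rq argument is
    sufficient: with CONFIG_FAIR_GROUP_SCHED, the runqueue, CPU and task group
    that used to be passed in can all be derived from the cfs_rq through the
    existing rq_of(), cpu_of() and cfs_rq->tg accessors (the helper name below
    is made up for illustration):

    	/* hypothetical helper mirroring the lookup in the new sync_throttle() */
    	static struct cfs_rq *parent_cfs_rq(struct cfs_rq *cfs_rq)
    	{
    		struct rq *rq = rq_of(cfs_rq);	/* runqueue owning this cfs_rq */
    		int cpu = cpu_of(rq);		/* CPU number of that runqueue */

    		/* same lookup the old (tg, cpu) signature performed */
    		return cfs_rq->tg->parent->cfs_rq[cpu];
    	}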

    Signed-off-by: Jan H. Schönherr <jschoenh@amazon.de>
    ---
    kernel/sched/fair.c | 13 ++++++-------
    1 file changed, 6 insertions(+), 7 deletions(-)

    diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
    index 5cad364e3a88..9f0ce4555c26 100644
    --- a/kernel/sched/fair.c
    +++ b/kernel/sched/fair.c
    @@ -4709,18 +4709,17 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
     		throttle_cfs_rq(cfs_rq);
     }

    -static void sync_throttle(struct task_group *tg, int cpu)
    +static void sync_throttle(struct cfs_rq *cfs_rq)
     {
    -	struct cfs_rq *pcfs_rq, *cfs_rq;
    +	struct cfs_rq *pcfs_rq;

     	if (!cfs_bandwidth_used())
     		return;

    -	cfs_rq = tg->cfs_rq[cpu];
    -	pcfs_rq = tg->parent->cfs_rq[cpu];
    +	pcfs_rq = cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))];

     	cfs_rq->throttle_count = pcfs_rq->throttle_count;
    -	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
    +	cfs_rq->throttled_clock_task = rq_clock_task(rq_of(cfs_rq));
     }

     /* conditionally throttle active cfs_rq's from put_prev_entity() */
    @@ -4887,7 +4886,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
     static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
     static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
     static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
    -static inline void sync_throttle(struct task_group *tg, int cpu) {}
    +static inline void sync_throttle(struct cfs_rq *cfs_rq) {}
     static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}

     static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
    @@ -9866,7 +9865,7 @@ void online_fair_sched_group(struct task_group *tg)
     		raw_spin_lock_irq(&rq->lock);
     		update_rq_clock(rq);
     		attach_entity_cfs_rq(se);
    -		sync_throttle(tg, i);
    +		sync_throttle(tg->cfs_rq[i]);
     		raw_spin_unlock_irq(&rq->lock);
     	}
     }
    --
    2.9.3.1.gcba166c.dirty