    Subject: Re: [PATCH 2/2] sched: remove distribute_running from CFS bandwidth
    On Fri, Apr 10, 2020 at 3:52 PM Josh Don <joshdon@google.com> wrote:
    >
    > This is mostly a revert of baa9be4ff.
    >
    > The primary use of distribute_running was to determine whether to add
    > throttled entities to the head or the tail of the throttled list. Now
    > that we always add to the tail, we can remove this field.
    >
    > The other use of distribute_running is in the slack_timer, so that we
    > don't start a distribution while one is already running. However, even
    > in the event that this race occurs, it is fine to have two distributions
    > running (especially now that distribute grabs the cfs_b->lock to
    > determine remaining quota before assigning).
    >
    > Signed-off-by: Josh Don <joshdon@google.com>
    > ---
    >  kernel/sched/fair.c  | 13 +------------
    >  kernel/sched/sched.h |  1 -
    > 2 files changed, 1 insertion(+), 13 deletions(-)
    >
    > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
    > index 3fb986c52825..24b5e12efb40 100644
    > --- a/kernel/sched/fair.c
    > +++ b/kernel/sched/fair.c
    > @@ -4930,14 +4930,12 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u
    >  	/*
    >  	 * This check is repeated as we release cfs_b->lock while we unthrottle.
    >  	 */
    > -	while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
    > -		cfs_b->distribute_running = 1;
    > +	while (throttled && cfs_b->runtime > 0) {
    >  		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
    >  		/* we can't nest cfs_b->lock while distributing bandwidth */
    >  		distribute_cfs_runtime(cfs_b);
    >  		raw_spin_lock_irqsave(&cfs_b->lock, flags);
    >
    > -		cfs_b->distribute_running = 0;
    >  		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
    >  	}
    >
    > @@ -5051,10 +5049,6 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
    >  	/* confirm we're still not at a refresh boundary */
    >  	raw_spin_lock_irqsave(&cfs_b->lock, flags);
    >  	cfs_b->slack_started = false;
    > -	if (cfs_b->distribute_running) {
    > -		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
    > -		return;
    > -	}
    >
    >  	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
    >  		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
    > @@ -5064,9 +5058,6 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
    >  	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
    >  		runtime = cfs_b->runtime;
    >
    > -	if (runtime)
    > -		cfs_b->distribute_running = 1;
    > -
    >  	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
    >
    >  	if (!runtime)
    > @@ -5075,7 +5066,6 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
    >  	distribute_cfs_runtime(cfs_b);
    >
    >  	raw_spin_lock_irqsave(&cfs_b->lock, flags);
    > -	cfs_b->distribute_running = 0;
    >  	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
    >  }
    >
    > @@ -5217,7 +5207,6 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
    >  	cfs_b->period_timer.function = sched_cfs_period_timer;
    >  	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    >  	cfs_b->slack_timer.function = sched_cfs_slack_timer;
    > -	cfs_b->distribute_running = 0;
    >  	cfs_b->slack_started = false;
    >  }
    >
    > diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
    > index db3a57675ccf..7198683b2869 100644
    > --- a/kernel/sched/sched.h
    > +++ b/kernel/sched/sched.h
    > @@ -349,7 +349,6 @@ struct cfs_bandwidth {
    >
    >  	u8			idle;
    >  	u8			period_active;
    > -	u8			distribute_running;
    >  	u8			slack_started;
    >  	struct hrtimer		period_timer;
    >  	struct hrtimer		slack_timer;
    > --
    > 2.26.0.110.g2183baf09c-goog
    >

    +pauld@redhat.com
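
    On the point quoted above that a period-timer/slack-timer race is now
    harmless because distribute grabs cfs_b->lock to determine the remaining
    quota before each assignment: here is a small standalone model of that
    argument. It is not the kernel code; the names (quota_pool, consumer,
    distribute) are made up, and it uses a per-consumer mutex where the
    kernel uses the rq lock. Two "distributor" threads race over the same
    pool, yet the total handed out can never exceed the pool, because each
    grant is clamped to the remaining quota under the pool lock.

    /*
     * Toy model (not kernel code) of why two concurrent distributions are
     * safe once each per-consumer grant re-reads the remaining quota under
     * the pool lock before assigning.
     *
     * Build: gcc -pthread -o distribute_model distribute_model.c
     */
    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    #define NR_CONSUMERS	64
    #define NR_DISTRIBUTORS	2
    #define POOL_RUNTIME	1000	/* less than the total demand below */

    struct quota_pool {
    	pthread_mutex_t lock;
    	long		runtime;	/* remaining quota, like cfs_b->runtime */
    };

    struct consumer {
    	pthread_mutex_t lock;		/* stands in for the rq lock */
    	long		granted;	/* like cfs_rq->runtime_remaining */
    	long		want;
    };

    static struct quota_pool pool = {
    	.lock = PTHREAD_MUTEX_INITIALIZER,
    	.runtime = POOL_RUNTIME,
    };
    static struct consumer consumers[NR_CONSUMERS];

    /* One distribution pass: the pool lock is taken per consumer so the
     * grant is clamped to whatever quota is still left at that moment. */
    static void *distribute(void *arg)
    {
    	for (int i = 0; i < NR_CONSUMERS; i++) {
    		struct consumer *c = &consumers[i];
    		long grant;

    		pthread_mutex_lock(&c->lock);

    		pthread_mutex_lock(&pool.lock);
    		grant = c->want - c->granted;	/* 0 if already served */
    		if (grant > pool.runtime)
    			grant = pool.runtime;	/* clamp to remaining quota */
    		if (grant < 0)
    			grant = 0;
    		pool.runtime -= grant;
    		pthread_mutex_unlock(&pool.lock);

    		c->granted += grant;
    		pthread_mutex_unlock(&c->lock);
    	}
    	return NULL;
    }

    int main(void)
    {
    	pthread_t threads[NR_DISTRIBUTORS];
    	long total = 0;

    	for (int i = 0; i < NR_CONSUMERS; i++) {
    		pthread_mutex_init(&consumers[i].lock, NULL);
    		consumers[i].want = 100;
    	}

    	/* Two racing distributions, like period timer vs. slack timer. */
    	for (int i = 0; i < NR_DISTRIBUTORS; i++)
    		pthread_create(&threads[i], NULL, distribute, NULL);
    	for (int i = 0; i < NR_DISTRIBUTORS; i++)
    		pthread_join(threads[i], NULL);

    	for (int i = 0; i < NR_CONSUMERS; i++)
    		total += consumers[i].granted;

    	/* Never hands out more than the pool held, however the two
    	 * distributors interleave. */
    	printf("granted %ld of %d\n", total, POOL_RUNTIME);
    	assert(total <= POOL_RUNTIME);
    	return 0;
    }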
