Subject: [PATCH 5.15 214/846] sched/rt: Try to restart rt period timer when rt runtime exceeded
From: Li Hua <hucool.lihua@huawei.com>

    [ Upstream commit 9b58e976b3b391c0cf02e038d53dd0478ed3013c ]

When rt_runtime is modified from -1 to a valid control value, it may
leave a task throttled forever. A sequence of operations like the
following triggers the bug (a repro sketch follows the steps):

    1. echo -1 > /proc/sys/kernel/sched_rt_runtime_us
    2. Run a FIFO task named A that executes while(1)
    3. echo 950000 > /proc/sys/kernel/sched_rt_runtime_us
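
For convenience, here is a hypothetical standalone repro of the three
steps above. It is a sketch, not part of the patch: it assumes root on
an affected kernel, and the helper write_sysctl() is made up for this
illustration.

/* Hypothetical repro sketch, not part of the patch: run as root on an
 * affected kernel; task A should remain throttled after step 3. */
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Made-up helper for this sketch: write a string to a sysctl file. */
static void write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(1);
	}
	if (fputs(val, f) == EOF || fclose(f) == EOF) {
		perror(path);
		exit(1);
	}
}

int main(void)
{
	const char *knob = "/proc/sys/kernel/sched_rt_runtime_us";
	struct sched_param sp = { .sched_priority = 1 };

	/* Step 1: disable RT throttling; the period timer stays unarmed. */
	write_sysctl(knob, "-1");

	/* Step 2: task A, a SCHED_FIFO while(1) loop. */
	if (fork() == 0) {
		if (sched_setscheduler(0, SCHED_FIFO, &sp))
			perror("sched_setscheduler");
		for (;;)
			;
	}

	/* Step 3: restore a finite runtime. Without the fix, task A is
	 * throttled here and never unthrottled, because the period timer
	 * that would replenish its runtime was never started. */
	sleep(1);
	write_sysctl(knob, "950000");
	return 0;
}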

When rt_runtime is -1, the rt period timer is not activated when task
A is enqueued. After rt_runtime is set to 950,000, the task is
throttled, and it stays throttled forever because the rt period timer
that would replenish its runtime was never started.
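
The stuck state can be modeled in a few lines of plain C. This is a
userspace illustration only, not kernel code: RUNTIME_INF and the
rt_period_active flag are simplified stand-ins for the kernel's
rt_bandwidth state.

/* Userspace model of the pre-patch logic, for illustration only. */
#include <stdbool.h>
#include <stdio.h>

#define RUNTIME_INF (-1LL)

struct rt_bandwidth {
	long long rt_runtime;  /* runtime in ns, or RUNTIME_INF */
	bool rt_period_active; /* stand-in for "period timer armed" */
};

/* Pre-patch behaviour: bail out while runtime is infinite, so the
 * timer is never armed on behalf of an already-enqueued task. */
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (rt_b->rt_runtime == RUNTIME_INF)
		return;
	rt_b->rt_period_active = true;
}

int main(void)
{
	struct rt_bandwidth b = { .rt_runtime = RUNTIME_INF };

	start_rt_bandwidth(&b);   /* task A enqueued while runtime == -1 */
	b.rt_runtime = 950000000; /* echo 950000 > sched_rt_runtime_us */

	/* Nothing calls start_rt_bandwidth() again, so the timer that
	 * would unthrottle task A never fires: */
	printf("period timer armed: %s\n",
	       b.rt_period_active ? "yes" : "no"); /* prints "no" */
	return 0;
}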

    Fixes: d0b27fa77854 ("sched: rt-group: synchonised bandwidth period")
    Reported-by: Hulk Robot <hulkci@huawei.com>
    Signed-off-by: Li Hua <hucool.lihua@huawei.com>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Link: https://lkml.kernel.org/r/20211203033618.11895-1-hucool.lihua@huawei.com
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
    kernel/sched/rt.c | 23 ++++++++++++++++++-----
    1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index bfef3f39b5552..54f9bb3f15605 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -52,11 +52,8 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
 	rt_b->rt_period_timer.function = sched_rt_period_timer;
 }
 
-static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
+static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
 {
-	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
-		return;
-
 	raw_spin_lock(&rt_b->rt_runtime_lock);
 	if (!rt_b->rt_period_active) {
 		rt_b->rt_period_active = 1;
@@ -75,6 +72,14 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 	raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
+static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
+{
+	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
+		return;
+
+	do_start_rt_bandwidth(rt_b);
+}
+
 void init_rt_rq(struct rt_rq *rt_rq)
 {
 	struct rt_prio_array *array;
@@ -1029,13 +1034,17 @@ static void update_curr_rt(struct rq *rq)
 
 	for_each_sched_rt_entity(rt_se) {
 		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
+		int exceeded;
 
 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
 			raw_spin_lock(&rt_rq->rt_runtime_lock);
 			rt_rq->rt_time += delta_exec;
-			if (sched_rt_runtime_exceeded(rt_rq))
+			exceeded = sched_rt_runtime_exceeded(rt_rq);
+			if (exceeded)
 				resched_curr(rq);
 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
+			if (exceeded)
+				do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
 		}
 	}
 }
@@ -2785,8 +2794,12 @@ static int sched_rt_global_validate(void)
 
 static void sched_rt_do_global(void)
 {
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
 	def_rt_bandwidth.rt_runtime = global_rt_runtime();
 	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
+	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
 }
 
 int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
    --
    2.34.1

