From: Jan H. Schönherr <jschoenh@amazon.de>
Subject: [RFC 06/60] sched: Add a lock-free variant of resched_cpu()
    Add resched_cpu_locked(), which still works as expected when it is called
    while we already hold the runqueue lock of a different CPU.

    There is some optimization potential in merging the logic of resched_curr()
    and resched_cpu_locked(), to avoid sending duplicate IPIs when both functions
    are called for the same CPU.
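    For illustration only (not part of this patch), here is a minimal sketch of
    the intended usage, with a hypothetical scheduler-internal helper
    kick_other_cpu() that already holds the runqueue lock of one CPU and wants
    a different CPU to reschedule:

    /*
     * Illustrative sketch (not part of this patch): kick_other_cpu() is a
     * hypothetical scheduler-internal helper.  It holds the runqueue lock
     * of @busy_cpu, so it must not call resched_cpu(target_cpu), which
     * would try to acquire the remote runqueue lock as well.  The
     * lock-free variant only sets rq->resched atomically and sends an
     * IPI, so it is safe in this context.
     */
    static void kick_other_cpu(int busy_cpu, int target_cpu)
    {
    	struct rq *rq = cpu_rq(busy_cpu);
    	struct rq_flags rf;

    	rq_lock_irqsave(rq, &rf);

    	/* ... modify per-runqueue state of busy_cpu here ... */

    	/* Request a reschedule on target_cpu without taking its lock. */
    	resched_cpu_locked(target_cpu);

    	rq_unlock_irqrestore(rq, &rf);
    }

    The guard in resched_cpu_locked() reads the flag before doing the atomic
    exchange, so repeated requests to the same CPU neither dirty the cache line
    nor send duplicate IPIs; the target CPU clears the flag in scheduler_ipi()
    and folds it into TIF_NEED_RESCHED, as shown in the hunks below.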

    Signed-off-by: Jan H. Schönherr <jschoenh@amazon.de>
    ---
    kernel/sched/core.c | 21 +++++++++++++++++++--
    kernel/sched/sched.h | 6 ++++++
    2 files changed, 25 insertions(+), 2 deletions(-)

    diff --git a/kernel/sched/core.c b/kernel/sched/core.c
    index fd1b0abd8474..c38a54f57e90 100644
    --- a/kernel/sched/core.c
    +++ b/kernel/sched/core.c
    @@ -486,6 +486,15 @@ void resched_cpu(int cpu)
     }
     
     #ifdef CONFIG_SMP
    +/* resched_cpu() when you're already holding a RQ lock of a different CPU */
    +void resched_cpu_locked(int cpu)
    +{
    +	struct rq *rq = cpu_rq(cpu);
    +
    +	if (!atomic_read(&rq->resched) && !atomic_xchg(&rq->resched, 1))
    +		smp_send_reschedule(cpu);
    +}
    +
     #ifdef CONFIG_NO_HZ_COMMON
     /*
      * In the semi idle case, use the nearest busy CPU for migrating timers
    @@ -1744,6 +1753,14 @@ void sched_ttwu_pending(void)
     
     void scheduler_ipi(void)
     {
    +	struct rq *rq = this_rq();
    +
    +	/* Handle lock-free requests to reschedule the current task */
    +	if (atomic_read(&rq->resched)) {
    +		atomic_set(&rq->resched, 0);
    +		set_thread_flag(TIF_NEED_RESCHED);
    +	}
    +
     	/*
     	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
     	 * TIF_NEED_RESCHED remotely (for the first time) will also send
    @@ -1751,7 +1768,7 @@ void scheduler_ipi(void)
     	 */
     	preempt_fold_need_resched();
     
    -	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
    +	if (llist_empty(&rq->wake_list) && !got_nohz_idle_kick())
     		return;
     
     	/*
    @@ -1774,7 +1791,7 @@ void scheduler_ipi(void)
     	 * Check if someone kicked us for doing the nohz idle load balance.
     	 */
     	if (unlikely(got_nohz_idle_kick())) {
    -		this_rq()->idle_balance = 1;
    +		rq->idle_balance = 1;
     		raise_softirq_irqoff(SCHED_SOFTIRQ);
     	}
     	irq_exit();
    diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
    index f6da85447f3c..926a26d816a2 100644
    --- a/kernel/sched/sched.h
    +++ b/kernel/sched/sched.h
    @@ -850,6 +850,9 @@ struct rq {
     	int			cpu;
     	int			online;
     
    +	/* Lock-free rescheduling request for this runqueue */
    +	atomic_t		resched;
    +
     	struct list_head	cfs_tasks;
     
     	struct sched_avg	avg_rt;
    @@ -1647,6 +1650,9 @@ extern void reweight_task(struct task_struct *p, int prio);
     
     extern void resched_curr(struct rq *rq);
     extern void resched_cpu(int cpu);
    +#ifdef CONFIG_SMP
    +void resched_cpu_locked(int cpu);
    +#endif
     
     extern struct rt_bandwidth def_rt_bandwidth;
     extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
    --
    2.9.3.1.gcba166c.dirty