    From: Peter Zijlstra <peterz@infradead.org>
    Subject: [PATCH -tip 01/32] sched: Wrap rq::lock access
    Date: Tue, 17 Nov 2020

    In preparation for playing games with rq->lock, abstract the thing
    using an accessor.

    Tested-by: Julien Desfossez <jdesfossez@digitalocean.com>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Signed-off-by: Vineeth Remanan Pillai <viremana@linux.microsoft.com>
    Signed-off-by: Julien Desfossez <jdesfossez@digitalocean.com>
    Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
    ---
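    A note for reviewers (below the fold, so it won't be committed):

    The conversion is mechanical: every "&rq->lock" becomes
    "rq_lockp(rq)", and the field itself is renamed to "__lock" so that
    any direct access which slips through fails to compile; only the
    raw_spin_lock_init() site in sched_init() touches "__lock" directly.
    As a sketch of where the indirection presumably goes later in the
    series: the unguarded return below is the accessor this patch adds
    to sched.h, while the guarded branch is a hypothetical illustration
    only, with CONFIG_SCHED_CORE, sched_core_enabled() and rq->core as
    assumed names that this patch does not introduce:

        static inline raw_spinlock_t *rq_lockp(struct rq *rq)
        {
        #ifdef CONFIG_SCHED_CORE        /* hypothetical; not in this patch */
                /* SMT siblings could share a single lock, e.g.: */
                if (sched_core_enabled(rq))     /* assumed helper */
                        return &rq->core->__lock;  /* assumed field */
        #endif
                return &rq->__lock;     /* this patch: plain indirection */
        }

    Because two runqueues may then resolve to the same lock, the
    double-lock helpers in sched.h are switched from comparing and
    ordering rq pointers to comparing and ordering the lock pointers
    themselves; see the double_rq_lock() and _double_lock_balance()
    hunks below.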
    kernel/sched/core.c | 68 ++++++++++++-------------
    kernel/sched/cpuacct.c | 12 ++---
    kernel/sched/deadline.c | 22 ++++----
    kernel/sched/debug.c | 4 +-
    kernel/sched/fair.c | 38 +++++++-------
    kernel/sched/idle.c | 4 +-
    kernel/sched/pelt.h | 2 +-
    kernel/sched/rt.c | 16 +++---
    kernel/sched/sched.h | 108 +++++++++++++++++++++-------------------
    kernel/sched/topology.c | 4 +-
    10 files changed, 141 insertions(+), 137 deletions(-)

    diff --git a/kernel/sched/core.c b/kernel/sched/core.c
    index a6aaf9fb3400..db5cc05a68bc 100644
    --- a/kernel/sched/core.c
    +++ b/kernel/sched/core.c
    @@ -186,12 +186,12 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)

    for (;;) {
    rq = task_rq(p);
    - raw_spin_lock(&rq->lock);
    + raw_spin_lock(rq_lockp(rq));
    if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
    rq_pin_lock(rq, rf);
    return rq;
    }
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));

    while (unlikely(task_on_rq_migrating(p)))
    cpu_relax();
    @@ -210,7 +210,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
    for (;;) {
    raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
    rq = task_rq(p);
    - raw_spin_lock(&rq->lock);
    + raw_spin_lock(rq_lockp(rq));
    /*
    * move_queued_task() task_rq_lock()
    *
    @@ -232,7 +232,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
    rq_pin_lock(rq, rf);
    return rq;
    }
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));
    raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

    while (unlikely(task_on_rq_migrating(p)))
    @@ -302,7 +302,7 @@ void update_rq_clock(struct rq *rq)
    {
    s64 delta;

    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));

    if (rq->clock_update_flags & RQCF_ACT_SKIP)
    return;
    @@ -611,7 +611,7 @@ void resched_curr(struct rq *rq)
    struct task_struct *curr = rq->curr;
    int cpu;

    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));

    if (test_tsk_need_resched(curr))
    return;
    @@ -635,10 +635,10 @@ void resched_cpu(int cpu)
    struct rq *rq = cpu_rq(cpu);
    unsigned long flags;

    - raw_spin_lock_irqsave(&rq->lock, flags);
    + raw_spin_lock_irqsave(rq_lockp(rq), flags);
    if (cpu_online(cpu) || cpu == smp_processor_id())
    resched_curr(rq);
    - raw_spin_unlock_irqrestore(&rq->lock, flags);
    + raw_spin_unlock_irqrestore(rq_lockp(rq), flags);
    }

    #ifdef CONFIG_SMP
    @@ -1137,7 +1137,7 @@ static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
    struct uclamp_se *uc_se = &p->uclamp[clamp_id];
    struct uclamp_bucket *bucket;

    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));

    /* Update task effective clamp */
    p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
    @@ -1177,7 +1177,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
    unsigned int bkt_clamp;
    unsigned int rq_clamp;

    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));

    /*
    * If sched_uclamp_used was enabled after task @p was enqueued,
    @@ -1807,7 +1807,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
    static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
    struct task_struct *p, int new_cpu)
    {
    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));

    deactivate_task(rq, p, DEQUEUE_NOCLOCK);
    set_task_cpu(p, new_cpu);
    @@ -1973,7 +1973,7 @@ int push_cpu_stop(void *arg)
    struct task_struct *p = arg;

    raw_spin_lock_irq(&p->pi_lock);
    - raw_spin_lock(&rq->lock);
    + raw_spin_lock(rq_lockp(rq));

    if (task_rq(p) != rq)
    goto out_unlock;
    @@ -2003,7 +2003,7 @@ int push_cpu_stop(void *arg)

    out_unlock:
    rq->push_busy = false;
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));
    raw_spin_unlock_irq(&p->pi_lock);

    put_task_struct(p);
    @@ -2056,7 +2056,7 @@ __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32
    * Because __kthread_bind() calls this on blocked tasks without
    * holding rq->lock.
    */
    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));
    dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
    }
    if (running)
    @@ -2395,7 +2395,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
    * task_rq_lock().
    */
    WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
    - lockdep_is_held(&task_rq(p)->lock)));
    + lockdep_is_held(rq_lockp(task_rq(p)))));
    #endif
    /*
    * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
    @@ -2941,7 +2941,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
    {
    int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;

    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));

    if (p->sched_contributes_to_load)
    rq->nr_uninterruptible--;
    @@ -3941,7 +3941,7 @@ static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
    void (*func)(struct rq *rq);
    struct callback_head *next;

    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));

    while (head) {
    func = (void (*)(struct rq *))head->func;
    @@ -3957,7 +3957,7 @@ static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
    {
    struct callback_head *head = rq->balance_callback;

    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));
    if (head) {
    rq->balance_callback = NULL;
    rq->balance_flags &= ~BALANCE_WORK;
    @@ -3976,9 +3976,9 @@ static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
    unsigned long flags;

    if (unlikely(head)) {
    - raw_spin_lock_irqsave(&rq->lock, flags);
    + raw_spin_lock_irqsave(rq_lockp(rq), flags);
    do_balance_callbacks(rq, head);
    - raw_spin_unlock_irqrestore(&rq->lock, flags);
    + raw_spin_unlock_irqrestore(rq_lockp(rq), flags);
    }
    }

    @@ -4028,10 +4028,10 @@ prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf
    * do an early lockdep release here:
    */
    rq_unpin_lock(rq, rf);
    - spin_release(&rq->lock.dep_map, _THIS_IP_);
    + spin_release(&rq_lockp(rq)->dep_map, _THIS_IP_);
    #ifdef CONFIG_DEBUG_SPINLOCK
    /* this is a valid case when another task releases the spinlock */
    - rq->lock.owner = next;
    + rq_lockp(rq)->owner = next;
    #endif
    }

    @@ -4042,9 +4042,9 @@ static inline void finish_lock_switch(struct rq *rq)
    * fix up the runqueue lock - which gets 'carried over' from
    * prev into current:
    */
    - spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
    + spin_acquire(&rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
    balance_switch(rq);
    - raw_spin_unlock_irq(&rq->lock);
    + raw_spin_unlock_irq(rq_lockp(rq));
    }

    /*
    @@ -5023,7 +5023,7 @@ static void __sched notrace __schedule(bool preempt)

    rq_unpin_lock(rq, &rf);
    __balance_callbacks(rq);
    - raw_spin_unlock_irq(&rq->lock);
    + raw_spin_unlock_irq(rq_lockp(rq));
    }
    }

    @@ -5438,7 +5438,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)

    rq_unpin_lock(rq, &rf);
    __balance_callbacks(rq);
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));

    preempt_enable();
    }
    @@ -7020,7 +7020,7 @@ void init_idle(struct task_struct *idle, int cpu)
    __sched_fork(0, idle);

    raw_spin_lock_irqsave(&idle->pi_lock, flags);
    - raw_spin_lock(&rq->lock);
    + raw_spin_lock(rq_lockp(rq));

    idle->state = TASK_RUNNING;
    idle->se.exec_start = sched_clock();
    @@ -7058,7 +7058,7 @@ void init_idle(struct task_struct *idle, int cpu)
    #ifdef CONFIG_SMP
    idle->on_cpu = 1;
    #endif
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));
    raw_spin_unlock_irqrestore(&idle->pi_lock, flags);

    /* Set the preempt count _outside_ the spinlocks! */
    @@ -7221,7 +7221,7 @@ static void balance_push(struct rq *rq)
    {
    struct task_struct *push_task = rq->curr;

    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));
    SCHED_WARN_ON(rq->cpu != smp_processor_id());

    /*
    @@ -7242,9 +7242,9 @@ static void balance_push(struct rq *rq)
    */
    if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
    rcuwait_active(&rq->hotplug_wait)) {
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));
    rcuwait_wake_up(&rq->hotplug_wait);
    - raw_spin_lock(&rq->lock);
    + raw_spin_lock(rq_lockp(rq));
    }
    return;
    }
    @@ -7254,7 +7254,7 @@ static void balance_push(struct rq *rq)
    * Temporarily drop rq->lock such that we can wake-up the stop task.
    * Both preemption and IRQs are still disabled.
    */
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));
    stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
    this_cpu_ptr(&push_work));
    /*
    @@ -7262,7 +7262,7 @@ static void balance_push(struct rq *rq)
    * schedule(). The next pick is obviously going to be the stop task
    * which is_per_cpu_kthread() and will push this task away.
    */
    - raw_spin_lock(&rq->lock);
    + raw_spin_lock(rq_lockp(rq));
    }

    static void balance_push_set(int cpu, bool on)
    @@ -7682,7 +7682,7 @@ void __init sched_init(void)
    struct rq *rq;

    rq = cpu_rq(i);
    - raw_spin_lock_init(&rq->lock);
    + raw_spin_lock_init(&rq->__lock);
    rq->nr_running = 0;
    rq->calc_load_active = 0;
    rq->calc_load_update = jiffies + LOAD_FREQ;
    diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
    index 941c28cf9738..38c1a68e91f0 100644
    --- a/kernel/sched/cpuacct.c
    +++ b/kernel/sched/cpuacct.c
    @@ -112,7 +112,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
    /*
    * Take rq->lock to make 64-bit read safe on 32-bit platforms.
    */
    - raw_spin_lock_irq(&cpu_rq(cpu)->lock);
    + raw_spin_lock_irq(rq_lockp(cpu_rq(cpu)));
    #endif

    if (index == CPUACCT_STAT_NSTATS) {
    @@ -126,7 +126,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
    }

    #ifndef CONFIG_64BIT
    - raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
    + raw_spin_unlock_irq(rq_lockp(cpu_rq(cpu)));
    #endif

    return data;
    @@ -141,14 +141,14 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
    /*
    * Take rq->lock to make 64-bit write safe on 32-bit platforms.
    */
    - raw_spin_lock_irq(&cpu_rq(cpu)->lock);
    + raw_spin_lock_irq(rq_lockp(cpu_rq(cpu)));
    #endif

    for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
    cpuusage->usages[i] = val;

    #ifndef CONFIG_64BIT
    - raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
    + raw_spin_unlock_irq(rq_lockp(cpu_rq(cpu)));
    #endif
    }

    @@ -253,13 +253,13 @@ static int cpuacct_all_seq_show(struct seq_file *m, void *V)
    * Take rq->lock to make 64-bit read safe on 32-bit
    * platforms.
    */
    - raw_spin_lock_irq(&cpu_rq(cpu)->lock);
    + raw_spin_lock_irq(rq_lockp(cpu_rq(cpu)));
    #endif

    seq_printf(m, " %llu", cpuusage->usages[index]);

    #ifndef CONFIG_64BIT
    - raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
    + raw_spin_unlock_irq(rq_lockp(cpu_rq(cpu)));
    #endif
    }
    seq_puts(m, "\n");
    diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
    index 2a5836f440e0..0f2ea0a3664c 100644
    --- a/kernel/sched/deadline.c
    +++ b/kernel/sched/deadline.c
    @@ -135,7 +135,7 @@ void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
    {
    u64 old = dl_rq->running_bw;

    - lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
    + lockdep_assert_held(rq_lockp(rq_of_dl_rq(dl_rq)));
    dl_rq->running_bw += dl_bw;
    SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
    SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
    @@ -148,7 +148,7 @@ void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
    {
    u64 old = dl_rq->running_bw;

    - lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
    + lockdep_assert_held(rq_lockp(rq_of_dl_rq(dl_rq)));
    dl_rq->running_bw -= dl_bw;
    SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
    if (dl_rq->running_bw > old)
    @@ -162,7 +162,7 @@ void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
    {
    u64 old = dl_rq->this_bw;

    - lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
    + lockdep_assert_held(rq_lockp(rq_of_dl_rq(dl_rq)));
    dl_rq->this_bw += dl_bw;
    SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
    }
    @@ -172,7 +172,7 @@ void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
    {
    u64 old = dl_rq->this_bw;

    - lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
    + lockdep_assert_held(rq_lockp(rq_of_dl_rq(dl_rq)));
    dl_rq->this_bw -= dl_bw;
    SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
    if (dl_rq->this_bw > old)
    @@ -982,7 +982,7 @@ static int start_dl_timer(struct task_struct *p)
    ktime_t now, act;
    s64 delta;

    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));

    /*
    * We want the timer to fire at the deadline, but considering
    @@ -1092,9 +1092,9 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
    * If the runqueue is no longer available, migrate the
    * task elsewhere. This necessarily changes rq.
    */
    - lockdep_unpin_lock(&rq->lock, rf.cookie);
    + lockdep_unpin_lock(rq_lockp(rq), rf.cookie);
    rq = dl_task_offline_migration(rq, p);
    - rf.cookie = lockdep_pin_lock(&rq->lock);
    + rf.cookie = lockdep_pin_lock(rq_lockp(rq));
    update_rq_clock(rq);

    /*
    @@ -1746,7 +1746,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
    * from try_to_wake_up(). Hence, p->pi_lock is locked, but
    * rq->lock is not... So, lock it
    */
    - raw_spin_lock(&rq->lock);
    + raw_spin_lock(rq_lockp(rq));
    if (p->dl.dl_non_contending) {
    sub_running_bw(&p->dl, &rq->dl);
    p->dl.dl_non_contending = 0;
    @@ -1761,7 +1761,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
    put_task_struct(p);
    }
    sub_rq_bw(&p->dl, &rq->dl);
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));
    }

    static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
    @@ -2306,10 +2306,10 @@ static void pull_dl_task(struct rq *this_rq)
    double_unlock_balance(this_rq, src_rq);

    if (push_task) {
    - raw_spin_unlock(&this_rq->lock);
    + raw_spin_unlock(rq_lockp(this_rq));
    stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
    push_task, &src_rq->push_work);
    - raw_spin_lock(&this_rq->lock);
    + raw_spin_lock(rq_lockp(this_rq));
    }
    }

    diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
    index 2357921580f9..60a922d3f46f 100644
    --- a/kernel/sched/debug.c
    +++ b/kernel/sched/debug.c
    @@ -551,7 +551,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
    SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
    SPLIT_NS(cfs_rq->exec_clock));

    - raw_spin_lock_irqsave(&rq->lock, flags);
    + raw_spin_lock_irqsave(rq_lockp(rq), flags);
    if (rb_first_cached(&cfs_rq->tasks_timeline))
    MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
    last = __pick_last_entity(cfs_rq);
    @@ -559,7 +559,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
    max_vruntime = last->vruntime;
    min_vruntime = cfs_rq->min_vruntime;
    rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
    - raw_spin_unlock_irqrestore(&rq->lock, flags);
    + raw_spin_unlock_irqrestore(rq_lockp(rq), flags);
    SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
    SPLIT_NS(MIN_vruntime));
    SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
    diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
    index 27536f37ba1a..52ddfec7cea6 100644
    --- a/kernel/sched/fair.c
    +++ b/kernel/sched/fair.c
    @@ -1110,7 +1110,7 @@ struct numa_group {
    static struct numa_group *deref_task_numa_group(struct task_struct *p)
    {
    return rcu_dereference_check(p->numa_group, p == current ||
    - (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
    + (lockdep_is_held(rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
    }

    static struct numa_group *deref_curr_numa_group(struct task_struct *p)
    @@ -5309,7 +5309,7 @@ static void __maybe_unused update_runtime_enabled(struct rq *rq)
    {
    struct task_group *tg;

    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));

    rcu_read_lock();
    list_for_each_entry_rcu(tg, &task_groups, list) {
    @@ -5328,7 +5328,7 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
    {
    struct task_group *tg;

    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));

    rcu_read_lock();
    list_for_each_entry_rcu(tg, &task_groups, list) {
    @@ -6813,7 +6813,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
    * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
    * rq->lock and can modify state directly.
    */
    - lockdep_assert_held(&task_rq(p)->lock);
    + lockdep_assert_held(rq_lockp(task_rq(p)));
    detach_entity_cfs_rq(&p->se);

    } else {
    @@ -7441,7 +7441,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
    {
    s64 delta;

    - lockdep_assert_held(&env->src_rq->lock);
    + lockdep_assert_held(rq_lockp(env->src_rq));

    if (p->sched_class != &fair_sched_class)
    return 0;
    @@ -7539,7 +7539,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
    {
    int tsk_cache_hot;

    - lockdep_assert_held(&env->src_rq->lock);
    + lockdep_assert_held(rq_lockp(env->src_rq));

    /*
    * We do not migrate tasks that are:
    @@ -7617,7 +7617,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
    */
    static void detach_task(struct task_struct *p, struct lb_env *env)
    {
    - lockdep_assert_held(&env->src_rq->lock);
    + lockdep_assert_held(rq_lockp(env->src_rq));

    deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
    set_task_cpu(p, env->dst_cpu);
    @@ -7633,7 +7633,7 @@ static struct task_struct *detach_one_task(struct lb_env *env)
    {
    struct task_struct *p;

    - lockdep_assert_held(&env->src_rq->lock);
    + lockdep_assert_held(rq_lockp(env->src_rq));

    list_for_each_entry_reverse(p,
    &env->src_rq->cfs_tasks, se.group_node) {
    @@ -7669,7 +7669,7 @@ static int detach_tasks(struct lb_env *env)
    struct task_struct *p;
    int detached = 0;

    - lockdep_assert_held(&env->src_rq->lock);
    + lockdep_assert_held(rq_lockp(env->src_rq));

    if (env->imbalance <= 0)
    return 0;
    @@ -7791,7 +7791,7 @@ static int detach_tasks(struct lb_env *env)
    */
    static void attach_task(struct rq *rq, struct task_struct *p)
    {
    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));

    BUG_ON(task_rq(p) != rq);
    activate_task(rq, p, ENQUEUE_NOCLOCK);
    @@ -9726,7 +9726,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
    if (need_active_balance(&env)) {
    unsigned long flags;

    - raw_spin_lock_irqsave(&busiest->lock, flags);
    + raw_spin_lock_irqsave(rq_lockp(busiest), flags);

    /*
    * Don't kick the active_load_balance_cpu_stop,
    @@ -9734,7 +9734,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
    * moved to this_cpu:
    */
    if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
    - raw_spin_unlock_irqrestore(&busiest->lock,
    + raw_spin_unlock_irqrestore(rq_lockp(busiest),
    flags);
    env.flags |= LBF_ALL_PINNED;
    goto out_one_pinned;
    @@ -9750,7 +9750,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
    busiest->push_cpu = this_cpu;
    active_balance = 1;
    }
    - raw_spin_unlock_irqrestore(&busiest->lock, flags);
    + raw_spin_unlock_irqrestore(rq_lockp(busiest), flags);

    if (active_balance) {
    stop_one_cpu_nowait(cpu_of(busiest),
    @@ -10506,7 +10506,7 @@ static void nohz_newidle_balance(struct rq *this_rq)
    time_before(jiffies, READ_ONCE(nohz.next_blocked)))
    return;

    - raw_spin_unlock(&this_rq->lock);
    + raw_spin_unlock(rq_lockp(this_rq));
    /*
    * This CPU is going to be idle and blocked load of idle CPUs
    * need to be updated. Run the ilb locally as it is a good
    @@ -10515,7 +10515,7 @@ static void nohz_newidle_balance(struct rq *this_rq)
    */
    if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
    kick_ilb(NOHZ_STATS_KICK);
    - raw_spin_lock(&this_rq->lock);
    + raw_spin_lock(rq_lockp(this_rq));
    }

    #else /* !CONFIG_NO_HZ_COMMON */
    @@ -10581,7 +10581,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
    goto out;
    }

    - raw_spin_unlock(&this_rq->lock);
    + raw_spin_unlock(rq_lockp(this_rq));

    update_blocked_averages(this_cpu);
    rcu_read_lock();
    @@ -10619,7 +10619,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
    }
    rcu_read_unlock();

    - raw_spin_lock(&this_rq->lock);
    + raw_spin_lock(rq_lockp(this_rq));

    if (curr_cost > this_rq->max_idle_balance_cost)
    this_rq->max_idle_balance_cost = curr_cost;
    @@ -11095,9 +11095,9 @@ void unregister_fair_sched_group(struct task_group *tg)

    rq = cpu_rq(cpu);

    - raw_spin_lock_irqsave(&rq->lock, flags);
    + raw_spin_lock_irqsave(rq_lockp(rq), flags);
    list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
    - raw_spin_unlock_irqrestore(&rq->lock, flags);
    + raw_spin_unlock_irqrestore(rq_lockp(rq), flags);
    }
    }

    diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
    index df91b198a74c..50e128b899c4 100644
    --- a/kernel/sched/idle.c
    +++ b/kernel/sched/idle.c
    @@ -422,10 +422,10 @@ struct task_struct *pick_next_task_idle(struct rq *rq)
    static void
    dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
    {
    - raw_spin_unlock_irq(&rq->lock);
    + raw_spin_unlock_irq(rq_lockp(rq));
    printk(KERN_ERR "bad: scheduling from the idle thread!\n");
    dump_stack();
    - raw_spin_lock_irq(&rq->lock);
    + raw_spin_lock_irq(rq_lockp(rq));
    }

    /*
    diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
    index 795e43e02afc..e850bd71a8ce 100644
    --- a/kernel/sched/pelt.h
    +++ b/kernel/sched/pelt.h
    @@ -141,7 +141,7 @@ static inline void update_idle_rq_clock_pelt(struct rq *rq)

    static inline u64 rq_clock_pelt(struct rq *rq)
    {
    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));
    assert_clock_updated(rq);

    return rq->clock_pelt - rq->lost_idle_time;
    diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
    index dbe4629cf7ba..a6f9d132c24f 100644
    --- a/kernel/sched/rt.c
    +++ b/kernel/sched/rt.c
    @@ -888,7 +888,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
    if (skip)
    continue;

    - raw_spin_lock(&rq->lock);
    + raw_spin_lock(rq_lockp(rq));
    update_rq_clock(rq);

    if (rt_rq->rt_time) {
    @@ -926,7 +926,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)

    if (enqueue)
    sched_rt_rq_enqueue(rt_rq);
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));
    }

    if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
    @@ -1894,10 +1894,10 @@ static int push_rt_task(struct rq *rq, bool pull)
    */
    push_task = get_push_task(rq);
    if (push_task) {
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));
    stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
    push_task, &rq->push_work);
    - raw_spin_lock(&rq->lock);
    + raw_spin_lock(rq_lockp(rq));
    }

    return 0;
    @@ -2122,10 +2122,10 @@ void rto_push_irq_work_func(struct irq_work *work)
    * When it gets updated, a check is made if a push is possible.
    */
    if (has_pushable_tasks(rq)) {
    - raw_spin_lock(&rq->lock);
    + raw_spin_lock(rq_lockp(rq));
    while (push_rt_task(rq, true))
    ;
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));
    }

    raw_spin_lock(&rd->rto_lock);
    @@ -2243,10 +2243,10 @@ static void pull_rt_task(struct rq *this_rq)
    double_unlock_balance(this_rq, src_rq);

    if (push_task) {
    - raw_spin_unlock(&this_rq->lock);
    + raw_spin_unlock(rq_lockp(this_rq));
    stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
    push_task, &src_rq->push_work);
    - raw_spin_lock(&this_rq->lock);
    + raw_spin_lock(rq_lockp(this_rq));
    }
    }

    diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
    index 590e6f27068c..f794c9337047 100644
    --- a/kernel/sched/sched.h
    +++ b/kernel/sched/sched.h
    @@ -896,7 +896,7 @@ DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
    */
    struct rq {
    /* runqueue lock: */
    - raw_spinlock_t lock;
    + raw_spinlock_t __lock;

    /*
    * nr_running and cpu_load should be in the same cacheline because
    @@ -1099,6 +1099,11 @@ static inline bool is_migration_disabled(struct task_struct *p)
    #endif
    }

    +static inline raw_spinlock_t *rq_lockp(struct rq *rq)
    +{
    + return &rq->__lock;
    +}
    +
    #ifdef CONFIG_SCHED_SMT
    extern void __update_idle_core(struct rq *rq);

    @@ -1165,7 +1170,7 @@ static inline void assert_clock_updated(struct rq *rq)

    static inline u64 rq_clock(struct rq *rq)
    {
    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));
    assert_clock_updated(rq);

    return rq->clock;
    @@ -1173,7 +1178,7 @@ static inline u64 rq_clock(struct rq *rq)

    static inline u64 rq_clock_task(struct rq *rq)
    {
    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));
    assert_clock_updated(rq);

    return rq->clock_task;
    @@ -1199,7 +1204,7 @@ static inline u64 rq_clock_thermal(struct rq *rq)

    static inline void rq_clock_skip_update(struct rq *rq)
    {
    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));
    rq->clock_update_flags |= RQCF_REQ_SKIP;
    }

    @@ -1209,7 +1214,7 @@ static inline void rq_clock_skip_update(struct rq *rq)
    */
    static inline void rq_clock_cancel_skipupdate(struct rq *rq)
    {
    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));
    rq->clock_update_flags &= ~RQCF_REQ_SKIP;
    }

    @@ -1238,7 +1243,7 @@ struct rq_flags {
    */
    static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
    {
    - rf->cookie = lockdep_pin_lock(&rq->lock);
    + rf->cookie = lockdep_pin_lock(rq_lockp(rq));

    #ifdef CONFIG_SCHED_DEBUG
    rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
    @@ -1256,12 +1261,12 @@ static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
    rf->clock_update_flags = RQCF_UPDATED;
    #endif

    - lockdep_unpin_lock(&rq->lock, rf->cookie);
    + lockdep_unpin_lock(rq_lockp(rq), rf->cookie);
    }

    static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
    {
    - lockdep_repin_lock(&rq->lock, rf->cookie);
    + lockdep_repin_lock(rq_lockp(rq), rf->cookie);

    #ifdef CONFIG_SCHED_DEBUG
    /*
    @@ -1282,7 +1287,7 @@ static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
    __releases(rq->lock)
    {
    rq_unpin_lock(rq, rf);
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));
    }

    static inline void
    @@ -1291,7 +1296,7 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
    __releases(p->pi_lock)
    {
    rq_unpin_lock(rq, rf);
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));
    raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
    }

    @@ -1299,7 +1304,7 @@ static inline void
    rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
    __acquires(rq->lock)
    {
    - raw_spin_lock_irqsave(&rq->lock, rf->flags);
    + raw_spin_lock_irqsave(rq_lockp(rq), rf->flags);
    rq_pin_lock(rq, rf);
    }

    @@ -1307,7 +1312,7 @@ static inline void
    rq_lock_irq(struct rq *rq, struct rq_flags *rf)
    __acquires(rq->lock)
    {
    - raw_spin_lock_irq(&rq->lock);
    + raw_spin_lock_irq(rq_lockp(rq));
    rq_pin_lock(rq, rf);
    }

    @@ -1315,7 +1320,7 @@ static inline void
    rq_lock(struct rq *rq, struct rq_flags *rf)
    __acquires(rq->lock)
    {
    - raw_spin_lock(&rq->lock);
    + raw_spin_lock(rq_lockp(rq));
    rq_pin_lock(rq, rf);
    }

    @@ -1323,7 +1328,7 @@ static inline void
    rq_relock(struct rq *rq, struct rq_flags *rf)
    __acquires(rq->lock)
    {
    - raw_spin_lock(&rq->lock);
    + raw_spin_lock(rq_lockp(rq));
    rq_repin_lock(rq, rf);
    }

    @@ -1332,7 +1337,7 @@ rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
    __releases(rq->lock)
    {
    rq_unpin_lock(rq, rf);
    - raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
    + raw_spin_unlock_irqrestore(rq_lockp(rq), rf->flags);
    }

    static inline void
    @@ -1340,7 +1345,7 @@ rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
    __releases(rq->lock)
    {
    rq_unpin_lock(rq, rf);
    - raw_spin_unlock_irq(&rq->lock);
    + raw_spin_unlock_irq(rq_lockp(rq));
    }

    static inline void
    @@ -1348,7 +1353,7 @@ rq_unlock(struct rq *rq, struct rq_flags *rf)
    __releases(rq->lock)
    {
    rq_unpin_lock(rq, rf);
    - raw_spin_unlock(&rq->lock);
    + raw_spin_unlock(rq_lockp(rq));
    }

    static inline struct rq *
    @@ -1416,7 +1421,7 @@ queue_balance_callback(struct rq *rq,
    struct callback_head *head,
    void (*func)(struct rq *rq))
    {
    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));

    if (unlikely(head->next || (rq->balance_flags & BALANCE_PUSH)))
    return;
    @@ -1959,7 +1964,7 @@ static inline struct task_struct *get_push_task(struct rq *rq)
    {
    struct task_struct *p = rq->curr;

    - lockdep_assert_held(&rq->lock);
    + lockdep_assert_held(rq_lockp(rq));

    if (rq->push_busy)
    return NULL;
    @@ -2167,7 +2172,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
    __acquires(busiest->lock)
    __acquires(this_rq->lock)
    {
    - raw_spin_unlock(&this_rq->lock);
    + raw_spin_unlock(rq_lockp(this_rq));
    double_rq_lock(this_rq, busiest);

    return 1;
    @@ -2186,20 +2191,22 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
    __acquires(busiest->lock)
    __acquires(this_rq->lock)
    {
    - int ret = 0;
    -
    - if (unlikely(!raw_spin_trylock(&busiest->lock))) {
    - if (busiest < this_rq) {
    - raw_spin_unlock(&this_rq->lock);
    - raw_spin_lock(&busiest->lock);
    - raw_spin_lock_nested(&this_rq->lock,
    - SINGLE_DEPTH_NESTING);
    - ret = 1;
    - } else
    - raw_spin_lock_nested(&busiest->lock,
    - SINGLE_DEPTH_NESTING);
    + if (rq_lockp(this_rq) == rq_lockp(busiest))
    + return 0;
    +
    + if (likely(raw_spin_trylock(rq_lockp(busiest))))
    + return 0;
    +
    + if (rq_lockp(busiest) >= rq_lockp(this_rq)) {
    + raw_spin_lock_nested(rq_lockp(busiest), SINGLE_DEPTH_NESTING);
    + return 0;
    }
    - return ret;
    +
    + raw_spin_unlock(rq_lockp(this_rq));
    + raw_spin_lock(rq_lockp(busiest));
    + raw_spin_lock_nested(rq_lockp(this_rq), SINGLE_DEPTH_NESTING);
    +
    + return 1;
    }

    #endif /* CONFIG_PREEMPTION */
    @@ -2209,11 +2216,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
    */
    static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
    {
    - if (unlikely(!irqs_disabled())) {
    - /* printk() doesn't work well under rq->lock */
    - raw_spin_unlock(&this_rq->lock);
    - BUG_ON(1);
    - }
    + lockdep_assert_irqs_disabled();

    return _double_lock_balance(this_rq, busiest);
    }
    @@ -2221,8 +2224,9 @@ static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
    static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
    __releases(busiest->lock)
    {
    - raw_spin_unlock(&busiest->lock);
    - lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
    + if (rq_lockp(this_rq) != rq_lockp(busiest))
    + raw_spin_unlock(rq_lockp(busiest));
    + lock_set_subclass(&rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
    }

    static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
    @@ -2263,16 +2267,16 @@ static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
    __acquires(rq2->lock)
    {
    BUG_ON(!irqs_disabled());
    - if (rq1 == rq2) {
    - raw_spin_lock(&rq1->lock);
    + if (rq_lockp(rq1) == rq_lockp(rq2)) {
    + raw_spin_lock(rq_lockp(rq1));
    __acquire(rq2->lock); /* Fake it out ;) */
    } else {
    - if (rq1 < rq2) {
    - raw_spin_lock(&rq1->lock);
    - raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
    + if (rq_lockp(rq1) < rq_lockp(rq2)) {
    + raw_spin_lock(rq_lockp(rq1));
    + raw_spin_lock_nested(rq_lockp(rq2), SINGLE_DEPTH_NESTING);
    } else {
    - raw_spin_lock(&rq2->lock);
    - raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
    + raw_spin_lock(rq_lockp(rq2));
    + raw_spin_lock_nested(rq_lockp(rq1), SINGLE_DEPTH_NESTING);
    }
    }
    }
    @@ -2287,9 +2291,9 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
    __releases(rq1->lock)
    __releases(rq2->lock)
    {
    - raw_spin_unlock(&rq1->lock);
    - if (rq1 != rq2)
    - raw_spin_unlock(&rq2->lock);
    + raw_spin_unlock(rq_lockp(rq1));
    + if (rq_lockp(rq1) != rq_lockp(rq2))
    + raw_spin_unlock(rq_lockp(rq2));
    else
    __release(rq2->lock);
    }
    @@ -2312,7 +2316,7 @@ static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
    {
    BUG_ON(!irqs_disabled());
    BUG_ON(rq1 != rq2);
    - raw_spin_lock(&rq1->lock);
    + raw_spin_lock(rq_lockp(rq1));
    __acquire(rq2->lock); /* Fake it out ;) */
    }

    @@ -2327,7 +2331,7 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
    __releases(rq2->lock)
    {
    BUG_ON(rq1 != rq2);
    - raw_spin_unlock(&rq1->lock);
    + raw_spin_unlock(rq_lockp(rq1));
    __release(rq2->lock);
    }

    diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
    index 90f3e5558fa2..82924db74ccb 100644
    --- a/kernel/sched/topology.c
    +++ b/kernel/sched/topology.c
    @@ -454,7 +454,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
    struct root_domain *old_rd = NULL;
    unsigned long flags;

    - raw_spin_lock_irqsave(&rq->lock, flags);
    + raw_spin_lock_irqsave(rq_lockp(rq), flags);

    if (rq->rd) {
    old_rd = rq->rd;
    @@ -480,7 +480,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
    if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
    set_rq_online(rq);

    - raw_spin_unlock_irqrestore(&rq->lock, flags);
    + raw_spin_unlock_irqrestore(rq_lockp(rq), flags);

    if (old_rd)
    call_rcu(&old_rd->rcu, free_rootdomain);
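
    (Behaviour should be unchanged by this patch: rq_lockp() is a
    static inline that simply returns &rq->__lock, so the compiler
    should generate the same code as before. The new early-out in
    _double_lock_balance() for rq_lockp(this_rq) == rq_lockp(busiest)
    is unreachable for now, since every rq still owns a private lock,
    but it becomes meaningful once the accessor can return a lock
    shared between runqueues.)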
    --
    2.29.2.299.gdc1121823c-goog