Date: Thu, 22 Apr 2021 14:05:05 +0200
From: Peter Zijlstra <>
Subject: [PATCH 06/19] sched: Optimize rq_lockp() usage
rq_lockp() includes a static_branch(), which is asm-goto, which is asm
volatile, which defeats regular CSE. This means that:

	if (!static_branch(&foo))
		return simple;

	if (static_branch(&foo) && cond)
		return complex;

doesn't fold and we get horrible code. Introduce __rq_lockp() without
the static_branch() on.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/sched/core.c     | 16 ++++++++--------
 kernel/sched/deadline.c |  4 ++--
 kernel/sched/fair.c     |  2 +-
 kernel/sched/sched.h    | 33 +++++++++++++++++++++++++--------
 4 files changed, 36 insertions(+), 19 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -281,9 +281,9 @@ void raw_spin_rq_lock_nested(struct rq *
 	}
 
 	for (;;) {
-		lock = rq_lockp(rq);
+		lock = __rq_lockp(rq);
 		raw_spin_lock_nested(lock, subclass);
-		if (likely(lock == rq_lockp(rq)))
+		if (likely(lock == __rq_lockp(rq)))
 			return;
 		raw_spin_unlock(lock);
 	}
@@ -298,9 +298,9 @@ bool raw_spin_rq_trylock(struct rq *rq)
 		return raw_spin_trylock(&rq->__lock);
 
 	for (;;) {
-		lock = rq_lockp(rq);
+		lock = __rq_lockp(rq);
 		ret = raw_spin_trylock(lock);
-		if (!ret || (likely(lock == rq_lockp(rq))))
+		if (!ret || (likely(lock == __rq_lockp(rq))))
 			return ret;
 		raw_spin_unlock(lock);
 	}
@@ -323,7 +323,7 @@ void double_rq_lock(struct rq *rq1, stru
 		swap(rq1, rq2);
 
 	raw_spin_rq_lock(rq1);
-	if (rq_lockp(rq1) == rq_lockp(rq2))
+	if (__rq_lockp(rq1) == __rq_lockp(rq2))
 		return;
 
 	raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
@@ -2594,7 +2594,7 @@ void set_task_cpu(struct task_struct *p,
 	 * task_rq_lock().
 	 */
 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
-				      lockdep_is_held(rq_lockp(task_rq(p)))));
+				      lockdep_is_held(__rq_lockp(task_rq(p)))));
 #endif
 	/*
 	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
@@ -4220,7 +4220,7 @@ prepare_lock_switch(struct rq *rq, struc
 	 * do an early lockdep release here:
 	 */
 	rq_unpin_lock(rq, rf);
-	spin_release(&rq_lockp(rq)->dep_map, _THIS_IP_);
+	spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
 	rq_lockp(rq)->owner = next;
@@ -4234,7 +4234,7 @@ static inline void finish_lock_switch(st
 	 * fix up the runqueue lock - which gets 'carried over' from
 	 * prev into current:
 	 */
-	spin_acquire(&rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
+	spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
 	__balance_callbacks(rq);
 	raw_spin_rq_unlock_irq(rq);
 }
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1097,9 +1097,9 @@ static enum hrtimer_restart dl_task_time
 		 * If the runqueue is no longer available, migrate the
 		 * task elsewhere. This necessarily changes rq.
 		 */
-		lockdep_unpin_lock(rq_lockp(rq), rf.cookie);
+		lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
 		rq = dl_task_offline_migration(rq, p);
-		rf.cookie = lockdep_pin_lock(rq_lockp(rq));
+		rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
 		update_rq_clock(rq);
 
 		/*
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1107,7 +1107,7 @@ struct numa_group {
 static struct numa_group *deref_task_numa_group(struct task_struct *p)
 {
 	return rcu_dereference_check(p->numa_group, p == current ||
-		(lockdep_is_held(rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
+		(lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
 }
 
 static struct numa_group *deref_curr_numa_group(struct task_struct *p)
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1127,6 +1127,10 @@ static inline bool sched_core_disabled(v
 	return !static_branch_unlikely(&__sched_core_enabled);
 }
 
+/*
+ * Be careful with this function; not for general use. The return value isn't
+ * stable unless you actually hold a relevant rq->__lock.
+ */
 static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 {
 	if (sched_core_enabled(rq))
@@ -1135,6 +1139,14 @@ static inline raw_spinlock_t *rq_lockp(s
 	return &rq->__lock;
 }
 
+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+{
+	if (rq->core_enabled)
+		return &rq->core->__lock;
+
+	return &rq->__lock;
+}
+
 #else /* !CONFIG_SCHED_CORE */
 
 static inline bool sched_core_enabled(struct rq *rq)
@@ -1152,11 +1164,16 @@ static inline raw_spinlock_t *rq_lockp(s
 	return &rq->__lock;
 }
 
+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+{
+	return &rq->__lock;
+}
+
 #endif /* CONFIG_SCHED_CORE */
 
 static inline void lockdep_assert_rq_held(struct rq *rq)
 {
-	lockdep_assert_held(rq_lockp(rq));
+	lockdep_assert_held(__rq_lockp(rq));
 }
 
 extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
@@ -1340,7 +1357,7 @@ extern struct callback_head balance_push
  */
 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
 {
-	rf->cookie = lockdep_pin_lock(rq_lockp(rq));
+	rf->cookie = lockdep_pin_lock(__rq_lockp(rq));
 
 #ifdef CONFIG_SCHED_DEBUG
 	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
@@ -1358,12 +1375,12 @@ static inline void rq_unpin_lock(struct
 		rf->clock_update_flags = RQCF_UPDATED;
 #endif
 
-	lockdep_unpin_lock(rq_lockp(rq), rf->cookie);
+	lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
 }
 
 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
 {
-	lockdep_repin_lock(rq_lockp(rq), rf->cookie);
+	lockdep_repin_lock(__rq_lockp(rq), rf->cookie);
 
 #ifdef CONFIG_SCHED_DEBUG
 	/*
@@ -2306,7 +2323,7 @@ static inline int _double_lock_balance(s
 	__acquires(busiest->lock)
 	__acquires(this_rq->lock)
 {
-	if (rq_lockp(this_rq) == rq_lockp(busiest))
+	if (__rq_lockp(this_rq) == __rq_lockp(busiest))
 		return 0;
 
 	if (likely(raw_spin_rq_trylock(busiest)))
@@ -2338,9 +2355,9 @@ static inline int double_lock_balance(st
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(busiest->lock)
 {
-	if (rq_lockp(this_rq) != rq_lockp(busiest))
+	if (__rq_lockp(this_rq) != __rq_lockp(busiest))
 		raw_spin_rq_unlock(busiest);
-	lock_set_subclass(&rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
+	lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
 }
 
 static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
@@ -2381,7 +2398,7 @@ static inline void double_rq_unlock(stru
 	__releases(rq2->lock)
 {
 	raw_spin_rq_unlock(rq1);
-	if (rq_lockp(rq1) != rq_lockp(rq2))
+	if (__rq_lockp(rq1) != __rq_lockp(rq2))
 		raw_spin_rq_unlock(rq2);
 	else
 		__release(rq2->lock);