From:    Aubrey Li <>
Date:    Sat, 8 May 2021 16:07:35 +0800
Subject: Re: [PATCH v2 04/19] sched: Prepare for Core-wide rq->lock
On Fri, May 7, 2021 at 8:34 PM Peter Zijlstra <peterz@infradead.org> wrote:
>
> When switching on core-sched, CPUs need to agree which lock to use for
> their RQ.
>
> The new rule will be that rq->core_enabled will be toggled while
> holding all rq->__locks that belong to a core. This means we need to
> double check the rq->core_enabled value after each lock acquire and
> retry if it changed.
>
> This also has implications for those sites that take multiple RQ
> locks, they need to be careful that the second lock doesn't end up
> being the first lock.
>
> Verify the lock pointer after acquiring the first lock, because if
> they're on the same core, holding any of the rq->__lock instances will
> pin the core state.
>
> While there, change the rq->__lock order to CPU number, instead of rq
> address, this greatly simplifies the next patch.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> Tested-by: Don Hiatt <dhiatt@digitalocean.com>
> Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
> ---
>  kernel/sched/core.c  | 48 ++++++++++++++++++++++++++++++++++++++++++++++--
>  kernel/sched/sched.h | 48 +++++++++++++++++-------------------------------
>  2 files changed, 63 insertions(+), 33 deletions(-)
>
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -186,12 +186,37 @@ int sysctl_sched_rt_runtime = 950000;
>
>  void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
>  {
> -       raw_spin_lock_nested(rq_lockp(rq), subclass);
> +       raw_spinlock_t *lock;
> +
> +       if (sched_core_disabled()) {
> +               raw_spin_lock_nested(&rq->__lock, subclass);
> +               return;
> +       }
> +
> +       for (;;) {
> +               lock = rq_lockp(rq);
> +               raw_spin_lock_nested(lock, subclass);
> +               if (likely(lock == rq_lockp(rq)))
> +                       return;
> +               raw_spin_unlock(lock);
> +       }
>  }
>
>  bool raw_spin_rq_trylock(struct rq *rq)
>  {
> -       return raw_spin_trylock(rq_lockp(rq));
> +       raw_spinlock_t *lock;
> +       bool ret;
> +
> +       if (sched_core_disabled())
> +               return raw_spin_trylock(&rq->__lock);
> +
> +       for (;;) {
> +               lock = rq_lockp(rq);
> +               ret = raw_spin_trylock(lock);
> +               if (!ret || (likely(lock == rq_lockp(rq))))
> +                       return ret;
> +               raw_spin_unlock(lock);
> +       }
>  }
>
>  void raw_spin_rq_unlock(struct rq *rq)
> @@ -199,6 +224,25 @@ void raw_spin_rq_unlock(struct rq *rq)
>         raw_spin_unlock(rq_lockp(rq));
>  }
>
> +#ifdef CONFIG_SMP
> +/*
> + * double_rq_lock - safely lock two runqueues
> + */
> +void double_rq_lock(struct rq *rq1, struct rq *rq2)
Do we need the static lock checking here?

        __acquires(rq1->lock)
        __acquires(rq2->lock)

(A sketch of what I mean follows after the diff.)
> +{
> +       lockdep_assert_irqs_disabled();
> +
> +       if (rq_order_less(rq2, rq1))
> +               swap(rq1, rq2);
> +
> +       raw_spin_rq_lock(rq1);
> +       if (rq_lockp(rq1) == rq_lockp(rq2)) {
And here?

                __acquire(rq2->lock);
> +               return;
> +       }
> +
> +       raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
> +}
> +#endif
> +
>  /*
>   * __task_rq_lock - lock the rq @p resides on.
>   */
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1113,6 +1113,11 @@ static inline bool is_migration_disabled
>  #endif
>  }
>
> +static inline bool sched_core_disabled(void)
> +{
> +       return true;
> +}
> +
>  static inline raw_spinlock_t *rq_lockp(struct rq *rq)
>  {
>         return &rq->__lock;
> @@ -2231,10 +2236,17 @@ unsigned long arch_scale_freq_capacity(i
>  }
>  #endif
>
> +
>  #ifdef CONFIG_SMP
> -#ifdef CONFIG_PREEMPTION
>
> -static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
> +static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
> +{
> +       return rq1->cpu < rq2->cpu;
> +}
> +
> +extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
> +
> +#ifdef CONFIG_PREEMPTION
>
>  /*
>   * fair double_lock_balance: Safely acquires both rq->locks in a fair
> @@ -2274,14 +2286,13 @@ static inline int _double_lock_balance(s
>         if (likely(raw_spin_rq_trylock(busiest)))
>                 return 0;
>
> -       if (rq_lockp(busiest) >= rq_lockp(this_rq)) {
> +       if (rq_order_less(this_rq, busiest)) {
>                 raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
>                 return 0;
>         }
>
>         raw_spin_rq_unlock(this_rq);
> -       raw_spin_rq_lock(busiest);
> -       raw_spin_rq_lock_nested(this_rq, SINGLE_DEPTH_NESTING);
> +       double_rq_lock(this_rq, busiest);
>
>         return 1;
>  }
> @@ -2334,31 +2345,6 @@ static inline void double_raw_lock(raw_s
>  }
>
>  /*
> - * double_rq_lock - safely lock two runqueues
> - *
> - * Note this does not disable interrupts like task_rq_lock,
> - * you need to do so manually before calling.
> - */
> -static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
> -       __acquires(rq1->lock)
> -       __acquires(rq2->lock)
> -{
> -       BUG_ON(!irqs_disabled());
> -       if (rq_lockp(rq1) == rq_lockp(rq2)) {
> -               raw_spin_rq_lock(rq1);
> -               __acquire(rq2->lock);   /* Fake it out ;) */
> -       } else {
> -               if (rq_lockp(rq1) < rq_lockp(rq2)) {
> -                       raw_spin_rq_lock(rq1);
> -                       raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
> -               } else {
> -                       raw_spin_rq_lock(rq2);
> -                       raw_spin_rq_lock_nested(rq1, SINGLE_DEPTH_NESTING);
> -               }
> -       }
> -}
> -
> -/*
>   * double_rq_unlock - safely unlock two runqueues
>   *
>   * Note this does not restore interrupts like task_rq_unlock,
> @@ -2368,11 +2354,11 @@ static inline void double_rq_unlock(stru
>         __releases(rq1->lock)
>         __releases(rq2->lock)
>  {
> -       raw_spin_rq_unlock(rq1);
>         if (rq_lockp(rq1) != rq_lockp(rq2))
>                 raw_spin_rq_unlock(rq2);
>         else
>                 __release(rq2->lock);
> +       raw_spin_rq_unlock(rq1);
This change seems unnecessary, as the softlockup's root cause is not the misordered lock release.
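
To make the two annotation comments above concrete, here is a rough, untested
sketch of how the new double_rq_lock() could carry the same sparse annotations
the removed inline version had (body taken from the hunk above, only the
annotations added):

void double_rq_lock(struct rq *rq1, struct rq *rq2)
        __acquires(rq1->lock)
        __acquires(rq2->lock)
{
        lockdep_assert_irqs_disabled();

        /* Take the lower-numbered CPU's rq first. */
        if (rq_order_less(rq2, rq1))
                swap(rq1, rq2);

        raw_spin_rq_lock(rq1);
        if (rq_lockp(rq1) == rq_lockp(rq2)) {
                /* Same lock instance: fake out sparse, as the old code did. */
                __acquire(rq2->lock);
                return;
        }

        raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
}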
Thanks,
-Aubrey