Subject: Re: [PATCH 04/19] sched: Prepare for Core-wide rq->lock
On Fri, Apr 23, 2021 at 06:22:52PM -0700, Josh Don wrote:
> Hi Peter,
>
> > --- a/kernel/sched/core.c
> > +++ b/kernel/sched/core.c
> > @@ -186,12 +186,37 @@ int sysctl_sched_rt_runtime = 950000;
> >
> >  void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
> >  {
> > -	raw_spin_lock_nested(rq_lockp(rq), subclass);
> > +	raw_spinlock_t *lock;
> > +
> > +	if (sched_core_disabled()) {
>
> Nothing to stop sched_core from being enabled right here? Leading to
> us potentially taking the wrong lock.
>
> > +		raw_spin_lock_nested(&rq->__lock, subclass);
> > +		return;
> > +	}
> > +
> > +	for (;;) {
> > +		lock = rq_lockp(rq);
> > +		raw_spin_lock_nested(lock, subclass);
> > +		if (likely(lock == rq_lockp(rq)))
> > +			return;
> > +		raw_spin_unlock(lock);
> > +	}
> >  }

Very good; something like the below seems to be the best I can make of
it.

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f732642e3e09..1a81e9cc9e5d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -290,6 +290,10 @@ static void sched_core_assert_empty(void)
 static void __sched_core_enable(void)
 {
 	static_branch_enable(&__sched_core_enabled);
+	/*
+	 * Ensure raw_spin_rq_*lock*() have completed before flipping.
+	 */
+	synchronize_rcu();
 	__sched_core_flip(true);
 	sched_core_assert_empty();
 }
@@ -449,16 +453,22 @@ void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
 {
 	raw_spinlock_t *lock;
 
+	preempt_disable();
 	if (sched_core_disabled()) {
 		raw_spin_lock_nested(&rq->__lock, subclass);
+		/* preempt *MUST* still be disabled here */
+		preempt_enable_no_resched();
 		return;
 	}
 
 	for (;;) {
 		lock = __rq_lockp(rq);
 		raw_spin_lock_nested(lock, subclass);
-		if (likely(lock == __rq_lockp(rq)))
+		if (likely(lock == __rq_lockp(rq))) {
+			/* preempt *MUST* still be disabled here */
+			preempt_enable_no_resched();
 			return;
+		}
 		raw_spin_unlock(lock);
 	}
 }
@@ -468,14 +478,20 @@ bool raw_spin_rq_trylock(struct rq *rq)
 	raw_spinlock_t *lock;
 	bool ret;
 
-	if (sched_core_disabled())
-		return raw_spin_trylock(&rq->__lock);
+	preempt_disable();
+	if (sched_core_disabled()) {
+		ret = raw_spin_trylock(&rq->__lock);
+		preempt_enable();
+		return ret;
+	}
 
 	for (;;) {
 		lock = __rq_lockp(rq);
 		ret = raw_spin_trylock(lock);
-		if (!ret || (likely(lock == __rq_lockp(rq))))
+		if (!ret || (likely(lock == __rq_lockp(rq)))) {
+			preempt_enable();
 			return ret;
+		}
 		raw_spin_unlock(lock);
 	}
 }
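
The trick, in short: preempt_disable() turns the check-plus-acquire into a
single non-preemptible (RCU read-side) region, so the synchronize_rcu()
added to __sched_core_enable() cannot return while any CPU might still act
on a stale sched_core_disabled() == true; everyone who starts after that
sees the static branch and takes the retry loop, which survives the flip
because it re-reads __rq_lockp() after acquiring. A rough user-space sketch
of just that retry pattern, with made-up names (struct fake_rq,
fake_rq_lock(), pthread mutexes standing in for raw spinlocks) and the
preempt/RCU half of the scheme only noted in comments:

/*
 * Illustrative only, not kernel code: a lock whose identity can change
 * is taken by (1) reading the current lock pointer, (2) locking it,
 * (3) re-checking the pointer, and retrying if it moved under us.
 */
#include <pthread.h>
#include <stdatomic.h>

struct fake_rq {
	pthread_mutex_t own_lock;		/* per-rq lock */
	_Atomic(pthread_mutex_t *) lockp;	/* lock currently in effect */
};

/* analogue of __rq_lockp(): which lock does this rq use right now? */
static pthread_mutex_t *fake_rq_lockp(struct fake_rq *rq)
{
	return atomic_load_explicit(&rq->lockp, memory_order_acquire);
}

/* analogue of the retry loop in raw_spin_rq_lock_nested() */
static void fake_rq_lock(struct fake_rq *rq)
{
	for (;;) {
		pthread_mutex_t *lock = fake_rq_lockp(rq);

		pthread_mutex_lock(lock);
		if (lock == fake_rq_lockp(rq))
			return;			/* still the right lock */
		pthread_mutex_unlock(lock);	/* raced with a flip, retry */
	}
}

static void fake_rq_unlock(struct fake_rq *rq)
{
	pthread_mutex_unlock(fake_rq_lockp(rq));
}

int main(void)
{
	static pthread_mutex_t core_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct fake_rq rq = { .own_lock = PTHREAD_MUTEX_INITIALIZER };

	atomic_store(&rq.lockp, &rq.own_lock);	/* "core sched disabled" */

	fake_rq_lock(&rq);
	/* ... critical section on rq ... */
	fake_rq_unlock(&rq);

	/*
	 * A "core enable" repoints lockp at a lock shared by the SMT
	 * siblings.  The kernel only does this while holding all the old
	 * locks and after a grace period, which is what keeps lockers off
	 * a stale lock; here it simply happens while nothing is held.
	 */
	atomic_store(&rq.lockp, &core_lock);

	fake_rq_lock(&rq);
	fake_rq_unlock(&rq);
	return 0;
}

(The preempt_enable_no_resched() in the kernel version only drops the count
taken at the top; the just-acquired raw spinlock keeps preemption disabled,
which is what the "preempt *MUST* still be disabled here" comments insist
on.)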