From: Scott Wood <swood@redhat.com>
Subject: [PATCH RT v3 1/5] rcu: Acquire RCU lock when disabling BHs
Date: 2019-09-11
A plain local_bh_disable() is documented as creating an RCU critical
section, and (at least) rcutorture expects this to be the case. However,
in_softirq() doesn't block a grace period on PREEMPT_RT, since RCU checks
preempt_count() directly. Even if RCU were changed to check
in_softirq(), that wouldn't allow blocked BH disablers to be boosted.

Fix this by calling rcu_read_lock() from local_bh_disable().
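
For illustration, the usage pattern that depends on this contract looks
roughly like the following hypothetical reader/updater pair ("gp",
"do_something()", and the update-side locking are made up for the
example; rcutorture exercises essentially this pattern):

	static void reader(void)
	{
		struct foo *p;

		local_bh_disable();	/* documented to imply rcu_read_lock() */
		p = rcu_dereference(gp);
		if (p)
			do_something(p);
		local_bh_enable();
	}

	static void updater(struct foo *newp)
	{
		struct foo *old = rcu_dereference_raw(gp);

		rcu_assign_pointer(gp, newp);
		synchronize_rcu();	/* must wait out reader() above */
		kfree(old);
	}

Without this patch, an RT kernel can let synchronize_rcu() return while
reader() is still inside its BH-disabled region, leading to
use-after-free.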

Signed-off-by: Scott Wood <swood@redhat.com>
---
v3: Remove change to rcu_read_lock_bh_held(), and move debug portions
of rcu_read_[un]lock_bh() to separate functions
---
 include/linux/rcupdate.h | 40 ++++++++++++++++++++++++++++++++--------
 kernel/softirq.c         | 12 +++++++++---
 2 files changed, 41 insertions(+), 11 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 388ace315f32..9ce7c5006a5e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -600,6 +600,36 @@ static inline void rcu_read_unlock(void)
 	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
 }

+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * On RT, local_bh_disable() calls rcu_read_lock() -- no need to
+ * track it twice.
+ */
+static inline void rcu_bh_lock_acquire(void)
+{
+}
+
+static inline void rcu_bh_lock_release(void)
+{
+}
+#else
+static inline void rcu_bh_lock_acquire(void)
+{
+	__acquire(RCU_BH);
+	rcu_lock_acquire(&rcu_bh_lock_map);
+	RCU_LOCKDEP_WARN(!rcu_is_watching(),
+			 "rcu_read_lock_bh() used illegally while idle");
+}
+
+static inline void rcu_bh_lock_release(void)
+{
+	RCU_LOCKDEP_WARN(!rcu_is_watching(),
+			 "rcu_read_unlock_bh() used illegally while idle");
+	rcu_lock_release(&rcu_bh_lock_map);
+	__release(RCU_BH);
+}
+#endif
+
 /**
  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
  *
@@ -615,10 +645,7 @@ static inline void rcu_read_unlock(void)
 static inline void rcu_read_lock_bh(void)
 {
 	local_bh_disable();
-	__acquire(RCU_BH);
-	rcu_lock_acquire(&rcu_bh_lock_map);
-	RCU_LOCKDEP_WARN(!rcu_is_watching(),
-			 "rcu_read_lock_bh() used illegally while idle");
+	rcu_bh_lock_acquire();
 }

 /*
@@ -628,10 +655,7 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-	RCU_LOCKDEP_WARN(!rcu_is_watching(),
-			 "rcu_read_unlock_bh() used illegally while idle");
-	rcu_lock_release(&rcu_bh_lock_map);
-	__release(RCU_BH);
+	rcu_bh_lock_release();
 	local_bh_enable();
 }

diff --git a/kernel/softirq.c b/kernel/softirq.c
index d16d080a74f7..6080c9328df1 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -115,8 +115,10 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 	long soft_cnt;
 
 	WARN_ON_ONCE(in_irq());
-	if (!in_atomic())
+	if (!in_atomic()) {
 		local_lock(bh_lock);
+		rcu_read_lock();
+	}
 	soft_cnt = this_cpu_inc_return(softirq_counter);
 	WARN_ON_ONCE(soft_cnt == 0);
 	current->softirq_count += SOFTIRQ_DISABLE_OFFSET;
@@ -151,8 +153,10 @@ void _local_bh_enable(void)
 #endif
 
 	current->softirq_count -= SOFTIRQ_DISABLE_OFFSET;
-	if (!in_atomic())
+	if (!in_atomic()) {
+		rcu_read_unlock();
 		local_unlock(bh_lock);
+	}
 }
 
 void _local_bh_enable_rt(void)
@@ -185,8 +189,10 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 	WARN_ON_ONCE(count < 0);
 	local_irq_enable();
 
-	if (!in_atomic())
+	if (!in_atomic()) {
+		rcu_read_unlock();
 		local_unlock(bh_lock);
+	}
 
 	current->softirq_count -= SOFTIRQ_DISABLE_OFFSET;
 	preempt_check_resched();
--
1.8.3.1
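
For reviewers: with the above applied, the call structure works out
roughly as follows (sketch, not code):

	rcu_read_lock_bh()		/* CONFIG_PREEMPT_RT_FULL */
	  local_bh_disable()
	    local_lock(bh_lock);	/* only if !in_atomic() */
	    rcu_read_lock();		/* only if !in_atomic() */
	  rcu_bh_lock_acquire();	/* empty stub */

	rcu_read_lock_bh()		/* otherwise: behavior unchanged */
	  local_bh_disable()
	  rcu_bh_lock_acquire();	/* sparse, lockdep, and idle checks */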