From: Frederic Weisbecker <frederic@kernel.org>
Subject: [PATCH 22/32] softirq: Check enabled vectors before processing
Date: 12 Feb 2019
There is no need to process softirqs if none of the pending ones are
enabled. Check for that early to avoid unnecessary overhead.
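
As a rough illustration (plain userspace C, not part of the patch; the bit
positions merely mirror the softirq enum for the sake of the example), the
check added here is just the intersection of the per-CPU pending and
enabled masks, and the softirq path is skipped when that intersection is
empty:

	#include <stdio.h>

	#define TIMER_SOFTIRQ_BIT	(1u << 1)	/* TIMER_SOFTIRQ */
	#define NET_RX_SOFTIRQ_BIT	(1u << 3)	/* NET_RX_SOFTIRQ */

	int main(void)
	{
		unsigned int enabled = TIMER_SOFTIRQ_BIT;	/* only timers enabled */
		unsigned int pending = NET_RX_SOFTIRQ_BIT;	/* but NET_RX was raised */

		if (!(pending & enabled))
			printf("nothing both pending and enabled, skip __do_softirq()\n");
		else
			printf("process vectors 0x%x\n", pending & enabled);

		return 0;
	}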

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Mauro Carvalho Chehab <mchehab@s-opensource.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Pavan Kondeti <pkondeti@codeaurora.org>
Cc: Paul E . McKenney <paulmck@linux.vnet.ibm.com>
Cc: David S . Miller <davem@davemloft.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
---
 include/linux/interrupt.h |  5 +++++
 kernel/softirq.c          | 21 +++++++++++----------
2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 346fb1e8e55b..161babfc1a0d 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -508,6 +508,11 @@ static inline void softirq_pending_set_mask(unsigned int pending)
{
__this_cpu_or(local_softirq_data_ref, pending);
}
+
+static inline int softirq_pending_enabled(void)
+{
+ return local_softirq_pending() & local_softirq_enabled();
+}
#endif /* local_softirq_pending */

/* map softirq index to softirq name. update 'softirq_to_name' in
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d305b4c8d1a7..a2257a5aaa0b 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -181,7 +181,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
*/
preempt_count_sub(cnt - 1);

- if (unlikely(!in_interrupt() && local_softirq_pending())) {
+ if (unlikely(!in_interrupt() && softirq_pending_enabled())) {
/*
* Run softirq if any pending. And do it in its own stack
* as we may be calling this deep in a task call stack already.
@@ -250,7 +250,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
int max_restart = MAX_SOFTIRQ_RESTART;
struct softirq_action *h;
bool in_hardirq;
- __u32 pending;
+ __u32 pending, enabled;
int softirq_bit;

/*
@@ -260,7 +260,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
*/
current->flags &= ~PF_MEMALLOC;

- pending = local_softirq_pending();
+ enabled = local_softirq_enabled();
+ pending = local_softirq_pending() & enabled;
account_irq_enter_time(current);

__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
@@ -268,7 +269,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)

restart:
/* Reset the pending bitmask before enabling irqs */
- softirq_pending_clear_mask(SOFTIRQ_ALL_MASK);
+ softirq_pending_clear_mask(pending);

local_irq_enable();

@@ -305,7 +306,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
rcu_softirq_qs();
local_irq_disable();

- pending = local_softirq_pending();
+ pending = local_softirq_pending() & enabled;
if (pending) {
if (time_before(jiffies, end) && !need_resched() &&
--max_restart)
@@ -331,7 +332,7 @@ asmlinkage __visible void do_softirq(void)

local_irq_save(flags);

- pending = local_softirq_pending();
+ pending = softirq_pending_enabled();

if (pending && !ksoftirqd_running(pending))
do_softirq_own_stack();
@@ -360,7 +361,7 @@ void irq_enter(void)

static inline void invoke_softirq(void)
{
- if (ksoftirqd_running(local_softirq_pending()))
+ if (ksoftirqd_running(softirq_pending_enabled()))
return;

if (!force_irqthreads) {
@@ -409,7 +410,7 @@ void irq_exit(void)
#endif
account_irq_exit_time(current);
preempt_count_sub(HARDIRQ_OFFSET);
- if (!in_interrupt() && local_softirq_pending())
+ if (!in_interrupt() && softirq_pending_enabled())
invoke_softirq();

tick_irq_exit();
@@ -640,13 +641,13 @@ void __init softirq_init(void)

static int ksoftirqd_should_run(unsigned int cpu)
{
- return local_softirq_pending();
+ return softirq_pending_enabled();
}

static void run_ksoftirqd(unsigned int cpu)
{
local_irq_disable();
- if (local_softirq_pending()) {
+ if (softirq_pending_enabled()) {
/*
* We can safely run softirq on inline stack, as we are not deep
* in the task stack here.
--
2.17.1