Subject: Re: Problem with WARN_ON in mutex_trylock() and rxrpc
On Thu, Dec 05, 2019 at 02:22:12PM +0100, Peter Zijlstra wrote:

> At the very least I'm going to do a lockdep patch that verifies the lock
> stack is 'empty' for the current irq_context when it changes.

Something like the below..

diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 21619c92c377..c0a314dc9969 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -21,11 +21,13 @@
   extern void trace_softirqs_off(unsigned long ip);
   extern void lockdep_hardirqs_on(unsigned long ip);
   extern void lockdep_hardirqs_off(unsigned long ip);
+  extern void lockdep_leave_irq_context(void);
 #else
   static inline void trace_softirqs_on(unsigned long ip) { }
   static inline void trace_softirqs_off(unsigned long ip) { }
   static inline void lockdep_hardirqs_on(unsigned long ip) { }
   static inline void lockdep_hardirqs_off(unsigned long ip) { }
+  static inline void lockdep_leave_irq_context(void) { }
 #endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -41,6 +43,8 @@ do { \
 } while (0)
 # define trace_hardirq_exit() \
 do { \
+        if (current->hardirq_context == 1) \
+                lockdep_leave_irq_context(); \
         current->hardirq_context--; \
 } while (0)
 # define lockdep_softirq_enter() \
@@ -49,6 +53,8 @@ do { \
 } while (0)
 # define lockdep_softirq_exit() \
 do { \
+        if (current->softirq_context == 1) \
+                lockdep_leave_irq_context(); \
         current->softirq_context--; \
 } while (0)
 #else
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 32282e7112d3..5c1102967927 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3600,6 +3600,43 @@ static inline unsigned int task_irq_context(struct task_struct *task)
         return 2 * !!task->hardirq_context + !!task->softirq_context;
 }
 
+/*
+ * Validate the current irqcontext holds no locks.
+ */
+void lockdep_leave_irq_context(void)
+{
+        struct task_struct *curr = current;
+        unsigned int irq_context = task_irq_context(curr);
+        int depth = curr->lockdep_depth;
+        struct held_lock *hlock;
+
+        if (unlikely(!debug_locks || curr->lockdep_recursion))
+                return;
+
+        if (!depth)
+                return;
+
+        if (curr->held_locks[depth-1].irq_context != irq_context)
+                return;
+
+        pr_warn("\n");
+        pr_warn("========================================================\n");
+        pr_warn("WARNING: Leaving (soft/hard) IRQ context with locks held\n");
+        print_kernel_ident();
+        pr_warn("--------------------------------------------------------\n");
+
+        for (; depth; depth--) {
+                hlock = curr->held_locks + depth - 1;
+                if (hlock->irq_context != irq_context)
+                        break;
+                print_lock(hlock);
+        }
+
+        pr_warn("\nstack backtrace:\n");
+        dump_stack();
+}
+NOKPROBE_SYMBOL(lockdep_leave_irq_context);
+
 static int separate_irq_context(struct task_struct *curr,
                                 struct held_lock *hlock)
 {
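
[Editor's note] For illustration only (not part of the patch above), here is a minimal, hypothetical example of the bug class this check is aimed at: a timer callback, which runs in softirq context, that returns with a spinlock still held. The identifiers (example_lock, example_timer_fn) are made up. With the hunks above applied, lockdep complains as soon as the softirq context is left, instead of the stale lock only surfacing at some later, unrelated lock operation.

/* Hypothetical illustration only -- not part of the patch above. */
#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(example_lock);

/* Timer callbacks run from the timer softirq. */
static void example_timer_fn(struct timer_list *t)
{
        spin_lock(&example_lock);
        /* ... do some work ... */

        /*
         * BUG: no spin_unlock(&example_lock) before returning.  When the
         * softirq core later calls lockdep_softirq_exit() and leaves the
         * softirq context, the new lockdep_leave_irq_context() check fires
         * and prints "Leaving (soft/hard) IRQ context with locks held",
         * listing example_lock and a stack backtrace.
         */
}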