Subject: [tip: locking/core] lockdep: Refactor IRQ trace events fields into struct
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 9cd8b723f823d007bd70a3252e681fde07143f6d
Gitweb: https://git.kernel.org/tip/9cd8b723f823d007bd70a3252e681fde07143f6d
Author: Marco Elver <elver@google.com>
AuthorDate: Wed, 29 Jul 2020 13:09:15 +02:00
Committer: Ingo Molnar <mingo@kernel.org>
CommitterDate: Wed, 29 Jul 2020 16:30:40 +02:00

lockdep: Refactor IRQ trace events fields into struct

Refactor the IRQ trace events fields, which are used for printing information
about the IRQ trace events, into a separate struct 'irqtrace_events'.

This improves readability by separating out the information that is only used
in reporting, and also enables (simplified) storing/restoring of
irqtrace_events snapshots.

No functional change intended.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20200729110916.3920464-1-elver@google.com
---
 include/linux/irqflags.h | 13 +++++++++-
 include/linux/sched.h    | 11 +------
 kernel/fork.c            | 16 +++--------
 kernel/locking/lockdep.c | 58 ++++++++++++++++++++-------------------
 4 files changed, 50 insertions(+), 48 deletions(-)
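
The new layout is what makes the "(simplified) storing/restoring of
irqtrace_events snapshots" mentioned in the changelog possible: the whole
per-task trace state is now one plain struct that can be copied by
assignment. A minimal sketch of such a consumer follows; the helper names
are hypothetical and not part of this patch.

#include <linux/sched.h>
#include <linux/irqflags.h>

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Hypothetical helpers, for illustration only: snapshot and restore the
 * current task's IRQ trace state with a plain struct copy instead of
 * copying each of the nine fields individually.
 */
static inline void irqtrace_snapshot(struct irqtrace_events *snap)
{
	*snap = current->irqtrace;	/* save all trace fields at once */
}

static inline void irqtrace_restore(const struct irqtrace_events *snap)
{
	current->irqtrace = *snap;	/* put the saved state back */
}
#endif /* CONFIG_TRACE_IRQFLAGS */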

diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 5811ee8..bd5c557 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -33,6 +33,19 @@

#ifdef CONFIG_TRACE_IRQFLAGS

+/* Per-task IRQ trace events information. */
+struct irqtrace_events {
+	unsigned int irq_events;
+	unsigned long hardirq_enable_ip;
+	unsigned long hardirq_disable_ip;
+	unsigned int hardirq_enable_event;
+	unsigned int hardirq_disable_event;
+	unsigned long softirq_disable_ip;
+	unsigned long softirq_enable_ip;
+	unsigned int softirq_disable_event;
+	unsigned int softirq_enable_event;
+};
+
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9a9d826..26adabe 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -18,6 +18,7 @@
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
+#include <linux/irqflags.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
@@ -980,17 +981,9 @@ struct task_struct {
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
- unsigned int irq_events;
+ struct irqtrace_events irqtrace;
unsigned int hardirq_threaded;
- unsigned long hardirq_enable_ip;
- unsigned long hardirq_disable_ip;
- unsigned int hardirq_enable_event;
- unsigned int hardirq_disable_event;
u64 hardirq_chain_key;
- unsigned long softirq_disable_ip;
- unsigned long softirq_enable_ip;
- unsigned int softirq_disable_event;
- unsigned int softirq_enable_event;
int softirqs_enabled;
int softirq_context;
int irq_config;
diff --git a/kernel/fork.c b/kernel/fork.c
index fc72f09..f831b82 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2035,17 +2035,11 @@ static __latent_entropy struct task_struct *copy_process(
seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
-	p->irq_events = 0;
-	p->hardirq_enable_ip = 0;
-	p->hardirq_enable_event = 0;
-	p->hardirq_disable_ip = _THIS_IP_;
-	p->hardirq_disable_event = 0;
-	p->softirqs_enabled = 1;
-	p->softirq_enable_ip = _THIS_IP_;
-	p->softirq_enable_event = 0;
-	p->softirq_disable_ip = 0;
-	p->softirq_disable_event = 0;
-	p->softirq_context = 0;
+	memset(&p->irqtrace, 0, sizeof(p->irqtrace));
+	p->irqtrace.hardirq_disable_ip = _THIS_IP_;
+	p->irqtrace.softirq_enable_ip = _THIS_IP_;
+	p->softirqs_enabled = 1;
+	p->softirq_context = 0;
#endif

p->pagefault_disabled = 0;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index c9ea05e..7b58003 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3484,19 +3484,21 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,

void print_irqtrace_events(struct task_struct *curr)
{
- printk("irq event stamp: %u\n", curr->irq_events);
+ const struct irqtrace_events *trace = &curr->irqtrace;
+
+ printk("irq event stamp: %u\n", trace->irq_events);
printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
- curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
- (void *)curr->hardirq_enable_ip);
+ trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
+ (void *)trace->hardirq_enable_ip);
printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
- curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
- (void *)curr->hardirq_disable_ip);
+ trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip,
+ (void *)trace->hardirq_disable_ip);
printk("softirqs last enabled at (%u): [<%px>] %pS\n",
- curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
- (void *)curr->softirq_enable_ip);
+ trace->softirq_enable_event, (void *)trace->softirq_enable_ip,
+ (void *)trace->softirq_enable_ip);
printk("softirqs last disabled at (%u): [<%px>] %pS\n",
- curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
- (void *)curr->softirq_disable_ip);
+ trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
+ (void *)trace->softirq_disable_ip);
}

static int HARDIRQ_verbose(struct lock_class *class)
@@ -3699,7 +3701,7 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);

void noinstr lockdep_hardirqs_on(unsigned long ip)
{
- struct task_struct *curr = current;
+ struct irqtrace_events *trace = &current->irqtrace;

if (unlikely(!debug_locks))
return;
@@ -3752,8 +3754,8 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
skip_checks:
/* we'll do an OFF -> ON transition: */
this_cpu_write(hardirqs_enabled, 1);
- curr->hardirq_enable_ip = ip;
- curr->hardirq_enable_event = ++curr->irq_events;
+ trace->hardirq_enable_ip = ip;
+ trace->hardirq_enable_event = ++trace->irq_events;
debug_atomic_inc(hardirqs_on_events);
}
EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
@@ -3763,8 +3765,6 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
*/
void noinstr lockdep_hardirqs_off(unsigned long ip)
{
- struct task_struct *curr = current;
-
if (unlikely(!debug_locks))
return;

@@ -3784,12 +3784,14 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
return;

if (lockdep_hardirqs_enabled()) {
+ struct irqtrace_events *trace = &current->irqtrace;
+
/*
* We have done an ON -> OFF transition:
*/
this_cpu_write(hardirqs_enabled, 0);
- curr->hardirq_disable_ip = ip;
- curr->hardirq_disable_event = ++curr->irq_events;
+ trace->hardirq_disable_ip = ip;
+ trace->hardirq_disable_event = ++trace->irq_events;
debug_atomic_inc(hardirqs_off_events);
} else {
debug_atomic_inc(redundant_hardirqs_off);
@@ -3802,7 +3804,7 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
*/
void lockdep_softirqs_on(unsigned long ip)
{
- struct task_struct *curr = current;
+ struct irqtrace_events *trace = &current->irqtrace;

if (unlikely(!debug_locks || current->lockdep_recursion))
return;
@@ -3814,7 +3816,7 @@ void lockdep_softirqs_on(unsigned long ip)
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;

- if (curr->softirqs_enabled) {
+ if (current->softirqs_enabled) {
debug_atomic_inc(redundant_softirqs_on);
return;
}
@@ -3823,9 +3825,9 @@ void lockdep_softirqs_on(unsigned long ip)
/*
* We'll do an OFF -> ON transition:
*/
- curr->softirqs_enabled = 1;
- curr->softirq_enable_ip = ip;
- curr->softirq_enable_event = ++curr->irq_events;
+ current->softirqs_enabled = 1;
+ trace->softirq_enable_ip = ip;
+ trace->softirq_enable_event = ++trace->irq_events;
debug_atomic_inc(softirqs_on_events);
/*
* We are going to turn softirqs on, so set the
@@ -3833,7 +3835,7 @@ void lockdep_softirqs_on(unsigned long ip)
* enabled too:
*/
if (lockdep_hardirqs_enabled())
- mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
+ mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
lockdep_recursion_finish();
}

@@ -3842,8 +3844,6 @@ void lockdep_softirqs_on(unsigned long ip)
*/
void lockdep_softirqs_off(unsigned long ip)
{
- struct task_struct *curr = current;
-
if (unlikely(!debug_locks || current->lockdep_recursion))
return;

@@ -3853,13 +3853,15 @@ void lockdep_softirqs_off(unsigned long ip)
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;

- if (curr->softirqs_enabled) {
+ if (current->softirqs_enabled) {
+ struct irqtrace_events *trace = &current->irqtrace;
+
/*
* We have done an ON -> OFF transition:
*/
- curr->softirqs_enabled = 0;
- curr->softirq_disable_ip = ip;
- curr->softirq_disable_event = ++curr->irq_events;
+ current->softirqs_enabled = 0;
+ trace->softirq_disable_ip = ip;
+ trace->softirq_disable_event = ++trace->irq_events;
debug_atomic_inc(softirqs_off_events);
/*
* Whoops, we wanted softirqs off, so why aren't they?