Subject: [for-next][PATCH 2/2] tracing: Remove unused ftrace_cpu_disabled per cpu variable
From: Dmitry Safonov <0x7f454c46@gmail.com>

Since the ring buffer is lockless, there is no need to disable ftrace on
a per-CPU basis, and nothing does so anymore: after commit 68179686ac67cb
("tracing: Remove ftrace_disable/enable_cpu()"), ftrace_cpu_disabled keeps
its initial value and nothing ever changes it.

ftrace_cpu_disabled should not be used by any external module either, since
it only disables the function and function_graph tracers, not any other
tracer.
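
For readers who have not seen the old scheme, the sketch below illustrates
the pattern being removed. It is illustrative only (sketch_trace_event() is
a made-up name); the real deletions are in the hunks below. A per-cpu
integer is defined once and then read at the top of every tracing fast path:

/*
 * Illustrative sketch of the per-cpu disable pattern this patch removes;
 * not part of the patch itself.
 */
#include <linux/percpu.h>

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

static void sketch_trace_event(void)
{
	/*
	 * Bail out if tracing was disabled on this CPU. Since commit
	 * 68179686ac67cb nothing ever sets the flag, so this branch is
	 * dead code that only adds a per-cpu read to every traced event.
	 */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	/* ... reserve a ring buffer event and commit it ... */
}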

Link: http://lkml.kernel.org/r/1446836846-22239-1-git-send-email-0x7f454c46@gmail.com

Signed-off-by: Dmitry Safonov <0x7f454c46@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/trace/trace.c | 6 ------
kernel/trace/trace.h | 1 -
kernel/trace/trace_functions_graph.c | 6 ------
3 files changed, 13 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 08af79c106e1..b11582618991 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -100,8 +100,6 @@ static DEFINE_PER_CPU(bool, trace_cmdline_save);
  */
 static int tracing_disabled = 1;

-DEFINE_PER_CPU(int, ftrace_cpu_disabled);
-
 cpumask_var_t __read_mostly tracing_buffer_mask;

 /*
@@ -1775,10 +1773,6 @@ trace_function(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;

-	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
-		return;
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 					  flags, pc);
 	if (!event)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index dd7620802e72..919d9d07686f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -667,7 +667,6 @@ extern int DYN_FTRACE_TEST_NAME2(void);

 extern bool ring_buffer_expanded;
 extern bool tracing_selftest_disabled;
-DECLARE_PER_CPU(int, ftrace_cpu_disabled);

 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 92382af7a213..a663cbb84107 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -288,9 +288,6 @@ int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ent_entry *entry;

-	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
-		return 0;
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -403,9 +400,6 @@ void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ret_entry *entry;

-	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
-		return;
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 					  sizeof(*entry), flags, pc);
 	if (!event)
--
2.6.1


