From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Subject: [RFC PATCH 1/6] trace/stack: Move code to save the stack trace into a separate function
Date: 2021-05-21
In preparation for adding stack tracer support to powerpc, move the code that
saves the stack trace and calculates the frame sizes into a separate weak
function. Also provide access to some of the data structures used by the
stack trace code so that architectures can update them.

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
include/linux/ftrace.h | 8 ++++
kernel/trace/trace_stack.c | 98 ++++++++++++++++++++------------------
2 files changed, 60 insertions(+), 46 deletions(-)
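
For illustration only (not part of this series), an architecture could
override the new weak hook roughly along these lines. The file path, the
back-chain unwinding scheme, and the per-frame bookkeeping below are all
hypothetical; this is not the powerpc implementation added later in the
series, just a sketch of the interface this patch exposes:

/* arch/xyz/kernel/stack_tracer.c -- hypothetical sketch, not real code */

#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

void stack_get_trace(unsigned long traced_ip, unsigned long *stack_ref,
		     unsigned long stack_size, int *tracer_frame)
{
	unsigned long *sp = stack_ref;
	unsigned long top = ((unsigned long)stack_ref & ~(THREAD_SIZE - 1)) +
			    THREAD_SIZE;
	int i = 0;

	/* Fill the buffers that this patch exports from trace_stack.c. */
	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
						  ARRAY_SIZE(stack_dump_trace) - 1,
						  0);

	/*
	 * Imaginary back-chain walk: assume *sp points at the caller's
	 * frame, so the stack in use at frame i is the distance from that
	 * frame to the top of the stack. A real port would also skip its
	 * own entries up to traced_ip and record the tracer's frame size
	 * in *tracer_frame, as the generic code does.
	 */
	while ((unsigned long)sp < top && i < stack_trace_nr_entries) {
		stack_trace_index[i++] = top - (unsigned long)sp;
		sp = (unsigned long *)*sp;
	}
	stack_trace_nr_entries = i;
}

The generic __weak version below keeps the existing behaviour of scanning
the stack for saved return addresses, so an architecture only needs an
override like this when it can determine frame sizes more reliably itself.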

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a69f363b61bf73..8263427379f05c 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -368,10 +368,18 @@ static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,

#ifdef CONFIG_STACK_TRACER

+#define STACK_TRACE_ENTRIES 500
+
+extern unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
+extern unsigned stack_trace_index[STACK_TRACE_ENTRIES];
+extern unsigned int stack_trace_nr_entries;
+extern unsigned long stack_trace_max_size;
extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
+void stack_get_trace(unsigned long traced_ip, unsigned long *stack_ref,
+ unsigned long stack_size, int *tracer_frame);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 63c28504205162..5b63dbd37c8c25 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -19,13 +19,11 @@

#include "trace.h"

-#define STACK_TRACE_ENTRIES 500
+unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
+unsigned stack_trace_index[STACK_TRACE_ENTRIES];

-static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
-static unsigned stack_trace_index[STACK_TRACE_ENTRIES];
-
-static unsigned int stack_trace_nr_entries;
-static unsigned long stack_trace_max_size;
+unsigned int stack_trace_nr_entries;
+unsigned long stack_trace_max_size;
static arch_spinlock_t stack_trace_max_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

@@ -152,49 +150,19 @@ static void print_max_stack(void)
* Although the entry function is not displayed, the first function (sys_foo)
* will still include the stack size of it.
*/
-static void check_stack(unsigned long ip, unsigned long *stack)
+void __weak stack_get_trace(unsigned long traced_ip, unsigned long *stack_ref,
+ unsigned long stack_size, int *tracer_frame)
{
- unsigned long this_size, flags; unsigned long *p, *top, *start;
- static int tracer_frame;
- int frame_size = READ_ONCE(tracer_frame);
+ unsigned long *p, *top, *start;
int i, x;

- this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
- this_size = THREAD_SIZE - this_size;
- /* Remove the frame of the tracer */
- this_size -= frame_size;
-
- if (this_size <= stack_trace_max_size)
- return;
-
- /* we do not handle interrupt stacks yet */
- if (!object_is_on_stack(stack))
- return;
-
- /* Can't do this from NMI context (can cause deadlocks) */
- if (in_nmi())
- return;
-
- local_irq_save(flags);
- arch_spin_lock(&stack_trace_max_lock);
-
- /* In case another CPU set the tracer_frame on us */
- if (unlikely(!frame_size))
- this_size -= tracer_frame;
-
- /* a race could have already updated it */
- if (this_size <= stack_trace_max_size)
- goto out;
-
- stack_trace_max_size = this_size;
-
stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
ARRAY_SIZE(stack_dump_trace) - 1,
0);

/* Skip over the overhead of the stack tracer itself */
for (i = 0; i < stack_trace_nr_entries; i++) {
- if (stack_dump_trace[i] == ip)
+ if (stack_dump_trace[i] == traced_ip)
break;
}

@@ -209,7 +177,7 @@ static void check_stack(unsigned long ip, unsigned long *stack)
* Now find where in the stack these are.
*/
x = 0;
- start = stack;
+ start = stack_ref;
top = (unsigned long *)
(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

@@ -223,7 +191,7 @@ static void check_stack(unsigned long ip, unsigned long *stack)
while (i < stack_trace_nr_entries) {
int found = 0;

- stack_trace_index[x] = this_size;
+ stack_trace_index[x] = stack_size;
p = start;

for (; p < top && i < stack_trace_nr_entries; p++) {
@@ -233,7 +201,7 @@ static void check_stack(unsigned long ip, unsigned long *stack)
*/
if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
stack_dump_trace[x] = stack_dump_trace[i++];
- this_size = stack_trace_index[x++] =
+ stack_size = stack_trace_index[x++] =
(top - p) * sizeof(unsigned long);
found = 1;
/* Start the search from here */
@@ -245,10 +213,10 @@ static void check_stack(unsigned long ip, unsigned long *stack)
* out what that is, then figure it out
* now.
*/
- if (unlikely(!tracer_frame)) {
- tracer_frame = (p - stack) *
+ if (unlikely(!*tracer_frame)) {
+ *tracer_frame = (p - stack_ref) *
sizeof(unsigned long);
- stack_trace_max_size -= tracer_frame;
+ stack_trace_max_size -= *tracer_frame;
}
}
}
@@ -272,6 +240,44 @@ static void check_stack(unsigned long ip, unsigned long *stack)
#endif

stack_trace_nr_entries = x;
+}
+
+static void check_stack(unsigned long ip, unsigned long *stack)
+{
+ unsigned long this_size, flags;
+ static int tracer_frame;
+ int frame_size = READ_ONCE(tracer_frame);
+
+ this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
+ this_size = THREAD_SIZE - this_size;
+ /* Remove the frame of the tracer */
+ this_size -= frame_size;
+
+ if (this_size <= stack_trace_max_size)
+ return;
+
+ /* we do not handle interrupt stacks yet */
+ if (!object_is_on_stack(stack))
+ return;
+
+ /* Can't do this from NMI context (can cause deadlocks) */
+ if (in_nmi())
+ return;
+
+ local_irq_save(flags);
+ arch_spin_lock(&stack_trace_max_lock);
+
+ /* In case another CPU set the tracer_frame on us */
+ if (unlikely(!frame_size))
+ this_size -= tracer_frame;
+
+ /* a race could have already updated it */
+ if (this_size <= stack_trace_max_size)
+ goto out;
+
+ stack_trace_max_size = this_size;
+
+ stack_get_trace(ip, stack, this_size, &tracer_frame);

if (task_stack_end_corrupted(current)) {
print_max_stack();
--
2.30.2