    Date: 20 May 2019
    From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
    Subject: [RFC][PATCH 11/14 v2] function_graph: Move graph depth stored data to shadow stack global var

    The use of task->trace_recursion for the function graph depth logic
    was a bit of an abuse of that variable. Now that there are per-stack
    global variables for registered graph tracers, use those instead.

    Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
    ---
    kernel/trace/trace.h | 63 ++++++++++++++++++++++----------------------
    1 file changed, 32 insertions(+), 31 deletions(-)
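
    A minimal user-space sketch of the bit layout this change relies on (not
    part of the patch; the demo_ names are invented for the illustration):
    TRACE_GRAPH_FL is a mask of 1, and the two enum values that follow it form
    a 2-bit field holding the depth at which the flag was set, which is what
    the ftrace_graph_depth()/ftrace_graph_set_depth() helpers added below read
    and write.

    #include <assert.h>

    enum {
            DEMO_GRAPH_FL = 1,              /* mirrors TRACE_GRAPH_FL      */
            DEMO_GRAPH_DEPTH_START_BIT,     /* = 2, first depth bit        */
            DEMO_GRAPH_DEPTH_END_BIT,       /* = 3, second depth bit       */
    };

    static unsigned long demo_graph_depth(unsigned long *task_var)
    {
            return (*task_var >> DEMO_GRAPH_DEPTH_START_BIT) & 3;
    }

    static void demo_graph_set_depth(unsigned long *task_var, int depth)
    {
            *task_var &= ~(3UL << DEMO_GRAPH_DEPTH_START_BIT);
            *task_var |= ((unsigned long)(depth & 3)) << DEMO_GRAPH_DEPTH_START_BIT;
    }

    int main(void)
    {
            unsigned long task_var = 0;

            task_var |= DEMO_GRAPH_FL;              /* flag set...            */
            demo_graph_set_depth(&task_var, 3);     /* ...at the deepest case */

            assert(demo_graph_depth(&task_var) == 3);
            assert(task_var & DEMO_GRAPH_FL);       /* flag bit untouched     */
            return 0;
    }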

    diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
    index 08e79334c8ca..c466c8a1a8cf 100644
    --- a/kernel/trace/trace.h
    +++ b/kernel/trace/trace.h
    @@ -567,25 +567,6 @@ enum {
    */
    TRACE_IRQ_BIT,

    - /*
    - * In the very unlikely case that an interrupt came in
    - * at a start of graph tracing, and we want to trace
    - * the function in that interrupt, the depth can be greater
    - * than zero, because of the preempted start of a previous
    - * trace. In an even more unlikely case, depth could be 2
    - * if a softirq interrupted the start of graph tracing,
    - * followed by an interrupt preempting a start of graph
    - * tracing in the softirq, and depth can even be 3
    - * if an NMI came in at the start of an interrupt function
    - * that preempted a softirq start of a function that
    - * preempted normal context!!!! Luckily, it can't be
    - * greater than 3, so the next two bits are a mask
    - * of what the depth is when we set TRACE_GRAPH_FL
    - */
    -
    - TRACE_GRAPH_DEPTH_START_BIT,
    - TRACE_GRAPH_DEPTH_END_BIT,
    -
    /*
    * To implement set_graph_notrace, if this bit is set, we ignore
    * function graph tracing of called functions, until the return
    @@ -598,16 +579,6 @@ enum {
    #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
    #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))

    -#define trace_recursion_depth() \
    - (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
    -#define trace_recursion_set_depth(depth) \
    - do { \
    - current->trace_recursion &= \
    - ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
    - current->trace_recursion |= \
    - ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
    - } while (0)
    -
    #define TRACE_CONTEXT_BITS 4

    #define TRACE_FTRACE_START TRACE_FTRACE_BIT
    @@ -936,8 +907,38 @@ extern void free_fgraph_ops(struct trace_array *tr);

    enum {
    TRACE_GRAPH_FL = 1,
    +
    + /*
    + * In the very unlikely case that an interrupt came in
    + * at a start of graph tracing, and we want to trace
    + * the function in that interrupt, the depth can be greater
    + * than zero, because of the preempted start of a previous
    + * trace. In an even more unlikely case, depth could be 2
    + * if a softirq interrupted the start of graph tracing,
    + * followed by an interrupt preempting a start of graph
    + * tracing in the softirq, and depth can even be 3
    + * if an NMI came in at the start of an interrupt function
    + * that preempted a softirq start of a function that
    + * preempted normal context!!!! Luckily, it can't be
    + * greater than 3, so the next two bits are a mask
    + * of what the depth is when we set TRACE_GRAPH_FL
    + */
    +
    + TRACE_GRAPH_DEPTH_START_BIT,
    + TRACE_GRAPH_DEPTH_END_BIT,
    };

    +static inline unsigned long ftrace_graph_depth(unsigned long *task_var)
    +{
    + return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
    +}
    +
    +static inline void ftrace_graph_set_depth(unsigned long *task_var, int depth)
    +{
    + *task_var &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT);
    + *task_var |= (depth & 3) << TRACE_GRAPH_DEPTH_START_BIT;
    +}
    +
    #ifdef CONFIG_DYNAMIC_FTRACE
    extern struct ftrace_hash *ftrace_graph_hash;
    extern struct ftrace_hash *ftrace_graph_notrace_hash;
    @@ -961,7 +962,7 @@ ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
    * when the depth is zero.
    */
    *task_var |= TRACE_GRAPH_FL;
    - trace_recursion_set_depth(trace->depth);
    + ftrace_graph_set_depth(task_var, trace->depth);

    /*
    * If no irqs are to be traced, but a set_graph_function
    @@ -986,7 +987,7 @@ ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace
    unsigned long *task_var = fgraph_get_task_var(gops);

    if ((*task_var & TRACE_GRAPH_FL) &&
    - trace->depth == trace_recursion_depth())
    + trace->depth == ftrace_graph_depth(task_var))
    *task_var &= ~TRACE_GRAPH_FL;
    }

    --
    2.20.1
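
    A second standalone illustration (again user-space, with invented names,
    not part of the patch): why ftrace_graph_addr_finish() compares the return
    depth with the depth saved when TRACE_GRAPH_FL was set. Returns of nested
    callees come back at deeper depths and must leave the flag alone; only the
    return of the function that set the flag, at the matching depth, clears it.

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_task_var {
            bool graph_fl;          /* stands in for TRACE_GRAPH_FL        */
            int  saved_depth;       /* stands in for the 2-bit depth field */
    };

    /* Same shape as the check in ftrace_graph_addr_finish(). */
    static void demo_finish(struct demo_task_var *v, int return_depth)
    {
            if (v->graph_fl && return_depth == v->saved_depth)
                    v->graph_fl = false;
    }

    int main(void)
    {
            /* A set_graph_function function was entered at depth 0. */
            struct demo_task_var v = { .graph_fl = true, .saved_depth = 0 };

            demo_finish(&v, 2);     /* nested callee returns: flag stays set */
            demo_finish(&v, 1);     /* nested callee returns: flag stays set */
            printf("flag after nested returns:  %d\n", v.graph_fl);

            demo_finish(&v, 0);     /* the function that set it returns      */
            printf("flag after matching return: %d\n", v.graph_fl);
            return 0;
    }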
