Date: Tue, 5 May 2020
From: Peter Zijlstra <peterz@infradead.org>
Subject: [patch V4 part 1 33/36] x86,tracing: Robustify ftrace_nmi_enter()

    ftrace_nmi_enter()
    trace_hwlat_callback()
    trace_clock_local()
    sched_clock()
    paravirt_sched_clock()
    native_sched_clock()

None of these must be traced or kprobed; they are called from do_debug()
before the kprobe handler runs.
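
For illustration only, not part of the patch: the two annotations used below
work differently. __always_inline forces the helper to be folded into its
caller, so no separate, traceable symbol is ever emitted for it; noinstr emits
the function out of line but places it in the .noinstr.text section with
compiler instrumentation disabled. A minimal sketch with made-up names:

/* Hypothetical example; only the attributes mirror the patch. */
#include <linux/types.h>
#include <linux/compiler_types.h>	/* __always_inline, noinstr */

/* Always folded into the caller: ftrace/kprobes never see a symbol for it. */
static __always_inline u64 example_read_counter(void)
{
	return 0;	/* stand-in for the rdtsc()-based math */
}

/*
 * Emitted out of line, but free of tracing/kprobe instrumentation,
 * so it is safe to call on the do_debug()/NMI entry path.
 */
noinstr u64 example_sched_clock(void)
{
	return example_read_counter();
}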

    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
    ---
 arch/x86/include/asm/paravirt.h |    2 +-
 arch/x86/kernel/tsc.c           |    4 ++--
 include/linux/ftrace_irq.h      |    4 ++--
 kernel/trace/trace_clock.c      |    3 ++-
 kernel/trace/trace_hwlat.c      |    2 +-
 5 files changed, 8 insertions(+), 7 deletions(-)

--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -17,7 +17,7 @@
 #include <linux/cpumask.h>
 #include <asm/frame.h>
 
-static inline unsigned long long paravirt_sched_clock(void)
+static __always_inline unsigned long long paravirt_sched_clock(void)
 {
 	return PVOP_CALL0(unsigned long long, time.sched_clock);
 }
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -207,7 +207,7 @@ static void __init cyc2ns_init_secondary
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
-u64 native_sched_clock(void)
+noinstr u64 native_sched_clock(void)
 {
 	if (static_branch_likely(&__use_tsc)) {
 		u64 tsc_now = rdtsc();
@@ -240,7 +240,7 @@ u64 native_sched_clock_from_tsc(u64 tsc)
 /* We need to define a real function for sched_clock, to override the
    weak default version */
 #ifdef CONFIG_PARAVIRT
-unsigned long long sched_clock(void)
+noinstr unsigned long long sched_clock(void)
 {
 	return paravirt_sched_clock();
 }
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -7,7 +7,7 @@ extern bool trace_hwlat_callback_enabled
 extern void trace_hwlat_callback(bool enter);
 #endif
 
-static inline void ftrace_nmi_enter(void)
+static __always_inline void ftrace_nmi_enter(void)
 {
 #ifdef CONFIG_HWLAT_TRACER
 	if (trace_hwlat_callback_enabled)
@@ -15,7 +15,7 @@ static inline void ftrace_nmi_enter(void
 #endif
 }
 
-static inline void ftrace_nmi_exit(void)
+static __always_inline void ftrace_nmi_exit(void)
 {
 #ifdef CONFIG_HWLAT_TRACER
 	if (trace_hwlat_callback_enabled)
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -22,6 +22,7 @@
 #include <linux/sched/clock.h>
 #include <linux/ktime.h>
 #include <linux/trace_clock.h>
+#include <linux/kprobes.h>
 
 /*
  * trace_clock_local(): the simplest and least coherent tracing clock.
@@ -29,7 +30,7 @@
  * Useful for tracing that does not cross to other CPUs nor
  * does it go through idle events.
  */
-u64 notrace trace_clock_local(void)
+u64 noinstr trace_clock_local(void)
 {
 	u64 clock;
 
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -139,7 +139,7 @@ static void trace_hwlat_sample(struct hw
 #define init_time(a, b)	(a = b)
 #define time_u64(a)		a
 
-void trace_hwlat_callback(bool enter)
+noinstr void trace_hwlat_callback(bool enter)
 {
 	if (smp_processor_id() != nmi_cpu)
 		return;