Subject: [RFC PATCH] tracing: Merge irqflags + preempt counter, add RT bits

PREEMPT_RT never reported "serving softirq". I took a look to see if it
could be changed. The tracing infrastructure examines the preemption
counter for that. PREEMPT_RT does not change the preemption counter
while disabling the bottom half or serving softirqs, in order to remain
preemptible. The in_serving_softirq() macro and the SOFTIRQ_OFFSET
define still work, just not based on the preempt counter.
I started to look at how to integrate the RT bits regarding softirq.
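
For reference, the distinction boils down to how the SOFTIRQ flag of a
trace entry can be derived on RT vs. !RT. A condensed sketch of that
logic (mirroring the __tracing_gen_ctx_flags() hunk further down, not a
stand-alone snippet):

	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		/* RT: the preempt counter carries no softirq state,
		 * ask the softirq code directly. */
		if (in_serving_softirq())
			trace_flags |= TRACE_FLAG_SOFTIRQ;
	} else {
		/* !RT: the softirq state is encoded in the preempt counter. */
		if (preempt_count() & SOFTIRQ_OFFSET)
			trace_flags |= TRACE_FLAG_SOFTIRQ;
	}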

The state of the interrupts (irqflags) and the preemption counter are
passed down to tracing_generic_entry_update(). However, only one bit of
irqflags is actually required: the on/off state.
The irqflags and the preemption counter can be evaluated early and the
information stored in an integer `trace_ctx'.
tracing_generic_entry_update() then uses the upper bits as the
TRACE_FLAG_* bits and the lower 16 bits as the preemption counter
(considering that 1 must be subtracted from the counter in some cases).

With this change the preemption counter is read in one place and the
relevant RT bits for softirq can be set there.
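
To illustrate the layout, the packing and unpacking of `trace_ctx' look
roughly like this (the helper names below exist only for this example;
the patch open-codes the same operations in __tracing_gen_ctx_flags()
and tracing_generic_entry_update()):

	/*
	 * bits 16..31: TRACE_FLAG_* (IRQS_OFF, HARDIRQ, SOFTIRQ, NMI, ...)
	 * bits  0..15: preemption counter (only the low 8 bits end up in
	 *              the trace entry)
	 */
	static inline unsigned int pack_trace_ctx(unsigned int trace_flags,
						  unsigned int pc)
	{
		return (trace_flags << 16) | (pc & 0xff);
	}

	static inline void unpack_trace_ctx(unsigned int trace_ctx,
					    struct trace_entry *entry)
	{
		entry->preempt_count = trace_ctx & 0xff;
		entry->flags         = trace_ctx >> 16;
	}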

The actual preemption value is not used except for the tracing record.
The `irqflags' value is likewise only used for the _irqsave() locking in
a few spots.
As part of the patch I appended __ to trace_event_buffer_commit() while
auditing the struct trace_event_buffer usage with respect to its `pc'
and `flags' members. It appears that those two can also be merged into
the `trace_ctx' integer.
With this change the call chain passes one argument fewer and evaluates
the flags early. A build with all tracers enabled on x86-64, with and
without the patch:

    text     data      bss      dec     hex filename
24301717 22148594 13996284 60446595 39a5783 vmlinux.old
24301248 22148850 13996284 60446382 39a56ae vmlinux.new

data increased by 256 bytes, text shrank by 469 bytes.
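
(The table above is in the format printed by the binutils `size'
utility; the comparison can be reproduced with, for example:

	size vmlinux.old vmlinux.new

assuming the old and new builds were saved under those names.)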

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/trace_events.h | 27 +++-
include/trace/trace_events.h | 2 +-
kernel/trace/blktrace.c | 17 ++-
kernel/trace/trace.c | 209 ++++++++++++++++-----------
kernel/trace/trace.h | 38 +++--
kernel/trace/trace_event_perf.c | 5 +-
kernel/trace/trace_events.c | 15 +-
kernel/trace/trace_events_inject.c | 8 +-
kernel/trace/trace_events_synth.c | 4 +-
kernel/trace/trace_functions.c | 26 ++--
kernel/trace/trace_functions_graph.c | 32 ++--
kernel/trace/trace_hwlat.c | 7 +-
kernel/trace/trace_irqsoff.c | 62 ++++----
kernel/trace/trace_kprobe.c | 14 +-
kernel/trace/trace_mmiotrace.c | 14 +-
kernel/trace/trace_sched_wakeup.c | 61 ++++----
kernel/trace/trace_syscalls.c | 20 +--
kernel/trace/trace_uprobe.c | 4 +-
18 files changed, 293 insertions(+), 272 deletions(-)

diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index d321fe5ad1a14..a9821d573e32f 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -148,17 +148,29 @@ enum print_line_t {

enum print_line_t trace_handle_return(struct trace_seq *s);

-void tracing_generic_entry_update(struct trace_entry *entry,
- unsigned short type,
- unsigned long flags,
- int pc);
+static inline void tracing_generic_entry_update(struct trace_entry *entry,
+ unsigned short type,
+ unsigned int trace_ctx)
+{
+ struct task_struct *tsk = current;
+
+ entry->preempt_count = trace_ctx & 0xff;
+ entry->pid = (tsk) ? tsk->pid : 0;
+ entry->type = type;
+ entry->flags = trace_ctx >> 16;
+}
+
+unsigned int _tracing_gen_ctx_flags(unsigned long irqflags);
+unsigned int tracing_gen_ctx_flags(void);
+unsigned int tracing_gen_ctx_flags_dect(void);
+
struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
struct trace_event_file *trace_file,
int type, unsigned long len,
- unsigned long flags, int pc);
+ unsigned int trace_ctx);

#define TRACE_RECORD_CMDLINE BIT(0)
#define TRACE_RECORD_TGID BIT(1)
@@ -232,8 +244,7 @@ struct trace_event_buffer {
struct ring_buffer_event *event;
struct trace_event_file *trace_file;
void *entry;
- unsigned long flags;
- int pc;
+ unsigned int trace_ctx;
struct pt_regs *regs;
};

@@ -241,7 +252,7 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
struct trace_event_file *trace_file,
unsigned long len);

-void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
+void trace_event_buffer_commit__(struct trace_event_buffer *fbuffer);

enum {
TRACE_EVENT_FL_FILTERED_BIT,
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 7785961d82bae..ec5a7ff786717 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -694,7 +694,7 @@ trace_event_raw_event_##call(void *__data, proto) \
\
{ assign; } \
\
- trace_event_buffer_commit(&fbuffer); \
+ trace_event_buffer_commit__(&fbuffer); \
}
/*
* The ftrace_test_probe is compiled out, it is only here as a build time check
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index fb0fe4c66b84a..f5c4f1d72a885 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -72,17 +72,17 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
struct blk_io_trace *t;
struct ring_buffer_event *event = NULL;
struct trace_buffer *buffer = NULL;
- int pc = 0;
+ unsigned int trace_ctx = 0;
int cpu = smp_processor_id();
bool blk_tracer = blk_tracer_enabled;
ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

if (blk_tracer) {
buffer = blk_tr->array_buffer.buffer;
- pc = preempt_count();
+ trace_ctx = _tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + len + cgid_len,
- 0, pc);
+ trace_ctx);
if (!event)
return;
t = ring_buffer_event_data(event);
@@ -107,7 +107,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
memcpy((void *) t + sizeof(*t) + cgid_len, data, len);

if (blk_tracer)
- trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
+ trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
}
}

@@ -222,8 +222,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
struct blk_io_trace *t;
unsigned long flags = 0;
unsigned long *sequence;
+ unsigned int trace_ctx = 0;
pid_t pid;
- int cpu, pc = 0;
+ int cpu;
bool blk_tracer = blk_tracer_enabled;
ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

@@ -252,10 +253,10 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
tracing_record_cmdline(current);

buffer = blk_tr->array_buffer.buffer;
- pc = preempt_count();
+ trace_ctx = _tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + pdu_len + cgid_len,
- 0, pc);
+ trace_ctx);
if (!event)
return;
t = ring_buffer_event_data(event);
@@ -301,7 +302,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

if (blk_tracer) {
- trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
+ trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
return;
}
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ea192b33ae3b2..bb0eea762fe33 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -165,7 +165,7 @@ static union trace_eval_map_item *trace_eval_maps;
int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
struct trace_buffer *buffer,
- unsigned long flags, int pc);
+ unsigned int trace_ctx);

#define MAX_TRACER_SIZE 100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
@@ -894,23 +894,23 @@ static inline void trace_access_lock_init(void)

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
- unsigned long flags,
- int skip, int pc, struct pt_regs *regs);
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
struct trace_buffer *buffer,
- unsigned long flags,
- int skip, int pc, struct pt_regs *regs);
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
- unsigned long flags,
- int skip, int pc, struct pt_regs *regs)
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
struct trace_buffer *buffer,
- unsigned long flags,
- int skip, int pc, struct pt_regs *regs)
+ unsigned long trace_ctx,
+ int skip, struct pt_regs *regs)
{
}

@@ -918,24 +918,24 @@ static inline void ftrace_trace_stack(struct trace_array *tr,

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
- int type, unsigned long flags, int pc)
+ int type, unsigned int trace_ctx)
{
struct trace_entry *ent = ring_buffer_event_data(event);

- tracing_generic_entry_update(ent, type, flags, pc);
+ tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
struct ring_buffer_event *event;

event = ring_buffer_lock_reserve(buffer, len);
if (event != NULL)
- trace_event_setup(event, type, flags, pc);
+ trace_event_setup(event, type, trace_ctx);

return event;
}
@@ -996,7 +996,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct print_entry *entry;
- unsigned long irq_flags;
+ unsigned int trace_ctx;
int alloc;
int pc;

@@ -1010,11 +1010,11 @@ int __trace_puts(unsigned long ip, const char *str, int size)

alloc = sizeof(*entry) + size + 2; /* possible \n added */

- local_save_flags(irq_flags);
+ trace_ctx = tracing_gen_ctx_flags();
buffer = global_trace.array_buffer.buffer;
ring_buffer_nest_start(buffer);
- event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
- irq_flags, pc);
+ event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+ trace_ctx);
if (!event) {
size = 0;
goto out;
@@ -1033,7 +1033,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
entry->buf[size] = '\0';

__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
+ ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
out:
ring_buffer_nest_end(buffer);
return size;
@@ -1050,7 +1050,7 @@ int __trace_bputs(unsigned long ip, const char *str)
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct bputs_entry *entry;
- unsigned long irq_flags;
+ unsigned int trace_ctx;
int size = sizeof(struct bputs_entry);
int ret = 0;
int pc;
@@ -1063,12 +1063,12 @@ int __trace_bputs(unsigned long ip, const char *str)
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;

- local_save_flags(irq_flags);
+ trace_ctx = tracing_gen_ctx_flags();
buffer = global_trace.array_buffer.buffer;

ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
- irq_flags, pc);
+ trace_ctx);
if (!event)
goto out;

@@ -1077,7 +1077,7 @@ int __trace_bputs(unsigned long ip, const char *str)
entry->str = str;

__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
+ ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);

ret = 1;
out:
@@ -2577,36 +2577,78 @@ enum print_line_t trace_handle_return(struct trace_seq *s)
}
EXPORT_SYMBOL_GPL(trace_handle_return);

-void
-tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
- unsigned long flags, int pc)
+static unsigned int __tracing_gen_ctx_flags(unsigned long irqflags)
{
- struct task_struct *tsk = current;
+ unsigned int trace_flags = 0;
+ unsigned int pc;
+
+ pc = preempt_count();

- entry->preempt_count = pc & 0xff;
- entry->pid = (tsk) ? tsk->pid : 0;
- entry->type = type;
- entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
- (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
+ if (irqs_disabled_flags(irqflags))
+ trace_flags |= TRACE_FLAG_IRQS_OFF;
#else
- TRACE_FLAG_IRQS_NOSUPPORT |
+ trace_flags |= TRACE_FLAG_IRQS_NOSUPPORT;
#endif
- ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
- ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
- ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
- (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
+
+ if (pc & NMI_MASK)
+ trace_flags |= TRACE_FLAG_NMI;
+ if (pc & HARDIRQ_MASK)
+ trace_flags |= TRACE_FLAG_HARDIRQ;
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ if (in_serving_softirq())
+ trace_flags |= TRACE_FLAG_SOFTIRQ;
+ } else {
+ if (pc & SOFTIRQ_OFFSET)
+ trace_flags |= TRACE_FLAG_SOFTIRQ;
+ }
+ if (tif_need_resched())
+ trace_flags |= TRACE_FLAG_NEED_RESCHED;
+ if (test_preempt_need_resched())
+ trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
+ return (trace_flags << 16) | (pc & 0xff);
+}
+
+unsigned int _tracing_gen_ctx_flags(unsigned long irqflags)
+{
+ return __tracing_gen_ctx_flags(irqflags);
+}
+
+unsigned int tracing_gen_ctx_flags(void)
+{
+ unsigned long irqflags;
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+ local_save_flags(irqflags);
+#else
+ irqflags = 0;
+#endif
+ return _tracing_gen_ctx_flags(irqflags);
+}
+
+unsigned int tracing_gen_ctx_flags_dect(void)
+{
+ unsigned int trace_ctx;
+
+ trace_ctx = tracing_gen_ctx_flags();
+
+ /*
+ * Subtract one from the preemption counter if preemption is enabled,
+ * see trace_event_buffer_reserve() for details.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPTION))
+ trace_ctx--;
+ return trace_ctx;
}
-EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
- return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
+ return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
}

DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
@@ -2726,7 +2768,7 @@ struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
struct trace_event_file *trace_file,
int type, unsigned long len,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
struct ring_buffer_event *entry;
int val;
@@ -2739,15 +2781,15 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
/* Try to use the per cpu buffer first */
val = this_cpu_inc_return(trace_buffered_event_cnt);
if (val == 1) {
- trace_event_setup(entry, type, flags, pc);
+ trace_event_setup(entry, type, trace_ctx);
entry->array[0] = len;
return entry;
}
this_cpu_dec(trace_buffered_event_cnt);
}

- entry = __trace_buffer_lock_reserve(*current_rb,
- type, len, flags, pc);
+ entry = __trace_buffer_lock_reserve(*current_rb, type, len,
+ trace_ctx);
/*
* If tracing is off, but we have triggers enabled
* we still need to look at the event data. Use the temp_buffer
@@ -2756,8 +2798,8 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
*/
if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
*current_rb = temp_buffer;
- entry = __trace_buffer_lock_reserve(*current_rb,
- type, len, flags, pc);
+ entry = __trace_buffer_lock_reserve(*current_rb, type, len,
+ trace_ctx);
}
return entry;
}
@@ -2834,7 +2876,7 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
return ret;
}

-void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
+void trace_event_buffer_commit__(struct trace_event_buffer *fbuffer)
{
if (static_key_false(&tracepoint_printk_key.key))
output_printk(fbuffer);
@@ -2843,9 +2885,9 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
fbuffer->event, fbuffer->entry,
- fbuffer->flags, fbuffer->pc, fbuffer->regs);
+ fbuffer->trace_ctx, fbuffer->regs);
}
-EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
+EXPORT_SYMBOL_GPL(trace_event_buffer_commit__);

/*
* Skip 3:
@@ -2859,7 +2901,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
- unsigned long flags, int pc,
+ unsigned int trace_ctx,
struct pt_regs *regs)
{
__buffer_unlock_commit(buffer, event);
@@ -2870,8 +2912,8 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
* and mmiotrace, but that's ok if they lose a function or
* two. They are not that meaningful.
*/
- ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
- ftrace_trace_userstack(tr, buffer, flags, pc);
+ ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
+ ftrace_trace_userstack(tr, buffer, trace_ctx);
}

/*
@@ -2885,9 +2927,8 @@ trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
}

void
-trace_function(struct trace_array *tr,
- unsigned long ip, unsigned long parent_ip, unsigned long flags,
- int pc)
+trace_function(struct trace_array *tr, unsigned long ip, unsigned long
+ parent_ip, unsigned int trace_ctx)
{
struct trace_event_call *call = &event_function;
struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -2895,7 +2936,7 @@ trace_function(struct trace_array *tr,
struct ftrace_entry *entry;

event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
- flags, pc);
+ trace_ctx);
if (!event)
return;
entry = ring_buffer_event_data(event);
@@ -2929,8 +2970,8 @@ static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct trace_buffer *buffer,
- unsigned long flags,
- int skip, int pc, struct pt_regs *regs)
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs)
{
struct trace_event_call *call = &event_kernel_stack;
struct ring_buffer_event *event;
@@ -2977,7 +3018,7 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,

size = nr_entries * sizeof(unsigned long);
event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
- sizeof(*entry) + size, flags, pc);
+ sizeof(*entry) + size, trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
@@ -2998,22 +3039,22 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,

static inline void ftrace_trace_stack(struct trace_array *tr,
struct trace_buffer *buffer,
- unsigned long flags,
- int skip, int pc, struct pt_regs *regs)
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs)
{
if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
return;

- __ftrace_trace_stack(buffer, flags, skip, pc, regs);
+ __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
}

-void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
- int pc)
+void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
+ int skip)
{
struct trace_buffer *buffer = tr->array_buffer.buffer;

if (rcu_is_watching()) {
- __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+ __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
return;
}

@@ -3027,7 +3068,7 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
return;

rcu_irq_enter_irqson();
- __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+ __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
rcu_irq_exit_irqson();
}

@@ -3049,7 +3090,7 @@ void trace_dump_stack(int skip)
skip++;
#endif
__ftrace_trace_stack(global_trace.array_buffer.buffer,
- flags, skip, preempt_count(), NULL);
+ tracing_gen_ctx_flags(), skip, NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);

@@ -3058,7 +3099,7 @@ static DEFINE_PER_CPU(int, user_stack_count);

static void
ftrace_trace_userstack(struct trace_array *tr,
- struct trace_buffer *buffer, unsigned long flags, int pc)
+ struct trace_buffer *buffer, unsigned int trace_ctx)
{
struct trace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
@@ -3085,7 +3126,7 @@ ftrace_trace_userstack(struct trace_array *tr,
__this_cpu_inc(user_stack_count);

event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
- sizeof(*entry), flags, pc);
+ sizeof(*entry), trace_ctx);
if (!event)
goto out_drop_count;
entry = ring_buffer_event_data(event);
@@ -3235,9 +3276,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
struct trace_buffer *buffer;
struct trace_array *tr = &global_trace;
struct bprint_entry *entry;
- unsigned long flags;
+ unsigned int trace_ctx;
char *tbuffer;
- int len = 0, size, pc;
+ int len = 0, size;

if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
@@ -3245,7 +3286,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();

- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx_flags();
preempt_disable_notrace();

tbuffer = get_trace_buf();
@@ -3259,12 +3300,11 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
goto out_put;

- local_save_flags(flags);
size = sizeof(*entry) + sizeof(u32) * len;
buffer = tr->array_buffer.buffer;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
- flags, pc);
+ trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
@@ -3274,7 +3314,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
memcpy(entry->buf, tbuffer, sizeof(u32) * len);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
+ ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
}

out:
@@ -3297,9 +3337,9 @@ __trace_array_vprintk(struct trace_buffer *buffer,
{
struct trace_event_call *call = &event_print;
struct ring_buffer_event *event;
- int len = 0, size, pc;
+ int len = 0, size;
struct print_entry *entry;
- unsigned long flags;
+ unsigned int trace_ctx;
char *tbuffer;

if (tracing_disabled || tracing_selftest_running)
@@ -3308,7 +3348,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();

- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx_flags();
preempt_disable_notrace();


@@ -3320,11 +3360,10 @@ __trace_array_vprintk(struct trace_buffer *buffer,

len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

- local_save_flags(flags);
size = sizeof(*entry) + len + 1;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
- flags, pc);
+ trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
@@ -3333,7 +3372,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
memcpy(&entry->buf, tbuffer, len + 1);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
+ ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
}

out:
@@ -6646,7 +6685,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
enum event_trigger_type tt = ETT_NONE;
struct trace_buffer *buffer;
struct print_entry *entry;
- unsigned long irq_flags;
ssize_t written;
int size;
int len;
@@ -6666,7 +6704,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,

BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

- local_save_flags(irq_flags);
size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

/* If less than "<faulted>", then make sure we can still add that */
@@ -6675,7 +6712,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,

buffer = tr->array_buffer.buffer;
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
- irq_flags, preempt_count());
+ tracing_gen_ctx_flags());
if (unlikely(!event))
/* Ring buffer disabled, return as if not open for write */
return -EBADF;
@@ -6727,7 +6764,6 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct raw_data_entry *entry;
- unsigned long irq_flags;
ssize_t written;
int size;
int len;
@@ -6749,14 +6785,13 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,

BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

- local_save_flags(irq_flags);
size = sizeof(*entry) + cnt;
if (cnt < FAULT_SIZE_ID)
size += FAULT_SIZE_ID - cnt;

buffer = tr->array_buffer.buffer;
event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
- irq_flags, preempt_count());
+ tracing_gen_ctx_flags());
if (!event)
/* Ring buffer disabled, return as if not open for write */
return -EBADF;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 9462251cab92b..bbbb83fc72ed2 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -589,8 +589,7 @@ struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
- unsigned long flags,
- int pc);
+ unsigned int trace_ctx);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data);
@@ -615,11 +614,11 @@ unsigned long trace_total_entries(struct trace_array *tr);
void trace_function(struct trace_array *tr,
unsigned long ip,
unsigned long parent_ip,
- unsigned long flags, int pc);
+ unsigned int trace_ctx);
void trace_graph_function(struct trace_array *tr,
unsigned long ip,
unsigned long parent_ip,
- unsigned long flags, int pc);
+ unsigned int trace_ctx);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
@@ -687,11 +686,10 @@ static inline void latency_fsnotify(struct trace_array *tr) { }
#endif

#ifdef CONFIG_STACKTRACE
-void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
- int pc);
+void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
#else
-static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
- int skip, int pc)
+static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
+ int skip)
{
}
#endif /* CONFIG_STACKTRACE */
@@ -826,10 +824,10 @@ extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent *trace,
- unsigned long flags, int pc);
+ unsigned int trace_ctx);
extern void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret *trace,
- unsigned long flags, int pc);
+ unsigned int trace_ctx);

#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash __rcu *ftrace_graph_hash;
@@ -1292,15 +1290,15 @@ extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
- unsigned long flags, int pc,
+ unsigned int trace_ctx,
struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
- trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
+ trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
@@ -1361,8 +1359,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
* @buffer: The ring buffer that the event is being written to
* @event: The event meta data in the ring buffer
* @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
+ * @trace_ctx: The tracing context flags.
*
* This is a helper function to handle triggers that require data
* from the event itself. It also tests the event against filters and
@@ -1372,12 +1369,12 @@ static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
- void *entry, unsigned long irq_flags, int pc)
+ void *entry, unsigned int trace_ctx)
{
enum event_trigger_type tt = ETT_NONE;

if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
- trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
+ trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);

if (tt)
event_triggers_post_call(file, tt);
@@ -1389,8 +1386,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
* @buffer: The ring buffer that the event is being written to
* @event: The event meta data in the ring buffer
* @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
+ * @trace_ctx: The tracing context flags.
*
* This is a helper function to handle triggers that require data
* from the event itself. It also tests the event against filters and
@@ -1403,14 +1399,14 @@ static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
- void *entry, unsigned long irq_flags, int pc,
+ void *entry, unsigned int trace_ctx,
struct pt_regs *regs)
{
enum event_trigger_type tt = ETT_NONE;

if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
trace_buffer_unlock_commit_regs(file->tr, buffer, event,
- irq_flags, pc, regs);
+ trace_ctx, regs);

if (tt)
event_triggers_post_call(file, tt);
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index a711816559580..5e64b06ab5189 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -421,11 +421,8 @@ NOKPROBE_SYMBOL(perf_trace_buf_alloc);
void perf_trace_buf_update(void *record, u16 type)
{
struct trace_entry *entry = record;
- int pc = preempt_count();
- unsigned long flags;

- local_save_flags(flags);
- tracing_generic_entry_update(entry, type, flags, pc);
+ tracing_generic_entry_update(entry, type, tracing_gen_ctx_flags());
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index bd5d38ef2e920..f1dc63c1c2395 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -258,22 +258,19 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
trace_event_ignore_this_pid(trace_file))
return NULL;

- local_save_flags(fbuffer->flags);
- fbuffer->pc = preempt_count();
/*
* If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
* preemption (adding one to the preempt_count). Since we are
* interested in the preempt_count at the time the tracepoint was
* hit, we need to subtract one to offset the increment.
*/
- if (IS_ENABLED(CONFIG_PREEMPTION))
- fbuffer->pc--;
+ fbuffer->trace_ctx = tracing_gen_ctx_flags_dect();
fbuffer->trace_file = trace_file;

fbuffer->event =
trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
event_call->event.type, len,
- fbuffer->flags, fbuffer->pc);
+ fbuffer->trace_ctx);
if (!fbuffer->event)
return NULL;

@@ -3679,11 +3676,11 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
struct ring_buffer_event *event;
struct ftrace_entry *entry;
unsigned long flags;
+ unsigned int trace_ctx;
long disabled;
int cpu;
- int pc;

- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx_flags();
preempt_disable_notrace();
cpu = raw_smp_processor_id();
disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
@@ -3695,7 +3692,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,

event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
TRACE_FN, sizeof(*entry),
- flags, pc);
+ trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
@@ -3703,7 +3700,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
entry->parent_ip = parent_ip;

event_trigger_unlock_commit(&event_trace_file, buffer, event,
- entry, flags, pc);
+ entry, trace_ctx);
out:
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
preempt_enable_notrace();
diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c
index 22bcf7c51d1ee..a5465a93fc149 100644
--- a/kernel/trace/trace_events_inject.c
+++ b/kernel/trace/trace_events_inject.c
@@ -25,7 +25,7 @@ trace_inject_entry(struct trace_event_file *file, void *rec, int len)
if (entry) {
memcpy(entry, rec, len);
written = len;
- trace_event_buffer_commit(&fbuffer);
+ trace_event_buffer_commit__(&fbuffer);
}
rcu_read_unlock_sched();

@@ -192,7 +192,6 @@ static void *trace_alloc_entry(struct trace_event_call *call, int *size)
static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
{
struct ftrace_event_field *field;
- unsigned long irq_flags;
void *entry = NULL;
int entry_size;
u64 val = 0;
@@ -203,9 +202,8 @@ static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
if (!entry)
return -ENOMEM;

- local_save_flags(irq_flags);
- tracing_generic_entry_update(entry, call->event.type, irq_flags,
- preempt_count());
+ tracing_generic_entry_update(entry, call->event.type,
+ tracing_gen_ctx_flags());

while ((len = parse_field(str, call, &field, &val)) > 0) {
if (is_function_field(field))
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 5a8bc0b421f10..ed54d57027a07 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -504,7 +504,7 @@ static notrace void trace_event_raw_event_synth(void *__data,
}
}

- trace_event_buffer_commit(&fbuffer);
+ trace_event_buffer_commit__(&fbuffer);
out:
ring_buffer_nest_end(buffer);
}
@@ -1494,7 +1494,7 @@ __synth_event_trace_start(struct trace_event_file *file,
static inline void
__synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
- trace_event_buffer_commit(&trace_state->fbuffer);
+ trace_event_buffer_commit__(&trace_state->fbuffer);

ring_buffer_nest_end(trace_state->buffer);
}
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index c5095dd28e20c..a09be095f0594 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -132,10 +132,9 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
{
struct trace_array *tr = op->private;
struct trace_array_cpu *data;
- unsigned long flags;
+ unsigned int trace_ctx;
int bit;
int cpu;
- int pc;

if (unlikely(!tr->function_enabled))
return;
@@ -144,15 +143,14 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
if (bit < 0)
return;

- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx_flags();
preempt_disable_notrace();

cpu = smp_processor_id();
data = per_cpu_ptr(tr->array_buffer.data, cpu);
- if (!atomic_read(&data->disabled)) {
- local_save_flags(flags);
- trace_function(tr, ip, parent_ip, flags, pc);
- }
+ if (!atomic_read(&data->disabled))
+ trace_function(tr, ip, parent_ip, trace_ctx);
+
ftrace_test_recursion_unlock(bit);
preempt_enable_notrace();
}
@@ -184,7 +182,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
unsigned long flags;
long disabled;
int cpu;
- int pc;
+ int trace_ctx;

if (unlikely(!tr->function_enabled))
return;
@@ -199,9 +197,9 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
disabled = atomic_inc_return(&data->disabled);

if (likely(disabled == 1)) {
- pc = preempt_count();
- trace_function(tr, ip, parent_ip, flags, pc);
- __trace_stack(tr, flags, STACK_SKIP, pc);
+ trace_ctx = tracing_gen_ctx_flags();
+ trace_function(tr, ip, parent_ip, trace_ctx);
+ __trace_stack(tr, trace_ctx, STACK_SKIP);
}

atomic_dec(&data->disabled);
@@ -405,12 +403,12 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
static __always_inline void trace_stack(struct trace_array *tr)
{
unsigned long flags;
- int pc;
+ unsigned int trace_ctx;

local_save_flags(flags);
- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx_flags();

- __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
+ __trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d874dec87131a..83d6be491d468 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -96,8 +96,7 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,

int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent *trace,
- unsigned long flags,
- int pc)
+ unsigned int trace_ctx)
{
struct trace_event_call *call = &event_funcgraph_entry;
struct ring_buffer_event *event;
@@ -105,7 +104,7 @@ int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent_entry *entry;

event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
- sizeof(*entry), flags, pc);
+ sizeof(*entry), trace_ctx);
if (!event)
return 0;
entry = ring_buffer_event_data(event);
@@ -129,10 +128,10 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
struct trace_array *tr = graph_array;
struct trace_array_cpu *data;
unsigned long flags;
+ unsigned int trace_ctx;
long disabled;
int ret;
int cpu;
- int pc;

if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
return 0;
@@ -174,8 +173,8 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
- pc = preempt_count();
- ret = __trace_graph_entry(tr, trace, flags, pc);
+ trace_ctx = _tracing_gen_ctx_flags(flags);
+ ret = __trace_graph_entry(tr, trace, trace_ctx);
} else {
ret = 0;
}
@@ -188,7 +187,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)

static void
__trace_graph_function(struct trace_array *tr,
- unsigned long ip, unsigned long flags, int pc)
+ unsigned long ip, unsigned int trace_ctx)
{
u64 time = trace_clock_local();
struct ftrace_graph_ent ent = {
@@ -202,22 +201,21 @@ __trace_graph_function(struct trace_array *tr,
.rettime = time,
};

- __trace_graph_entry(tr, &ent, flags, pc);
- __trace_graph_return(tr, &ret, flags, pc);
+ __trace_graph_entry(tr, &ent, trace_ctx);
+ __trace_graph_return(tr, &ret, trace_ctx);
}

void
trace_graph_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
- __trace_graph_function(tr, ip, flags, pc);
+ __trace_graph_function(tr, ip, trace_ctx);
}

void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret *trace,
- unsigned long flags,
- int pc)
+ unsigned int trace_ctx)
{
struct trace_event_call *call = &event_funcgraph_exit;
struct ring_buffer_event *event;
@@ -225,7 +223,7 @@ void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret_entry *entry;

event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
- sizeof(*entry), flags, pc);
+ sizeof(*entry), trace_ctx);
if (!event)
return;
entry = ring_buffer_event_data(event);
@@ -239,9 +237,9 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
struct trace_array *tr = graph_array;
struct trace_array_cpu *data;
unsigned long flags;
+ unsigned int trace_ctx;
long disabled;
int cpu;
- int pc;

ftrace_graph_addr_finish(trace);

@@ -255,8 +253,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
- pc = preempt_count();
- __trace_graph_return(tr, trace, flags, pc);
+ trace_ctx = _tracing_gen_ctx_flags(flags);
+ __trace_graph_return(tr, trace, trace_ctx);
}
atomic_dec(&data->disabled);
local_irq_restore(flags);
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index c0df9b97f147d..375e7635baf67 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -108,14 +108,9 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct hwlat_entry *entry;
- unsigned long flags;
- int pc;
-
- pc = preempt_count();
- local_save_flags(flags);

event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
- flags, pc);
+ tracing_gen_ctx_flags());
if (!event)
return;
entry = ring_buffer_event_data(event);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d06aab4dcbb8f..2f8dce047f049 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -143,11 +143,14 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
+ unsigned int trace_ctx;

if (!func_prolog_dec(tr, &data, &flags))
return;

- trace_function(tr, ip, parent_ip, flags, preempt_count());
+ trace_ctx = _tracing_gen_ctx_flags(flags);
+
+ trace_function(tr, ip, parent_ip, trace_ctx);

atomic_dec(&data->disabled);
}
@@ -177,8 +180,8 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
+ unsigned int trace_ctx;
int ret;
- int pc;

if (ftrace_graph_ignore_func(trace))
return 0;
@@ -195,8 +198,8 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
if (!func_prolog_dec(tr, &data, &flags))
return 0;

- pc = preempt_count();
- ret = __trace_graph_entry(tr, trace, flags, pc);
+ trace_ctx = _tracing_gen_ctx_flags(flags);
+ ret = __trace_graph_entry(tr, trace, trace_ctx);
atomic_dec(&data->disabled);

return ret;
@@ -207,15 +210,15 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
- int pc;
+ unsigned int trace_ctx;

ftrace_graph_addr_finish(trace);

if (!func_prolog_dec(tr, &data, &flags))
return;

- pc = preempt_count();
- __trace_graph_return(tr, trace, flags, pc);
+ trace_ctx = _tracing_gen_ctx_flags(flags);
+ __trace_graph_return(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
}

@@ -267,12 +270,12 @@ static void irqsoff_print_header(struct seq_file *s)
static void
__trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
if (is_graph(tr))
- trace_graph_function(tr, ip, parent_ip, flags, pc);
+ trace_graph_function(tr, ip, parent_ip, trace_ctx);
else
- trace_function(tr, ip, parent_ip, flags, pc);
+ trace_function(tr, ip, parent_ip, trace_ctx);
}

#else
@@ -322,15 +325,13 @@ check_critical_timing(struct trace_array *tr,
{
u64 T0, T1, delta;
unsigned long flags;
- int pc;
+ unsigned int trace_ctx;

T0 = data->preempt_timestamp;
T1 = ftrace_now(cpu);
delta = T1-T0;

- local_save_flags(flags);
-
- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx_flags();

if (!report_latency(tr, delta))
goto out;
@@ -341,9 +342,9 @@ check_critical_timing(struct trace_array *tr,
if (!report_latency(tr, delta))
goto out_unlock;

- __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+ __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
/* Skip 5 functions to get to the irq/preempt enable function */
- __trace_stack(tr, flags, 5, pc);
+ __trace_stack(tr, trace_ctx, 5);

if (data->critical_sequence != max_sequence)
goto out_unlock;
@@ -363,16 +364,15 @@ check_critical_timing(struct trace_array *tr,
out:
data->critical_sequence = max_sequence;
data->preempt_timestamp = ftrace_now(cpu);
- __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+ __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
}

static nokprobe_inline void
-start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
+start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
int cpu;
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
- unsigned long flags;

if (!tracer_enabled || !tracing_is_enabled())
return;
@@ -393,9 +393,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
data->preempt_timestamp = ftrace_now(cpu);
data->critical_start = parent_ip ? : ip;

- local_save_flags(flags);
-
- __trace_function(tr, ip, parent_ip, flags, pc);
+ __trace_function(tr, ip, parent_ip, tracing_gen_ctx_flags());

per_cpu(tracing_cpu, cpu) = 1;

@@ -403,12 +401,12 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
}

static nokprobe_inline void
-stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
+stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
int cpu;
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
- unsigned long flags;
+ unsigned int trace_ctx;

cpu = raw_smp_processor_id();
/* Always clear the tracing cpu on stopping the trace */
@@ -428,8 +426,8 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)

atomic_inc(&data->disabled);

- local_save_flags(flags);
- __trace_function(tr, ip, parent_ip, flags, pc);
+ trace_ctx = tracing_gen_ctx_flags();
+ __trace_function(tr, ip, parent_ip, trace_ctx);
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
data->critical_start = 0;
atomic_dec(&data->disabled);
@@ -441,7 +439,7 @@ void start_critical_timings(void)
int pc = preempt_count();

if (preempt_trace(pc) || irq_trace())
- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);
@@ -451,7 +449,7 @@ void stop_critical_timings(void)
int pc = preempt_count();

if (preempt_trace(pc) || irq_trace())
- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);
@@ -612,7 +610,7 @@ void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
unsigned int pc = preempt_count();

if (!preempt_trace(pc) && irq_trace())
- stop_critical_timing(a0, a1, pc);
+ stop_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);

@@ -621,7 +619,7 @@ void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
unsigned int pc = preempt_count();

if (!preempt_trace(pc) && irq_trace())
- start_critical_timing(a0, a1, pc);
+ start_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);

@@ -664,7 +662,7 @@ void tracer_preempt_on(unsigned long a0, unsigned long a1)
int pc = preempt_count();

if (preempt_trace(pc) && !irq_trace())
- stop_critical_timing(a0, a1, pc);
+ stop_critical_timing(a0, a1);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
@@ -672,7 +670,7 @@ void tracer_preempt_off(unsigned long a0, unsigned long a1)
int pc = preempt_count();

if (preempt_trace(pc) && !irq_trace())
- start_critical_timing(a0, a1, pc);
+ start_critical_timing(a0, a1);
}

static int preemptoff_tracer_init(struct trace_array *tr)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 97c7a7782db7a..c146bc7b5237c 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1383,8 +1383,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
if (trace_trigger_soft_disabled(trace_file))
return;

- local_save_flags(fbuffer.flags);
- fbuffer.pc = preempt_count();
+ fbuffer.trace_ctx = tracing_gen_ctx_flags();
fbuffer.trace_file = trace_file;

dsize = __get_data_size(&tk->tp, regs);
@@ -1393,7 +1392,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
call->event.type,
sizeof(*entry) + tk->tp.size + dsize,
- fbuffer.flags, fbuffer.pc);
+ fbuffer.trace_ctx);
if (!fbuffer.event)
return;

@@ -1402,7 +1401,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
entry->ip = (unsigned long)tk->rp.kp.addr;
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

- trace_event_buffer_commit(&fbuffer);
+ trace_event_buffer_commit__(&fbuffer);
}

static void
@@ -1431,8 +1430,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
if (trace_trigger_soft_disabled(trace_file))
return;

- local_save_flags(fbuffer.flags);
- fbuffer.pc = preempt_count();
+ fbuffer.trace_ctx = tracing_gen_ctx_flags();
fbuffer.trace_file = trace_file;

dsize = __get_data_size(&tk->tp, regs);
@@ -1440,7 +1438,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
call->event.type,
sizeof(*entry) + tk->tp.size + dsize,
- fbuffer.flags, fbuffer.pc);
+ fbuffer.trace_ctx);
if (!fbuffer.event)
return;

@@ -1450,7 +1448,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
entry->ret_ip = (unsigned long)ri->ret_addr;
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

- trace_event_buffer_commit(&fbuffer);
+ trace_event_buffer_commit__(&fbuffer);
}

static void
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 84582bf1ed5fb..5a7d37bf6888c 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -300,10 +300,11 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry;
- int pc = preempt_count();
+ unsigned int trace_ctx;

+ trace_ctx = _tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
- sizeof(*entry), 0, pc);
+ sizeof(*entry), trace_ctx);
if (!event) {
atomic_inc(&dropped_count);
return;
@@ -312,7 +313,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
entry->rw = *rw;

if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -330,10 +331,11 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry;
- int pc = preempt_count();
+ unsigned int trace_ctx;

+ trace_ctx = _tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
- sizeof(*entry), 0, pc);
+ sizeof(*entry), trace_ctx);
if (!event) {
atomic_inc(&dropped_count);
return;
@@ -342,7 +344,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
entry->map = *map;

if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

void mmio_trace_mapping(struct mmiotrace_map *map)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index c0181066dbe90..034b9e71f8aa1 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -67,7 +67,7 @@ static bool function_enabled;
static int
func_prolog_preempt_disable(struct trace_array *tr,
struct trace_array_cpu **data,
- int *pc)
+ unsigned int *trace_ctx)
{
long disabled;
int cpu;
@@ -75,7 +75,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
if (likely(!wakeup_task))
return 0;

- *pc = preempt_count();
+ *trace_ctx = tracing_gen_ctx_flags();
preempt_disable_notrace();

cpu = raw_smp_processor_id();
@@ -117,7 +117,8 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
unsigned long flags;
- int pc, ret = 0;
+ unsigned int trace_ctx;
+ int ret = 0;

if (ftrace_graph_ignore_func(trace))
return 0;
@@ -131,11 +132,11 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
if (ftrace_graph_notrace_addr(trace->func))
return 1;

- if (!func_prolog_preempt_disable(tr, &data, &pc))
+ if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return 0;

local_save_flags(flags);
- ret = __trace_graph_entry(tr, trace, flags, pc);
+ ret = __trace_graph_entry(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
preempt_enable_notrace();

@@ -147,15 +148,15 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
unsigned long flags;
- int pc;
+ unsigned int trace_ctx;

ftrace_graph_addr_finish(trace);

- if (!func_prolog_preempt_disable(tr, &data, &pc))
+ if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return;

local_save_flags(flags);
- __trace_graph_return(tr, trace, flags, pc);
+ __trace_graph_return(tr, trace, trace_ctx);
atomic_dec(&data->disabled);

preempt_enable_notrace();
@@ -217,13 +218,13 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
unsigned long flags;
- int pc;
+ unsigned int trace_ctx;

- if (!func_prolog_preempt_disable(tr, &data, &pc))
+ if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return;

local_irq_save(flags);
- trace_function(tr, ip, parent_ip, flags, pc);
+ trace_function(tr, ip, parent_ip, trace_ctx);
local_irq_restore(flags);

atomic_dec(&data->disabled);
@@ -303,12 +304,12 @@ static void wakeup_print_header(struct seq_file *s)
static void
__trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
if (is_graph(tr))
- trace_graph_function(tr, ip, parent_ip, flags, pc);
+ trace_graph_function(tr, ip, parent_ip, trace_ctx);
else
- trace_function(tr, ip, parent_ip, flags, pc);
+ trace_function(tr, ip, parent_ip, trace_ctx);
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
@@ -375,7 +376,7 @@ static void
tracing_sched_switch_trace(struct trace_array *tr,
struct task_struct *prev,
struct task_struct *next,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
struct trace_event_call *call = &event_context_switch;
struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -383,7 +384,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
struct ctx_switch_entry *entry;

event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
- sizeof(*entry), flags, pc);
+ sizeof(*entry), trace_ctx);
if (!event)
return;
entry = ring_buffer_event_data(event);
@@ -396,14 +397,14 @@ tracing_sched_switch_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(next);

if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
struct task_struct *wakee,
struct task_struct *curr,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
struct trace_event_call *call = &event_wakeup;
struct ring_buffer_event *event;
@@ -411,7 +412,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
struct trace_buffer *buffer = tr->array_buffer.buffer;

event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
- sizeof(*entry), flags, pc);
+ sizeof(*entry), trace_ctx);
if (!event)
return;
entry = ring_buffer_event_data(event);
@@ -424,7 +425,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(wakee);

if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

static void notrace
@@ -436,7 +437,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
unsigned long flags;
long disabled;
int cpu;
- int pc;
+ unsigned int trace_ctx;

tracing_record_cmdline(prev);

@@ -455,7 +456,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
if (next != wakeup_task)
return;

- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx_flags();

/* disable local data, not wakeup_cpu data */
cpu = raw_smp_processor_id();
@@ -473,9 +474,9 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
/* The task we are waiting for is waking up */
data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);

- __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
- tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
- __trace_stack(wakeup_trace, flags, 0, pc);
+ __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
+ tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
+ __trace_stack(wakeup_trace, trace_ctx, 0);

T0 = data->preempt_timestamp;
T1 = ftrace_now(cpu);
@@ -529,7 +530,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
int cpu = smp_processor_id();
unsigned long flags;
long disabled;
- int pc;
+ unsigned int trace_ctx;

if (likely(!tracer_enabled))
return;
@@ -550,7 +551,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
(!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
return;

- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx_flags();
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
if (unlikely(disabled != 1))
goto out;
@@ -585,15 +586,15 @@ probe_wakeup(void *ignore, struct task_struct *p)

data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
data->preempt_timestamp = ftrace_now(cpu);
- tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
- __trace_stack(wakeup_trace, flags, 0, pc);
+ tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
+ __trace_stack(wakeup_trace, trace_ctx, 0);

/*
* We must be careful in using CALLER_ADDR2. But since wake_up
* is not called by an assembly function (where as schedule is)
* it should be safe to use it here.
*/
- __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+ __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);

out_locked:
arch_spin_unlock(&wakeup_lock);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index d85a2f0f316b3..71a2e705970f0 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -298,9 +298,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
struct trace_buffer *buffer;
- unsigned long irq_flags;
+ unsigned int trace_ctx;
unsigned long args[6];
- int pc;
int syscall_nr;
int size;

@@ -322,12 +321,11 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)

size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

- local_save_flags(irq_flags);
- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx_flags();

buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer,
- sys_data->enter_event->event.type, size, irq_flags, pc);
+ sys_data->enter_event->event.type, size, trace_ctx);
if (!event)
return;

@@ -337,7 +335,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);

event_trigger_unlock_commit(trace_file, buffer, event, entry,
- irq_flags, pc);
+ trace_ctx);
}

static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
@@ -348,8 +346,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
struct trace_buffer *buffer;
- unsigned long irq_flags;
- int pc;
+ unsigned int trace_ctx;
int syscall_nr;

syscall_nr = trace_get_syscall_nr(current, regs);
@@ -368,13 +365,12 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
if (!sys_data)
return;

- local_save_flags(irq_flags);
- pc = preempt_count();
+ trace_ctx = tracing_gen_ctx_flags();

buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer,
sys_data->exit_event->event.type, sizeof(*entry),
- irq_flags, pc);
+ trace_ctx);
if (!event)
return;

@@ -383,7 +379,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
entry->ret = syscall_get_return_value(current, regs);

event_trigger_unlock_commit(trace_file, buffer, event, entry,
- irq_flags, pc);
+ trace_ctx);
}

static int reg_event_syscall_enter(struct trace_event_file *file,
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 3cf7128e1ad30..a1ed96a7a4624 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -961,7 +961,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
size = esize + tu->tp.size + dsize;
event = trace_event_buffer_lock_reserve(&buffer, trace_file,
- call->event.type, size, 0, 0);
+ call->event.type, size, 0);
if (!event)
return;

@@ -977,7 +977,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,

memcpy(data, ucb->buf, tu->tp.size + dsize);

- event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
+ event_trigger_unlock_commit(trace_file, buffer, event, entry, 0);
}

/* uprobe handler */
--
2.29.2