Date: 2013-02-04
From: Oleg Nesterov <oleg@redhat.com>
Subject: [PATCH 7/7] uprobes/perf: Avoid uprobe_apply() whenever possible
uprobe_perf_open/close call the costly uprobe_apply() every time. We can
avoid it in the following cases (summarized in the sketch after the list):

- "nr_systemwide != 0" is not changed.

- There is another process/thread with the same ->mm.

- copy_process() does inherit_event(). dup_mmap() preserves the
inserted breakpoints.

- event->attr.enable_on_exec == T; we can rely on uprobe_mmap()
called by the exec/mmap paths.

- tp_target is exiting. Only _close() checks PF_EXITING; I don't
think TRACE_REG_PERF_OPEN can hit a dying task too often.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
---
kernel/trace/trace_uprobe.c | 42 ++++++++++++++++++++++++++++++++++++------
1 files changed, 36 insertions(+), 6 deletions(-)

diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 1114619..e4aab34 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -677,30 +677,60 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
return false;
}

+static inline bool
+uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
+{
+ return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
+}
+
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
+ bool done;
+
write_lock(&tu->filter.rwlock);
- if (event->hw.tp_target)
+ if (event->hw.tp_target) {
+ /*
+ * event->parent != NULL means copy_process(), we can avoid
+ * uprobe_apply(). current->mm must be probed and we can rely
+ * on dup_mmap() which preserves the already installed bp's.
+ *
+ * attr.enable_on_exec means that exec/mmap will install the
+ * breakpoints we need.
+ */
+ done = tu->filter.nr_systemwide ||
+ event->parent || event->attr.enable_on_exec ||
+ uprobe_filter_event(tu, event);
list_add(&event->hw.tp_list, &tu->filter.perf_events);
- else
+ } else {
+ done = tu->filter.nr_systemwide;
tu->filter.nr_systemwide++;
+ }
write_unlock(&tu->filter.rwlock);

- uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
+ if (!done)
+ uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);

return 0;
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
+ bool done;
+
write_lock(&tu->filter.rwlock);
- if (event->hw.tp_target)
+ if (event->hw.tp_target) {
list_del(&event->hw.tp_list);
- else
+ done = tu->filter.nr_systemwide ||
+ (event->hw.tp_target->flags & PF_EXITING) ||
+ uprobe_filter_event(tu, event);
+ } else {
tu->filter.nr_systemwide--;
+ done = tu->filter.nr_systemwide;
+ }
write_unlock(&tu->filter.rwlock);

- uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
+ if (!done)
+ uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

return 0;
}
--
1.5.5.1

