Subject: Re: [PATCH v3 6/9] perf, x86: handle multiple records in PEBS buffer
On 07/26/2014 12:40 AM, Andi Kleen wrote:
>> Suppose two PEBS events, one with exclude_kernel set. It overflows before
>> entering the kernel; the other event then generates PEBS records from
>> inside the kernel, with both events marked in the overflow field.
>>
>> And only once we leave the kernel can the exclude_kernel event tick
>> again and trigger the assist, finally clearing the bit.
>>
>> If you were to report the records to both events, one would get a lot of
>> kernel info he was not entitled to.
>
> OK, that case can be filtered in software. Shouldn't be too difficult.
> Perhaps just using the ip:
>
>         if (event->attr.exclude_kernel && pebs->ip >= __PAGE_OFFSET)
>                 skip;
>         if (event->attr.exclude_user && pebs->ip < __PAGE_OFFSET)
>                 skip;
>
> This would also help with the existing skid.
>
> Any other concerns?
>
> -Andi
>

How about the following patch?
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 33b4c0e..ea76507 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -1016,6 +1016,16 @@ static void setup_pebs_sample_data(struct perf_event *event,
         data->br_stack = &cpuc->lbr_stack;
 }
 
+static inline bool intel_pmu_pebs_filter(struct perf_event *event,
+                                         struct pebs_record_nhm *record)
+{
+        if (event->attr.exclude_user && !kernel_ip(record->ip))
+                return true;
+        if (event->attr.exclude_kernel && kernel_ip(record->ip))
+                return true;
+        return false;
+}
+
 static void __intel_pmu_pebs_event(struct perf_event *event,
                                    struct pt_regs *iregs,
                                    void *at, void *top, int count)
@@ -1052,6 +1062,8 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
                 struct pebs_record_nhm *p = at;
                 if (!(p->status & (1 << event->hw.idx)))
                         continue;
+                if (intel_pmu_pebs_filter(event, p))
+                        continue;
 
                 setup_pebs_sample_data(event, iregs, at, &data, &regs);
                 perf_output_sample(&handle, &header, &data, event);
@@ -1139,6 +1151,8 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                         WARN_ON_ONCE(!event);
                         if (!event->attr.precise_ip)
                                 continue;
+                        if (intel_pmu_pebs_filter(event, p))
+                                continue;
                         counts[bit]++;
                 }
         }
@@ -1149,7 +1163,8 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                 event = cpuc->events[bit];
                 for (at = base; at < top; at += x86_pmu.pebs_record_size) {
                         struct pebs_record_nhm *p = at;
-                        if (p->status & (1 << bit))
+                        if ((p->status & (1 << bit)) &&
+                            !intel_pmu_pebs_filter(event, p))
                                 break;
                 }

---
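One reason to use kernel_ip() here instead of the raw __PAGE_OFFSET compare
suggested above: on x86-64 it tests the sign bit of the address, so it
classifies all kernel-space IPs (vmalloc, modules, etc.) as kernel, not just
the direct map. For reference, the existing helper in
arch/x86/kernel/cpu/perf_event.h looks roughly like this (quoted from memory,
treat it as a sketch):

static inline int kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
        /* 32-bit: kernel addresses live above PAGE_OFFSET */
        return ip > PAGE_OFFSET;
#else
        /* 64-bit: kernel addresses have the top bit set */
        return (long)ip < 0;
#endif
}

So the filter above should also do the right thing for records whose IP falls
outside the direct mapping.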

