Date:    Thu, 21 Mar 2019 22:13:55 +0100
From:    Peter Zijlstra <>
Subject: Re: [PATCH V2 04/23] perf/x86/intel: Support adaptive PEBSv4
On Thu, Mar 21, 2019 at 01:56:44PM -0700, kan.liang@linux.intel.com wrote:
> @@ -1434,20 +1692,20 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
>  		return;
>  
>  	while (count > 1) {
> -		setup_pebs_sample_data(event, iregs, at, &data, &regs);
> -		perf_event_output(event, &data, &regs);
> -		at += x86_pmu.pebs_record_size;
> +		x86_pmu.setup_pebs_sample_data(event, iregs, at, &data, regs);
> +		perf_event_output(event, &data, regs);
> +		at = next_pebs_record(at);
>  		at = get_next_pebs_record_by_bit(at, top, bit);
>  		count--;
>  	}
>  
> -	setup_pebs_sample_data(event, iregs, at, &data, &regs);
> +	x86_pmu.setup_pebs_sample_data(event, iregs, at, &data, regs);
If you make setup_pebs_sample_data() a function pointer argument of this
function, then the call site below can pass it directly:
> 
>  	/*
>  	 * All but the last records are processed.
>  	 * The last one is left to be able to call the overflow handler.
>  	 */
> -	if (perf_event_overflow(event, &data, &regs)) {
> +	if (perf_event_overflow(event, &data, regs)) {
>  		x86_pmu_stop(event, 0);
>  		return;
>  	}
> @@ -1626,6 +1884,59 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
>  	}
>  }
>  
> +static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
> +{
> +	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
> +	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> +	struct debug_store *ds = cpuc->ds;
> +	struct perf_event *event;
> +	void *base, *at, *top;
> +	int bit, size;
> +	u64 mask;
> +
> +	if (!x86_pmu.pebs_active)
> +		return;
> +
> +	base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
> +	top = (struct pebs_basic *)(unsigned long)ds->pebs_index;
> +
> +	ds->pebs_index = ds->pebs_buffer_base;
> +
> +	mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
> +	       (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
> +	size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
> +
> +	if (unlikely(base >= top)) {
> +		intel_pmu_pebs_event_update_no_drain(cpuc, size);
> +		return;
> +	}
> +
> +	for (at = base; at < top; at = next_pebs_record(at)) {
> +		u64 pebs_status;
> +
> +		pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
> +		pebs_status &= mask;
> +
> +		for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
> +			counts[bit]++;
> +	}
> +
> +	for (bit = 0; bit < size; bit++) {
> +		if (counts[bit] == 0)
> +			continue;
> +
> +		event = cpuc->events[bit];
> +		if (WARN_ON_ONCE(!event))
> +			continue;
> +
> +		if (WARN_ON_ONCE(!event->attr.precise_ip))
> +			continue;
> +
> +		__intel_pmu_pebs_event(event, iregs, base,
> +				       top, bit, counts[bit]);
		__intel_pmu_pebs_event(event, iregs, base, top, bit, counts[bit],
				       setup_adaptive_pebs_sample_data);
> +	}
> +}
And then we can do away with the x86_pmu.setup_pebs_sample_data method entirely.
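
For illustration, the suggested refactor might look roughly like the sketch
below. This is only a sketch of the idea, not code from the patch: the typedef
name setup_fn and the parameter name setup_sample are placeholders, and the
unchanged parts of __intel_pmu_pebs_event() are elided in comments.

/* Illustrative sketch: pass the sample-data setup routine as an argument
 * instead of dispatching through an x86_pmu method. */
typedef void (*setup_fn)(struct perf_event *event, struct pt_regs *iregs,
			 void *at, struct perf_sample_data *data,
			 struct pt_regs *regs);

static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs,
				   void *base, void *top,
				   int bit, int count,
				   setup_fn setup_sample)
{
	/* declarations and the save-and-restart check stay as before */

	while (count > 1) {
		setup_sample(event, iregs, at, &data, regs);
		perf_event_output(event, &data, regs);
		at = next_pebs_record(at);
		at = get_next_pebs_record_by_bit(at, top, bit);
		count--;
	}

	setup_sample(event, iregs, at, &data, regs);

	/* overflow handling for the last record stays as before */
}

Each drain routine would then pass its own setup function at the call site,
as in the intel_pmu_drain_pebs_icl() call shown above, so no per-PMU
setup_pebs_sample_data method is needed.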