Subject: [tip: perf/core] perf/x86: Add two more x86_pmu methods
The following commit has been merged into the perf/core branch of tip:

Commit-ID: 73759c346341d39dfde39701476c0376dea0a98b
Gitweb: https://git.kernel.org/tip/73759c346341d39dfde39701476c0376dea0a98b
Author: Peter Zijlstra <peterz@infradead.org>
AuthorDate: Tue, 10 May 2022 21:27:22 +02:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 07 Sep 2022 21:54:02 +02:00

perf/x86: Add two more x86_pmu methods

In order to clean up x86_perf_event_{set_period,update}(), start by
adding them as x86_pmu methods.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220829101321.440196408@infradead.org
---
arch/x86/events/core.c | 22 +++++++++++++++++-----
arch/x86/events/perf_event.h | 5 +++++
2 files changed, 22 insertions(+), 5 deletions(-)
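
The mechanism being extended here is the kernel's static_call() infrastructure: each call site is compiled as a direct call whose target static_call_update() patches in at runtime, so the hot paths avoid an indirect branch (and its retpoline cost) through the x86_pmu function-pointer table. A minimal sketch of that pattern only, using a hypothetical pmu_ops table and pmu_update key that are not part of this patch:

#include <linux/perf_event.h>
#include <linux/static_call.h>

/* Hypothetical method table, standing in for struct x86_pmu. */
struct pmu_ops {
	u64 (*update)(struct perf_event *event);
};

static struct pmu_ops pmu_ops;

/*
 * Emits the static-call key plus a trampoline. "NULL" means no target
 * is installed yet; for a value-returning signature like this one, a
 * real target must be patched in before the first call.
 */
DEFINE_STATIC_CALL_NULL(pmu_update, *pmu_ops.update);

static void pmu_ops_install(u64 (*fn)(struct perf_event *event))
{
	pmu_ops.update = fn;
	/* Re-patch every static_call(pmu_update) site to call fn directly. */
	static_call_update(pmu_update, pmu_ops.update);
}

static u64 pmu_read_counter(struct perf_event *event)
{
	/* Direct call after patching; no load from the method table. */
	return static_call(pmu_update)(event);
}

The hunks below apply exactly this shape to x86_pmu.set_period and x86_pmu.update.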

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index bb34a28..bb559b7 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -72,6 +72,9 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_add, *x86_pmu.add);
 DEFINE_STATIC_CALL_NULL(x86_pmu_del, *x86_pmu.del);
 DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read);
 
+DEFINE_STATIC_CALL_NULL(x86_pmu_set_period, *x86_pmu.set_period);
+DEFINE_STATIC_CALL_NULL(x86_pmu_update, *x86_pmu.update);
+
 DEFINE_STATIC_CALL_NULL(x86_pmu_schedule_events, *x86_pmu.schedule_events);
 DEFINE_STATIC_CALL_NULL(x86_pmu_get_event_constraints, *x86_pmu.get_event_constraints);
 DEFINE_STATIC_CALL_NULL(x86_pmu_put_event_constraints, *x86_pmu.put_event_constraints);
@@ -1518,7 +1521,7 @@ static void x86_pmu_start(struct perf_event *event, int flags)
 
 	if (flags & PERF_EF_RELOAD) {
 		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
-		x86_perf_event_set_period(event);
+		static_call(x86_pmu_set_period)(event);
 	}
 
 	event->hw.state = 0;
@@ -1610,7 +1613,7 @@ void x86_pmu_stop(struct perf_event *event, int flags)
 		 * Drain the remaining delta count out of a event
 		 * that we are disabling:
 		 */
-		x86_perf_event_update(event);
+		static_call(x86_pmu_update)(event);
 		hwc->state |= PERF_HES_UPTODATE;
 	}
 }
@@ -1700,7 +1703,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
 
 		event = cpuc->events[idx];
 
-		val = x86_perf_event_update(event);
+		val = static_call(x86_pmu_update)(event);
 		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
 			continue;
 
@@ -1709,7 +1712,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
 		 */
 		handled++;
 
-		if (!x86_perf_event_set_period(event))
+		if (!static_call(x86_pmu_set_period)(event))
			continue;
 
 		perf_sample_data_init(&data, 0, event->hw.last_period);
@@ -2025,6 +2028,9 @@ static void x86_pmu_static_call_update(void)
 	static_call_update(x86_pmu_del, x86_pmu.del);
 	static_call_update(x86_pmu_read, x86_pmu.read);
 
+	static_call_update(x86_pmu_set_period, x86_pmu.set_period);
+	static_call_update(x86_pmu_update, x86_pmu.update);
+
 	static_call_update(x86_pmu_schedule_events, x86_pmu.schedule_events);
 	static_call_update(x86_pmu_get_event_constraints, x86_pmu.get_event_constraints);
 	static_call_update(x86_pmu_put_event_constraints, x86_pmu.put_event_constraints);
@@ -2044,7 +2050,7 @@ static void x86_pmu_static_call_update(void)
 
 static void _x86_pmu_read(struct perf_event *event)
 {
-	x86_perf_event_update(event);
+	static_call(x86_pmu_update)(event);
 }
 
 void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
@@ -2151,6 +2157,12 @@ static int __init init_hw_perf_events(void)
 	if (!x86_pmu.guest_get_msrs)
 		x86_pmu.guest_get_msrs = (void *)&__static_call_return0;
 
+	if (!x86_pmu.set_period)
+		x86_pmu.set_period = x86_perf_event_set_period;
+
+	if (!x86_pmu.update)
+		x86_pmu.update = x86_perf_event_update;
+
 	x86_pmu_static_call_update();
 
 	/*
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 4a3dde2..7ae1a6c 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -743,6 +743,8 @@ struct x86_pmu {
 	void		(*add)(struct perf_event *);
 	void		(*del)(struct perf_event *);
 	void		(*read)(struct perf_event *event);
+	int		(*set_period)(struct perf_event *event);
+	u64		(*update)(struct perf_event *event);
 	int		(*hw_config)(struct perf_event *event);
 	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
 	unsigned	eventsel;
@@ -1042,6 +1044,9 @@ static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\
 struct pmu *x86_get_pmu(unsigned int cpu);
 extern struct x86_pmu x86_pmu __read_mostly;
 
+DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
+DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update);
+
 static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
 {
 	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
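
Note the ordering in the init_hw_perf_events() hunk: the generic defaults are written into x86_pmu before x86_pmu_static_call_update() runs, so the new static calls always receive a valid target. A vendor driver that wants different behaviour only has to fill in the field first; a hypothetical override might look like the following (the example_* names are illustrative and do not appear in this patch):

/* Vendor-specific variant; falls back to the generic helper for the math. */
static u64 example_pmu_update(struct perf_event *event)
{
	/* Vendor-specific fixup would go here. */
	return x86_perf_event_update(event);
}

static __init void example_pmu_init(void)
{
	/*
	 * Setting the field before init_hw_perf_events() runs patches the
	 * static call to example_pmu_update(); leaving it NULL selects
	 * x86_perf_event_update() via the fallback above.
	 */
	x86_pmu.update = example_pmu_update;
}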