Subject: [PATCH v2 3/9] perf/x86: Change x86_pmu::limit_period signature
In preparation for making it a static_call, change the signature.
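
A minimal sketch of the static_call shape this prepares for (the names below
follow the existing x86_pmu_* static_call pattern in arch/x86/events/core.c
and are an assumption about the follow-up patch, not part of this one);
static_call_cond() expects a void-returning function, which is why the
adjusted period is now passed back through an s64 pointer instead of being
returned:

	DEFINE_STATIC_CALL_NULL(x86_pmu_limit_period, *x86_pmu.limit_period);

	/* installed once the vendor pmu is known, e.g. from x86_pmu_static_call_update() */
	static_call_update(x86_pmu_limit_period, x86_pmu.limit_period);

	/* call site: becomes a NOP when no limit_period callback is set */
	static_call_cond(x86_pmu_limit_period)(event, &left);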

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/x86/events/amd/core.c   |    8 +++-----
 arch/x86/events/core.c       |   13 ++++++++-----
 arch/x86/events/intel/core.c |   19 ++++++++-----------
 arch/x86/events/perf_event.h |    2 +-
 4 files changed, 20 insertions(+), 22 deletions(-)

--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -1222,16 +1222,14 @@ static ssize_t amd_event_sysfs_show(char
 	return x86_event_sysfs_show(page, config, event);
 }
 
-static u64 amd_pmu_limit_period(struct perf_event *event, u64 left)
+static void amd_pmu_limit_period(struct perf_event *event, s64 *left)
 {
 	/*
 	 * Decrease period by the depth of the BRS feature to get the last N
 	 * taken branches and approximate the desired period
 	 */
-	if (has_branch_stack(event) && left > x86_pmu.lbr_nr)
-		left -= x86_pmu.lbr_nr;
-
-	return left;
+	if (has_branch_stack(event) && *left > x86_pmu.lbr_nr)
+		*left -= x86_pmu.lbr_nr;
 }
 
 static __initconst const struct x86_pmu amd_pmu = {
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -621,8 +621,9 @@ int x86_pmu_hw_config(struct perf_event
 		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
 
 	if (event->attr.sample_period && x86_pmu.limit_period) {
-		if (x86_pmu.limit_period(event, event->attr.sample_period) >
-				event->attr.sample_period)
+		s64 left = event->attr.sample_period;
+		x86_pmu.limit_period(event, &left);
+		if (left > event->attr.sample_period)
 			return -EINVAL;
 	}
 
@@ -1396,9 +1397,9 @@ int x86_perf_event_set_period(struct per
 		left = x86_pmu.max_period;
 
 	if (x86_pmu.limit_period)
-		left = x86_pmu.limit_period(event, left);
+		x86_pmu.limit_period(event, &left);
 
-	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
+	this_cpu_write(pmc_prev_left[idx], left);
 
 	/*
 	 * The hw event starts counting from this event offset,
@@ -2675,7 +2676,9 @@ static int x86_pmu_check_period(struct p
 		return -EINVAL;
 
 	if (value && x86_pmu.limit_period) {
-		if (x86_pmu.limit_period(event, value) > value)
+		s64 left = value;
+		x86_pmu.limit_period(event, &left);
+		if (left > value)
 			return -EINVAL;
 	}
 
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4342,28 +4342,25 @@ static u8 adl_get_hybrid_cpu_type(void)
  * Therefore the effective (average) period matches the requested period,
  * despite coarser hardware granularity.
  */
-static u64 bdw_limit_period(struct perf_event *event, u64 left)
+static void bdw_limit_period(struct perf_event *event, s64 *left)
 {
 	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
 			X86_CONFIG(.event=0xc0, .umask=0x01)) {
-		if (left < 128)
-			left = 128;
-		left &= ~0x3fULL;
+		if (*left < 128)
+			*left = 128;
+		*left &= ~0x3fULL;
 	}
-	return left;
 }
 
-static u64 nhm_limit_period(struct perf_event *event, u64 left)
+static void nhm_limit_period(struct perf_event *event, s64 *left)
 {
-	return max(left, 32ULL);
+	*left = max(*left, 32LL);
 }
 
-static u64 spr_limit_period(struct perf_event *event, u64 left)
+static void spr_limit_period(struct perf_event *event, s64 *left)
 {
 	if (event->attr.precise_ip == 3)
-		return max(left, 128ULL);
-
-	return left;
+		*left = max(*left, 128LL);
 }
 
 PMU_FORMAT_ATTR(event,	"config:0-7"	);
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -783,7 +783,7 @@ struct x86_pmu {
 	struct event_constraint *event_constraints;
 	struct x86_pmu_quirk	*quirks;
 	int		perfctr_second_write;
-	u64		(*limit_period)(struct perf_event *event, u64 l);
+	void		(*limit_period)(struct perf_event *event, s64 *l);
 
 	/* PMI handler bits */
 	unsigned int	late_ack		:1,
