Subject: [PATCH V6 06/14] perf/x86/intel: Use switch in intel_pmu_disable/enable_event
From: Kan Liang <kan.liang@linux.intel.com>

intel_pmu_disable/enable_event() already check many cases, and more will
be added later, e.g. perf metrics events.

Replace the if-else chain with a switch, which is more efficient.

If the idx is invalid, print a warning.

For the INTEL_PMC_IDX_FIXED_BTS case in intel_pmu_disable_event(), there
is no need to check event->attr.precise_ip, so return directly from that
case.
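For reference, a minimal user-space sketch of the case-range switch
pattern used below. The IDX_* values here are illustrative stand-ins,
not the kernel's INTEL_PMC_IDX_* constants, and the handlers are just
printouts:

/*
 * Sketch of the GCC/Clang case-range extension ("case low ... high:")
 * applied to a counter-index dispatch: each range gets a handler, the
 * BTS-like case returns early, and an explicit default warns on an
 * invalid index.  Stand-in values only.
 */
#include <stdio.h>

#define IDX_FIXED	32	/* stand-in for INTEL_PMC_IDX_FIXED */
#define IDX_BTS		47	/* stand-in for INTEL_PMC_IDX_FIXED_BTS */
#define IDX_VLBR	58	/* stand-in for INTEL_PMC_IDX_FIXED_VLBR */

static void handle_idx(int idx)
{
	switch (idx) {
	case 0 ... IDX_FIXED - 1:		/* general-purpose counters */
		printf("%d: generic counter\n", idx);
		break;
	case IDX_FIXED ... IDX_BTS - 1:		/* fixed counters */
		printf("%d: fixed counter\n", idx);
		break;
	case IDX_BTS:				/* branch trace store */
		printf("%d: BTS\n", idx);
		return;				/* skip the common tail, as in the disable path */
	case IDX_VLBR:				/* virtual LBR event */
		printf("%d: vLBR\n", idx);
		break;
	default:
		fprintf(stderr, "invalid index %d\n", idx);
		return;
	}
	/* common tail work runs only for the cases that break */
	printf("%d: common post-processing\n", idx);
}

int main(void)
{
	handle_idx(3);
	handle_idx(35);
	handle_idx(47);
	handle_idx(99);
	return 0;
}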

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
arch/x86/events/intel/core.c | 36 ++++++++++++++++++++++++++++--------
1 file changed, 28 insertions(+), 8 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index ac1408fe1aee..2b1701c08f46 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2180,17 +2180,28 @@ static void intel_pmu_disable_event(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;

- if (idx < INTEL_PMC_IDX_FIXED) {
+ switch (idx) {
+ case 0 ... INTEL_PMC_IDX_FIXED - 1:
intel_clear_masks(event, idx);
x86_pmu_disable_event(event);
- } else if (idx < INTEL_PMC_IDX_FIXED_BTS) {
+ break;
+ case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
intel_clear_masks(event, idx);
intel_pmu_disable_fixed(event);
- } else if (idx == INTEL_PMC_IDX_FIXED_BTS) {
+ break;
+ case INTEL_PMC_IDX_FIXED_BTS:
intel_pmu_disable_bts();
intel_pmu_drain_bts_buffer();
- } else if (idx == INTEL_PMC_IDX_FIXED_VLBR)
+ return;
+ case INTEL_PMC_IDX_FIXED_VLBR:
intel_clear_masks(event, idx);
+ break;
+ default:
+ intel_clear_masks(event, idx);
+ pr_warn("Failed to disable the event with invalid index %d\n",
+ idx);
+ return;
+ }

/*
* Needs to be called after x86_pmu_disable_event,
@@ -2262,18 +2273,27 @@ static void intel_pmu_enable_event(struct perf_event *event)
if (unlikely(event->attr.precise_ip))
intel_pmu_pebs_enable(event);

- if (idx < INTEL_PMC_IDX_FIXED) {
+ switch (idx) {
+ case 0 ... INTEL_PMC_IDX_FIXED - 1:
intel_set_masks(event, idx);
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
- } else if (idx < INTEL_PMC_IDX_FIXED_BTS) {
+ break;
+ case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
intel_set_masks(event, idx);
intel_pmu_enable_fixed(event);
- } else if (idx == INTEL_PMC_IDX_FIXED_BTS) {
+ break;
+ case INTEL_PMC_IDX_FIXED_BTS:
if (!__this_cpu_read(cpu_hw_events.enabled))
return;
intel_pmu_enable_bts(hwc->config);
- } else if (idx == INTEL_PMC_IDX_FIXED_VLBR)
+ break;
+ case INTEL_PMC_IDX_FIXED_VLBR:
intel_set_masks(event, idx);
+ break;
+ default:
+ pr_warn("Failed to enable the event with invalid index %d\n",
+ idx);
+ }
}

static void intel_pmu_add_event(struct perf_event *event)
--
2.17.1