Subject: [PATCH 1/3] arm_pmu: Add support for perf NMI interrupts registration
Register perf interrupts with request_nmi()/request_percpu_nmi() when
both ARM64_PSEUDO_NMI and ARM64_PSEUDO_NMI_PERF are enabled and the
NMI cpufeature (IRQ priority masking) is active.
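
For reference, a minimal sketch of the intended gating, separate from
the patch itself: example_request_pmu_irq() and the "example-pmu" name
are illustrative only, while request_nmi()/request_irq(),
IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI_PERF) and
system_uses_irq_prio_masking() are the helpers the diff below relies
on. An NMI is requested only when the feature is both built in and
live on the running system; otherwise the driver falls back to a
normal IRQ.

#include <linux/interrupt.h>
#include <linux/kconfig.h>
#include <asm/cpufeature.h>

/* Illustrative only: mirrors the gating used by _armpmu_request_irq(). */
static int example_request_pmu_irq(unsigned int irq, irq_handler_t handler,
				   unsigned long flags, void *dev)
{
	/* Prefer an NMI when pseudo-NMI support is compiled in and active. */
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI_PERF) &&
	    system_uses_irq_prio_masking())
		return request_nmi(irq, handler, flags, "example-pmu", dev);

	/* Otherwise fall back to a regular (maskable) interrupt. */
	return request_irq(irq, handler, flags, "example-pmu", dev);
}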

Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
---
drivers/perf/arm_pmu.c | 51 +++++++++++++++++++++++++++++++-----
include/linux/perf/arm_pmu.h | 6 +++++
2 files changed, 51 insertions(+), 6 deletions(-)

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index df352b334ea7..fa37b72d19e2 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -559,6 +559,48 @@ void armpmu_free_irq(int irq, int cpu)
per_cpu(cpu_irq, cpu) = 0;
}

+static void armpmu_prepare_percpu_nmi_other(void *info)
+{
+ /*
+ * We don't need to disable preemption since smp_call_function()
+ * did this for us.
+ */
+ prepare_percpu_nmi((uintptr_t) info);
+}
+
+static int _armpmu_request_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags, int cpu)
+{
+ if (armpmu_support_nmi())
+ return request_nmi(irq, handler, flags, "arm-pmu",
+ per_cpu_ptr(&cpu_armpmu, cpu));
+ return request_irq(irq, handler, flags, "arm-pmu",
+ per_cpu_ptr(&cpu_armpmu, cpu));
+}
+
+static int _armpmu_request_percpu_irq(unsigned int irq, irq_handler_t handler)
+{
+ if (armpmu_support_nmi()) {
+ int err;
+
+ err = request_percpu_nmi(irq, handler, "arm-pmu",
+ &cpu_armpmu);
+ if (err)
+ return err;
+
+ preempt_disable();
+ err = prepare_percpu_nmi(irq);
+ if (err) {
+ preempt_enable();
+ return err;
+ }
+ smp_call_function(armpmu_prepare_percpu_nmi_other,
+ (void *)(uintptr_t) irq, true);
+ preempt_enable();
+ return 0;
+ }
+ return request_percpu_irq(irq, handler, "arm-pmu", &cpu_armpmu);
+}
+
int armpmu_request_irq(int irq, int cpu)
{
int err = 0;
@@ -582,12 +624,9 @@ int armpmu_request_irq(int irq, int cpu)
IRQF_NO_THREAD;

irq_set_status_flags(irq, IRQ_NOAUTOEN);
- err = request_irq(irq, handler, irq_flags, "arm-pmu",
- per_cpu_ptr(&cpu_armpmu, cpu));
- } else if (armpmu_count_irq_users(irq) == 0) {
- err = request_percpu_irq(irq, handler, "arm-pmu",
- &cpu_armpmu);
- }
+ err = _armpmu_request_irq(irq, handler, irq_flags, cpu);
+ } else if (armpmu_count_irq_users(irq) == 0)
+ err = _armpmu_request_percpu_irq(irq, handler);

if (err)
goto err_out;
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 5b616dde9a4c..5b878b5a22aa 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -160,6 +160,12 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif

+static inline bool armpmu_support_nmi(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI_PERF) &&
+ system_uses_irq_prio_masking();
+}
+
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
--
2.25.1