Subject: [PATCH v0 01/71] perf: Disable all pmus on unthrottling and rescheduling
Currently, only one pmu in a context gets disabled during unthrottling
and event_sched_{out,in}; however, events in one context may belong to
different pmus, which results in pmus being reprogrammed while they are
still enabled. This patch temporarily disables the pmu corresponding to
each event in the context while that event is being modified.

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
---
kernel/events/core.c | 27 ++++++++++++++++++++++++---
1 file changed, 24 insertions(+), 3 deletions(-)
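
A note on the shape of the change (not part of the commit message):
every site that reprograms an event now brackets the modification with
a disable/enable pair on the event's own pmu whenever it differs from
the context's pmu, along these lines:

	if (event->pmu != ctx->pmu)
		perf_pmu_disable(event->pmu);

	/* reprogram the event: ->add()/->stop()/->start(), period update */

	if (event->pmu != ctx->pmu)
		perf_pmu_enable(event->pmu);

perf_pmu_disable()/perf_pmu_enable() nest via a per-cpu count and only
invoke the pmu callbacks on the outermost pair, so the extra bracketing
is safe even under an outer disable of the same pmu.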

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 403b781..d656cd6 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1396,6 +1396,9 @@ event_sched_out(struct perf_event *event,
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
 		return;
 
+	if (event->pmu != ctx->pmu)
+		perf_pmu_disable(event->pmu);
+
 	event->state = PERF_EVENT_STATE_INACTIVE;
 	if (event->pending_disable) {
 		event->pending_disable = 0;
@@ -1412,6 +1415,9 @@ event_sched_out(struct perf_event *event,
 		ctx->nr_freq--;
 	if (event->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
+
+	if (event->pmu != ctx->pmu)
+		perf_pmu_enable(event->pmu);
 }
 
 static void
@@ -1652,6 +1658,7 @@ event_sched_in(struct perf_event *event,
 		 struct perf_event_context *ctx)
 {
 	u64 tstamp = perf_event_time(event);
+	int ret = 0;
 
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
@@ -1674,10 +1681,14 @@ event_sched_in(struct perf_event *event,
 	 */
 	smp_wmb();
 
+	if (event->pmu != ctx->pmu)
+		perf_pmu_disable(event->pmu);
+
 	if (event->pmu->add(event, PERF_EF_START)) {
 		event->state = PERF_EVENT_STATE_INACTIVE;
 		event->oncpu = -1;
-		return -EAGAIN;
+		ret = -EAGAIN;
+		goto out;
 	}
 
 	event->tstamp_running += tstamp - event->tstamp_stopped;
@@ -1693,7 +1704,11 @@ event_sched_in(struct perf_event *event,
 	if (event->attr.exclusive)
 		cpuctx->exclusive = 1;
 
-	return 0;
+out:
+	if (event->pmu != ctx->pmu)
+		perf_pmu_enable(event->pmu);
+
+	return ret;
 }
 
 static int
@@ -2743,6 +2758,9 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		if (!event_filter_match(event))
 			continue;
 
+		if (ctx->pmu != event->pmu)
+			perf_pmu_disable(event->pmu);
+
 		hwc = &event->hw;
 
 		if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -2752,7 +2770,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		}
 
 		if (!event->attr.freq || !event->attr.sample_freq)
-			continue;
+			goto next;
 
 		/*
 		 * stop the event and update event->count
@@ -2774,6 +2792,9 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 			perf_adjust_period(event, period, delta, false);
 
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
+	next:
+		if (ctx->pmu != event->pmu)
+			perf_pmu_enable(event->pmu);
 	}
 
 	perf_pmu_enable(ctx->pmu);
--
1.8.5.1

