Date:	Mon, 13 Jun 2022 16:41:28 +0200
From:	Peter Zijlstra <>
Subject:	Re: [RFC v2] perf: Rewrite core context handling
On Mon, Jun 13, 2022 at 04:35:11PM +0200, Peter Zijlstra wrote:
> @@ -3196,11 +3187,52 @@ static int perf_event_modify_attr(struct
>  	return err;
>  }
>  
> -static void ctx_sched_out(struct perf_event_context *ctx,
> -			  struct perf_cpu_context *cpuctx,
> -			  enum event_type_t event_type)
> +static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
> +				enum event_type_t event_type)
>  {
> +	struct perf_event_context *ctx = pmu_ctx->ctx;
>  	struct perf_event *event, *tmp;
> +	struct pmu *pmu = pmu_ctx->pmu;
> +
> +	if (ctx->task && !ctx->is_active) {
> +		struct perf_cpu_pmu_context *cpc;
> +
> +		cpc = this_cpu_ptr(pmu->cpu_pmu_context);
> +		WARN_ON_ONCE(cpc->task_epc != pmu_ctx);
> +		cpc->task_epc = NULL;
> +	}
> +
> +	if (!event_type)
> +		return;
> +
> +	perf_pmu_disable(pmu);
> +	if (event_type & EVENT_PINNED) {
> +		list_for_each_entry_safe(event, tmp,
> +					 &pmu_ctx->pinned_active,
> +					 active_list)
> +			group_sched_out(event, ctx);
> +	}
> +
> +	if (event_type & EVENT_FLEXIBLE) {
> +		list_for_each_entry_safe(event, tmp,
> +					 &pmu_ctx->flexible_active,
> +					 active_list)
> +			group_sched_out(event, ctx);
> +		/*
> +		 * Since we cleared EVENT_FLEXIBLE, also clear
> +		 * rotate_necessary, is will be reset by
> +		 * ctx_flexible_sched_in() when needed.
> +		 */
> +		pmu_ctx->rotate_necessary = 0;
> +	}
> +	perf_pmu_enable(pmu);
> +}
> +
> +static void
> +ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
> +{
> +	struct perf_cpu_context *cpuctx = this_cpu_ptr(&cpu_context);
> +	struct perf_event_pmu_context *pmu_ctx;
>  	int is_active = ctx->is_active;
>  
>  	lockdep_assert_held(&ctx->lock);
> @@ -3251,24 +3283,8 @@ static void ctx_sched_out(struct perf_ev
>  	if (!ctx->nr_active || !(is_active & EVENT_ALL))
>  		return;
>  
> -	perf_pmu_disable(ctx->pmu);
> -	if (is_active & EVENT_PINNED) {
> -		list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
> -			group_sched_out(event, cpuctx, ctx);
> -	}
> -
> -	if (is_active & EVENT_FLEXIBLE) {
> -		list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
> -			group_sched_out(event, cpuctx, ctx);
> -
> -		/*
> -		 * Since we cleared EVENT_FLEXIBLE, also clear
> -		 * rotate_necessary, is will be reset by
> -		 * ctx_flexible_sched_in() when needed.
> -		 */
> -		ctx->rotate_necessary = 0;
> -	}
> -	perf_pmu_enable(ctx->pmu);
> +	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
> +		__pmu_ctx_sched_out(pmu_ctx, is_active);
>  }
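For reference, the shape the quoted hunk moves to, as a compilable userspace toy (every name below is a stand-in, not a real kernel API): the context no longer has a single ctx->pmu; scheduling out walks one per-PMU sub-context at a time, each bracketed by its own PMU's disable/enable.

	/*
	 * Userspace model only. An array stands in for the kernel's
	 * pmu_ctx_list; printf stands in for the real PMU callbacks.
	 */
	#include <stdio.h>

	struct pmu { const char *name; };

	struct pmu_ctx {
		struct pmu *pmu;
		int nr_active;		/* events currently scheduled in */
	};

	struct ctx {
		struct pmu_ctx *pmu_ctx_list;
		int nr_pmu_ctx;
	};

	static void pmu_disable(struct pmu *pmu) { printf("disable %s\n", pmu->name); }
	static void pmu_enable(struct pmu *pmu)  { printf("enable  %s\n", pmu->name); }

	/* per-PMU part: what __pmu_ctx_sched_out() does above */
	static void pmu_ctx_sched_out(struct pmu_ctx *epc)
	{
		pmu_disable(epc->pmu);
		printf("  sched out %d events on %s\n",
		       epc->nr_active, epc->pmu->name);
		epc->nr_active = 0;
		pmu_enable(epc->pmu);
	}

	/* context part: ctx_sched_out() reduced to a walk over sub-contexts */
	static void ctx_sched_out(struct ctx *ctx)
	{
		for (int i = 0; i < ctx->nr_pmu_ctx; i++)
			pmu_ctx_sched_out(&ctx->pmu_ctx_list[i]);
	}

	int main(void)
	{
		struct pmu cpu = { "cpu" }, uncore = { "uncore" };
		struct pmu_ctx epcs[] = { { &cpu, 2 }, { &uncore, 1 } };
		struct ctx ctx = { epcs, 2 };

		ctx_sched_out(&ctx);
		return 0;
	}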
You mentioned trouble with cpc->task_epc; there's one rebase mistake from you and an original bug from me.

You lost the last hunk, and I forgot to clear cpc->task_epc on perf_remove_from_context().

With these fixes I can run 'perf test' without things going insta-splat.
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2311,6 +2311,7 @@ __perf_remove_from_context(struct perf_e
 			   struct perf_event_context *ctx,
 			   void *info)
 {
+	struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx;
 	unsigned long flags = (unsigned long)info;
 
 	if (ctx->is_active & EVENT_TIME) {
@@ -2325,8 +2326,17 @@ __perf_remove_from_context(struct perf_e
 		perf_child_detach(event);
 	list_del_event(event, ctx);
 
-	if (!event->pmu_ctx->nr_events)
-		event->pmu_ctx->rotate_necessary = 0;
+	if (!pmu_ctx->nr_events) {
+		pmu_ctx->rotate_necessary = 0;
+
+		if (ctx->task) {
+			struct perf_cpu_pmu_context *cpc;
+
+			cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
+			WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
+			cpc->task_epc = NULL;
+		}
+	}
 
 	if (!ctx->nr_events && ctx->is_active) {
 		if (ctx == &cpuctx->ctx)
@@ -3198,7 +3208,7 @@ static void __pmu_ctx_sched_out(struct p
 		struct perf_cpu_pmu_context *cpc;
 
 		cpc = this_cpu_ptr(pmu->cpu_pmu_context);
-		WARN_ON_ONCE(cpc->task_epc != pmu_ctx);
+		WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
 		cpc->task_epc = NULL;
 	}
 
@@ -3280,9 +3290,6 @@ ctx_sched_out(struct perf_event_context
 
 	is_active ^= ctx->is_active; /* changed bits */
 
-	if (!ctx->nr_active || !(is_active & EVENT_ALL))
-		return;
-
 	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
 		__pmu_ctx_sched_out(pmu_ctx, is_active);
 }
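FWIW, the invariant the relaxed WARN_ON_ONCE() checks can be modelled in a few lines of plain userspace C (stand-in names only, not the kernel code): cpc->task_epc may already be NULL by the time we sched out, because the last event was removed first, but it must never point at a *different* pmu_ctx.

	#include <assert.h>
	#include <stdio.h>

	struct pmu_ctx { int nr_events; };
	struct cpu_pmu_ctx { struct pmu_ctx *task_epc; };

	/* sched-in: install the task context for this PMU on this CPU */
	static void task_ctx_sched_in(struct cpu_pmu_ctx *cpc, struct pmu_ctx *epc)
	{
		cpc->task_epc = epc;
	}

	/* sched-out: NULL is fine, a foreign epc is not -- the relaxed check */
	static void task_ctx_sched_out(struct cpu_pmu_ctx *cpc, struct pmu_ctx *epc)
	{
		assert(!cpc->task_epc || cpc->task_epc == epc);
		cpc->task_epc = NULL;
	}

	/* removing the last event must also drop the cpc back-reference */
	static void remove_event(struct cpu_pmu_ctx *cpc, struct pmu_ctx *epc)
	{
		if (--epc->nr_events == 0) {
			assert(!cpc->task_epc || cpc->task_epc == epc);
			cpc->task_epc = NULL;
		}
	}

	int main(void)
	{
		struct cpu_pmu_ctx cpc = { 0 };
		struct pmu_ctx epc = { 1 };

		task_ctx_sched_in(&cpc, &epc);
		remove_event(&cpc, &epc);	/* last event: clears task_epc */
		task_ctx_sched_out(&cpc, &epc);	/* already NULL: OK with the fix,
						 * would splat with the strict check */
		printf("invariant held\n");
		return 0;
	}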