From:    Song Liu <>
Subject: [PATCH] perf/core: fix userpage->time_enabled of inactive events
Date:    Tue, 21 Sep 2021 18:17:15 -0700
Users of rdpmc rely on the mmapped user page to calculate accurate time_enabled. Currently, userpage->time_enabled is only updated when the event is added to the PMU. As a result, inactive events (rotated out by counter multiplexing) do not have an accurate userpage->time_enabled. This can be reproduced with something like:
  /* open 20 task perf_event "cycles", to create multiplexing */

  fd = perf_event_open();  /* open task perf_event "cycles" */
  userpage = mmap(fd);     /* use mmap and rdpmc */

  while (true) {
          time_enabled_mmap = xxx;  /* use logic in perf_event_mmap_page */
          time_enabled_read = read(fd).time_enabled;
          if (time_enabled_mmap > time_enabled_read)
                  BUG();
  }
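For reference, the "xxx" step is the userspace side of the contract: a seqlock read of the page, optionally extended to "now" via rdtsc, following the algorithm documented in the perf_event_mmap_page comments in include/uapi/linux/perf_event.h. Below is a minimal, x86-only sketch (the helper name is mine, not part of this patch; it assumes pc points at the mmapped first page of the event fd and that the caller is self-monitoring, so its context clock is advancing):

  #include <stdint.h>
  #include <linux/perf_event.h>
  #include <x86intrin.h>

  #define barrier() __asm__ __volatile__("" ::: "memory")

  static uint64_t mmap_time_enabled(struct perf_event_mmap_page *pc)
  {
          uint64_t enabled, cyc, time_offset, quot, rem, delta;
          uint32_t seq, time_mult;
          uint16_t time_shift;

          do {
                  seq = pc->lock;
                  barrier();

                  enabled = pc->time_enabled;

                  if (pc->cap_user_time) {
                          /* Extend the kernel's snapshot to "now". */
                          cyc         = __rdtsc();
                          time_offset = pc->time_offset;
                          time_mult   = pc->time_mult;
                          time_shift  = pc->time_shift;

                          quot  = cyc >> time_shift;
                          rem   = cyc & (((uint64_t)1 << time_shift) - 1);
                          delta = time_offset + quot * time_mult +
                                  ((rem * time_mult) >> time_shift);
                          enabled += delta;
                  }

                  barrier();
          } while (pc->lock != seq);  /* kernel updated the page; retry */

          return enabled;
  }

read(fd) on the same event then provides the reference time_enabled for the comparison in the loop above.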
Fix this by updating userpage for inactive events in ctx_sched_in.
Reported-and-tested-by: Lucian Grijincu <lucian@fb.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
---
 include/linux/perf_event.h |  4 +++-
 kernel/events/core.c       | 26 ++++++++++++++++++++++++++
 2 files changed, 29 insertions(+), 1 deletion(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2d510ad750edc..4aa52f7a48c16 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -683,7 +683,9 @@ struct perf_event {
 	/*
 	 * timestamp shadows the actual context timing but it can
 	 * be safely used in NMI interrupt context. It reflects the
-	 * context time as it was when the event was last scheduled in.
+	 * context time as it was when the event was last scheduled in,
+	 * or when ctx_sched_in failed to schedule the event because we
+	 * ran out of PMC.
 	 *
 	 * ctx_time already accounts for ctx->timestamp. Therefore to
 	 * compute ctx_time for a sample, simply add perf_clock().
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1cb1f9b8392e2..549ce22df7bc7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3766,6 +3766,15 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 		      merge_sched_in, &can_add_hw);
 }
 
+static inline void
+perf_event_update_inactive_userpage(struct perf_event *event,
+				    struct perf_event_context *ctx)
+{
+	perf_event_update_time(event);
+	perf_set_shadow_time(event, ctx);
+	perf_event_update_userpage(event);
+}
+
 static void
 ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
@@ -3807,6 +3816,23 @@ ctx_sched_in(struct perf_event_context *ctx,
 	/* Then walk through the lower prio flexible groups */
 	if (is_active & EVENT_FLEXIBLE)
 		ctx_flexible_sched_in(ctx, cpuctx);
+
+	/*
+	 * Update userpage for inactive events. This is needed for accurate
+	 * time_enabled.
+	 */
+	if (unlikely(ctx->rotate_necessary)) {
+		struct perf_event *event;
+
+		perf_event_groups_for_each(event, &ctx->pinned_groups) {
+			if (event->state == PERF_EVENT_STATE_INACTIVE)
+				perf_event_update_inactive_userpage(event, ctx);
+		}
+		perf_event_groups_for_each(event, &ctx->flexible_groups) {
+			if (event->state == PERF_EVENT_STATE_INACTIVE)
+				perf_event_update_inactive_userpage(event, ctx);
+		}
+	}
 }
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-- 
2.30.2
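As an aside on why stale time_enabled is more than a cosmetic issue: self-monitoring users of multiplexed counters scale the raw count by enabled/running, so an under-reported time_enabled skews that estimate. A sketch of the usual scaling step (helper name is mine, not from this patch):

  #include <stdint.h>

  /* Scale a multiplexed count; returns 0 if the event never ran. */
  static uint64_t scale_count(uint64_t count, uint64_t enabled, uint64_t running)
  {
          return running ? (uint64_t)((double)count * enabled / running) : 0;
  }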