Subject: [PATCH 5.6 251/254] perf/core: Unify {pinned,flexible}_sched_in()
From: Peter Zijlstra <peterz@infradead.org>

[ Upstream commit ab6f824cfdf7363b5e529621cbc72ae6519c78d1 ]

Less is more; unify the two very nearly identical functions.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
kernel/events/core.c | 58 ++++++++++++++++----------------------------
1 file changed, 21 insertions(+), 37 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index e453589da97ca..95860901949e7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1986,6 +1986,12 @@ static int perf_get_aux_event(struct perf_event *event,
return 1;
}

+static inline struct list_head *get_event_list(struct perf_event *event)
+{
+ struct perf_event_context *ctx = event->ctx;
+ return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active;
+}
+
static void perf_group_detach(struct perf_event *event)
{
struct perf_event *sibling, *tmp;
@@ -2028,12 +2034,8 @@ static void perf_group_detach(struct perf_event *event)
if (!RB_EMPTY_NODE(&event->group_node)) {
add_event_to_groups(sibling, event->ctx);

- if (sibling->state == PERF_EVENT_STATE_ACTIVE) {
- struct list_head *list = sibling->attr.pinned ?
- &ctx->pinned_active : &ctx->flexible_active;
-
- list_add_tail(&sibling->active_list, list);
- }
+ if (sibling->state == PERF_EVENT_STATE_ACTIVE)
+ list_add_tail(&sibling->active_list, get_event_list(sibling));
}

WARN_ON_ONCE(sibling->ctx != event->ctx);
@@ -2350,6 +2352,8 @@ event_sched_in(struct perf_event *event,
{
int ret = 0;

+ WARN_ON_ONCE(event->ctx != ctx);
+
lockdep_assert_held(&ctx->lock);

if (event->state <= PERF_EVENT_STATE_OFF)
@@ -3425,10 +3429,12 @@ struct sched_in_data {
int can_add_hw;
};

-static int pinned_sched_in(struct perf_event *event, void *data)
+static int merge_sched_in(struct perf_event *event, void *data)
{
struct sched_in_data *sid = data;

+ WARN_ON_ONCE(event->ctx != sid->ctx);
+
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;

@@ -3437,37 +3443,15 @@ static int pinned_sched_in(struct perf_event *event, void *data)

if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
if (!group_sched_in(event, sid->cpuctx, sid->ctx))
- list_add_tail(&event->active_list, &sid->ctx->pinned_active);
+ list_add_tail(&event->active_list, get_event_list(event));
}

- /*
- * If this pinned group hasn't been scheduled,
- * put it in error state.
- */
- if (event->state == PERF_EVENT_STATE_INACTIVE)
- perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
-
- return 0;
-}
-
-static int flexible_sched_in(struct perf_event *event, void *data)
-{
- struct sched_in_data *sid = data;
-
- if (event->state <= PERF_EVENT_STATE_OFF)
- return 0;
-
- if (!event_filter_match(event))
- return 0;
+ if (event->state == PERF_EVENT_STATE_INACTIVE) {
+ if (event->attr.pinned)
+ perf_event_set_state(event, PERF_EVENT_STATE_ERROR);

- if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
- int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
- if (ret) {
- sid->can_add_hw = 0;
- sid->ctx->rotate_necessary = 1;
- return 0;
- }
- list_add_tail(&event->active_list, &sid->ctx->flexible_active);
+ sid->can_add_hw = 0;
+ sid->ctx->rotate_necessary = 1;
}

return 0;
@@ -3485,7 +3469,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,

visit_groups_merge(&ctx->pinned_groups,
smp_processor_id(),
- pinned_sched_in, &sid);
+ merge_sched_in, &sid);
}

static void
@@ -3500,7 +3484,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,

visit_groups_merge(&ctx->flexible_groups,
smp_processor_id(),
- flexible_sched_in, &sid);
+ merge_sched_in, &sid);
}

static void
--
2.20.1
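
For readers of the archive, here is the refactoring pattern in isolation, as a minimal userspace C sketch. The names (toy_event, toy_get_list, toy_sched_in) are made up for illustration and are not the kernel's; the point is how a helper that derives the target list from the event itself lets one callback replace two near-duplicates, as get_event_list() and merge_sched_in() do in the patch above.

/* Standalone illustration of the merge pattern; all names here are
 * hypothetical and only mirror the shape of the kernel change. */
#include <stdbool.h>
#include <stdio.h>

struct toy_list { const char *name; int count; };

struct toy_ctx {
	struct toy_list pinned_active;
	struct toy_list flexible_active;
};

struct toy_event {
	struct toy_ctx *ctx;
	bool pinned;
	const char *name;
};

/* Analogue of get_event_list(): choose the active list from the event,
 * so callers no longer need pinned- and flexible-specific variants. */
static struct toy_list *toy_get_list(struct toy_event *event)
{
	struct toy_ctx *ctx = event->ctx;

	return event->pinned ? &ctx->pinned_active : &ctx->flexible_active;
}

/* Analogue of merge_sched_in(): one callback serves both group types and
 * branches on the attribute only where behaviour actually differs. */
static void toy_sched_in(struct toy_event *event)
{
	struct toy_list *list = toy_get_list(event);

	list->count++;
	printf("%s scheduled onto the %s list\n", event->name, list->name);
}

int main(void)
{
	struct toy_ctx ctx = {
		.pinned_active   = { .name = "pinned" },
		.flexible_active = { .name = "flexible" },
	};
	struct toy_event a = { .ctx = &ctx, .pinned = true,  .name = "event-a" };
	struct toy_event b = { .ctx = &ctx, .pinned = false, .name = "event-b" };

	toy_sched_in(&a);
	toy_sched_in(&b);
	return 0;
}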

