Subject: Re: [PATCH V2 3/3] perf: Optimize sched_task() in a context switch
On Mon, Nov 30, 2020 at 11:38:42AM -0800, kan.liang@linux.intel.com wrote:
> From: Kan Liang <kan.liang@linux.intel.com>
>
> Some calls to sched_task() in a context switch can be avoided. For
> example, large PEBS only requires flushing the buffer on context switch
> out. The current code still invokes sched_task() for large PEBS on
> context switch in.

I still hate this one, how about something like this instead?
Which I still don't really like... but at least it's simpler.

(completely untested, may contain spurious edits, might ICE the
compiler and set your pets on fire if it doesn't)

And given this is an optimization, can we actually measure whether it
improves matters?
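
To make the intended behaviour concrete, here is a tiny standalone sketch
of the per-direction accounting (plain C, made-up names, not the kernel
code itself): each user registers for the directions it cares about, and
the switch path skips the callback for any direction that has no users,
which is what lets a flush-on-sched-out-only user such as large PEBS avoid
the sched-in call.

/*
 * Minimal sketch with hypothetical names: callbacks are counted per
 * direction, and a direction with zero users is skipped entirely.
 */
#include <stdbool.h>
#include <stdio.h>

#define SCHED_CB_OUT	0x1	/* callback wanted on sched-out */
#define SCHED_CB_IN	0x2	/* callback wanted on sched-in  */

struct cb_state {
	int dir[2];	/* [0] = sched-out users, [1] = sched-in users */
};

static void sched_cb_inc(struct cb_state *s, unsigned int mask)
{
	if (mask & SCHED_CB_OUT)
		s->dir[0]++;
	if (mask & SCHED_CB_IN)
		s->dir[1]++;
}

static void sched_task(struct cb_state *s, bool sched_in)
{
	/* skipped entirely when nobody registered for this direction */
	if (!s->dir[sched_in])
		return;
	printf("sched_task(%s)\n", sched_in ? "in" : "out");
}

int main(void)
{
	struct cb_state s = { { 0, 0 } };

	sched_cb_inc(&s, SCHED_CB_OUT);	/* e.g. large PEBS: flush on sched-out only */
	sched_task(&s, false);		/* prints: sched_task(out) */
	sched_task(&s, true);		/* no output: the sched-in call is avoided */
	return 0;
}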

---

--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -380,7 +380,7 @@ static void power_pmu_bhrb_enable(struct
 		cpuhw->bhrb_context = event->ctx;
 	}
 	cpuhw->bhrb_users++;
-	perf_sched_cb_inc(event->ctx->pmu);
+	perf_sched_cb_inc(event);
 }
 
 static void power_pmu_bhrb_disable(struct perf_event *event)
@@ -392,7 +392,7 @@ static void power_pmu_bhrb_disable(struc
 
 	WARN_ON_ONCE(!cpuhw->bhrb_users);
 	cpuhw->bhrb_users--;
-	perf_sched_cb_dec(event->ctx->pmu);
+	perf_sched_cb_dec(event);
 
 	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
 		/* BHRB cannot be turned off when other
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3567,7 +3567,7 @@ static int intel_pmu_hw_config(struct pe
 			if (!(event->attr.sample_type &
 			      ~intel_pmu_large_pebs_flags(event))) {
 				event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
-				event->attach_state |= PERF_ATTACH_SCHED_CB;
+				event->attach_state |= PERF_ATTACH_SCHED_CB_OUT;
 			}
 		}
 		if (x86_pmu.pebs_aliases)
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1014,7 +1014,6 @@ static void
 pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
 		  struct perf_event *event, bool add)
 {
-	struct pmu *pmu = event->ctx->pmu;
 	/*
	 * Make sure we get updated with the first PEBS
	 * event. It will trigger also during removal, but
@@ -1024,9 +1023,9 @@ pebs_update_state(bool needed_cb, struct
 
 	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
 		if (!needed_cb)
-			perf_sched_cb_inc(pmu);
+			perf_sched_cb_inc(event);
 		else
-			perf_sched_cb_dec(pmu);
+			perf_sched_cb_dec(event);
 
 		update = true;
 	}
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -693,7 +693,7 @@ void intel_pmu_lbr_add(struct perf_event
 	 */
 	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
 		cpuc->lbr_pebs_users++;
-	perf_sched_cb_inc(event->ctx->pmu);
+	perf_sched_cb_inc(event);
 	if (!cpuc->lbr_users++ && !event->total_time_running)
 		intel_pmu_lbr_reset();
 
@@ -740,7 +740,7 @@ void intel_pmu_lbr_del(struct perf_event
 	cpuc->lbr_users--;
 	WARN_ON_ONCE(cpuc->lbr_users < 0);
 	WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
-	perf_sched_cb_dec(event->ctx->pmu);
+	perf_sched_cb_dec(event);
 }
 
 static inline bool vlbr_exclude_host(void)
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -601,12 +601,14 @@ struct swevent_hlist {
 	struct rcu_head			rcu_head;
 };
 
-#define PERF_ATTACH_CONTEXT	0x01
-#define PERF_ATTACH_GROUP	0x02
-#define PERF_ATTACH_TASK	0x04
-#define PERF_ATTACH_TASK_DATA	0x08
-#define PERF_ATTACH_ITRACE	0x10
-#define PERF_ATTACH_SCHED_CB	0x20
+#define PERF_ATTACH_CONTEXT		0x01
+#define PERF_ATTACH_GROUP		0x02
+#define PERF_ATTACH_TASK		0x04
+#define PERF_ATTACH_TASK_DATA		0x08
+#define PERF_ATTACH_ITRACE		0x10
+#define PERF_ATTACH_SCHED_CB_IN		0x20
+#define PERF_ATTACH_SCHED_CB_OUT	0x40
+#define PERF_ATTACH_SCHED_CB		0x60
 
 struct perf_cgroup;
 struct perf_buffer;
@@ -875,6 +877,7 @@ struct perf_cpu_context {
 
 	struct list_head		sched_cb_entry;
 	int				sched_cb_usage;
+	int				sched_cb_dir[2];
 
 	int				online;
 	/*
@@ -967,8 +970,8 @@ extern const struct perf_event_attr *per
 extern void perf_event_print_debug(void);
 extern void perf_pmu_disable(struct pmu *pmu);
 extern void perf_pmu_enable(struct pmu *pmu);
-extern void perf_sched_cb_dec(struct pmu *pmu);
-extern void perf_sched_cb_inc(struct pmu *pmu);
+extern void perf_sched_cb_dec(struct perf_event *event);
+extern void perf_sched_cb_inc(struct perf_event *event);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -384,7 +384,6 @@ static DEFINE_MUTEX(perf_sched_mutex);
 static atomic_t perf_sched_count;
 
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
-static DEFINE_PER_CPU(int, perf_sched_cb_usage);
 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
 
 static atomic_t nr_mmap_events __read_mostly;
@@ -3376,6 +3375,16 @@ static void perf_event_sync_stat(struct
 	}
 }
 
+static void context_sched_task(struct perf_cpu_context *cpuctx,
+			       struct perf_event_context *ctx,
+			       bool sched_in)
+{
+	struct pmu *pmu = ctx->pmu;
+
+	if (cpuctx->sched_cb_dir[sched_in] && pmu->sched_task)
+		pmu->sched_task(ctx, sched_in);
+}
+
 static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
					 struct task_struct *next)
 {
@@ -3424,9 +3433,7 @@ static void perf_event_context_sched_out
 			WRITE_ONCE(next_ctx->task, task);
 
 			perf_pmu_disable(pmu);
-
-			if (cpuctx->sched_cb_usage && pmu->sched_task)
-				pmu->sched_task(ctx, false);
+			context_sched_task(cpuctx, ctx, false);
 
 			/*
 			 * PMU specific parts of task perf context can require
@@ -3465,8 +3472,7 @@ static void perf_event_context_sched_out
 		raw_spin_lock(&ctx->lock);
 		perf_pmu_disable(pmu);
 
-		if (cpuctx->sched_cb_usage && pmu->sched_task)
-			pmu->sched_task(ctx, false);
+		context_sched_task(cpuctx, ctx, false);
 		task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
 
 		perf_pmu_enable(pmu);
@@ -3476,25 +3482,38 @@ static void perf_event_context_sched_out
 
 static DEFINE_PER_CPU(struct list_head, sched_cb_list);
 
-void perf_sched_cb_dec(struct pmu *pmu)
+void perf_sched_cb_dec(struct perf_event *event)
 {
+	struct perf_event_context *ctx = event->ctx;
+	struct pmu *pmu = ctx->pmu;
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 
-	this_cpu_dec(perf_sched_cb_usage);
-
-	if (!--cpuctx->sched_cb_usage)
+	if (!ctx->task && !--cpuctx->sched_cb_usage)
 		list_del(&cpuctx->sched_cb_entry);
+
+	if (event->attach_state & PERF_ATTACH_SCHED_CB_OUT)
+		cpuctx->sched_cb_dir[0]--;
+	if (event->attach_state & PERF_ATTACH_SCHED_CB_IN)
+		cpuctx->sched_cb_dir[1]--;
 }
 
 
-void perf_sched_cb_inc(struct pmu *pmu)
+void perf_sched_cb_inc(struct perf_event *event)
 {
+	struct perf_event_context *ctx = event->ctx;
+	struct pmu *pmu = ctx->pmu;
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 
-	if (!cpuctx->sched_cb_usage++)
+	if (!ctx->task && !cpuctx->sched_cb_usage++)
 		list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
 
-	this_cpu_inc(perf_sched_cb_usage);
+	if (!(event->attach_state & PERF_ATTACH_SCHED_CB))
+		event->attach_state |= PERF_ATTACH_SCHED_CB;
+
+	if (event->attach_state & PERF_ATTACH_SCHED_CB_OUT)
+		cpuctx->sched_cb_dir[0]++;
+	if (event->attach_state & PERF_ATTACH_SCHED_CB_IN)
+		cpuctx->sched_cb_dir[1]++;
 }
 
 /*
@@ -3523,9 +3542,9 @@ static void __perf_pmu_sched_task(struct
 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 }
 
-static void perf_pmu_sched_task(struct task_struct *prev,
-				struct task_struct *next,
-				bool sched_in)
+static inline void perf_pmu_sched_task(struct task_struct *prev,
+				       struct task_struct *next,
+				       bool sched_in)
 {
 	struct perf_cpu_context *cpuctx;
 
@@ -3563,8 +3582,7 @@ void __perf_event_task_sched_out(struct
 {
 	int ctxn;
 
-	if (__this_cpu_read(perf_sched_cb_usage))
-		perf_pmu_sched_task(task, next, false);
+	perf_pmu_sched_task(task, next, false);
 
 	if (atomic_read(&nr_switch_events))
 		perf_event_switch(task, next, false);
@@ -3828,8 +3846,7 @@ static void perf_event_context_sched_in(
 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	perf_event_sched_in(cpuctx, ctx, task);
 
-	if (cpuctx->sched_cb_usage && pmu->sched_task)
-		pmu->sched_task(cpuctx->task_ctx, true);
+	context_sched_task(cpuctx, ctx, true);
 
 	perf_pmu_enable(pmu);
 
@@ -3875,8 +3892,7 @@ void __perf_event_task_sched_in(struct t
 	if (atomic_read(&nr_switch_events))
 		perf_event_switch(task, prev, true);
 
-	if (__this_cpu_read(perf_sched_cb_usage))
-		perf_pmu_sched_task(prev, task, true);
+	perf_pmu_sched_task(prev, task, true);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)