Subject: [PATCH 4.20 168/171] perf/x86/intel: Make cpuc allocations consistent
4.20-stable review patch.  If anyone has any objections, please let me know.

------------------

From: "Peter Zijlstra (Intel)" <peterz@infradead.org>

commit d01b1f96a82e5dd7841a1d39db3abfdaf95f70ab upstream

The cpuc data structure allocation is different between fake and real
cpuc's; use the same code to init/free both.
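
In other words, the fake cpuc used by validate_event()/validate_group()
and the real per-CPU cpuc now funnel through a single prepare/finish
pair. A minimal sketch of the resulting call pattern, condensed from the
diff below (not the literal kernel code):

	/* One helper pair now serves both kinds of cpuc: */
	int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
	void intel_cpuc_finish(struct cpu_hw_events *cpuc);

	/* Real per-CPU path, from the CPU hotplug callbacks: */
	static int intel_pmu_cpu_prepare(int cpu)
	{
		return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
	}

	static void intel_pmu_cpu_dead(int cpu)
	{
		intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
	}

	/*
	 * Fake-cpuc path, from event validation: allocate_fake_cpuc()
	 * sets cpuc->is_fake and calls intel_cpuc_prepare() instead of
	 * open-coding only the shared_regs allocation, and
	 * free_fake_cpuc() calls intel_cpuc_finish() before kfree()ing
	 * the cpuc.
	 */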

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/events/core.c       | 13 +++++--------
 arch/x86/events/intel/core.c | 29 ++++++++++++++++++-----------
 arch/x86/events/perf_event.h | 11 ++++++++---
 3 files changed, 31 insertions(+), 22 deletions(-)

--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1995,7 +1995,7 @@ static int x86_pmu_commit_txn(struct pmu
  */
 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
 {
-	kfree(cpuc->shared_regs);
+	intel_cpuc_finish(cpuc);
 	kfree(cpuc);
 }
 
@@ -2007,14 +2007,11 @@ static struct cpu_hw_events *allocate_fa
 	cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
 	if (!cpuc)
 		return ERR_PTR(-ENOMEM);
-
-	/* only needed, if we have extra_regs */
-	if (x86_pmu.extra_regs) {
-		cpuc->shared_regs = allocate_shared_regs(cpu);
-		if (!cpuc->shared_regs)
-			goto error;
-	}
 	cpuc->is_fake = 1;
+
+	if (intel_cpuc_prepare(cpuc, cpu))
+		goto error;
+
 	return cpuc;
 error:
 	free_fake_cpuc(cpuc);
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3398,7 +3398,7 @@ ssize_t intel_event_sysfs_show(char *pag
 	return x86_event_sysfs_show(page, config, event);
 }
 
-struct intel_shared_regs *allocate_shared_regs(int cpu)
+static struct intel_shared_regs *allocate_shared_regs(int cpu)
 {
 	struct intel_shared_regs *regs;
 	int i;
@@ -3430,10 +3430,9 @@ static struct intel_excl_cntrs *allocate
 	return c;
 }
 
-static int intel_pmu_cpu_prepare(int cpu)
-{
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
+int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
+{
 	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
 		cpuc->shared_regs = allocate_shared_regs(cpu);
 		if (!cpuc->shared_regs)
@@ -3443,7 +3442,7 @@ static int intel_pmu_cpu_prepare(int cpu
 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
 		size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
 
-		cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
+		cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
 		if (!cpuc->constraint_list)
 			goto err_shared_regs;
 
@@ -3468,6 +3467,11 @@ err:
 	return -ENOMEM;
 }
 
+static int intel_pmu_cpu_prepare(int cpu)
+{
+	return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
+}
+
 static void flip_smm_bit(void *data)
 {
 	unsigned long set = *(unsigned long *)data;
@@ -3542,9 +3546,8 @@ static void intel_pmu_cpu_starting(int c
 	}
 }
 
-static void free_excl_cntrs(int cpu)
+static void free_excl_cntrs(struct cpu_hw_events *cpuc)
 {
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	struct intel_excl_cntrs *c;
 
 	c = cpuc->excl_cntrs;
@@ -3565,9 +3568,8 @@ static void intel_pmu_cpu_dying(int cpu)
 		disable_counter_freeze();
 }
 
-static void intel_pmu_cpu_dead(int cpu)
+void intel_cpuc_finish(struct cpu_hw_events *cpuc)
 {
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	struct intel_shared_regs *pc;
 
 	pc = cpuc->shared_regs;
@@ -3577,7 +3579,12 @@ static void intel_pmu_cpu_dead(int cpu)
 		cpuc->shared_regs = NULL;
 	}
 
-	free_excl_cntrs(cpu);
+	free_excl_cntrs(cpuc);
+}
+
+static void intel_pmu_cpu_dead(int cpu)
+{
+	intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
 }
 
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -4715,7 +4722,7 @@ static __init int fixup_ht_bug(void)
 	hardlockup_detector_perf_restart();
 
 	for_each_online_cpu(c)
-		free_excl_cntrs(c);
+		free_excl_cntrs(&per_cpu(cpu_hw_events, c));
 
 	cpus_read_unlock();
 	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -889,7 +889,8 @@ struct event_constraint *
 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 			  struct perf_event *event);
 
-struct intel_shared_regs *allocate_shared_regs(int cpu);
+extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
+extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
 
 int intel_pmu_init(void);
 
@@ -1025,9 +1026,13 @@ static inline int intel_pmu_init(void)
 	return 0;
 }
 
-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
+static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
+{
+	return 0;
+}
+
+static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
 {
-	return NULL;
 }
 
 static inline int is_ht_workaround_enabled(void)
