Subject: [PATCH 18/22] perf/x86/intel: Support CPUID 10.ECX to disable fixed counters
Date: 2019-03-18
From: Andi Kleen <ak@linux.intel.com>

Icelake supports a new CPUID 10.ECX field to indicate that some fixed
counters are not supported. This extends the previous plain count to a
bitmap, which allows disabling even lower-numbered fixed counters.

This is a nop on Icelake itself (all fixed counters are supported), but
implement it here anyway and add the necessary checks. In theory a
hypervisor could already make use of it today.

For disabled counters, disable any constraint events. The existing
intel_ctrl variable is reused to remember which counters are disabled.
All code that iterates over all counters is updated to check this extra
bitmask.
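
For reference, here is a rough user-space sketch (not part of the patch)
of how the CPUID 10 data maps onto the fixed-counter bits of intel_ctrl;
it assumes GCC/clang's <cpuid.h> and simply mirrors the mask logic used
below:

/* Sketch only: read CPUID leaf 0xA and derive the enabled fixed-counter mask. */
#include <cpuid.h>
#include <stdio.h>

#define PMC_IDX_FIXED	32	/* bit offset of fixed counters, as in the kernel */

int main(void)
{
	unsigned int eax, ebx, ecx, edx, num_fixed, fixed_mask, i;
	unsigned long long ctrl_fixed;

	if (!__get_cpuid_count(0xa, 0, &eax, &ebx, &ecx, &edx))
		return 1;

	num_fixed  = edx & 0x1f;	/* CPUID.0AH:EDX[4:0]: number of fixed counters */
	fixed_mask = ecx;		/* CPUID.0AH:ECX: bitmap of supported fixed counters */
	if (!fixed_mask)		/* older CPUs report 0: treat all as supported */
		fixed_mask = -1;

	/* Same combination the patch folds into x86_pmu.intel_ctrl */
	ctrl_fixed = (((1ULL << num_fixed) - 1) & (unsigned long long)fixed_mask)
			<< PMC_IDX_FIXED;

	for (i = 0; i < num_fixed; i++)
		printf("fixed counter %u: %s\n", i,
		       (ctrl_fixed & (1ULL << (i + PMC_IDX_FIXED))) ?
		       "supported" : "disabled");
	return 0;
}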

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
 arch/x86/events/core.c       |  8 +++++++-
 arch/x86/events/intel/core.c | 22 +++++++++++++++-------
 arch/x86/events/perf_event.h |  6 ++++++
 3 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 796e46a59148..283df78c52e0 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -225,6 +225,8 @@ static bool check_hw_exists(void)
 		if (ret)
 			goto msr_fail;
 		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
+			if (fixed_counter_disabled(i))
+				continue;
 			if (val & (0x03 << i*4)) {
 				bios_fail = 1;
 				val_fail = val;
@@ -1362,6 +1364,8 @@ void perf_event_print_debug(void)
 			cpu, idx, prev_left);
 	}
 	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+		if (fixed_counter_disabled(idx))
+			continue;
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
 
 		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1877,7 +1881,9 @@ static int __init init_hw_perf_events(void)
 	pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
 	pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
 	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
-	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
+	pr_info("... fixed-purpose events:   %lu\n",
+			hweight64((((1ULL << x86_pmu.num_counters_fixed) - 1)
+				<< INTEL_PMC_IDX_FIXED) & x86_pmu.intel_ctrl));
 	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
 	/*
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 3f86af8ce832..433dbd0152a9 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2278,8 +2278,11 @@ static void intel_pmu_reset(void)
 		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
 		wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
 	}
-	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
+	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+		if (fixed_counter_disabled(idx))
+			continue;
 		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+	}
 
 	if (ds)
 		ds->bts_index = ds->bts_buffer_base;
@@ -4476,7 +4479,7 @@ __init int intel_pmu_init(void)
 	union cpuid10_eax eax;
 	union cpuid10_ebx ebx;
 	struct event_constraint *c;
-	unsigned int unused;
+	unsigned int fixed_mask;
 	struct extra_reg *er;
 	int version, i;
 	char *name;
@@ -4497,9 +4500,11 @@ __init int intel_pmu_init(void)
 	 * Check whether the Architectural PerfMon supports
 	 * Branch Misses Retired hw_event or not.
 	 */
-	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
+	cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
 	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
 		return -ENODEV;
+	if (!fixed_mask)
+		fixed_mask = -1;
 
 	version = eax.split.version_id;
 	if (version < 2)
@@ -5017,7 +5022,8 @@ __init int intel_pmu_init(void)
 	}
 
 	x86_pmu.intel_ctrl |=
-		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+		(((1LL << x86_pmu.num_counters_fixed)-1) & (u64)fixed_mask)
+			<< INTEL_PMC_IDX_FIXED;
 
 	if (x86_pmu.event_constraints) {
 		/*
@@ -5034,9 +5040,11 @@ __init int intel_pmu_init(void)
 				c->weight = hweight64(c->idxmsk64);
 				continue;
 			}
-			if (c->cmask == FIXED_EVENT_FLAGS
-			    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
-				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+			if (c->cmask == FIXED_EVENT_FLAGS) {
+				if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
+					c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+				/* Disable fixed counters which are not in CPUID */
+				c->idxmsk64 &= x86_pmu.intel_ctrl;
+			}
 			c->idxmsk64 &=
 				~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ef8c4d846e87..8894e3bd1f23 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -926,6 +926,12 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
 ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
 			  char *page);
 
+static inline bool fixed_counter_disabled(int i)
+{
+	return x86_pmu.intel_ctrl &&
+		!((1ULL << (i + INTEL_PMC_IDX_FIXED)) & x86_pmu.intel_ctrl);
+}
+
 #ifdef CONFIG_CPU_SUP_AMD
 
 int amd_pmu_init(void);
-- 
2.17.1