Subject: [PATCH 5.17 015/140] KVM: x86/svm: Account for family 17h event renumberings in amd_pmc_perf_hw_id
Date: 2022-05-10
From: Kyle Huey <me@kylehuey.com>

commit 5eb849322d7f7ae9d5c587c7bc3b4f7c6872cd2f upstream.

Zen renumbered some of the performance counters that correspond to the
well-known events in perf_hw_id. This code in KVM was never updated for
that, so guests that attempt to use counters on Zen that correspond to
the pre-Zen perf_hw_id values will silently receive the wrong values.

This has been observed in the wild with rr[0] when running in Zen 3
guests. rr uses the retired conditional branch counter 00d1, which is
incorrectly recognized by KVM as PERF_COUNT_HW_STALLED_CYCLES_BACKEND.

[0] https://rr-project.org/
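
For illustration only (none of the following is kernel code: the
mapping struct, the one-entry table excerpt, the lookup() helper, and
the hand-copied perf_hw_id values exist just to make the sketch
self-contained), here is the pre-patch lookup in miniature, probed with
the event rr programs:

#include <stdio.h>
#include <stddef.h>

/* Hand-copied from include/uapi/linux/perf_event.h's enum perf_hw_id. */
enum {
	HW_STALLED_CYCLES_BACKEND = 8,	/* PERF_COUNT_HW_STALLED_CYCLES_BACKEND */
	HW_MAX = 10,			/* PERF_COUNT_HW_MAX */
};

struct mapping { unsigned char eventsel, unit_mask; unsigned int event_type; };

/* Last entry of the legacy (pre-Zen) amd_event_mapping, as in the diff. */
static const struct mapping legacy[] = {
	{ 0xd1, 0x00, HW_STALLED_CYCLES_BACKEND },
};

/* Same loop shape as amd_pmc_perf_hw_id: first (eventsel, umask) match wins. */
static unsigned int lookup(const struct mapping *map, size_t n,
			   unsigned char evsel, unsigned char umask)
{
	for (size_t i = 0; i < n; i++)
		if (map[i].eventsel == evsel && map[i].unit_mask == umask)
			return map[i].event_type;
	return HW_MAX;	/* no match: the caller programs the raw event instead */
}

int main(void)
{
	/* rr on Zen 3 asks for 0xd1/0x00, retired conditional branches. */
	unsigned int id = lookup(legacy, sizeof(legacy) / sizeof(legacy[0]),
				 0xd1, 0x00);

	printf("0xd1/0x00 -> perf_hw_id %u (stalled-cycles-backend)\n", id);
	return 0;
}

With the family 17h table introduced below, 0xd1/0x00 matches no
well-known event, so KVM falls back to programming the raw event and
the guest counts retired conditional branches as intended.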

Signed-off-by: Kyle Huey <me@kylehuey.com>
Message-Id: <20220503050136.86298-1-khuey@kylehuey.com>
Cc: stable@vger.kernel.org
[Check guest family, not host. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/kvm/svm/pmu.c | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -45,6 +45,22 @@ static struct kvm_event_hw_type_mapping
 	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
 };
 
+/* duplicated from amd_f17h_perfmon_event_map. */
+static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
+	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
+	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
+	[2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
+	[3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
+	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+	[6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
+	[7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
+};
+
+/* amd_pmc_perf_hw_id depends on these being the same size */
+static_assert(ARRAY_SIZE(amd_event_mapping) ==
+	ARRAY_SIZE(amd_f17h_event_mapping));
+
 static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
 {
 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
@@ -140,6 +156,7 @@ static inline struct kvm_pmc *get_gp_pmc
 
 static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
 {
+	struct kvm_event_hw_type_mapping *event_mapping;
 	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
 	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
 	int i;
@@ -148,15 +165,20 @@ static unsigned int amd_pmc_perf_hw_id(s
 	if (WARN_ON(pmc_is_fixed(pmc)))
 		return PERF_COUNT_HW_MAX;
 
+	if (guest_cpuid_family(pmc->vcpu) >= 0x17)
+		event_mapping = amd_f17h_event_mapping;
+	else
+		event_mapping = amd_event_mapping;
+
 	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
-		if (amd_event_mapping[i].eventsel == event_select
-		    && amd_event_mapping[i].unit_mask == unit_mask)
+		if (event_mapping[i].eventsel == event_select
+		    && event_mapping[i].unit_mask == unit_mask)
 			break;
 
 	if (i == ARRAY_SIZE(amd_event_mapping))
 		return PERF_COUNT_HW_MAX;
 
-	return amd_event_mapping[i].event_type;
+	return event_mapping[i].event_type;
 }
 
 /* check if a PMC is enabled by comparing it against global_ctrl bits. Because
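
The guest/host distinction in Paolo's fixup matters because the table
choice keys off the family the guest sees in its own CPUID, which may
differ from the host's. As a minimal sketch of the family decoding
that KVM's guest_cpuid_family() applies to CPUID leaf 1 EAX (the
helper below mirrors the kernel's x86_family(); the sample EAX values
are illustrative, not taken from this patch):

#include <stdio.h>

/*
 * Family decoding per the x86 CPUID convention: the extended family
 * field contributes only when the base family field is saturated at
 * 0xf, which is the case for every Zen part.
 */
static unsigned int x86_family(unsigned int eax)
{
	unsigned int family = (eax >> 8) & 0xf;

	if (family == 0xf)
		family += (eax >> 20) & 0xff;
	return family;
}

int main(void)
{
	/*
	 * Illustrative leaf-1 EAX values: base family 0xf + extended
	 * family 0xa -> 0x19 (Zen 3); base 0xf + extended 0x1 -> 0x10
	 * (pre-Zen family 10h).
	 */
	unsigned int samples[] = { 0x00a20f10, 0x00100f43 };

	for (int i = 0; i < 2; i++) {
		unsigned int fam = x86_family(samples[i]);

		printf("family 0x%x -> %s\n", fam,
		       fam >= 0x17 ? "amd_f17h_event_mapping"
				   : "amd_event_mapping");
	}
	return 0;
}

Zen, Zen+, and Zen 2 report family 17h and Zen 3 reports family 19h,
so every Zen generation clears the >= 0x17 check and gets the
renumbered table.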
