    From: Isaku Yamahata <isaku.yamahata@intel.com>
    Subject: [PATCH v12 015/106] KVM: TDX: Make pmu_intel.c ignore guest TD case

    TDX KVM doesn't support the PMU yet (adding that support is planned as a
    separate, future patch series), but pmu_intel.c touches the VMX-specific
    vCPU structure during vCPU initialization.  As a workaround, add a dummy
    lbr_desc structure to struct vcpu_tdx so that pmu_intel.c can ignore the
    TDX case.
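
    In effect, the LBR accessors become TD-aware.  A simplified sketch of the
    idea (the actual hunks below additionally guard the TDX path with
    CONFIG_INTEL_TDX_HOST):

    	struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
    	{
    		/* TD vCPUs only ever see the dummy lbr_desc. */
    		if (is_td_vcpu(vcpu))
    			return &to_tdx(vcpu)->lbr_desc;

    		return &to_vmx(vcpu)->lbr_desc;
    	}

    With that, helpers such as intel_pmu_lbr_is_enabled() return false early
    for TD vCPUs, so the rest of pmu_intel.c never acts on a guest TD.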

    Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
    ---
    arch/x86/kvm/vmx/pmu_intel.c | 46 +++++++++++++++++++++++++++++++++++-
    arch/x86/kvm/vmx/pmu_intel.h | 28 ++++++++++++++++++++++
    arch/x86/kvm/vmx/tdx.h       |  8 ++++++-
    arch/x86/kvm/vmx/vmx.c       |  2 +-
    arch/x86/kvm/vmx/vmx.h       | 32 +------------------------
    5 files changed, 82 insertions(+), 34 deletions(-)
    create mode 100644 arch/x86/kvm/vmx/pmu_intel.h

    diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
    index efce9ad70e4e..39f43b0290c5 100644
    --- a/arch/x86/kvm/vmx/pmu_intel.c
    +++ b/arch/x86/kvm/vmx/pmu_intel.c
    @@ -19,6 +19,7 @@
    #include "lapic.h"
    #include "nested.h"
    #include "pmu.h"
    +#include "tdx.h"

    #define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

    @@ -37,6 +38,26 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
    /* mapping between fixed pmc index and intel_arch_events array */
    static int fixed_pmc_events[] = {1, 0, 7};

    +struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
    +{
    +#ifdef CONFIG_INTEL_TDX_HOST
    +	if (is_td_vcpu(vcpu))
    +		return &to_tdx(vcpu)->lbr_desc;
    +#endif
    +
    +	return &to_vmx(vcpu)->lbr_desc;
    +}
    +
    +struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
    +{
    +#ifdef CONFIG_INTEL_TDX_HOST
    +	if (is_td_vcpu(vcpu))
    +		return &to_tdx(vcpu)->lbr_desc.records;
    +#endif
    +
    +	return &to_vmx(vcpu)->lbr_desc.records;
    +}
    +
    static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
    {
    struct kvm_pmc *pmc;
    @@ -169,6 +190,23 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
    return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
    }

    +bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
    +{
    +	if (is_td_vcpu(vcpu))
    +		return false;
    +	return cpuid_model_is_consistent(vcpu);
    +}
    +
    +bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
    +{
    +	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
    +
    +	if (is_td_vcpu(vcpu))
    +		return false;
    +
    +	return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT);
    +}
    +
    static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
    {
    struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
    @@ -279,6 +317,9 @@ int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
    PERF_SAMPLE_BRANCH_USER,
    };

    +	if (WARN_ON_ONCE(is_td_vcpu(vcpu)))
    +		return 0;
    +
    	if (unlikely(lbr_desc->event)) {
    		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
    		return 0;
    @@ -588,7 +629,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
    INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

    	perf_capabilities = vcpu_get_perf_capabilities(vcpu);
    -	if (cpuid_model_is_consistent(vcpu) &&
    +	if (intel_pmu_lbr_is_compatible(vcpu) &&
    	    (perf_capabilities & PMU_CAP_LBR_FMT))
    		x86_perf_get_lbr(&lbr_desc->records);
    	else
    @@ -644,6 +685,9 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
    	struct kvm_pmc *pmc = NULL;
    	int i;

    +	if (is_td_vcpu(vcpu))
    +		return;
    +
    	for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
    		pmc = &pmu->gp_counters[i];

    diff --git a/arch/x86/kvm/vmx/pmu_intel.h b/arch/x86/kvm/vmx/pmu_intel.h
    new file mode 100644
    index 000000000000..66bba47c1269
    --- /dev/null
    +++ b/arch/x86/kvm/vmx/pmu_intel.h
    @@ -0,0 +1,28 @@
    +/* SPDX-License-Identifier: GPL-2.0 */
    +#ifndef __KVM_X86_VMX_PMU_INTEL_H
    +#define __KVM_X86_VMX_PMU_INTEL_H
    +
    +struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu);
    +struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu);
    +
    +bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
    +bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);
    +int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
    +
    +struct lbr_desc {
    +	/* Basic info about guest LBR records. */
    +	struct x86_pmu_lbr records;
    +
    +	/*
    +	 * Emulate LBR feature via passthrough LBR registers when the
    +	 * per-vcpu guest LBR event is scheduled on the current pcpu.
    +	 *
    +	 * The records may be inaccurate if the host reclaims the LBR.
    +	 */
    +	struct perf_event *event;
    +
    +	/* True if LBRs are marked as not intercepted in the MSR bitmap */
    +	bool msr_passthrough;
    +};
    +
    +#endif /* __KVM_X86_VMX_PMU_INTEL_H */
    diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
    index 1e00e75b1c5e..5728820fed5e 100644
    --- a/arch/x86/kvm/vmx/tdx.h
    +++ b/arch/x86/kvm/vmx/tdx.h
    @@ -4,6 +4,7 @@

    #ifdef CONFIG_INTEL_TDX_HOST

    +#include "pmu_intel.h"
    #include "tdx_ops.h"

    struct kvm_tdx {
    @@ -21,7 +22,12 @@ struct kvm_tdx {

    struct vcpu_tdx {
    	struct kvm_vcpu	vcpu;
    -	/* TDX specific members follow. */
    +
    +	/*
    +	 * Dummy to make pmu_intel not corrupt memory.
    +	 * TODO: Support PMU for TDX. Future work.
    +	 */
    +	struct lbr_desc lbr_desc;
    };

    static inline bool is_td(struct kvm *kvm)
    diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
    index bddbdd2988f4..5d2ff4d964bd 100644
    --- a/arch/x86/kvm/vmx/vmx.c
    +++ b/arch/x86/kvm/vmx/vmx.c
    @@ -2434,7 +2434,7 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
    			if ((data & PMU_CAP_LBR_FMT) !=
    			    (kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT))
    				return 1;
    -			if (!cpuid_model_is_consistent(vcpu))
    +			if (!intel_pmu_lbr_is_compatible(vcpu))
    				return 1;
    		}
    		if (data & PERF_CAP_PEBS_FORMAT) {
    diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
    index a3da84f4ea45..d49d0ace9fb8 100644
    --- a/arch/x86/kvm/vmx/vmx.h
    +++ b/arch/x86/kvm/vmx/vmx.h
    @@ -11,6 +11,7 @@
    #include "capabilities.h"
    #include "../kvm_cache_regs.h"
    #include "posted_intr.h"
    +#include "pmu_intel.h"
    #include "vmcs.h"
    #include "vmx_ops.h"
    #include "../cpuid.h"
    @@ -105,22 +106,6 @@ static inline bool intel_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
    return pmu->version > 1;
    }

    -struct lbr_desc {
    -	/* Basic info about guest LBR records. */
    -	struct x86_pmu_lbr records;
    -
    -	/*
    -	 * Emulate LBR feature via passthrough LBR registers when the
    -	 * per-vcpu guest LBR event is scheduled on the current pcpu.
    -	 *
    -	 * The records may be inaccurate if the host reclaims the LBR.
    -	 */
    -	struct perf_event *event;
    -
    -	/* True if LBRs are marked as not intercepted in the MSR bitmap */
    -	bool msr_passthrough;
    -};
    -
    /*
    * The nested_vmx structure is part of vcpu_vmx, and holds information we need
    * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
    @@ -650,21 +635,6 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
    	return container_of(vcpu, struct vcpu_vmx, vcpu);
    }

    -static inline struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
    -{
    -	return &to_vmx(vcpu)->lbr_desc;
    -}
    -
    -static inline struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
    -{
    -	return &vcpu_to_lbr_desc(vcpu)->records;
    -}
    -
    -static inline bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
    -{
    -	return !!vcpu_to_lbr_records(vcpu)->nr;
    -}
    -
    void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
    int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
    void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
    --
    2.25.1