Subject: Re: [PATCH v2 2/6] KVM: x86/pmu: Refactoring find_arch_event() to pmc_perf_hw_id()
On Mon, Nov 29, 2021 at 11:42 PM Like Xu <like.xu.linux@gmail.com> wrote:
>
> From: Like Xu <likexu@tencent.com>
>
> find_arch_event() returns an "unsigned int" value, which
> pmc_reprogram_counter() uses to program a
> PERF_TYPE_HARDWARE type perf_event.
>
> The returned value is actually the kernel-defined generic
> perf_hw_id, so let's rename it to pmc_perf_hw_id() and take
> simpler incoming parameters for better self-explanation.
>
> Signed-off-by: Like Xu <likexu@tencent.com>
> ---
> arch/x86/kvm/pmu.c | 8 +-------
> arch/x86/kvm/pmu.h | 3 +--
> arch/x86/kvm/svm/pmu.c | 8 ++++----
> arch/x86/kvm/vmx/pmu_intel.c | 9 +++++----
> 4 files changed, 11 insertions(+), 17 deletions(-)
>
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index 09873f6488f7..3b3ccf5b1106 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -174,7 +174,6 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
> void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
> {
> unsigned config, type = PERF_TYPE_RAW;
> - u8 event_select, unit_mask;
> struct kvm *kvm = pmc->vcpu->kvm;
> struct kvm_pmu_event_filter *filter;
> int i;
> @@ -206,17 +205,12 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
> if (!allow_event)
> return;
>
> - event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
> - unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
> -
> if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
> ARCH_PERFMON_EVENTSEL_INV |
> ARCH_PERFMON_EVENTSEL_CMASK |
> HSW_IN_TX |
> HSW_IN_TX_CHECKPOINTED))) {
> - config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),
> - event_select,
> - unit_mask);
> + config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
> if (config != PERF_COUNT_HW_MAX)
> type = PERF_TYPE_HARDWARE;
> }
> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
> index 59d6b76203d5..dd7dbb1c5048 100644
> --- a/arch/x86/kvm/pmu.h
> +++ b/arch/x86/kvm/pmu.h
> @@ -24,8 +24,7 @@ struct kvm_event_hw_type_mapping {
> };
>
> struct kvm_pmu_ops {
> - unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
> - u8 unit_mask);
> + unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
> unsigned (*find_fixed_event)(int idx);
> bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
> struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
> diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
> index 0cf05e4caa4c..fb0ce8cda8a7 100644
> --- a/arch/x86/kvm/svm/pmu.c
> +++ b/arch/x86/kvm/svm/pmu.c
> @@ -138,10 +138,10 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
> return &pmu->gp_counters[msr_to_index(msr)];
> }
>
> -static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
> - u8 event_select,
> - u8 unit_mask)
> +static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
> {
> + u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
On AMD, the event select field is 12 bits, split across bits 7:0 and
bits 35:32 of the PerfEvtSeln MSR, so masking with
ARCH_PERFMON_EVENTSEL_EVENT and storing the result in a u8 silently
drops the upper four bits.
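Something along these lines might work (an untested sketch on my end;
it reuses the existing AMD64_EVENTSEL_EVENT mask from
arch/x86/include/asm/perf_event.h and keeps amd_event_mapping as-is,
so an event select above 0xff simply fails to match and falls back to
PERF_TYPE_RAW in the caller):

static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	/*
	 * AMD's EventSelect is 12 bits, split across bits 7:0 and
	 * bits 35:32 of PerfEvtSeln; truncating it to a u8 could make
	 * an extended event alias to one of the generic events below.
	 */
	u16 event_select = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT) |
			   ((pmc->eventsel & AMD64_EVENTSEL_EVENT) >> 24);
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select &&
		    amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}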
> + u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
> int i;
>
> for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
> @@ -323,7 +323,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
> }
>
> struct kvm_pmu_ops amd_pmu_ops = {
> - .find_arch_event = amd_find_arch_event,
> + .pmc_perf_hw_id = amd_pmc_perf_hw_id,
> .find_fixed_event = amd_find_fixed_event,
> .pmc_is_enabled = amd_pmc_is_enabled,
> .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index b7ab5fd03681..67a0188ecdc5 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -68,10 +68,11 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
> reprogram_counter(pmu, bit);
> }
>
> -static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
> - u8 event_select,
> - u8 unit_mask)
> +static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
> {
> + struct kvm_pmu *pmu = pmc_to_pmu(pmc);
> + u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
> + u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
> int i;
>
> for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
> @@ -719,7 +720,7 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
> }
>
> struct kvm_pmu_ops intel_pmu_ops = {
> - .find_arch_event = intel_find_arch_event,
> + .pmc_perf_hw_id = intel_pmc_perf_hw_id,
> .find_fixed_event = intel_find_fixed_event,
> .pmc_is_enabled = intel_pmc_is_enabled,
> .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
> --
> 2.33.1
>
