Subject: [PATCH 1/3] KVM: x86/pmu: Add fast-path check for per-VM vPMU disablement
From: Like Xu <likexu@tencent.com>

Since vcpu->kvm->arch.enable_pmu was introduced as a generic, per-VM
setting, it makes more sense to check it once in the generic PMU code
rather than scattering checks across the vendor implementations. This
also saves the CPU cycles of a static_call() when the vPMU is disabled.
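
As background, the per-VM switch checked by this fast path is the one
userspace flips through KVM_CAP_PMU_CAPABILITY. A minimal, illustrative
VMM snippet follows (a sketch only, assuming a 5.18+ <linux/kvm.h>;
error handling trimmed):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);

		/* Clears kvm->arch.enable_pmu; must run before any vCPU is created. */
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_PMU_CAPABILITY,
			.args[0] = KVM_PMU_CAP_DISABLE,
		};

		if (ioctl(vm, KVM_ENABLE_CAP, &cap))
			perror("KVM_ENABLE_CAP(KVM_CAP_PMU_CAPABILITY)");

		return 0;
	}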

Signed-off-by: Like Xu <likexu@tencent.com>
---
 arch/x86/kvm/pmu.c           | 6 ++++++
 arch/x86/kvm/svm/pmu.c       | 3 ---
 arch/x86/kvm/vmx/pmu_intel.c | 2 +-
 3 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 618f529f1c4d..522498945a4a 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -415,6 +415,9 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
+	if (!vcpu->kvm->arch.enable_pmu)
+		return false;
+
 	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
 		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
 }
@@ -445,6 +448,9 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
  */
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
+	if (!vcpu->kvm->arch.enable_pmu)
+		return;
+
 	static_call(kvm_x86_pmu_refresh)(vcpu);
 }

diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 57ab4739eb19..68b9e22c84d2 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -101,9 +101,6 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
 {
 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
 
-	if (!vcpu->kvm->arch.enable_pmu)
-		return NULL;
-
 	switch (msr) {
 	case MSR_F15H_PERF_CTL0:
 	case MSR_F15H_PERF_CTL1:
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 9db662399487..3f15ec2dd4b3 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -493,7 +493,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	pmu->raw_event_mask = X86_RAW_EVENT_MASK;
 
 	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
-	if (!entry || !vcpu->kvm->arch.enable_pmu)
+	if (!entry)
 		return;
 	eax.full = entry->eax;
 	edx.full = entry->edx;
--
2.36.1