Subject: [PATCH 04/11] KVM: x86/pmu: Drop "u64 eventsel" for reprogram_gp_counter()
From: Like Xu <likexu@tencent.com>

reprogram_gp_counter() unconditionally assigns the requested eventsel to
pmc->eventsel, so move that assignment into the callers and simplify the
helper to take only a "struct kvm_pmc *pmc".

No functional change intended.

Signed-off-by: Like Xu <likexu@tencent.com>
---
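For illustration only (a sketch of the caller-side convention, not part of
the commit message; it mirrors the svm/vmx set_msr hunks below):

	/* before: the helper stored the selector itself */
	reprogram_gp_counter(pmc, data);

	/* after: the caller stores the selector, the helper reads pmc->eventsel */
	pmc->eventsel = data;
	reprogram_gp_counter(pmc);
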
arch/x86/kvm/pmu.c | 7 +++----
arch/x86/kvm/pmu.h | 2 +-
arch/x86/kvm/svm/pmu.c | 3 ++-
arch/x86/kvm/vmx/pmu_intel.c | 3 ++-
4 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 125bdfdbaa7a..482a78956dd0 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -213,16 +213,15 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
return allow_event;
}

-void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+void reprogram_gp_counter(struct kvm_pmc *pmc)
{
u64 config;
u32 type = PERF_TYPE_RAW;
+ u64 eventsel = pmc->eventsel;

if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
printk_once("kvm pmu: pin control bit is ignored\n");

- pmc->eventsel = eventsel;
-
pmc_pause_counter(pmc);

if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
@@ -289,7 +288,7 @@ EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
void reprogram_counter(struct kvm_pmc *pmc)
{
if (pmc_is_gp(pmc))
- reprogram_gp_counter(pmc, pmc->eventsel);
+ reprogram_gp_counter(pmc);
else {
int idx = pmc->idx - INTEL_PMC_IDX_FIXED;
u8 ctrl = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, idx);
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index b529c54dc309..4db50c290c62 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -140,7 +140,7 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
return sample_period;
}

-void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
+void reprogram_gp_counter(struct kvm_pmc *pmc);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmc *pmc);

diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 5aa45f13b16d..db839578e8be 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -265,7 +265,8 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (data == pmc->eventsel)
return 0;
if (!(data & pmu->reserved_bits)) {
- reprogram_gp_counter(pmc, data);
+ pmc->eventsel = data;
+ reprogram_gp_counter(pmc);
return 0;
}
}
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 049ce5519fb5..1ed7d23d6738 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -448,7 +448,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (data == pmc->eventsel)
return 0;
if (!(data & pmu->reserved_bits)) {
- reprogram_gp_counter(pmc, data);
+ pmc->eventsel = data;
+ reprogram_gp_counter(pmc);
return 0;
}
} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
--
2.35.0