Subject: [PATCH v2] KVM: x86: Make the module parameter of vPMU more common
From: Like Xu <likexu@tencent.com>

The new module parameter to control PMU virtualization should apply
to Intel as well as AMD, for situations where userspace is not trusted.
If the module parameter allows PMU virtualization, there could be a
new KVM_CAP or guest CPUID bits whereby userspace can enable/disable
PMU virtualization on a per-VM basis.
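
For illustration only, such a per-VM override might be gated on the module
parameter roughly as follows; the helper name and the enable_pmu field are
hypothetical and not part of this patch:

    /* Hypothetical sketch -- this patch does not add a per-VM capability. */
    static int kvm_vm_enable_pmu_cap(struct kvm *kvm, bool enable)
    {
    	/* The module parameter is the master switch; no override when off. */
    	if (!kvm_pmu_supported())
    		return -EPERM;

    	kvm->arch.enable_pmu = enable;	/* hypothetical per-VM flag */
    	return 0;
    }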

If the module parameter does not allow PMU virtualization, there should
be no userspace override, since we have no precedent for authorizing
that kind of override. When the parameter is false, other counter-based
profiling features (such as LBR, including any associated CPUID bits)
will not be exposed.

To keep "pmu" for the module param, e.g. same as ept, vpid, etc,
a harmless wrapper interface is introduced, which also helps to
distinguish the use of temporary variable with the same name.
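
With the parameter defined in kvm.ko, PMU virtualization would presumably
be turned off at load time with something like "modprobe kvm pmu=0" (or
"kvm.pmu=0" on the kernel command line); the 0444 permission keeps it
read-only at runtime under /sys/module/kvm/parameters/pmu.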

Fixes: b1d66dad65dc ("KVM: x86/svm: Add module param to control PMU virtualization")
Suggested-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Like Xu <likexu@tencent.com>
---
v1 -> v2 Changelog:
- prefer we keep "pmu" for the module param; (Sean)
- move the definition of module_param into the context of pmu;

Previous:
https://lore.kernel.org/kvm/20220111073823.21885-1-likexu@tencent.com/

arch/x86/kvm/cpuid.c | 6 +++---
arch/x86/kvm/pmu.c | 5 +++++
arch/x86/kvm/pmu.h | 8 ++++++++
arch/x86/kvm/svm/pmu.c | 2 +-
arch/x86/kvm/svm/svm.c | 6 +-----
arch/x86/kvm/svm/svm.h | 1 -
arch/x86/kvm/vmx/capabilities.h | 4 ++++
arch/x86/kvm/vmx/pmu_intel.c | 2 +-
8 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 0b920e12bb6d..b62ccc551f42 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -770,10 +770,10 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
perf_get_x86_pmu_capability(&cap);

/*
- * Only support guest architectural pmu on a host
- * with architectural pmu.
+ * A guest architectural pmu is only supported if the host has an
+ * architectural pmu and the module parameter allows it.
*/
- if (!cap.version)
+ if (!kvm_pmu_supported() || !cap.version)
memset(&cap, 0, sizeof(cap));

eax.split.version_id = min(cap.version, 2);
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index e632693a2266..7aef8180d2ec 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -19,6 +19,11 @@
#include "lapic.h"
#include "pmu.h"

+/* Enable/disable PMU virtualization */
+bool __read_mostly pmu = true;
+EXPORT_SYMBOL_GPL(pmu);
+module_param(pmu, bool, 0444);
+
/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 7a7b8d5b775e..43b1eb4bc38e 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -41,6 +41,14 @@ struct kvm_pmu_ops {
void (*cleanup)(struct kvm_vcpu *vcpu);
};

+extern bool pmu;
+
+/* A wrapper interface to return the module parameter of the vPMU. */
+static inline bool kvm_pmu_supported(void)
+{
+ return pmu;
+}
+
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 12d8b301065a..794753abdda3 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -101,7 +101,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
{
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

- if (!pmu)
+ if (!kvm_pmu_supported())
return NULL;

switch (msr) {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 6cb38044a860..e1c2b792ba69 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -192,10 +192,6 @@ module_param(vgif, int, 0444);
static int lbrv = true;
module_param(lbrv, int, 0444);

-/* enable/disable PMU virtualization */
-bool pmu = true;
-module_param(pmu, bool, 0444);
-
static int tsc_scaling = true;
module_param(tsc_scaling, int, 0444);

@@ -4712,7 +4708,7 @@ static __init int svm_hardware_setup(void)
pr_info("LBR virtualization supported\n");
}

- if (!pmu)
+ if (!kvm_pmu_supported())
pr_info("PMU virtualization is disabled\n");

svm_set_cpu_caps();
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index daa8ca84afcc..47ef8f4a9358 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -32,7 +32,6 @@
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern bool intercept_smi;
-extern bool pmu;

/*
* Clean bits in VMCB.
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index c8029b7845b6..9181a57e7e91 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -5,6 +5,7 @@
#include <asm/vmx.h>

#include "lapic.h"
+#include "pmu.h"

extern bool __read_mostly enable_vpid;
extern bool __read_mostly flexpriority_enabled;
@@ -389,6 +390,9 @@ static inline u64 vmx_get_perf_capabilities(void)
{
u64 perf_cap = 0;

+ if (!kvm_pmu_supported())
+ return perf_cap;
+
if (boot_cpu_has(X86_FEATURE_PDCM))
rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap);

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index ffccfd9823c0..70e7f1fff36b 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -487,7 +487,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->reserved_bits = 0xffffffff00200000ull;

entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
- if (!entry)
+ if (!kvm_pmu_supported() || !entry)
return;
eax.full = entry->eax;
edx.full = entry->edx;
--
2.33.1