Subject: Re: [PATCH v2 06/15] KVM: vmx/pmu: Emulate MSR_ARCH_LBR_CTL for guest Arch LBR

On Thu, Nov 24, 2022, Yang Weijiang wrote:
> @@ -345,6 +346,30 @@ static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
> 	return true;
> }
>
> +static bool arch_lbr_ctl_is_valid(struct kvm_vcpu *vcpu, u64 ctl)
> +{
> +	struct kvm_cpuid_entry2 *entry;
> +	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
> +
> +	if (!pmu->kvm_arch_lbr_depth)
> +		return false;
> +
> +	if (ctl & ~KVM_ARCH_LBR_CTL_MASK)
> +		return false;
> +
> +	entry = kvm_find_cpuid_entry(vcpu, 0x1c);

Why!?!?! Why does this series reinvent the wheel so many times? We already have
a huge pile of reserved-bits masks that are computed in intel_pmu_refresh(); just
do the easy thing and follow the established pattern.
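
E.g. a rough sketch of that pattern (arch_lbr_ctl_mask is an illustrative field
name, not something that exists today; untested):

	/* In intel_pmu_refresh(), next to the existing mask computations. */
	pmu->arch_lbr_ctl_mask = ~(u64)0;
	entry = kvm_find_cpuid_entry(vcpu, 0x1c);
	if (pmu->kvm_arch_lbr_depth && entry) {
		pmu->arch_lbr_ctl_mask = ~(u64)KVM_ARCH_LBR_CTL_MASK;
		if (!(entry->ebx & BIT(0)))
			pmu->arch_lbr_ctl_mask |= ARCH_LBR_CTL_CPL;
		if (!(entry->ebx & BIT(1)))
			pmu->arch_lbr_ctl_mask |= ARCH_LBR_CTL_FILTER;
		if (!(entry->ebx & BIT(2)))
			pmu->arch_lbr_ctl_mask |= ARCH_LBR_CTL_STACK;
	}

	/* ...so that the WRMSR path is a single reserved-bits check. */
	if (data & pmu->arch_lbr_ctl_mask)
		return 1;	/* #GP: reserved bits set */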

> +	if (!entry)
> +		return false;
> +
> +	if (!(entry->ebx & BIT(0)) && (ctl & ARCH_LBR_CTL_CPL))
> +		return false;
> +	if (!(entry->ebx & BIT(2)) && (ctl & ARCH_LBR_CTL_STACK))
> +		return false;
> +	if (!(entry->ebx & BIT(1)) && (ctl & ARCH_LBR_CTL_FILTER))
> +		return false;
> +	return true;
> +}
> +
> static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> {
> 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
> @@ -377,6 +402,14 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> 	case MSR_ARCH_LBR_DEPTH:
> 		msr_info->data = lbr_desc->records.nr;
> 		return 0;
> +	case MSR_ARCH_LBR_CTL:
> +		if (!kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR)) {

So I assume the point of this code is to allow reading MSR_ARCH_LBR_CTL from
userspace before doing KVM_SET_CPUID2, but I would much rather do that in a
generic way, i.e. build this series on
https://lore.kernel.org/all/20230124234905.3774678-7-seanjc@google.com
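
With the generic approach, the host_initiated special case goes away entirely and
the read side collapses to something like (sketch; assumes the generic series has
already rejected the access when Arch LBR isn't exposed to the guest):

	case MSR_ARCH_LBR_CTL:
		msr_info->data = vmcs_read64(GUEST_IA32_LBR_CTL);
		return 0;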

> +			WARN_ON_ONCE(!msr_info->host_initiated);
> +			msr_info->data = 0;
> +		} else {
> +			msr_info->data = vmcs_read64(GUEST_IA32_LBR_CTL);
> +		}
> +		return 0;
> 	default:
> 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
> 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
> @@ -483,6 +516,18 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> 		if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
> 			wrmsrl(MSR_ARCH_LBR_DEPTH, lbr_desc->records.nr);
> 		return 0;
> +	case MSR_ARCH_LBR_CTL:
> +		if (msr_info->host_initiated && !pmu->kvm_arch_lbr_depth)
> +			return data != 0;
> +
> +		if (!arch_lbr_ctl_is_valid(vcpu, data))
> +			break;
> +
> +		vmcs_write64(GUEST_IA32_LBR_CTL, data);
> +		if (intel_pmu_lbr_is_enabled(vcpu) && !lbr_desc->event &&
> +		    (data & ARCH_LBR_CTL_LBREN))
> +			intel_pmu_create_guest_lbr_event(vcpu);
> +		return 0;
> 	default:
> 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
> 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
> @@ -727,12 +772,16 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
>  */
> static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
> {
> -	u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);
> +	u32 lbr_ctl_field = GUEST_IA32_DEBUGCTL;
>
> -	if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
> -		data &= ~DEBUGCTLMSR_LBR;
> -		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
> -	}
> +	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI))
> +		return;
> +
> +	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
> +	    guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
> +		lbr_ctl_field = GUEST_IA32_LBR_CTL;

Similar to other comments, just rely on KVM not allowing LBRs to be enabled unless
the guest is configured with the correct flavor.
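
I.e. have the enable paths reject a mismatch up front, something like (sketch, not
from this series):

	/* Wherever LBR use is first enabled for the guest: */
	if (cpu_feature_enabled(X86_FEATURE_ARCH_LBR) !=
	    guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
		return 1;	/* guest and host LBR flavors don't match */

so that paths like this one only need to consult the host capability.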

> +
> +	vmcs_write64(lbr_ctl_field, vmcs_read64(lbr_ctl_field) & ~0x1ULL);

Open coding the enable bit just because it happens to be in the same position in
both fields is ridiculous.

	u64 debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);

	if (!(debugctl & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI))
		return;

	/*
	 * Note: vmcs_clear_bits() only handles 32-bit fields, so this
	 * implies adding a 64-bit variant such as vmcs_clear_bits64().
	 */
	if (cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
		vmcs_clear_bits64(GUEST_IA32_LBR_CTL, ARCH_LBR_CTL_LBREN);
	else
		vmcs_write64(GUEST_IA32_DEBUGCTL, debugctl & ~DEBUGCTLMSR_LBR);

> }
>
> static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
> @@ -801,7 +850,8 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
> {
> 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
> 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
> -	bool lbr_enable = !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
> +	bool lbr_enable = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) ?
> +		(vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN) :
> 		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
>
> 	if (!lbr_desc->event) {
> @@ -829,7 +879,8 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
>
> static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
> {
> -	bool lbr_enable = !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
> +	bool lbr_enable = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) ?
> +		(vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN) :
> 		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);

Instead of open coding the same ugly ternary operator in multiple locations, add
a helper.
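
E.g. something like (helper name purely illustrative, untested):

	static bool intel_pmu_lbr_is_active(struct kvm_vcpu *vcpu)
	{
		if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
			return vmcs_read64(GUEST_IA32_LBR_CTL) & ARCH_LBR_CTL_LBREN;

		return vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR;
	}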

>
> 	if (!lbr_enable)
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index cea8c07f5229..1ae2efc29546 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -2120,6 +2120,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> 					VM_EXIT_SAVE_DEBUG_CONTROLS)
> 			get_vmcs12(vcpu)->guest_ia32_debugctl = data;
>
> +		/*
> +		 * For Arch LBR, IA32_DEBUGCTL[bit 0] has no meaning.
> +		 * It can be written to 0 or 1, but reads will always return 0.
> +		 */
> +		if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))

This can depend solely on the host CPU. If LBRs are unsupported, the bit will be
marked invalid and cleared above. And as mentioned multiple times, KVM needs to
allow enabling LBRs iff the guest and host flavors match.

> +			data &= ~DEBUGCTLMSR_LBR;

This needs to be done before shoving the value into vmcs12.
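
I.e. hoist the masking above the vmcs12 update and key it off the host CPU, e.g.
(untested):

	if (cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
		data &= ~DEBUGCTLMSR_LBR;

	if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
					VM_EXIT_SAVE_DEBUG_CONTROLS)
		get_vmcs12(vcpu)->guest_ia32_debugctl = data;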

> +
> 		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
> 		if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event &&
> 		    (data & DEBUGCTLMSR_LBR))
> --
> 2.27.0
>
