Subject: Re: [PATCH v4 17/17] kvm: vmx: Emulate TEST_CTL MSR
From: Paolo Bonzini
Date: 2019-03-11
On 09/03/19 03:31, Xiaoyao Li wrote:
> Hi, Paolo,
>
> Do you have any comments on this patch?
>
> We are preparing v5 patches for split lock detection, if you have any comments
> about this one, please let me know.

No, my only comment is that it should be placed _before_ the other two
for bisectability. I think I have already sent that small remark.

Thanks,

Paolo

> Thanks,
> Xiaoyao
>
> On Fri, 2019-03-01 at 18:45 -0800, Fenghua Yu wrote:
>> From: Xiaoyao Li <xiaoyao.li@linux.intel.com>
>>
>> A control bit (bit 29) in TEST_CTL MSR 0x33 will be introduced in
>> future x86 processors. When bit 29 is set, the processor raises an #AC
>> exception for split locked accesses at all CPLs.
>>
>> Please check the latest Intel Software Developer's Manual
>> for more detailed information on the MSR and the split lock bit.
>>
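
As a rough guest-side sketch (not part of the patch): the macro names below
match the ones used in the patch, rdmsrl()/wrmsrl() are the standard kernel
MSR accessors, and the CPUID/CORE_CAPABILITY enumeration check is omitted.
Enabling the feature amounts to setting bit 29 of MSR 0x33:

    #define MSR_TEST_CTL                        0x33
    #define TEST_CTL_ENABLE_SPLIT_LOCK_DETECT   BIT(29)

    static void split_lock_detect_enable(void)
    {
            u64 test_ctl;

            /* Read-modify-write MSR 0x33, setting only bit 29. */
            rdmsrl(MSR_TEST_CTL, test_ctl);
            test_ctl |= TEST_CTL_ENABLE_SPLIT_LOCK_DETECT;
            wrmsrl(MSR_TEST_CTL, test_ctl);
    }
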
>> 1. Since the kernel chooses to enable #AC on split lock by default, if
>> we don't emulate the TEST_CTL MSR for the guest, the guest will run
>> with this feature enabled without knowing it. Existing guests with
>> buggy firmware (like OVMF) and old kernels that have cross cache line
>> issues will then fail to boot due to #AC.
>>
>> So we should emulate the TEST_CTL MSR and set it to zero, disabling
>> #AC on split lock by default. Whether and when to enable it is left to
>> the guest firmware and the guest kernel.
>>
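
To illustrate what actually trips the exception (illustration only, not part
of the patch): a split locked access is a LOCK-prefixed read-modify-write
whose operand straddles a cache line boundary, for example:

    /* "counter" occupies bytes 62..65 of a 64-byte aligned object, so the
     * locked increment below crosses a cache line.  With bit 29 of TEST_CTL
     * set, this raises #AC instead of silently taking a bus lock. */
    static struct {
            char pad[62];
            int  counter;
    } __attribute__((packed, aligned(64))) s;

    static void do_split_locked_access(void)
    {
            __sync_fetch_and_add(&s.counter, 1);    /* split locked access */
    }
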
>> 2. The host and the guest can enable #AC on split lock independently,
>> so use MSR autoload to switch it during VM entry/exit.
>>
>> Signed-off-by: Xiaoyao Li <xiaoyao.li@linux.intel.com>
>> Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
>> ---
>> arch/x86/kvm/vmx/vmx.c | 35 +++++++++++++++++++++++++++++++++++
>> arch/x86/kvm/vmx/vmx.h | 1 +
>> 2 files changed, 36 insertions(+)
>>
>> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
>> index 3e03c6e1e558..c0c5e8621afa 100644
>> --- a/arch/x86/kvm/vmx/vmx.c
>> +++ b/arch/x86/kvm/vmx/vmx.c
>> @@ -1659,6 +1659,12 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>> u32 index;
>>
>> switch (msr_info->index) {
>> + case MSR_TEST_CTL:
>> + if (!msr_info->host_initiated &&
>> + !(vmx->core_capability & CORE_CAP_SPLIT_LOCK_DETECT))
>> + return 1;
>> + msr_info->data = vmx->msr_test_ctl;
>> + break;
>> #ifdef CONFIG_X86_64
>> case MSR_FS_BASE:
>> msr_info->data = vmcs_readl(GUEST_FS_BASE);
>> @@ -1805,6 +1811,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>> u32 index;
>>
>> switch (msr_index) {
>> + case MSR_TEST_CTL:
>> + if (!(vmx->core_capability & CORE_CAP_SPLIT_LOCK_DETECT))
>> + return 1;
>> +
>> + if (data & ~TEST_CTL_ENABLE_SPLIT_LOCK_DETECT)
>> + return 1;
>> + vmx->msr_test_ctl = data;
>> + break;
>> case MSR_EFER:
>> ret = kvm_set_msr_common(vcpu, msr_info);
>> break;
>> @@ -4108,6 +4122,9 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
>>
>> vmx->arch_capabilities = kvm_get_arch_capabilities();
>>
>> + /* disable AC split lock by default */
>> + vmx->msr_test_ctl = 0;
>> +
>> vm_exit_controls_init(vmx, vmx_vmexit_ctrl());
>>
>> /* 22.2.1, 20.8.1 */
>> @@ -4145,6 +4162,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
>>
>> vmx->rmode.vm86_active = 0;
>> vmx->spec_ctrl = 0;
>> + vmx->msr_test_ctl = 0;
>>
>> vcpu->arch.microcode_version = 0x100000000ULL;
>> vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
>> @@ -6344,6 +6362,21 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
>> msrs[i].host, false);
>> }
>>
>> +static void atomic_switch_msr_test_ctl(struct vcpu_vmx *vmx)
>> +{
>> + u64 host_msr_test_ctl;
>> +
>> + if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
>> + return;
>> +
>> + rdmsrl(MSR_TEST_CTL, host_msr_test_ctl);
>> + if (host_msr_test_ctl == vmx->msr_test_ctl)
>> + clear_atomic_switch_msr(vmx, MSR_TEST_CTL);
>> + else
>> + add_atomic_switch_msr(vmx, MSR_TEST_CTL, vmx->msr_test_ctl,
>> + host_msr_test_ctl, false);
>> +}
>> +
>> static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
>> {
>> vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
>> @@ -6585,6 +6618,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
>>
>> atomic_switch_perf_msrs(vmx);
>>
>> + atomic_switch_msr_test_ctl(vmx);
>> +
>> vmx_update_hv_timer(vcpu);
>>
>> /*
>> diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
>> index cc22379991f3..e8831609c6c3 100644
>> --- a/arch/x86/kvm/vmx/vmx.h
>> +++ b/arch/x86/kvm/vmx/vmx.h
>> @@ -191,6 +191,7 @@ struct vcpu_vmx {
>> u64 msr_guest_kernel_gs_base;
>> #endif
>>
>> + u64 msr_test_ctl;
>> u64 core_capability;
>> u64 arch_capabilities;
>> u64 spec_ctrl;
>
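
For completeness, a rough sketch (not from the patch) of how host user space
could exercise the new emulation through the existing KVM_SET_MSRS ioctl,
which reaches vmx_set_msr() with host_initiated set; vcpu_fd is assumed to
be a vCPU file descriptor obtained via KVM_CREATE_VCPU:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_guest_test_ctl(int vcpu_fd, __u64 val)
    {
            /* kvm_msrs header followed by exactly one kvm_msr_entry. */
            struct {
                    struct kvm_msrs hdr;
                    struct kvm_msr_entry entry;
            } msrs;

            memset(&msrs, 0, sizeof(msrs));
            msrs.hdr.nmsrs = 1;
            msrs.entry.index = 0x33;        /* MSR_TEST_CTL */
            msrs.entry.data = val;

            /* KVM_SET_MSRS returns the number of MSRs actually written. */
            return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs);
    }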
