From: Isaku Yamahata <isaku.yamahata@intel.com>
Date: 7 Aug 2022
Subject: [PATCH v8 036/103] KVM: VMX: Introduce test mode related to EPT violation VE

To support TDX, KVM is enhanced to operate with #VE.  For TDX, KVM
programs the hardware to inject #VE conditionally and sets the #VE
suppress bit in EPT entries.  For the plain VMX case, #VE isn't used;
if a #VE occurs under VMX, it's a KVM bug.  To be defensive (i.e. to
test that the VMX case isn't broken), introduce the module option
ept_violation_ve_test: when it is set, intercept unexpected #VEs and
report them as errors.
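
The test mode can be enabled at module load time, for example (a usage
sketch, assuming kvm_intel is built as a module; the parameter is
registered with mode 0444 and is therefore read-only at runtime):

  modprobe kvm_intel ept_violation_ve_test=1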

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
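
For reference, here is a minimal guest-side sketch (illustration only,
not part of this patch) of how a #VE handler would consume the #VE
information area declared by the vmx_ve_information structure added
below.  The handler name and the way the area pointer is obtained are
hypothetical; the field semantics follow the comments in init_vmcs():

  /*
   * Hypothetical guest #VE handler.  On #VE delivery the CPU fills the
   * area registered via VE_INFORMATION_ADDRESS and sets 'delivery' to
   * 0xFFFFFFFF; another #VE can be delivered only after software
   * clears that field.
   */
  static void handle_ve(struct vmx_ve_information *ve_info)
  {
          u32 reason = ve_info->exit_reason;  /* 48 = EPT violation */
          u64 gpa = ve_info->guest_physical_address;

          /* ... resolve the EPT violation for 'gpa' based on 'reason' ... */

          /* Re-arm #VE delivery by clearing the busy marker. */
          ve_info->delivery = 0;
  }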
 arch/x86/include/asm/vmx.h | 12 +++++++
 arch/x86/kvm/vmx/vmx.c     | 71 ++++++++++++++++++++++++++++++++++++--
 arch/x86/kvm/vmx/vmx.h     |  4 +++
 3 files changed, 85 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 6231ef005a50..f0f8eecf55ac 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -68,6 +68,7 @@
#define SECONDARY_EXEC_ENCLS_EXITING VMCS_CONTROL_BIT(ENCLS_EXITING)
#define SECONDARY_EXEC_RDSEED_EXITING VMCS_CONTROL_BIT(RDSEED_EXITING)
#define SECONDARY_EXEC_ENABLE_PML VMCS_CONTROL_BIT(PAGE_MOD_LOGGING)
+#define SECONDARY_EXEC_EPT_VIOLATION_VE VMCS_CONTROL_BIT(EPT_VIOLATION_VE)
#define SECONDARY_EXEC_PT_CONCEAL_VMX VMCS_CONTROL_BIT(PT_CONCEAL_VMX)
#define SECONDARY_EXEC_XSAVES VMCS_CONTROL_BIT(XSAVES)
#define SECONDARY_EXEC_MODE_BASED_EPT_EXEC VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
@@ -223,6 +224,8 @@ enum vmcs_field {
VMREAD_BITMAP_HIGH = 0x00002027,
VMWRITE_BITMAP = 0x00002028,
VMWRITE_BITMAP_HIGH = 0x00002029,
+ VE_INFORMATION_ADDRESS = 0x0000202A,
+ VE_INFORMATION_ADDRESS_HIGH = 0x0000202B,
XSS_EXIT_BITMAP = 0x0000202C,
XSS_EXIT_BITMAP_HIGH = 0x0000202D,
ENCLS_EXITING_BITMAP = 0x0000202E,
@@ -628,4 +631,13 @@ enum vmx_l1d_flush_state {

extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;

+struct vmx_ve_information {
+ u32 exit_reason;
+ u32 delivery;
+ u64 exit_qualification;
+ u64 guest_linear_address;
+ u64 guest_physical_address;
+ u16 eptp_index;
+};
+
#endif
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index ec2bd4df0684..4a3b3dc5d4d2 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -126,6 +126,9 @@ module_param(error_on_inconsistent_vmcs_config, bool, 0444);
static bool __read_mostly dump_invalid_vmcs = 0;
module_param(dump_invalid_vmcs, bool, 0644);

+static bool __read_mostly ept_violation_ve_test = 0;
+module_param(ept_violation_ve_test, bool, 0444);
+
#define MSR_BITMAP_MODE_X2APIC 1
#define MSR_BITMAP_MODE_X2APIC_APICV 2

@@ -785,6 +788,13 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)

eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
(1u << DB_VECTOR) | (1u << AC_VECTOR);
+ /*
+ * #VE isn't used for VMX, only for TDX.  To guard against unexpected
+ * changes related to #VE for VMX, intercept unexpected #VE and warn
+ * on it.
+ */
+ if (ept_violation_ve_test)
+ eb |= 1u << VE_VECTOR;
/*
* Guest access to VMware backdoor ports could legitimately
* trigger #GP because of TSS I/O permission bitmap.
@@ -2588,8 +2598,11 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
&_cpu_based_exec_control) < 0)
return -EIO;
if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
+ u32 ept_violation_ve = ept_violation_ve_test ?
+ SECONDARY_EXEC_EPT_VIOLATION_VE : 0;
if (adjust_vmx_controls(KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL,
- KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL,
+ KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL |
+ ept_violation_ve,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
return -EIO;
@@ -2618,6 +2631,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
return -EIO;

vmx_cap->ept = 0;
+ _cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
}
if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
vmx_cap->vpid) {
@@ -4455,6 +4469,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
if (!enable_ept) {
exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
+ exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
enable_unrestricted_guest = 0;
}
if (!enable_unrestricted_guest)
@@ -4582,8 +4597,40 @@ static void init_vmcs(struct vcpu_vmx *vmx)

exec_controls_set(vmx, vmx_exec_control(vmx));

- if (cpu_has_secondary_exec_ctrls())
+ if (cpu_has_secondary_exec_ctrls()) {
secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
+ if (secondary_exec_controls_get(vmx) &
+ SECONDARY_EXEC_EPT_VIOLATION_VE) {
+ if (!vmx->ve_info) {
+ /* ve_info must be page aligned. */
+ struct page *page;
+
+ BUILD_BUG_ON(sizeof(*vmx->ve_info) > PAGE_SIZE);
+ page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ if (page)
+ vmx->ve_info = page_to_virt(page);
+ }
+ if (vmx->ve_info) {
+ /*
+ * Allow #VE delivery. CPU sets this field to
+ * 0xFFFFFFFF on #VE delivery. Another #VE can
+ * occur only if software clears the field.
+ */
+ vmx->ve_info->delivery = 0;
+ vmcs_write64(VE_INFORMATION_ADDRESS,
+ __pa(vmx->ve_info));
+ } else {
+ /*
+ * Because SECONDARY_EXEC_EPT_VIOLATION_VE is
+ * used only when ept_violation_ve_test is true,
+ * it's okay to go with the bit disabled.
+ */
+ pr_err("Failed to allocate ve_info. disabling EPT_VIOLATION_VE.\n");
+ secondary_exec_controls_clearbit(
+ vmx, SECONDARY_EXEC_EPT_VIOLATION_VE);
+ }
+ }
+ }

if (cpu_has_tertiary_exec_ctrls())
tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));
@@ -5183,7 +5230,14 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
if (handle_guest_split_lock(kvm_rip_read(vcpu)))
return 1;
fallthrough;
+ case VE_VECTOR:
default:
+ if (ept_violation_ve_test && ex_no == VE_VECTOR) {
+ pr_err("VMEXIT due to unexpected #VE.\n");
+ secondary_exec_controls_clearbit(
+ vmx, SECONDARY_EXEC_EPT_VIOLATION_VE);
+ return 1;
+ }
kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
kvm_run->ex.exception = ex_no;
kvm_run->ex.error_code = error_code;
@@ -6249,6 +6303,17 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
pr_err("Virtual processor ID = 0x%04x\n",
vmcs_read16(VIRTUAL_PROCESSOR_ID));
+ if (secondary_exec_control & SECONDARY_EXEC_EPT_VIOLATION_VE) {
+ struct vmx_ve_information *ve_info;
+ pr_err("VE info address = 0x%016llx\n",
+ vmcs_read64(VE_INFORMATION_ADDRESS));
+ ve_info = __va(vmcs_read64(VE_INFORMATION_ADDRESS));
+ pr_err("ve_info: 0x%08x 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%04x\n",
+ ve_info->exit_reason, ve_info->delivery,
+ ve_info->exit_qualification,
+ ve_info->guest_linear_address,
+ ve_info->guest_physical_address, ve_info->eptp_index);
+ }
}

/*
@@ -7245,6 +7310,8 @@ void vmx_vcpu_free(struct kvm_vcpu *vcpu)
free_vpid(vmx->vpid);
nested_vmx_free_vcpu(vcpu);
free_loaded_vmcs(vmx->loaded_vmcs);
+ if (vmx->ve_info)
+ free_page((unsigned long)vmx->ve_info);
}

int vmx_vcpu_create(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 7df3cd254b47..a65f360f6d75 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -362,6 +362,9 @@ struct vcpu_vmx {
DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
} shadow_msr_intercept;
+
+ /* ve_info must be page aligned. */
+ struct vmx_ve_information *ve_info;
};

struct kvm_vmx {
@@ -566,6 +569,7 @@ static inline u8 vmx_get_rvi(void)
SECONDARY_EXEC_RDSEED_EXITING | \
SECONDARY_EXEC_RDRAND_EXITING | \
SECONDARY_EXEC_ENABLE_PML | \
+ SECONDARY_EXEC_EPT_VIOLATION_VE | \
SECONDARY_EXEC_TSC_SCALING | \
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | \
SECONDARY_EXEC_PT_USE_GPA | \
--
2.25.1