Date: 2022-04-06
Subject: Re: [RFC PATCH v5 080/104] KVM: TDX: Implement methods to inject NMI
From: Paolo Bonzini <pbonzini@redhat.com>

On 3/4/22 20:49, isaku.yamahata@intel.com wrote:
> From: Isaku Yamahata <isaku.yamahata@intel.com>
>
> The TDX vCPU control structure defines a single pending-NMI bit. The VMM
> requests NMI injection by setting that bit; it does not need to track the
> NMI state of the TDX vCPU, and indeed cannot, because the vCPU state is
> protected. The TDX module handles the actual injection and the NMI state
> transitions.
>
> Add the NMI-related methods, treating NMIs as always injectable.
>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
> ---
>  arch/x86/kvm/vmx/main.c    | 62 +++++++++++++++++++++++++++++++++++---
>  arch/x86/kvm/vmx/tdx.c     |  5 +++
>  arch/x86/kvm/vmx/x86_ops.h |  2 ++
>  3 files changed, 64 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> index 404a260796e4..aa84c13f8ee1 100644
> --- a/arch/x86/kvm/vmx/main.c
> +++ b/arch/x86/kvm/vmx/main.c
> @@ -216,6 +216,58 @@ static void vt_flush_tlb_guest(struct kvm_vcpu *vcpu)
>          vmx_flush_tlb_guest(vcpu);
>  }
>
> +static void vt_inject_nmi(struct kvm_vcpu *vcpu)
> +{
> +        if (is_td_vcpu(vcpu))
> +                return tdx_inject_nmi(vcpu);
> +
> +        vmx_inject_nmi(vcpu);
> +}
> +
> +static int vt_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
> +{
> +        /*
> +         * The TDX module manages NMI windows and NMI reinjection, and hides
> +         * NMI blocking; all KVM can do is throw an NMI over the wall.
> +         */
> +        if (is_td_vcpu(vcpu))
> +                return true;
> +
> +        return vmx_nmi_allowed(vcpu, for_injection);
> +}
> +
> +static bool vt_get_nmi_mask(struct kvm_vcpu *vcpu)
> +{
> +        /*
> +         * Assume NMIs are always unmasked. KVM could query PEND_NMI and treat
> +         * NMIs as masked if a previous NMI is still pending, but SEAMCALLs are
> +         * expensive and the end result is unchanged as the only relevant usage
> +         * of get_nmi_mask() is to limit the number of pending NMIs, i.e. it
> +         * only changes whether KVM or the TDX module drops an NMI.
> +         */
> +        if (is_td_vcpu(vcpu))
> +                return false;
> +
> +        return vmx_get_nmi_mask(vcpu);
> +}
> +
> +static void vt_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
> +{
> +        if (is_td_vcpu(vcpu))
> +                return;
> +
> +        vmx_set_nmi_mask(vcpu, masked);
> +}
> +
> +static void vt_enable_nmi_window(struct kvm_vcpu *vcpu)
> +{
> +        /* See the comment in vt_get_nmi_mask(). */
> +        if (is_td_vcpu(vcpu))
> +                return;
> +
> +        vmx_enable_nmi_window(vcpu);
> +}
> +
>  static void vt_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
>                              int pgd_level)
>  {
> @@ -366,14 +418,14 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
>          .get_interrupt_shadow = vt_get_interrupt_shadow,
>          .patch_hypercall = vmx_patch_hypercall,
>          .set_irq = vt_inject_irq,
> -        .set_nmi = vmx_inject_nmi,
> +        .set_nmi = vt_inject_nmi,
>          .queue_exception = vmx_queue_exception,
>          .cancel_injection = vt_cancel_injection,
>          .interrupt_allowed = vt_interrupt_allowed,
> -        .nmi_allowed = vmx_nmi_allowed,
> -        .get_nmi_mask = vmx_get_nmi_mask,
> -        .set_nmi_mask = vmx_set_nmi_mask,
> -        .enable_nmi_window = vmx_enable_nmi_window,
> +        .nmi_allowed = vt_nmi_allowed,
> +        .get_nmi_mask = vt_get_nmi_mask,
> +        .set_nmi_mask = vt_set_nmi_mask,
> +        .enable_nmi_window = vt_enable_nmi_window,
>          .enable_irq_window = vt_enable_irq_window,
>          .update_cr8_intercept = vmx_update_cr8_intercept,
>          .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index bdc658ca9e4f..273898de9f7a 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -763,6 +763,11 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu)
>          return EXIT_FASTPATH_NONE;
>  }
>
> +void tdx_inject_nmi(struct kvm_vcpu *vcpu)
> +{
> +        td_management_write8(to_tdx(vcpu), TD_VCPU_PEND_NMI, 1);
> +}
> +
>  void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level)
>  {
>          td_vmcs_write64(to_tdx(vcpu), SHARED_EPT_POINTER, root_hpa & PAGE_MASK);
> diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> index c3768a20347f..31be5e8a1d5c 100644
> --- a/arch/x86/kvm/vmx/x86_ops.h
> +++ b/arch/x86/kvm/vmx/x86_ops.h
> @@ -150,6 +150,7 @@ void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
>  void tdx_apicv_post_state_restore(struct kvm_vcpu *vcpu);
>  void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
>                             int trig_mode, int vector);
> +void tdx_inject_nmi(struct kvm_vcpu *vcpu);
>
>  int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
>  int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
> @@ -180,6 +181,7 @@ static inline void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) {}
>  static inline void tdx_apicv_post_state_restore(struct kvm_vcpu *vcpu) {}
>  static inline void tdx_deliver_interrupt(
>          struct kvm_lapic *apic, int delivery_mode, int trig_mode, int vector) {}
> +static inline void tdx_inject_nmi(struct kvm_vcpu *vcpu) {}
>
>  static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
>  static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
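
For readers tracing the call path end to end: userspace queues an NMI with
the KVM_NMI vCPU ioctl (capability KVM_CAP_USER_NMI), and KVM later delivers
it through the .set_nmi hook, which this patch routes to vt_inject_nmi() and,
for a TD, tdx_inject_nmi(). Below is a minimal sketch, not part of the patch;
queue_nmi() and vcpu_fd are hypothetical names, with vcpu_fd assumed to come
from KVM_CREATE_VCPU and error handling elided:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Queue an NMI on a vCPU; KVM_NMI takes no argument. For a TDX
     * guest, KVM ends up calling .set_nmi (vt_inject_nmi), which sets
     * PEND_NMI in the TD vCPU control structure; the TDX module then
     * performs the actual injection on a subsequent TD entry.
     */
    static int queue_nmi(int vcpu_fd)
    {
            return ioctl(vcpu_fd, KVM_NMI);
    }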
