    Subject: [RFC PATCH v6 084/104] KVM: TDX: Add a place holder to handle TDX VM exit
    Date: 5 May 2022
    From: Isaku Yamahata <isaku.yamahata@intel.com>

    Wire up the handle_exit and handle_exit_irqoff methods and add a
    placeholder to handle TDX VM exits.  Add helper functions to retrieve
    the exit information, the exit qualification, and so on.

    Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
    Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
    ---
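    For context: every vt_* wrapper added below follows the same
    TDX-vs-VMX dispatch shape.  Below is a minimal, self-contained sketch
    of that pattern; the stub names and the is_td flag are illustrative
    only (in the real code, is_td_vcpu() checks the vCPU type and the
    tdx_*()/vmx_*() callees are the functions in this series):

	/* Illustrative dispatch pattern, not the actual KVM code. */
	#include <stdbool.h>
	#include <stdio.h>

	struct kvm_vcpu { bool is_td; };	/* stand-in for the real struct */

	static bool is_td_vcpu(struct kvm_vcpu *vcpu) { return vcpu->is_td; }

	static int tdx_handle_exit_stub(struct kvm_vcpu *vcpu) { return puts("tdx exit"); }
	static int vmx_handle_exit_stub(struct kvm_vcpu *vcpu) { return puts("vmx exit"); }

	/* Each vt_* op checks the vCPU type once and forwards unchanged. */
	static int vt_handle_exit_stub(struct kvm_vcpu *vcpu)
	{
		if (is_td_vcpu(vcpu))
			return tdx_handle_exit_stub(vcpu);

		return vmx_handle_exit_stub(vcpu);
	}

	int main(void)
	{
		struct kvm_vcpu td = { .is_td = true };
		struct kvm_vcpu vmx = { .is_td = false };

		vt_handle_exit_stub(&td);	/* prints "tdx exit" */
		vt_handle_exit_stub(&vmx);	/* prints "vmx exit" */
		return 0;
	}

    The void wrappers (vt_handle_exit_irqoff, vt_get_exit_info) follow
    the same shape, minus the return value.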
     arch/x86/kvm/vmx/main.c    | 35 ++++++++++++++--
     arch/x86/kvm/vmx/tdx.c     | 81 ++++++++++++++++++++++++++++++++++++++
     arch/x86/kvm/vmx/x86_ops.h | 11 ++++++
     3 files changed, 124 insertions(+), 3 deletions(-)

    diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
    index 74ca538edf46..95b7a90aa0d7 100644
    --- a/arch/x86/kvm/vmx/main.c
    +++ b/arch/x86/kvm/vmx/main.c
    @@ -188,6 +188,23 @@ static bool vt_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
     	return tdx_protected_apic_has_interrupt(vcpu);
     }

    +static int vt_handle_exit(struct kvm_vcpu *vcpu,
    +			  enum exit_fastpath_completion fastpath)
    +{
    +	if (is_td_vcpu(vcpu))
    +		return tdx_handle_exit(vcpu, fastpath);
    +
    +	return vmx_handle_exit(vcpu, fastpath);
    +}
    +
    +static void vt_handle_exit_irqoff(struct kvm_vcpu *vcpu)
    +{
    +	if (is_td_vcpu(vcpu))
    +		return tdx_handle_exit_irqoff(vcpu);
    +
    +	vmx_handle_exit_irqoff(vcpu);
    +}
    +
     static void vt_apicv_post_state_restore(struct kvm_vcpu *vcpu)
     {
     	struct pi_desc *pi = vcpu_to_pi_desc(vcpu);
    @@ -371,6 +388,18 @@ static void vt_request_immediate_exit(struct kvm_vcpu *vcpu)
     	vmx_request_immediate_exit(vcpu);
     }

    +static void vt_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
    +			     u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
    +{
    +	if (is_td_vcpu(vcpu)) {
    +		tdx_get_exit_info(vcpu, reason, info1, info2, intr_info,
    +				  error_code);
    +		return;
    +	}
    +
    +	vmx_get_exit_info(vcpu, reason, info1, info2, intr_info, error_code);
    +}
    +
     static int vt_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
     {
     	if (!is_td(kvm))
    @@ -444,7 +473,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {

     	.vcpu_pre_run = vt_vcpu_pre_run,
     	.vcpu_run = vt_vcpu_run,
    -	.handle_exit = vmx_handle_exit,
    +	.handle_exit = vt_handle_exit,
     	.skip_emulated_instruction = vmx_skip_emulated_instruction,
     	.update_emulated_instruction = vmx_update_emulated_instruction,
     	.set_interrupt_shadow = vt_set_interrupt_shadow,
    @@ -479,7 +508,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
     	.set_identity_map_addr = vmx_set_identity_map_addr,
     	.get_mt_mask = vmx_get_mt_mask,

    -	.get_exit_info = vmx_get_exit_info,
    +	.get_exit_info = vt_get_exit_info,

     	.vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,

    @@ -493,7 +522,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
     	.load_mmu_pgd = vt_load_mmu_pgd,

     	.check_intercept = vmx_check_intercept,
    -	.handle_exit_irqoff = vmx_handle_exit_irqoff,
    +	.handle_exit_irqoff = vt_handle_exit_irqoff,

     	.request_immediate_exit = vt_request_immediate_exit,

    diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
    index 39220f63a005..b3fc9d95fffd 100644
    --- a/arch/x86/kvm/vmx/tdx.c
    +++ b/arch/x86/kvm/vmx/tdx.c
    @@ -78,6 +78,26 @@ static __always_inline hpa_t set_hkid_to_hpa(hpa_t pa, u16 hkid)
     	return pa;
     }

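    +/*
    + * The TDX module reports TD-exit information in guest GPRs, which are
    + * read here from the vCPU's register cache: the exit qualification,
    + * the extended exit qualification, the guest physical address and the
    + * VM-exit interruption information.
    + */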
    +static __always_inline unsigned long tdexit_exit_qual(struct kvm_vcpu *vcpu)
    +{
    +	return kvm_rcx_read(vcpu);
    +}
    +
    +static __always_inline unsigned long tdexit_ext_exit_qual(struct kvm_vcpu *vcpu)
    +{
    +	return kvm_rdx_read(vcpu);
    +}
    +
    +static __always_inline unsigned long tdexit_gpa(struct kvm_vcpu *vcpu)
    +{
    +	return kvm_r8_read(vcpu);
    +}
    +
    +static __always_inline unsigned long tdexit_intr_info(struct kvm_vcpu *vcpu)
    +{
    +	return kvm_r9_read(vcpu);
    +}
    +
     static inline bool is_td_vcpu_created(struct vcpu_tdx *tdx)
     {
     	return tdx->tdvpr.added;
    @@ -819,6 +839,25 @@ void tdx_inject_nmi(struct kvm_vcpu *vcpu)
     	td_management_write8(to_tdx(vcpu), TD_VCPU_PEND_NMI, 1);
     }

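    +/*
    + * Exception/NMI and external-interrupt TD exits are forwarded to the
    + * common VMX irqoff handlers while interrupts are still disabled,
    + * using the interruption information saved in guest R9 at TD exit.
    + */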
    +void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
    +{
    +	struct vcpu_tdx *tdx = to_tdx(vcpu);
    +	u16 exit_reason = tdx->exit_reason.basic;
    +
    +	if (exit_reason == EXIT_REASON_EXCEPTION_NMI)
    +		vmx_handle_exception_nmi_irqoff(vcpu, tdexit_intr_info(vcpu));
    +	else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
    +		vmx_handle_external_interrupt_irqoff(vcpu,
    +						     tdexit_intr_info(vcpu));
    +}
    +
    +static int tdx_handle_triple_fault(struct kvm_vcpu *vcpu)
    +{
    +	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
    +	vcpu->mmio_needed = 0;
    +	return 0;
    +}
    +
     void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level)
     {
     	td_vmcs_write64(to_tdx(vcpu), SHARED_EPT_POINTER, root_hpa & PAGE_MASK);
    @@ -1152,6 +1191,48 @@ void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
     	__vmx_deliver_posted_interrupt(vcpu, &tdx->pi_desc, vector);
     }

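    +/*
    + * Placeholder TD-exit handler: failed or non-recoverable exits are
    + * reported to userspace as KVM_EXIT_UNKNOWN (except triple faults,
    + * which become KVM_EXIT_SHUTDOWN); per-exit-reason handling is added
    + * to the switch statement by later patches.
    + */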
    +int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
    +{
    +	union tdx_exit_reason exit_reason = to_tdx(vcpu)->exit_reason;
    +
    +	if (unlikely(exit_reason.non_recoverable || exit_reason.error)) {
    +		if (exit_reason.basic == EXIT_REASON_TRIPLE_FAULT)
    +			return tdx_handle_triple_fault(vcpu);
    +
    +		kvm_pr_unimpl("TD exit 0x%llx, %d hkid 0x%x hkid pa 0x%llx\n",
    +			      exit_reason.full, exit_reason.basic,
    +			      to_kvm_tdx(vcpu->kvm)->hkid,
    +			      set_hkid_to_hpa(0, to_kvm_tdx(vcpu->kvm)->hkid));
    +		goto unhandled_exit;
    +	}
    +
    +	WARN_ON_ONCE(fastpath != EXIT_FASTPATH_NONE);
    +
    +	switch (exit_reason.basic) {
    +	default:
    +		break;
    +	}
    +
    +unhandled_exit:
    +	vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
    +	vcpu->run->hw.hardware_exit_reason = exit_reason.full;
    +	return 0;
    +}
    +
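    +/* Feed TD-exit details to common code, e.g. the kvm_exit tracepoint. */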
    +void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
    +		u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
    +{
    +	struct vcpu_tdx *tdx = to_tdx(vcpu);
    +
    +	*reason = tdx->exit_reason.full;
    +
    +	*info1 = tdexit_exit_qual(vcpu);
    +	*info2 = tdexit_ext_exit_qual(vcpu);
    +
    +	*intr_info = tdexit_intr_info(vcpu);
    +	*error_code = 0;
    +}
    +
     int tdx_dev_ioctl(void __user *argp)
     {
     	struct kvm_tdx_capabilities __user *user_caps;
    diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
    index 0ef1e94d4196..53cf6d5a72a1 100644
    --- a/arch/x86/kvm/vmx/x86_ops.h
    +++ b/arch/x86/kvm/vmx/x86_ops.h
    @@ -147,10 +147,15 @@ void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
     void tdx_vcpu_put(struct kvm_vcpu *vcpu);
     void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
     bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu);
    +void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu);
    +int tdx_handle_exit(struct kvm_vcpu *vcpu,
    +		    enum exit_fastpath_completion fastpath);

     void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
     			   int trig_mode, int vector);
     void tdx_inject_nmi(struct kvm_vcpu *vcpu);
    +void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
    +		u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);

     int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
     int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
    @@ -178,10 +183,16 @@ static inline void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
     static inline void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
     static inline void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) {}
     static inline bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu) { return false; }
    +static inline void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu) {}
    +static inline int tdx_handle_exit(struct kvm_vcpu *vcpu,
    +				  enum exit_fastpath_completion fastpath) { return 0; }

     static inline void tdx_deliver_interrupt(
     	struct kvm_lapic *apic, int delivery_mode, int trig_mode, int vector) {}
     static inline void tdx_inject_nmi(struct kvm_vcpu *vcpu) {}
    +static inline void tdx_get_exit_info(
    +	struct kvm_vcpu *vcpu, u32 *reason, u64 *info1, u64 *info2,
    +	u32 *intr_info, u32 *error_code) {}

     static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
     static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }
    --
    2.25.1