Subject: [PATCH RFC v7 33/64] KVM: SVM: Add support to handle AP reset MSR protocol
Date: 14 Dec 2022
    From: Tom Lendacky <thomas.lendacky@amd.com>

    Add support for AP Reset Hold being invoked using the GHCB MSR protocol,
    available in version 2 of the GHCB specification.
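
    For reviewers, a rough guest-side sketch of the MSR-protocol flow that
    this patch handles on the KVM side. Illustration only, not part of the
    patch: the REQ/RESP values and the field split (GHCBInfo in bits 11:0,
    GHCBData in bits 63:12 of the GHCB MSR) match the defines below, but
    ghcb_msr_read()/ghcb_msr_write()/vmgexit() are placeholder helpers, not
    real kernel interfaces.

    #include <linux/bits.h>
    #include <linux/types.h>

    #define GHCB_MSR_INFO_MASK			GENMASK_ULL(11, 0)
    #define GHCB_MSR_AP_RESET_HOLD_REQ		0x006
    #define GHCB_MSR_AP_RESET_HOLD_RESP		0x007
    #define GHCB_MSR_AP_RESET_HOLD_RESULT_POS	12
    #define GHCB_MSR_AP_RESET_HOLD_RESULT_MASK	GENMASK_ULL(51, 0)

    /* Placeholder accessors for the GHCB MSR (0xc0010130) and VMGEXIT. */
    u64 ghcb_msr_read(void);
    void ghcb_msr_write(u64 val);
    void vmgexit(void);

    /*
     * Park the AP until a SIPI arrives.  Returns true if the response
     * carries a non-zero result, i.e. a SIPI was actually delivered.
     */
    static bool ap_reset_hold_msr_proto(void)
    {
    	u64 val;

    	/* Request: GHCBInfo = AP_RESET_HOLD_REQ, no GHCBData needed. */
    	ghcb_msr_write(GHCB_MSR_AP_RESET_HOLD_REQ);
    	vmgexit();

    	/* Response: info field must be AP_RESET_HOLD_RESP ... */
    	val = ghcb_msr_read();
    	if ((val & GHCB_MSR_INFO_MASK) != GHCB_MSR_AP_RESET_HOLD_RESP)
    		return false;

    	/* ... and the result (GHCBData) is non-zero only after a SIPI. */
    	return !!((val >> GHCB_MSR_AP_RESET_HOLD_RESULT_POS) &
    		  GHCB_MSR_AP_RESET_HOLD_RESULT_MASK);
    }

    This mirrors the host side in sev_vcpu_deliver_sipi_vector() below: the
    result field is preset to 0 when the request is handled and only set to
    a non-zero value when the SIPI is delivered.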

    Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
    Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
    Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
    Signed-off-by: Michael Roth <michael.roth@amd.com>
    ---
 arch/x86/include/asm/sev-common.h |  2 ++
 arch/x86/kvm/svm/sev.c            | 56 ++++++++++++++++++++++++++-----
 arch/x86/kvm/svm/svm.h            |  1 +
 3 files changed, 51 insertions(+), 8 deletions(-)

    diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
    index b8357d6ecd47..e15548d88f2a 100644
    --- a/arch/x86/include/asm/sev-common.h
    +++ b/arch/x86/include/asm/sev-common.h
@@ -56,6 +56,8 @@
 /* AP Reset Hold */
 #define GHCB_MSR_AP_RESET_HOLD_REQ	0x006
 #define GHCB_MSR_AP_RESET_HOLD_RESP	0x007
+#define GHCB_MSR_AP_RESET_HOLD_RESULT_POS	12
+#define GHCB_MSR_AP_RESET_HOLD_RESULT_MASK	GENMASK_ULL(51, 0)
 
 /* GHCB GPA Register */
 #define GHCB_MSR_REG_GPA_REQ	0x012
    diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
    index 6579ed218f6a..244c58bd3de7 100644
    --- a/arch/x86/kvm/svm/sev.c
    +++ b/arch/x86/kvm/svm/sev.c
@@ -57,6 +57,10 @@ module_param_named(sev_es, sev_es_enabled, bool, 0444);
 #define sev_es_enabled false
 #endif /* CONFIG_KVM_AMD_SEV */
 
+#define AP_RESET_HOLD_NONE		0
+#define AP_RESET_HOLD_NAE_EVENT		1
+#define AP_RESET_HOLD_MSR_PROTO		2
+
 static u8 sev_enc_bit;
 static DECLARE_RWSEM(sev_deactivate_lock);
 static DEFINE_MUTEX(sev_bitmap_lock);
@@ -2698,6 +2702,9 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 
 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
 {
+	/* Clear any indication that the vCPU is in a type of AP Reset Hold */
+	svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NONE;
+
 	if (!svm->sev_es.ghcb)
 		return;

@@ -2910,6 +2917,22 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
 				  GHCB_MSR_INFO_POS);
 		break;
 	}
+	case GHCB_MSR_AP_RESET_HOLD_REQ:
+		svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_MSR_PROTO;
+		ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
+
+		/*
+		 * Preset the result to a non-SIPI return and then only set
+		 * the result to non-zero when delivering a SIPI.
+		 */
+		set_ghcb_msr_bits(svm, 0,
+				  GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
+				  GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
+
+		set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP,
+				  GHCB_MSR_INFO_MASK,
+				  GHCB_MSR_INFO_POS);
+		break;
 	case GHCB_MSR_TERM_REQ: {
 		u64 reason_set, reason_code;

@@ -3009,6 +3032,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
 		break;
 	case SVM_VMGEXIT_AP_HLT_LOOP:
+		svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NAE_EVENT;
 		ret = kvm_emulate_ap_reset_hold(vcpu);
 		break;
 	case SVM_VMGEXIT_AP_JUMP_TABLE: {
@@ -3169,15 +3193,31 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 		return;
 	}
 
-	/*
-	 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
-	 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
-	 * non-zero value.
-	 */
-	if (!svm->sev_es.ghcb)
-		return;
+	/* Subsequent SIPI */
+	switch (svm->sev_es.ap_reset_hold_type) {
+	case AP_RESET_HOLD_NAE_EVENT:
+		/*
+		 * Return from an AP Reset Hold VMGEXIT, where the guest will
+		 * set the CS and RIP. Set SW_EXIT_INFO_2 to a non-zero value.
+		 */
+		ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
+		break;
+	case AP_RESET_HOLD_MSR_PROTO:
+		/*
+		 * Return from an AP Reset Hold VMGEXIT, where the guest will
+		 * set the CS and RIP. Set GHCB data field to a non-zero value.
+		 */
+		set_ghcb_msr_bits(svm, 1,
+				  GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
+				  GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
 
-	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
+		set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP,
+				  GHCB_MSR_INFO_MASK,
+				  GHCB_MSR_INFO_POS);
+		break;
+	default:
+		break;
+	}
 }
 
 int sev_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *private_fault)
    diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
    index c760ec51a910..cb9da04e745a 100644
    --- a/arch/x86/kvm/svm/svm.h
    +++ b/arch/x86/kvm/svm/svm.h
@@ -198,6 +198,7 @@ struct vcpu_sev_es_state {
 	struct ghcb *ghcb;
 	struct kvm_host_map ghcb_map;
 	bool received_first_sipi;
+	unsigned int ap_reset_hold_type;
 
 	/* SEV-ES scratch area support */
 	void *ghcb_sa;
    --
    2.25.1