Subject: Re: [PATCH RFC v7 52/64] KVM: SVM: Provide support for SNP_GUEST_REQUEST NAE event


On 10/1/23 10:41, Kalra, Ashish wrote:
> On 1/8/2023 9:33 PM, Alexey Kardashevskiy wrote:
>> On 15/12/22 06:40, Michael Roth wrote:
>>> From: Brijesh Singh <brijesh.singh@amd.com>
>>>
>>> Version 2 of the GHCB specification added support for two SNP Guest
>>> Request Message NAE events. These events allow an SEV-SNP guest to
>>> make requests to the SEV-SNP firmware through the hypervisor using the
>>> SNP_GUEST_REQUEST API defined in the SEV-SNP firmware specification.
>>>
>>> SNP_EXT_GUEST_REQUEST is similar to SNP_GUEST_REQUEST, with the
>>> difference of an additional certificate blob that can be passed through
>>> the SNP_SET_CONFIG ioctl defined in the CCP driver. The CCP driver
>>> provides snp_guest_ext_guest_request(), which is used by KVM to get
>>> both the report and the certificate data at once.
>>>
>>> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
>>> Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
>>> Signed-off-by: Michael Roth <michael.roth@amd.com>
>>> ---
>>>   arch/x86/kvm/svm/sev.c | 185 +++++++++++++++++++++++++++++++++++++++--
>>>   arch/x86/kvm/svm/svm.h |   2 +
>>>   2 files changed, 181 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
>>> index 5f2b2092cdae..18efa70553c2 100644
>>> --- a/arch/x86/kvm/svm/sev.c
>>> +++ b/arch/x86/kvm/svm/sev.c
>>> @@ -331,6 +331,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
>>>           if (ret)
>>>               goto e_free;
>>> +        mutex_init(&sev->guest_req_lock);
>>>           ret = sev_snp_init(&argp->error, false);
>>>       } else {
>>>           ret = sev_platform_init(&argp->error);
>>> @@ -2051,23 +2052,34 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
>>>    */
>>>   static void *snp_context_create(struct kvm *kvm, struct kvm_sev_cmd *argp)
>>>   {
>>> +    struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
>>>       struct sev_data_snp_addr data = {};
>>> -    void *context;
>>> +    void *context, *certs_data;
>>>       int rc;
>>> +    /* Allocate memory used for the certs data in SNP guest request */
>>> +    certs_data = kzalloc(SEV_FW_BLOB_MAX_SIZE, GFP_KERNEL_ACCOUNT);
>>> +    if (!certs_data)
>>> +        return NULL;
>>> +
>>>       /* Allocate memory for context page */
>>>       context = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
>>>       if (!context)
>>> -        return NULL;
>>> +        goto e_free;
>>>       data.gctx_paddr = __psp_pa(context);
>>>       rc = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_GCTX_CREATE, &data, &argp->error);
>>> -    if (rc) {
>>> -        snp_free_firmware_page(context);
>>> -        return NULL;
>>> -    }
>>> +    if (rc)
>>> +        goto e_free;
>>> +
>>> +    sev->snp_certs_data = certs_data;
>>>       return context;
>>> +
>>> +e_free:
>>> +    snp_free_firmware_page(context);
>>> +    kfree(certs_data);
>>> +    return NULL;
>>>   }
>>>   static int snp_bind_asid(struct kvm *kvm, int *error)
>>> @@ -2653,6 +2665,8 @@ static int snp_decommission_context(struct kvm *kvm)
>>>       snp_free_firmware_page(sev->snp_context);
>>>       sev->snp_context = NULL;
>>> +    kfree(sev->snp_certs_data);
>>> +
>>>       return 0;
>>>   }
>>> @@ -3174,6 +3188,8 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm, u64 *exit_code)
>>>       case SVM_VMGEXIT_UNSUPPORTED_EVENT:
>>>       case SVM_VMGEXIT_HV_FEATURES:
>>>       case SVM_VMGEXIT_PSC:
>>> +    case SVM_VMGEXIT_GUEST_REQUEST:
>>> +    case SVM_VMGEXIT_EXT_GUEST_REQUEST:
>>>           break;
>>>       default:
>>>           reason = GHCB_ERR_INVALID_EVENT;
>>> @@ -3396,6 +3412,149 @@ static int snp_complete_psc(struct kvm_vcpu *vcpu)
>>>       return 1;
>>>   }
>>> +static unsigned long snp_setup_guest_buf(struct vcpu_svm *svm,
>>> +                     struct sev_data_snp_guest_request *data,
>>> +                     gpa_t req_gpa, gpa_t resp_gpa)
>>> +{
>>> +    struct kvm_vcpu *vcpu = &svm->vcpu;
>>> +    struct kvm *kvm = vcpu->kvm;
>>> +    kvm_pfn_t req_pfn, resp_pfn;
>>> +    struct kvm_sev_info *sev;
>>> +
>>> +    sev = &to_kvm_svm(kvm)->sev_info;
>>> +
>>> +    if (!IS_ALIGNED(req_gpa, PAGE_SIZE) || !IS_ALIGNED(resp_gpa, PAGE_SIZE))
>>> +        return SEV_RET_INVALID_PARAM;
>>> +
>>> +    req_pfn = gfn_to_pfn(kvm, gpa_to_gfn(req_gpa));
>>> +    if (is_error_noslot_pfn(req_pfn))
>>> +        return SEV_RET_INVALID_ADDRESS;
>>> +
>>> +    resp_pfn = gfn_to_pfn(kvm, gpa_to_gfn(resp_gpa));
>>> +    if (is_error_noslot_pfn(resp_pfn))
>>> +        return SEV_RET_INVALID_ADDRESS;
>>> +
>>> +    if (rmp_make_private(resp_pfn, 0, PG_LEVEL_4K, 0, true))
>>> +        return SEV_RET_INVALID_ADDRESS;
>>> +
>>> +    data->gctx_paddr = __psp_pa(sev->snp_context);
>>> +    data->req_paddr = __sme_set(req_pfn << PAGE_SHIFT);
>>> +    data->res_paddr = __sme_set(resp_pfn << PAGE_SHIFT);
>>> +
>>> +    return 0;
>>> +}
>>> +
>>> +static void snp_cleanup_guest_buf(struct sev_data_snp_guest_request *data, unsigned long *rc)
>>> +{
>>> +    u64 pfn = __sme_clr(data->res_paddr) >> PAGE_SHIFT;
>>> +    int ret;
>>> +
>>> +    ret = snp_page_reclaim(pfn);
>>> +    if (ret)
>>> +        *rc = SEV_RET_INVALID_ADDRESS;
>>> +
>>> +    ret = rmp_make_shared(pfn, PG_LEVEL_4K);
>>> +    if (ret)
>>> +        *rc = SEV_RET_INVALID_ADDRESS;
>>> +}
>>> +
>>> +static void snp_handle_guest_request(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa)
>>> +{
>>> +    struct sev_data_snp_guest_request data = {0};
>>> +    struct kvm_vcpu *vcpu = &svm->vcpu;
>>> +    struct kvm *kvm = vcpu->kvm;
>>> +    struct kvm_sev_info *sev;
>>> +    unsigned long rc;
>>> +    int err;
>>> +
>>> +    if (!sev_snp_guest(vcpu->kvm)) {
>>> +        rc = SEV_RET_INVALID_GUEST;
>>> +        goto e_fail;
>>> +    }
>>> +
>>> +    sev = &to_kvm_svm(kvm)->sev_info;
>>> +
>>> +    mutex_lock(&sev->guest_req_lock);
>>> +
>>> +    rc = snp_setup_guest_buf(svm, &data, req_gpa, resp_gpa);
>>> +    if (rc)
>>> +        goto unlock;
>>> +
>>> +    rc = sev_issue_cmd(kvm, SEV_CMD_SNP_GUEST_REQUEST, &data, &err);
>>
>>
>> This one goes via sev_issue_cmd_external_user() and uses sev->fd...
>>
>>> +    if (rc)
>>> +        /* use the firmware error code */
>>> +        rc = err;
>>> +
>>> +    snp_cleanup_guest_buf(&data, &rc);
>>> +
>>> +unlock:
>>> +    mutex_unlock(&sev->guest_req_lock);
>>> +
>>> +e_fail:
>>> +    svm_set_ghcb_sw_exit_info_2(vcpu, rc);
>>> +}
>>> +
>>> +static void snp_handle_ext_guest_request(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa)
>>> +{
>>> +    struct sev_data_snp_guest_request req = {0};
>>> +    struct kvm_vcpu *vcpu = &svm->vcpu;
>>> +    struct kvm *kvm = vcpu->kvm;
>>> +    unsigned long data_npages;
>>> +    struct kvm_sev_info *sev;
>>> +    unsigned long rc, err;
>>> +    u64 data_gpa;
>>> +
>>> +    if (!sev_snp_guest(vcpu->kvm)) {
>>> +        rc = SEV_RET_INVALID_GUEST;
>>> +        goto e_fail;
>>> +    }
>>> +
>>> +    sev = &to_kvm_svm(kvm)->sev_info;
>>> +
>>> +    data_gpa = vcpu->arch.regs[VCPU_REGS_RAX];
>>> +    data_npages = vcpu->arch.regs[VCPU_REGS_RBX];
>>> +
>>> +    if (!IS_ALIGNED(data_gpa, PAGE_SIZE)) {
>>> +        rc = SEV_RET_INVALID_ADDRESS;
>>> +        goto e_fail;
>>> +    }
>>> +
>>> +    mutex_lock(&sev->guest_req_lock);
>>> +
>>> +    rc = snp_setup_guest_buf(svm, &req, req_gpa, resp_gpa);
>>> +    if (rc)
>>> +        goto unlock;
>>> +
>>> +    rc = snp_guest_ext_guest_request(&req, (unsigned long)sev->snp_certs_data,
>>> +                     &data_npages, &err);
>>
>> but this one does not and jumps straight to
>> drivers/crypto/ccp/sev-dev.c, ignoring sev->fd. Why the difference? Can
>> these two be unified? sev_issue_cmd_external_user() only checks that the
>> fd is /dev/sev, which is hardly useful.
>>
>> "[PATCH RFC v7 32/64] crypto: ccp: Provide APIs to query extended
>> attestation report" added this one.
>
> SNP_EXT_GUEST_REQUEST additionally returns a certificate blob and that's
> why it goes through the CCP driver interface
> snp_guest_ext_guest_request() that is used to get both the report and
> certificate data/blob at the same time.

True. I thought, though, that this calls for extending sev_issue_cmd() to
take care of these extra parameters rather than just skipping sev->fd.
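
Something along these lines is what I have in mind (just a sketch, the
wrapper name is made up, and it would sit in drivers/crypto/ccp/sev-dev.c
next to sev_issue_cmd_external_user()):

===
/*
 * Hypothetical sketch only: a filep-checked wrapper for the extended
 * guest request, mirroring sev_issue_cmd_external_user(), so that KVM
 * keeps passing (and validating) sev->fd on this path as well.
 */
int snp_guest_ext_guest_request_external_user(struct file *filep,
                                              struct sev_data_snp_guest_request *data,
                                              unsigned long vaddr,
                                              unsigned long *npages,
                                              unsigned long *fw_err)
{
        /* Same sanity check as sev_issue_cmd_external_user() */
        if (!filep || filep->f_op != &sev_fops)
                return -EBADF;

        return snp_guest_ext_guest_request(data, vaddr, npages, fw_err);
}
EXPORT_SYMBOL_GPL(snp_guest_ext_guest_request_external_user);
===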


> All the FW API calls on the KVM side go through the sev_issue_cmd() and
> sev_issue_cmd_external_user() interfaces, and those I believe use sev->fd
> more as a sanity check.

Does not look like it:

https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/crypto/ccp/sev-dev.c?h=v6.2-rc3#n1290

===
int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
                                void *data, int *error)
{
        if (!filep || filep->f_op != &sev_fops)
                return -EBADF;

        return sev_do_cmd(cmd, data, error);
}
EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
===
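
And the KVM side only uses sev->fd to look the file up again before that
check (quoting arch/x86/kvm/svm/sev.c from memory, so the exact lines may
differ slightly):

===
static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
        struct fd f;
        int ret;

        f = fdget(fd);
        if (!f.file)
                return -EBADF;

        ret = sev_issue_cmd_external_user(f.file, id, data, error);

        fdput(f);
        return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return __sev_issue_cmd(sev->fd, id, data, error);
}
===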

The only "more" is that it requires sev->fd to be a valid open fd, what
is the value in that? I may easily miss the bigger picture here. Thanks,


> Thanks,
> Ashish
>
>>
>> Besides, is sev->fd really needed in the sev struct at all? Thanks,
>>
>>
>>> +    if (rc) {
>>> +        /*
>>> +         * If buffer length is small then return the expected
>>> +         * length in rbx.
>>> +         */
>>> +        if (err == SNP_GUEST_REQ_INVALID_LEN)
>>> +            vcpu->arch.regs[VCPU_REGS_RBX] = data_npages;
>>> +
>>> +        /* pass the firmware error code */
>>> +        rc = err;
>>> +        goto cleanup;
>>> +    }
>>> +
>>> +    /* Copy the certificate blob in the guest memory */
>>> +    if (data_npages &&
>>> +        kvm_write_guest(kvm, data_gpa, sev->snp_certs_data, data_npages << PAGE_SHIFT))
>>> +        rc = SEV_RET_INVALID_ADDRESS;
>>> +
>>> +cleanup:
>>> +    snp_cleanup_guest_buf(&req, &rc);
>>> +
>>> +unlock:
>>> +    mutex_unlock(&sev->guest_req_lock);
>>> +
>>> +e_fail:
>>> +    svm_set_ghcb_sw_exit_info_2(vcpu, rc);
>>> +}
>>> +
>>>   static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
>>>   {
>>>       struct vmcb_control_area *control = &svm->vmcb->control;
>>> @@ -3629,6 +3788,20 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
>>>           vcpu->run->vmgexit.ghcb_msr = ghcb_gpa;
>>>           vcpu->arch.complete_userspace_io = snp_complete_psc;
>>>           break;
>>> +    case SVM_VMGEXIT_GUEST_REQUEST: {
>>> +        snp_handle_guest_request(svm, control->exit_info_1, control->exit_info_2);
>>> +
>>> +        ret = 1;
>>> +        break;
>>> +    }
>>> +    case SVM_VMGEXIT_EXT_GUEST_REQUEST: {
>>> +        snp_handle_ext_guest_request(svm,
>>> +                         control->exit_info_1,
>>> +                         control->exit_info_2);
>>> +
>>> +        ret = 1;
>>> +        break;
>>> +    }
>>>       case SVM_VMGEXIT_UNSUPPORTED_EVENT:
>>>           vcpu_unimpl(vcpu,
>>>               "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
>>> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
>>> index 12b9f4d539fb..7c0f9d00950f 100644
>>> --- a/arch/x86/kvm/svm/svm.h
>>> +++ b/arch/x86/kvm/svm/svm.h
>>> @@ -101,6 +101,8 @@ struct kvm_sev_info {
>>>       u64 snp_init_flags;
>>>       void *snp_context;      /* SNP guest context page */
>>>       spinlock_t psc_lock;
>>> +    void *snp_certs_data;
>>> +    struct mutex guest_req_lock;
>>>   };
>>>   struct kvm_svm {
>>

--
Alexey
