Subject: Re: [PATCH RFC v7 40/64] KVM: SVM: Add KVM_SEV_SNP_LAUNCH_FINISH command
From: Ashish Kalra <ashish.kalra@amd.com>
Date: 19 Dec 2022
Hello Tom,

On 12/19/2022 12:04 PM, Tom Lendacky wrote:
> On 12/14/22 13:40, Michael Roth wrote:
>> From: Brijesh Singh <brijesh.singh@amd.com>
>>
>> The KVM_SEV_SNP_LAUNCH_FINISH command finalizes the cryptographic
>> digest and stores it as the measurement of the guest at launch.
>>
>> While finalizing the launch flow, it also issues the LAUNCH_UPDATE
>> command to encrypt the VMSA pages.
>>
>> For an SNP guest, the VMSA was added to the RMP entry as a guest-owned
>> page and was also removed from the kernel direct map, so flush it only
>> after it has been transitioned back to hypervisor state and restored
>> in the direct map.
>>
>> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
>> Signed-off-by: Harald Hoyer <harald@profian.com>
>> Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
>> Signed-off-by: Michael Roth <michael.roth@amd.com>
>> ---
>>   .../virt/kvm/x86/amd-memory-encryption.rst    |  22 ++++
>>   arch/x86/kvm/svm/sev.c                        | 119 ++++++++++++++++++
>>   include/uapi/linux/kvm.h                      |  14 +++
>>   3 files changed, 155 insertions(+)
>>
>> diff --git a/Documentation/virt/kvm/x86/amd-memory-encryption.rst b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
>> index c94be8e6d657..e4b42aaab1de 100644
>> --- a/Documentation/virt/kvm/x86/amd-memory-encryption.rst
>> +++ b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
>> @@ -513,6 +513,28 @@ Returns: 0 on success, -negative on error
>>   See the SEV-SNP spec for further details on how to build the VMPL permission
>>   mask and page type.
>> +21. KVM_SNP_LAUNCH_FINISH
>> +-------------------------
>> +
>> +After completion of the SNP guest launch flow, the KVM_SNP_LAUNCH_FINISH
>> +command can be issued to make the guest ready for execution.
>> +
>> +Parameters (in): struct kvm_sev_snp_launch_finish
>> +
>> +Returns: 0 on success, -negative on error
>> +
>> +::
>> +
>> +        struct kvm_sev_snp_launch_finish {
>> +                __u64 id_block_uaddr;
>> +                __u64 id_auth_uaddr;
>> +                __u8 id_block_en;
>> +                __u8 auth_key_en;
>> +                __u8 host_data[32];
>
> This is missing the 6 bytes of padding at the end of the struct.
>

Yes, will fix this. The documentation is missing it; the structure
definition in include/uapi/linux/kvm.h does include the padding.

But why do we need this padding?
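
(My guess is that the explicit trailing pad keeps the uapi struct at a
multiple of 8 bytes with no implicit compiler padding, so the layout is
identical for 32-bit and 64-bit userspace, but please confirm. Either
way, the doc struct will gain the missing "__u8 pad[6];" to match the
kvm.h definition.)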

>> +        };
>> +
>> +
>> +See the SEV-SNP specification for further details on launch finish input parameters.
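
As a reference for anyone trying this out: like the other commands in
this file, userspace would issue KVM_SEV_SNP_LAUNCH_FINISH through the
KVM_MEMORY_ENCRYPT_OP ioctl on the VM fd. A rough, untested sketch:

    struct kvm_sev_snp_launch_finish finish = {};
    struct kvm_sev_cmd cmd = {
            .id = KVM_SEV_SNP_LAUNCH_FINISH,
            .data = (__u64)(unsigned long)&finish,
            .sev_fd = sev_fd,    /* fd of the opened /dev/sev device */
    };

    int ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);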
>>   References
>>   ==========
>> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
>> index 379e61a9226a..6f901545bed9 100644
>> --- a/arch/x86/kvm/svm/sev.c
>> +++ b/arch/x86/kvm/svm/sev.c
>> @@ -2243,6 +2243,106 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
>>                         snp_launch_update_gfn_handler, argp);
>>   }
>> +static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
>> +{
>> +    struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
>> +    struct sev_data_snp_launch_update data = {};
>> +    int i, ret;
>> +
>> +    data.gctx_paddr = __psp_pa(sev->snp_context);
>> +    data.page_type = SNP_PAGE_TYPE_VMSA;
>> +
>> +    for (i = 0; i < kvm->created_vcpus; i++) {
>> +        struct vcpu_svm *svm = to_svm(xa_load(&kvm->vcpu_array, i));
>> +        u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT;
>> +
>> +        /* Perform some pre-encryption checks against the VMSA */
>> +        ret = sev_es_sync_vmsa(svm);
>> +        if (ret)
>> +            return ret;
>> +
>> +        /* Transition the VMSA page to a firmware state. */
>> +        ret = rmp_make_private(pfn, -1, PG_LEVEL_4K, sev->asid, true);
>> +        if (ret)
>> +            return ret;
>> +
>> +        /* Issue the SNP command to encrypt the VMSA */
>> +        data.address = __sme_pa(svm->sev_es.vmsa);
>> +        ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE,
>> +                      &data, &argp->error);
>> +        if (ret) {
>> +            snp_page_reclaim(pfn);
>> +            return ret;
>> +        }
>> +
>> +        svm->vcpu.arch.guest_state_protected = true;
>> +    }
>> +
>> +    return 0;
>> +}
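
(For context: each vCPU's VMSA is first synced from the KVM register
state, then transitioned to a firmware-owned page in the RMP, and then
measured and encrypted in place via SNP_LAUNCH_UPDATE with page_type
SNP_PAGE_TYPE_VMSA; on firmware failure the page is reclaimed so the
host can safely access it again.)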
>> +
>> +static int snp_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
>> +{
>> +    struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
>> +    struct kvm_sev_snp_launch_finish params;
>> +    struct sev_data_snp_launch_finish *data;
>> +    void *id_block = NULL, *id_auth = NULL;
>> +    int ret;
>> +
>> +    if (!sev_snp_guest(kvm))
>> +        return -ENOTTY;
>> +
>> +    if (!sev->snp_context)
>> +        return -EINVAL;
>> +
>> +    if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
>> +        return -EFAULT;
>> +
>> +    /* Measure all vCPUs using LAUNCH_UPDATE before finalizing the launch flow. */
>> +    ret = snp_launch_update_vmsa(kvm, argp);
>> +    if (ret)
>> +        return ret;
>> +
>> +    data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
>> +    if (!data)
>> +        return -ENOMEM;
>> +
>> +    if (params.id_block_en) {
>> +        id_block = psp_copy_user_blob(params.id_block_uaddr, KVM_SEV_SNP_ID_BLOCK_SIZE);
>> +        if (IS_ERR(id_block)) {
>> +            ret = PTR_ERR(id_block);
>> +            goto e_free;
>> +        }
>> +
>> +        data->id_block_en = 1;
>> +        data->id_block_paddr = __sme_pa(id_block);
>> +
>> +        id_auth = psp_copy_user_blob(params.id_auth_uaddr, KVM_SEV_SNP_ID_AUTH_SIZE);
>> +        if (IS_ERR(id_auth)) {
>> +            ret = PTR_ERR(id_auth);
>> +            goto e_free_id_block;
>> +        }
>> +
>> +        data->id_auth_paddr = __sme_pa(id_auth);
>> +
>> +        if (params.auth_key_en)
>> +            data->auth_key_en = 1;
>> +    }
>> +
>> +    data->gctx_paddr = __psp_pa(sev->snp_context);
>
> This is missing the copying of the params.host_data field into the
> data->host_data field. This is needed so that the host_data shows up in
> the attestation report.
>

Yes, will fix this.
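
Something along these lines, right after the gctx_paddr assignment
(untested sketch):

    /* Pass the user-supplied host_data through to the firmware so that
     * it shows up in the attestation report.
     */
    memcpy(data->host_data, params.host_data, sizeof(params.host_data));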

Thanks,
Ashish

> Thanks,
> Tom
>
>> +    ret = sev_issue_cmd(kvm, SEV_CMD_SNP_LAUNCH_FINISH, data, &argp->error);
>> +
>> +    kfree(id_auth);
>> +
>> +e_free_id_block:
>> +    kfree(id_block);
>> +
>> +e_free:
>> +    kfree(data);
>> +
>> +    return ret;
>> +}
>> +
>>   int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
>>   {
>>       struct kvm_sev_cmd sev_cmd;
>> @@ -2339,6 +2439,9 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
>>       case KVM_SEV_SNP_LAUNCH_UPDATE:
>>           r = snp_launch_update(kvm, &sev_cmd);
>>           break;
>> +    case KVM_SEV_SNP_LAUNCH_FINISH:
>> +        r = snp_launch_finish(kvm, &sev_cmd);
>> +        break;
>>       default:
>>           r = -EINVAL;
>>           goto out;
>> @@ -2794,11 +2897,27 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
>>       svm = to_svm(vcpu);
>> +    /*
>> +     * If it's an SNP guest, the VMSA was added to the RMP entry as
>> +     * a guest-owned page. Transition the page back to hypervisor
>> +     * state before releasing it to the system.
>> +     * The page was also removed from the kernel direct map, so
>> +     * flush it only after it has been transitioned back to
>> +     * hypervisor state and restored in the direct map.
>> +     */
>> +    if (sev_snp_guest(vcpu->kvm)) {
>> +        u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT;
>> +
>> +        if (host_rmp_make_shared(pfn, PG_LEVEL_4K, true))
>> +            goto skip_vmsa_free;
>> +    }
>> +
>>       if (vcpu->arch.guest_state_protected)
>>           sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
>>       __free_page(virt_to_page(svm->sev_es.vmsa));
>> +skip_vmsa_free:
>>       if (svm->sev_es.ghcb_sa_free)
>>           kvfree(svm->sev_es.ghcb_sa);
>>   }
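
(Note on the error path above: if host_rmp_make_shared() fails, the
VMSA page is intentionally leaked via skip_vmsa_free rather than
returned to the page allocator, since freeing a page that is still
guest-owned in the RMP would risk RMP faults when the host later
touches it.)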
>> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
>> index 9b6c95cc62a8..c468adc1f147 100644
>> --- a/include/uapi/linux/kvm.h
>> +++ b/include/uapi/linux/kvm.h
>> @@ -1942,6 +1942,7 @@ enum sev_cmd_id {
>>       KVM_SEV_SNP_INIT,
>>       KVM_SEV_SNP_LAUNCH_START,
>>       KVM_SEV_SNP_LAUNCH_UPDATE,
>> +    KVM_SEV_SNP_LAUNCH_FINISH,
>>       KVM_SEV_NR_MAX,
>>   };
>> @@ -2076,6 +2077,19 @@ struct kvm_sev_snp_launch_update {
>>       __u8 vmpl1_perms;
>>   };
>> +#define KVM_SEV_SNP_ID_BLOCK_SIZE    96
>> +#define KVM_SEV_SNP_ID_AUTH_SIZE    4096
>> +#define KVM_SEV_SNP_FINISH_DATA_SIZE    32
>> +
>> +struct kvm_sev_snp_launch_finish {
>> +    __u64 id_block_uaddr;
>> +    __u64 id_auth_uaddr;
>> +    __u8 id_block_en;
>> +    __u8 auth_key_en;
>> +    __u8 host_data[KVM_SEV_SNP_FINISH_DATA_SIZE];
>> +    __u8 pad[6];
>> +};
>> +
>>   #define KVM_DEV_ASSIGN_ENABLE_IOMMU    (1 << 0)
>>   #define KVM_DEV_ASSIGN_PCI_2_3        (1 << 1)
>>   #define KVM_DEV_ASSIGN_MASK_INTX    (1 << 2)
