Subject: [PATCH RFC v8 09/56] KVM: SEV: Handle memory backed by restricted memfd
Date: 20 Feb 2023
From: Nikunj A Dadhania <nikunj@amd.com>

Do not pin guest memory that is backed by a restrictedmem backend, as
pages in restrictedmem are already pinned. Instead, populate the pages
array for such guests with the already-pinned pages provided by the
restrictedmem backend.

Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
---
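For context, a condensed, hypothetical sketch of how a typical caller
(e.g. the LAUNCH_UPDATE_DATA path) consumes this API. The callsite
details and the trailing 'write' argument are assumptions (the latter
inferred from the FOLL_WRITE usage in the diff); only
sev_memory_get_pages(), sev_clflush_pages() and sev_memory_put_pages()
are taken from the diff below:

static int example_launch_update(struct kvm *kvm, unsigned long uaddr,
				 unsigned long size)
{
	unsigned long npages;
	struct page **pages;

	/*
	 * Same call for both flavors: legacy guests have their pages
	 * pinned via pin_user_pages_fast(), restrictedmem-backed guests
	 * get the already-pinned pages via restrictedmem get_pfn.
	 */
	pages = sev_memory_get_pages(kvm, uaddr, size, &npages, 1);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* Flush caches before the memory is encrypted in place. */
	sev_clflush_pages(pages, npages);

	/* ... issue the SEV firmware command against 'pages' here ... */

	sev_memory_put_pages(kvm, pages, npages);
	return 0;
}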
 arch/x86/kvm/svm/sev.c | 68 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 58 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 523c78bbff3f..ad9b29ff4590 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -383,9 +383,46 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return ret;
 }
 
+static int sev_private_mem_get_pages_handler(struct kvm *kvm, struct kvm_gfn_range *range,
+					     void *data)
+{
+	struct kvm_memory_slot *memslot = range->slot;
+	struct page **pages = data;
+	int ret = 0, i = 0;
+	kvm_pfn_t pfn;
+	gfn_t gfn;
+
+	for (gfn = range->start; gfn < range->end; gfn++) {
+		int order;
+
+		ret = kvm_restrictedmem_get_pfn(memslot, gfn, &pfn, &order);
+		if (ret)
+			return ret;
+
+		if (is_error_noslot_pfn(pfn))
+			return -EFAULT;
+
+		pages[i++] = pfn_to_page(pfn);
+	}
+
+	return ret;
+}
+
+static int sev_private_mem_get_pages(struct kvm *kvm, unsigned long addr,
+				     unsigned long size, unsigned long npages,
+				     struct page **pages)
+{
+	return kvm_vm_do_hva_range_op(kvm, addr, addr + size,
+				      sev_private_mem_get_pages_handler, pages);
+}
+
 /*
  * Legacy SEV guest pin the pages and return the array populated with pinned
  * pages.
+ *
+ * For SEV guests using the restricted memfd backend, pages are already marked
+ * unmovable and unevictable. Populate the pages array for these guests using
+ * restrictedmem get_pfn.
  */
 static struct page **sev_memory_get_pages(struct kvm *kvm, unsigned long uaddr,
 					  unsigned long ulen, unsigned long *n,
@@ -393,7 +430,7 @@ static struct page **sev_memory_get_pages(struct kvm *kvm, unsigned long uaddr,
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 	unsigned long npages, size;
-	int npinned;
+	int npinned = 0;
 	unsigned long locked, lock_limit;
 	struct page **pages;
 	unsigned long first, last;
@@ -429,16 +466,25 @@ static struct page **sev_memory_get_pages(struct kvm *kvm, unsigned long uaddr,
 	if (!pages)
 		return ERR_PTR(-ENOMEM);
 
-	/* Pin the user virtual address. */
-	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
-	if (npinned != npages) {
-		pr_err("SEV: Failure locking %lu pages.\n", npages);
-		ret = -ENOMEM;
-		goto err;
+	if (kvm_arch_has_private_mem(kvm)) {
+		/* Get the PFNs from the restricted memfd backend. */
+		if (sev_private_mem_get_pages(kvm, uaddr, ulen, npages, pages)) {
+			pr_err("%s: ERROR: unable to find slot for uaddr %lx\n", __func__, uaddr);
+			ret = -ENOMEM;
+			goto err;
+		}
+	} else {
+		/* Pin the user virtual address. */
+		npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+		if (npinned != npages) {
+			pr_err("SEV: Failure locking %lu pages.\n", npages);
+			ret = -ENOMEM;
+			goto err;
+		}
+		sev->pages_locked = locked;
 	}
 
 	*n = npages;
-	sev->pages_locked = locked;
 
 	return pages;
 
@@ -455,9 +501,11 @@ static void sev_memory_put_pages(struct kvm *kvm, struct page **pages,
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 
-	unpin_user_pages(pages, npages);
+	if (!kvm_arch_has_private_mem(kvm)) {
+		unpin_user_pages(pages, npages);
+		sev->pages_locked -= npages;
+	}
 	kvfree(pages);
-	sev->pages_locked -= npages;
 }
 
 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
--
2.25.1