Subject: Re: [PATCH v3 07/15] KVM: Refactor error handling for setting memory region
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
On 10/25/19 1:07 AM, Sean Christopherson wrote:
> Replace a big pile o' gotos with returns to make it more obvious what
> error code is being returned, and to prepare for refactoring the
> functional, i.e. post-checks, portion of __kvm_set_memory_region().
>
> Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
> virt/kvm/kvm_main.c | 40 ++++++++++++++++++----------------------
> 1 file changed, 18 insertions(+), 22 deletions(-)
>
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index a43902d9036d..e2f47d60f696 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -942,34 +942,33 @@ int __kvm_set_memory_region(struct kvm *kvm,
>
> r = check_memory_region_flags(mem);
> if (r)
> - goto out;
> + return r;
>
> - r = -EINVAL;
> as_id = mem->slot >> 16;
> id = (u16)mem->slot;
>
> /* General sanity checks */
> if (mem->memory_size & (PAGE_SIZE - 1))
> - goto out;
> + return -EINVAL;
> if (mem->guest_phys_addr & (PAGE_SIZE - 1))
> - goto out;
> + return -EINVAL;
> /* We can read the guest memory with __xxx_user() later on. */
> if ((id < KVM_USER_MEM_SLOTS) &&
> ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
> !access_ok((void __user *)(unsigned long)mem->userspace_addr,
> mem->memory_size)))
> - goto out;
> + return -EINVAL;
> if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
> - goto out;
> + return -EINVAL;
> if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
> - goto out;
> + return -EINVAL;
>
> slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
> base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
> npages = mem->memory_size >> PAGE_SHIFT;
>
> if (npages > KVM_MEM_MAX_NR_PAGES)
> - goto out;
> + return -EINVAL;
>
> new = old = *slot;
>
> @@ -986,20 +985,18 @@ int __kvm_set_memory_region(struct kvm *kvm,
> if ((new.userspace_addr != old.userspace_addr) ||
> (npages != old.npages) ||
> ((new.flags ^ old.flags) & KVM_MEM_READONLY))
> - goto out;
> + return -EINVAL;
>
> if (base_gfn != old.base_gfn)
> change = KVM_MR_MOVE;
> else if (new.flags != old.flags)
> change = KVM_MR_FLAGS_ONLY;
> - else { /* Nothing to change. */
> - r = 0;
> - goto out;
> - }
> + else /* Nothing to change. */
> + return 0;
> }
> } else {
> if (!old.npages)
> - goto out;
> + return -EINVAL;
>
> change = KVM_MR_DELETE;
> new.base_gfn = 0;
> @@ -1008,29 +1005,29 @@ int __kvm_set_memory_region(struct kvm *kvm,
>
> if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
> /* Check for overlaps */
> - r = -EEXIST;
> kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
> if (slot->id == id)
> continue;
> if (!((base_gfn + npages <= slot->base_gfn) ||
> (base_gfn >= slot->base_gfn + slot->npages)))
> - goto out;
> + return -EEXIST;
> }
> }
>
> - r = -ENOMEM;
> -
> /* Allocate/free page dirty bitmap as needed */
> if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
> new.dirty_bitmap = NULL;
> else if (!new.dirty_bitmap) {
> - if (kvm_create_dirty_bitmap(&new) < 0)
> - goto out;
> + r = kvm_create_dirty_bitmap(&new);
> + if (r)
> + return r;

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>

> }
>
> slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
> - if (!slots)
> + if (!slots) {
> + r = -ENOMEM;
> goto out_bitmap;
> + }
> memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
>
> if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
> @@ -1081,7 +1078,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
> out_bitmap:
> if (new.dirty_bitmap && !old.dirty_bitmap)
> kvm_destroy_dirty_bitmap(&new);
> -out:
> return r;
> }
> EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
>
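A side note for readers skimming the archive: the shape of the conversion is easy to see in a tiny standalone sketch. Everything below is hypothetical (validate_region() and setup_region() are made-up helpers, not KVM code); it only mirrors the pattern the patch applies, namely that pure validity checks return their error code right at the check, while a goto label is kept only where there is real cleanup to unwind, like the remaining out_bitmap label above.

/*
 * Illustration only -- hypothetical helpers, not the KVM code quoted above.
 * Pure checks return their error directly; a goto survives only for the
 * path that must free what was already allocated.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define REGION_ALIGN 4096UL

struct region {
	unsigned long addr;
	unsigned long size;
	void *bitmap;
};

/* Checks with nothing to unwind: the error code is visible at each check. */
static int validate_region(const struct region *r)
{
	if (r->size & (REGION_ALIGN - 1))
		return -EINVAL;
	if (r->addr & (REGION_ALIGN - 1))
		return -EINVAL;
	if (r->addr + r->size < r->addr)	/* wrap-around */
		return -EINVAL;
	return 0;
}

/* One goto remains, because this error path has actual cleanup to do. */
static int setup_region(struct region *r)
{
	void *scratch;
	int ret;

	ret = validate_region(r);
	if (ret)
		return ret;

	r->bitmap = calloc(1, r->size / REGION_ALIGN / 8 + 1);
	if (!r->bitmap)
		return -ENOMEM;

	scratch = malloc(64);
	if (!scratch) {
		ret = -ENOMEM;
		goto out_bitmap;	/* must undo the bitmap allocation */
	}

	/* ... commit the region here ... */
	free(scratch);
	return 0;

out_bitmap:
	free(r->bitmap);
	r->bitmap = NULL;
	return ret;
}

int main(void)
{
	struct region r = { .addr = 0x10000, .size = 8 * REGION_ALIGN, .bitmap = NULL };

	printf("setup_region() = %d\n", setup_region(&r));
	free(r.bitmap);
	return 0;
}

The gain is exactly what the commit message states: each check's return value sits next to the check itself, instead of depending on an "r = -EINVAL;" assignment several lines earlier.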
