Subject: [RFC PATCH 06/11] KVM: x86: Refactor common code out of sev.c

    Split sev_lock_two_vms() into kvm_mark_migration_in_progress() and
    kvm_lock_two_vms(), and sev_unlock_two_vms() into kvm_mark_migration_done()
    and kvm_unlock_two_vms(). Move these helpers into common x86 code and
    refactor sev.c to use them.
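
    The resulting calling convention is to mark both VMs as having a
    migration in progress before taking both kvm->lock mutexes, and to
    unwind in reverse order on failure. A rough sketch of a caller follows;
    do_migration() is hypothetical and only illustrates how the helper
    pairs are meant to nest, it is not part of this patch:

    /*
     * Illustrative sketch only: do_migration() is a hypothetical caller,
     * not part of this patch; it shows how the helpers are intended to be
     * used together.
     */
    static int do_migration(struct kvm *dst_kvm, struct kvm *src_kvm)
    {
            int ret;

            /* Reserve both VMs so a concurrent migration attempt gets -EBUSY. */
            ret = kvm_mark_migration_in_progress(dst_kvm, src_kvm);
            if (ret)
                    return ret;

            /* Take both kvm->lock mutexes (killable, nested for lockdep). */
            ret = kvm_lock_two_vms(dst_kvm, src_kvm);
            if (ret)
                    goto out_mark_done;

            /* ... migration work with both VMs locked ... */

            kvm_unlock_two_vms(dst_kvm, src_kvm);
    out_mark_done:
            kvm_mark_migration_done(dst_kvm, src_kvm);
            return ret;
    }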

    Co-developed-by: Sagi Shahar <sagis@google.com>
    Signed-off-by: Sagi Shahar <sagis@google.com>
    Co-developed-by: Vishal Annapurve <vannapurve@google.com>
    Signed-off-by: Vishal Annapurve <vannapurve@google.com>
    Signed-off-by: Ackerley Tng <ackerleytng@google.com>
    ---
     arch/x86/kvm/svm/sev.c | 59 ++++++++++------------------------------
     arch/x86/kvm/x86.c     | 62 ++++++++++++++++++++++++++++++++++++++++++
     arch/x86/kvm/x86.h     |  6 ++++
     3 files changed, 82 insertions(+), 45 deletions(-)

    diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
    index 725289b523c7..3c4313417966 100644
    --- a/arch/x86/kvm/svm/sev.c
    +++ b/arch/x86/kvm/svm/sev.c
    @@ -1554,47 +1554,6 @@ static bool is_cmd_allowed_from_mirror(u32 cmd_id)
             return false;
     }
     
    -static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
    -{
    -        int r = -EBUSY;
    -
    -        if (dst_kvm == src_kvm)
    -                return -EINVAL;
    -
    -        /*
    -         * Bail if these VMs are already involved in a migration to avoid
    -         * deadlock between two VMs trying to migrate to/from each other.
    -         */
    -        if (atomic_cmpxchg_acquire(&dst_kvm->migration_in_progress, 0, 1))
    -                return -EBUSY;
    -
    -        if (atomic_cmpxchg_acquire(&src_kvm->migration_in_progress, 0, 1))
    -                goto release_dst;
    -
    -        r = -EINTR;
    -        if (mutex_lock_killable(&dst_kvm->lock))
    -                goto release_src;
    -        if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))
    -                goto unlock_dst;
    -        return 0;
    -
    -unlock_dst:
    -        mutex_unlock(&dst_kvm->lock);
    -release_src:
    -        atomic_set_release(&src_kvm->migration_in_progress, 0);
    -release_dst:
    -        atomic_set_release(&dst_kvm->migration_in_progress, 0);
    -        return r;
    -}
    -
    -static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
    -{
    -        mutex_unlock(&dst_kvm->lock);
    -        mutex_unlock(&src_kvm->lock);
    -        atomic_set_release(&dst_kvm->migration_in_progress, 0);
    -        atomic_set_release(&src_kvm->migration_in_progress, 0);
    -}
    -
     /* vCPU mutex subclasses. */
     enum sev_migration_role {
             SEV_MIGRATION_SOURCE = 0,
    @@ -1777,9 +1736,12 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
             }
     
             source_kvm = f.file->private_data;
    -        ret = sev_lock_two_vms(kvm, source_kvm);
    +        ret = kvm_mark_migration_in_progress(kvm, source_kvm);
             if (ret)
                     goto out_fput;
    +        ret = kvm_lock_two_vms(kvm, source_kvm);
    +        if (ret)
    +                goto out_mark_migration_done;
     
             if (sev_guest(kvm) || !sev_guest(source_kvm)) {
                     ret = -EINVAL;
    @@ -1823,8 +1785,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
                     sev_misc_cg_uncharge(cg_cleanup_sev);
             put_misc_cg(cg_cleanup_sev->misc_cg);
             cg_cleanup_sev->misc_cg = NULL;
     out_unlock:
    -        sev_unlock_two_vms(kvm, source_kvm);
    +        kvm_unlock_two_vms(kvm, source_kvm);
    +out_mark_migration_done:
    +        kvm_mark_migration_done(kvm, source_kvm);
     out_fput:
             fdput(f);
             return ret;
    @@ -2057,9 +2021,12 @@ int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
             }
     
             source_kvm = f.file->private_data;
    -        ret = sev_lock_two_vms(kvm, source_kvm);
    +        ret = kvm_mark_migration_in_progress(kvm, source_kvm);
             if (ret)
                     goto e_source_fput;
    +        ret = kvm_lock_two_vms(kvm, source_kvm);
    +        if (ret)
    +                goto e_mark_migration_done;
     
             /*
              * Mirrors of mirrors should work, but let's not get silly. Also
    @@ -2100,7 +2067,9 @@ int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
              */
     
     e_unlock:
    -        sev_unlock_two_vms(kvm, source_kvm);
    +        kvm_unlock_two_vms(kvm, source_kvm);
    +e_mark_migration_done:
    +        kvm_mark_migration_done(kvm, source_kvm);
     e_source_fput:
             fdput(f);
             return ret;
    diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
    index de195ad83ec0..494b75ef7197 100644
    --- a/arch/x86/kvm/x86.c
    +++ b/arch/x86/kvm/x86.c
    @@ -4340,6 +4340,68 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
     }
     EXPORT_SYMBOL_GPL(kvm_get_msr_common);
     
    +int kvm_mark_migration_in_progress(struct kvm *dst_kvm, struct kvm *src_kvm)
    +{
    +        int r;
    +
    +        if (dst_kvm == src_kvm)
    +                return -EINVAL;
    +
    +        /*
    +         * Bail if these VMs are already involved in a migration to avoid
    +         * deadlock between two VMs trying to migrate to/from each other.
    +         */
    +        r = -EBUSY;
    +        if (atomic_cmpxchg_acquire(&dst_kvm->migration_in_progress, 0, 1))
    +                return r;
    +
    +        if (atomic_cmpxchg_acquire(&src_kvm->migration_in_progress, 0, 1))
    +                goto release_dst;
    +
    +        return 0;
    +
    +release_dst:
    +        atomic_set_release(&dst_kvm->migration_in_progress, 0);
    +        return r;
    +}
    +EXPORT_SYMBOL_GPL(kvm_mark_migration_in_progress);
    +
    +void kvm_mark_migration_done(struct kvm *dst_kvm, struct kvm *src_kvm)
    +{
    +        atomic_set_release(&dst_kvm->migration_in_progress, 0);
    +        atomic_set_release(&src_kvm->migration_in_progress, 0);
    +}
    +EXPORT_SYMBOL_GPL(kvm_mark_migration_done);
    +
    +int kvm_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
    +{
    +        int r;
    +
    +        if (dst_kvm == src_kvm)
    +                return -EINVAL;
    +
    +        r = -EINTR;
    +        if (mutex_lock_killable(&dst_kvm->lock))
    +                return r;
    +
    +        if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))
    +                goto unlock_dst;
    +
    +        return 0;
    +
    +unlock_dst:
    +        mutex_unlock(&dst_kvm->lock);
    +        return r;
    +}
    +EXPORT_SYMBOL_GPL(kvm_lock_two_vms);
    +
    +void kvm_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
    +{
    +        mutex_unlock(&dst_kvm->lock);
    +        mutex_unlock(&src_kvm->lock);
    +}
    +EXPORT_SYMBOL_GPL(kvm_unlock_two_vms);
    +
     /*
      * Read or write a bunch of msrs. All parameters are kernel addresses.
      *
    diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
    index 82e3dafc5453..4c6edaf5ac5b 100644
    --- a/arch/x86/kvm/x86.h
    +++ b/arch/x86/kvm/x86.h
    @@ -539,4 +539,10 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
                              unsigned int port, void *data, unsigned int count,
                              int in);
     
    +int kvm_mark_migration_in_progress(struct kvm *dst_kvm, struct kvm *src_kvm);
    +void kvm_mark_migration_done(struct kvm *dst_kvm, struct kvm *src_kvm);
    +
    +int kvm_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm);
    +void kvm_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm);
    +
     #endif
    --
    2.41.0.640.ga95def55d0-goog