From: Chao Peng <chao.p.peng@linux.intel.com>
Subject: [PATCH v9 4/8] KVM: Use gfn instead of hva for mmu_notifier_retry
Date: 2022-10-25

Currently in the mmu_notifier invalidate path, an hva range is recorded
and then checked against in mmu_invalidate_retry_hva() in the page
fault path (see the sketch below). However, for the private memory
introduced later in this series, a page fault may not have an
associated hva, so checking the gfn (gpa) makes more sense.
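
For reference, the fault side of this handshake looks roughly like the
sketch below. This is a simplified illustration, not the literal
upstream code; example_fault() is a made-up name:

static int example_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	unsigned long mmu_seq;

retry:
	/* Snapshot the sequence count before translating the fault. */
	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
	smp_rmb();

	/* ... resolve gfn to pfn; may sleep, so mmu_lock is not held ... */

	write_lock(&vcpu->kvm->mmu_lock);
	if (mmu_invalidate_retry_gfn(vcpu->kvm, mmu_seq, fault->gfn)) {
		/* An invalidation raced with us; drop the lock and retry. */
		write_unlock(&vcpu->kvm->mmu_lock);
		goto retry;
	}
	/* ... safe to install the mapping under mmu_lock ... */
	write_unlock(&vcpu->kvm->mmu_lock);
	return 0;
}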

For the existing non-private memory case, gfn is expected to continue
to work. The only downside is that when multiple gfns alias a single
hva, the current algorithm, which coalesces all in-flight ranges into
one [start, end) pair, can result in a much larger range being
rejected, as sketched below. Such aliasing should be uncommon, so the
impact is expected to be small.
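
The "larger range" comes from the way update_invalidate_range() widens
the single recorded range with min/max while an invalidation is in
progress. The else branch below is pre-existing context that the hunks
do not show, so treat this as an illustrative sketch:

	/*
	 * Example: if one hva maps to gfn 0x100 in one slot and to gfn
	 * 0x10100 in another, invalidating that hva records the range
	 * [0x100, 0x10101), transiently rejecting faults on every gfn
	 * in between.
	 */
	if (likely(kvm->mmu_invalidate_in_progress == 1)) {
		kvm->mmu_invalidate_range_start = start;
		kvm->mmu_invalidate_range_end = end;
	} else {
		kvm->mmu_invalidate_range_start =
			min(kvm->mmu_invalidate_range_start, start);
		kvm->mmu_invalidate_range_end =
			max(kvm->mmu_invalidate_range_end, end);
	}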

This also fixes a bug in kvm_zap_gfn_range(), which already passes gfns
when calling kvm_mmu_invalidate_begin/end() even though those functions
take hvas in the current code; see the abbreviated excerpt below.
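
Abbreviated for illustration (the actual zapping is elided), the
mismatched caller looks like this: both arguments are gfns, yet the
begin/end helpers declared hva parameters before this patch.

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
	write_lock(&kvm->mmu_lock);

	kvm_mmu_invalidate_begin(kvm, gfn_start, gfn_end);
	/* ... zap sptes in [gfn_start, gfn_end) ... */
	kvm_mmu_invalidate_end(kvm, gfn_start, gfn_end);

	write_unlock(&kvm->mmu_lock);
}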

Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
---
 arch/x86/kvm/mmu/mmu.c   |  2 +-
 include/linux/kvm_host.h | 18 +++++++---------
 virt/kvm/kvm_main.c      | 45 ++++++++++++++++++++++++++--------------
 3 files changed, 39 insertions(+), 26 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6f81539061d6..33b1aec44fb8 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4217,7 +4217,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
 		return true;
 
 	return fault->slot &&
-	       mmu_invalidate_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+	       mmu_invalidate_retry_gfn(vcpu->kvm, mmu_seq, fault->gfn);
 }
 
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 739a7562a1f3..79e5cbc35fcf 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -775,8 +775,8 @@ struct kvm {
 	struct mmu_notifier mmu_notifier;
 	unsigned long mmu_invalidate_seq;
 	long mmu_invalidate_in_progress;
-	unsigned long mmu_invalidate_range_start;
-	unsigned long mmu_invalidate_range_end;
+	gfn_t mmu_invalidate_range_start;
+	gfn_t mmu_invalidate_range_end;
 #endif
 	struct list_head devices;
 	u64 manual_dirty_log_protect;
@@ -1365,10 +1365,8 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 #endif
 
-void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
-			      unsigned long end);
-void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
-			    unsigned long end);
+void kvm_mmu_invalidate_begin(struct kvm *kvm, gfn_t start, gfn_t end);
+void kvm_mmu_invalidate_end(struct kvm *kvm, gfn_t start, gfn_t end);
 
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
@@ -1937,9 +1935,9 @@ static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
 	return 0;
 }
 
-static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
+static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
 					   unsigned long mmu_seq,
-					   unsigned long hva)
+					   gfn_t gfn)
 {
 	lockdep_assert_held(&kvm->mmu_lock);
 	/*
@@ -1949,8 +1947,8 @@ static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
 	 * positives, due to shortcuts when handing concurrent invalidations.
 	 */
 	if (unlikely(kvm->mmu_invalidate_in_progress) &&
-	    hva >= kvm->mmu_invalidate_range_start &&
-	    hva < kvm->mmu_invalidate_range_end)
+	    gfn >= kvm->mmu_invalidate_range_start &&
+	    gfn < kvm->mmu_invalidate_range_end)
 		return 1;
 	if (kvm->mmu_invalidate_seq != mmu_seq)
 		return 1;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8dace78a0278..09c9cdeb773c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -540,8 +540,7 @@ static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
 
 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 
-typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
-			     unsigned long end);
+typedef void (*on_lock_fn_t)(struct kvm *kvm, gfn_t start, gfn_t end);
 
 typedef void (*on_unlock_fn_t)(struct kvm *kvm);
 
@@ -628,7 +627,8 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 				locked = true;
 				KVM_MMU_LOCK(kvm);
 				if (!IS_KVM_NULL_FN(range->on_lock))
-					range->on_lock(kvm, range->start, range->end);
+					range->on_lock(kvm, gfn_range.start,
+						       gfn_range.end);
 				if (IS_KVM_NULL_FN(range->handler))
 					break;
 			}
@@ -715,15 +715,9 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
 }
 
-void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
-			      unsigned long end)
+static inline void update_invalidate_range(struct kvm *kvm, gfn_t start,
+					   gfn_t end)
 {
-	/*
-	 * The count increase must become visible at unlock time as no
-	 * spte can be established without taking the mmu_lock and
-	 * count is also read inside the mmu_lock critical section.
-	 */
-	kvm->mmu_invalidate_in_progress++;
 	if (likely(kvm->mmu_invalidate_in_progress == 1)) {
 		kvm->mmu_invalidate_range_start = start;
 		kvm->mmu_invalidate_range_end = end;
@@ -744,6 +738,28 @@ void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
 	}
 }
 
+static void mark_invalidate_in_progress(struct kvm *kvm, gfn_t start, gfn_t end)
+{
+	/*
+	 * The count increase must become visible at unlock time as no
+	 * spte can be established without taking the mmu_lock and
+	 * count is also read inside the mmu_lock critical section.
+	 */
+	kvm->mmu_invalidate_in_progress++;
+}
+
+static bool kvm_mmu_handle_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+	update_invalidate_range(kvm, range->start, range->end);
+	return kvm_unmap_gfn_range(kvm, range);
+}
+
+void kvm_mmu_invalidate_begin(struct kvm *kvm, gfn_t start, gfn_t end)
+{
+	mark_invalidate_in_progress(kvm, start, end);
+	update_invalidate_range(kvm, start, end);
+}
+
 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 					const struct mmu_notifier_range *range)
 {
@@ -752,8 +768,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		.start		= range->start,
 		.end		= range->end,
 		.pte		= __pte(0),
-		.handler	= kvm_unmap_gfn_range,
-		.on_lock	= kvm_mmu_invalidate_begin,
+		.handler	= kvm_mmu_handle_gfn_range,
+		.on_lock	= mark_invalidate_in_progress,
 		.on_unlock	= kvm_arch_guest_memory_reclaimed,
 		.flush_on_ret	= true,
 		.may_block	= mmu_notifier_range_blockable(range),
@@ -791,8 +807,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	return 0;
 }
 
-void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
-			    unsigned long end)
+void kvm_mmu_invalidate_end(struct kvm *kvm, gfn_t start, gfn_t end)
 {
 	/*
 	 * This sequence increase will notify the kvm page fault that
-- 
2.25.1