Subject: [PATCH 5.10 091/126] KVM: x86/mmu: Ensure TLBs are flushed when yielding during GFN range zap
From: Sean Christopherson <seanjc@google.com>

[ Upstream commit a835429cda91621fca915d80672a157b47738afb ]

When flushing a range of GFNs across multiple roots, ensure any pending
flush from a previous root is honored before yielding while walking the
tables of the current root.

Note, kvm_tdp_mmu_zap_gfn_range() now intentionally overwrites its local
"flush" with the result to avoid redundant flushes. zap_gfn_range()
preserves and returns the incoming "flush", unless of course the flush was
performed prior to yielding and no new flush was triggered.

Fixes: 1af4a96025b3 ("KVM: x86/mmu: Yield in TDU MMU iter even if no SPTES changed")
Cc: stable@vger.kernel.org
Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210325200119.1359384-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)
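
To make the fixed flow concrete, below is a minimal, self-contained sketch
(plain userspace C, NOT kernel code) of the pattern this patch adopts.  All
names are hypothetical stand-ins: do_flush() for the remote TLB flush,
maybe_yield() for tdp_mmu_iter_cond_resched(), walk_one_root() for
zap_gfn_range().  The point is that a flush left pending by a previous root
is threaded into the next walk and honored at the first yield point, and
that the caller overwrites its flag with the return value rather than
OR-ing it in.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the TLB flush that must not be lost across a yield. */
static void do_flush(void)
{
	puts("TLB flush");
}

/*
 * Stand-in for tdp_mmu_iter_cond_resched(): before yielding with a flush
 * pending, perform the flush so nothing runs on stale translations while
 * the lock is dropped.  Returns true if it "yielded".
 */
static bool maybe_yield(bool flush, int step)
{
	if (step % 3 != 0)	/* pretend contention is only occasional */
		return false;
	if (flush)
		do_flush();
	return true;
}

/*
 * Stand-in for zap_gfn_range(): takes the flush state pending from
 * previous roots and returns the updated state.  A flush pending on entry
 * is honored at the first yield point, even if this walk has not yet
 * changed anything itself.
 */
static bool walk_one_root(bool flush)
{
	int step;

	for (step = 0; step < 6; step++) {
		if (maybe_yield(flush, step)) {
			flush = false;	/* the yield path already flushed */
			continue;
		}
		/* ...zap one entry... */
		flush = true;
	}
	return flush;
}

int main(void)
{
	bool flush = false;
	int root;

	for (root = 0; root < 3; root++) {
		/*
		 * Overwrite rather than OR: "flush |= walk_one_root(flush)"
		 * would leave the flag set even after a yield inside the
		 * walk had already performed the flush, forcing a redundant
		 * flush below.
		 */
		flush = walk_one_root(flush);
	}

	if (flush)
		do_flush();
	return 0;
}

Compiled with "cc sketch.c", each walk that inherits a pending flush emits
that flush at its first yield, mirroring how the patched zap_gfn_range()
keeps a previous root's pending flush from being lost.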

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index a54a9ed979d1..34ef3e1a0f84 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -111,7 +111,7 @@ bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
 }
 
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end, bool can_yield);
+			  gfn_t start, gfn_t end, bool can_yield, bool flush);
 
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
@@ -124,7 +124,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 
 	list_del(&root->link);
 
-	zap_gfn_range(kvm, root, 0, max_gfn, false);
+	zap_gfn_range(kvm, root, 0, max_gfn, false, false);
 
 	free_page((unsigned long)root->spt);
 	kmem_cache_free(mmu_page_header_cache, root);
@@ -504,20 +504,21 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
  * scheduler needs the CPU or there is contention on the MMU lock. If this
  * function cannot yield, it will not release the MMU lock or reschedule and
  * the caller must ensure it does not supply too large a GFN range, or the
- * operation can cause a soft lockup.
+ * operation can cause a soft lockup. Note, in some use cases a flush may be
+ * required by prior actions. Ensure the pending flush is performed prior to
+ * yielding.
  */
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end, bool can_yield)
+			  gfn_t start, gfn_t end, bool can_yield, bool flush)
 {
 	struct tdp_iter iter;
-	bool flush_needed = false;
 
 	rcu_read_lock();
 
 	tdp_root_for_each_pte(iter, root, start, end) {
 		if (can_yield &&
-		    tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
-			flush_needed = false;
+		    tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
+			flush = false;
 			continue;
 		}
 
@@ -535,11 +536,11 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 			continue;
 
 		tdp_mmu_set_spte(kvm, &iter, 0);
-		flush_needed = true;
+		flush = true;
 	}
 
 	rcu_read_unlock();
-	return flush_needed;
+	return flush;
 }
 
 /*
@@ -554,7 +555,7 @@ bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
 	bool flush = false;
 
 	for_each_tdp_mmu_root_yield_safe(kvm, root)
-		flush |= zap_gfn_range(kvm, root, start, end, true);
+		flush = zap_gfn_range(kvm, root, start, end, true, flush);
 
 	return flush;
 }
@@ -757,7 +758,7 @@ static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
 				     struct kvm_mmu_page *root, gfn_t start,
 				     gfn_t end, unsigned long unused)
 {
-	return zap_gfn_range(kvm, root, start, end, false);
+	return zap_gfn_range(kvm, root, start, end, false, false);
 }
 
 int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
-- 
2.30.1

