Date: 2 Feb 2021
Subject: [PATCH v2 25/28] KVM: x86/mmu: Allow zapping collapsible SPTEs to use MMU read lock
From: Ben Gardon <bgardon@google.com>
To speed the process of disabling dirty logging, change the TDP MMU
function which zaps collapsible SPTEs to run under the MMU read lock.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
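Note (not part of the commit message): the pattern this patch adopts is that holding mmu_lock only for read no longer excludes other writers to the paging structures, so each zap has to be an atomic compare-and-exchange, and a failed exchange means the SPTE changed underneath us and must be re-read before retrying. Below is a minimal, self-contained userspace sketch of that retry loop, offered only as an illustration: a pthread rwlock stands in for mmu_lock, C11 atomics stand in for the kernel's cmpxchg, and spte_table[], is_present(), and should_zap() are made-up helpers, not KVM code.

/*
 * Sketch of "zap under a read lock with cmpxchg + retry".
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_SPTES 8

static _Atomic uint64_t spte_table[NR_SPTES];	/* fake SPTEs */
static pthread_rwlock_t fake_mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

static bool is_present(uint64_t spte) { return spte != 0; }
static bool should_zap(uint64_t spte) { return (spte & 0x1) != 0; }	/* arbitrary test */

static void zap_collapsible_range(void)
{
	/* Read lock: writers are excluded, but other readers may race with us. */
	pthread_rwlock_rdlock(&fake_mmu_lock);

	for (int i = 0; i < NR_SPTES; i++) {
		uint64_t old_spte = atomic_load(&spte_table[i]);
retry:
		if (!is_present(old_spte) || !should_zap(old_spte))
			continue;

		/*
		 * Try to clear the entry atomically.  On failure,
		 * atomic_compare_exchange_strong() writes the current value
		 * back into old_spte, so the retry re-evaluates the fresh
		 * SPTE, mirroring the explicit re-read in the patch.
		 */
		if (!atomic_compare_exchange_strong(&spte_table[i], &old_spte, 0))
			goto retry;
	}

	pthread_rwlock_unlock(&fake_mmu_lock);
}

int main(void)
{
	for (int i = 0; i < NR_SPTES; i++)
		atomic_store(&spte_table[i], (uint64_t)(i + 1));

	zap_collapsible_range();

	for (int i = 0; i < NR_SPTES; i++)
		printf("spte[%d] = %llu\n", i, (unsigned long long)atomic_load(&spte_table[i]));
	return 0;
}
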
arch/x86/kvm/mmu/mmu.c | 5 ++---
arch/x86/kvm/mmu/tdp_mmu.c | 22 +++++++++++++++-------
2 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 254ff87d2a61..e3cf868be6bd 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5517,8 +5517,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
start, end - 1, true);
}
}
-
- kvm_mmu_unlock(kvm);
+ write_unlock(&kvm->mmu_lock);

if (kvm->arch.tdp_mmu_enabled) {
read_lock(&kvm->mmu_lock);
@@ -5611,10 +5610,10 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
write_lock(&kvm->mmu_lock);
slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
kvm_mmu_zap_collapsible_spte, true);
+ write_unlock(&kvm->mmu_lock);

if (kvm->arch.tdp_mmu_enabled)
kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot);
- write_unlock(&kvm->mmu_lock);
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index de26762433ea..cfe66b8d39fa 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1451,10 +1451,9 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
rcu_read_lock();

tdp_root_for_each_pte(iter, root, start, end) {
- if (tdp_mmu_iter_cond_resched(kvm, &iter, false, false)) {
- spte_set = false;
+retry:
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue;
- }

if (!is_shadow_present_pte(iter.old_spte) ||
!is_last_spte(iter.old_spte, iter.level))
@@ -1465,9 +1464,14 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
!PageTransCompoundMap(pfn_to_page(pfn)))
continue;

- tdp_mmu_set_spte(kvm, &iter, 0);
-
- spte_set = true;
+ if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
+ /*
+ * The iter must explicitly re-read the SPTE because
+ * the atomic cmpxchg failed.
+ */
+ iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
+ goto retry;
+ }
}

rcu_read_unlock();
@@ -1485,7 +1489,9 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
struct kvm_mmu_page *root;
int root_as_id;

- for_each_tdp_mmu_root_yield_safe(kvm, root, false) {
+ read_lock(&kvm->mmu_lock);
+
+ for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
@@ -1493,6 +1499,8 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
zap_collapsible_spte_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
}
+
+ read_unlock(&kvm->mmu_lock);
}

/*
--
2.30.0.365.g02bc693789-goog