Subject: Re: [PATCH v2 26/28] KVM: x86/mmu: Allow enabling / disabling dirty logging under MMU read lock
From: Paolo Bonzini
Date: 2021-02-03
On 02/02/21 19:57, Ben Gardon wrote:
> To reduce lock contention and interference with page fault handlers,
> allow the TDP MMU functions which enable and disable dirty logging
> to operate under the MMU read lock.
>
> Extend the dirty logging enable/disable functions to take the MMU read
> lock rather than the write lock.
>
> Signed-off-by: Ben Gardon <bgardon@google.com>
> ---
> arch/x86/kvm/mmu/mmu.c | 14 +++---
> arch/x86/kvm/mmu/tdp_mmu.c | 93 ++++++++++++++++++++++++++++++--------
> arch/x86/kvm/mmu/tdp_mmu.h | 2 +-
> 3 files changed, 84 insertions(+), 25 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index e3cf868be6bd..6ba2a72d4330 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -5638,9 +5638,10 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
>
> write_lock(&kvm->mmu_lock);
> flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
> + write_unlock(&kvm->mmu_lock);
> +
> if (kvm->arch.tdp_mmu_enabled)
> flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
> - write_unlock(&kvm->mmu_lock);
>
> /*
> * It's also safe to flush TLBs out of mmu lock here as currently this
> @@ -5661,9 +5662,10 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
> write_lock(&kvm->mmu_lock);
> flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
> false);
> + write_unlock(&kvm->mmu_lock);
> +
> if (kvm->arch.tdp_mmu_enabled)
> flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M);
> - write_unlock(&kvm->mmu_lock);
>
> if (flush)
> kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
> @@ -5677,12 +5679,12 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
>
> write_lock(&kvm->mmu_lock);
> flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
> - if (kvm->arch.tdp_mmu_enabled)
> - flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot);
> - write_unlock(&kvm->mmu_lock);
> -
> if (flush)
> kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
> + write_unlock(&kvm->mmu_lock);
> +
> + if (kvm->arch.tdp_mmu_enabled)
> + kvm_tdp_mmu_slot_set_dirty(kvm, memslot);
> }
> EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
>
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index cfe66b8d39fa..6093926a6bc5 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -553,18 +553,22 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
> }
>
> /*
> - * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the
> + * __tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the
> * associated bookkeeping
> *
> * @kvm: kvm instance
> * @iter: a tdp_iter instance currently on the SPTE that should be set
> * @new_spte: The value the SPTE should be set to
> + * @record_dirty_log: Record the page as dirty in the dirty bitmap if
> + * appropriate for the change being made. Should be set
> + * unless performing certain dirty logging operations.
> + * Leaving record_dirty_log unset in that case prevents page
> + * writes from being double counted.
> * Returns: true if the SPTE was set, false if it was not. If false is returned,
> * this function will have no side-effects.
> */
> -static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
> - struct tdp_iter *iter,
> - u64 new_spte)
> +static inline bool __tdp_mmu_set_spte_atomic(struct kvm *kvm,
> + struct tdp_iter *iter, u64 new_spte, bool record_dirty_log)

Instead of adding the bool argument, just name this
tdp_mmu_set_spte_atomic_no_dirty_log...

> {
> u64 *root_pt = tdp_iter_root_pt(iter);
> struct kvm_mmu_page *root = sptep_to_sp(root_pt);
> @@ -583,12 +587,31 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
> new_spte) != iter->old_spte)
> return false;
>
> - handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
> - iter->level, true);
> + __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
> + iter->level, true);
> + handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
> + if (record_dirty_log)
> + handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
> + iter->old_spte, new_spte,
> + iter->level);

... and tdp_mmu_set_spte_atomic becomes

	if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
		return false;

	handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
				      iter->old_spte, new_spte,
				      iter->level);
	return true;

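Spelled out, the split could look something like the sketch below (untested,
and assuming the tdp_iter_root_pt()/sptep_to_sp() plumbing and the lockdep
assertion from the quoted hunk):

static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm,
							struct tdp_iter *iter,
							u64 new_spte)
{
	u64 *root_pt = tdp_iter_root_pt(iter);
	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
	int as_id = kvm_mmu_page_as_id(root);

	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Bail if another thread changed the SPTE under us; the caller is
	 * expected to re-read the SPTE and retry.
	 */
	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
		      new_spte) != iter->old_spte)
		return false;

	__handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte,
			      new_spte, iter->level, true);
	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);

	return true;
}

static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter,
					   u64 new_spte)
{
	struct kvm_mmu_page *root = sptep_to_sp(tdp_iter_root_pt(iter));
	int as_id = kvm_mmu_page_as_id(root);

	if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
		return false;

	handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
				      iter->old_spte, new_spte,
				      iter->level);
	return true;
}

(The wrapper recomputes as_id from the root rather than having the helper
return it, which keeps both functions' signatures identical to today's.)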

> @@ -1301,7 +1344,8 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
> int root_as_id;
> bool spte_set = false;
>
> - for_each_tdp_mmu_root_yield_safe(kvm, root, false) {
> + read_lock(&kvm->mmu_lock);
> + for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
> root_as_id = kvm_mmu_page_as_id(root);
> if (root_as_id != slot->as_id)
> continue;
> @@ -1309,6 +1353,7 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
> spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
> slot->base_gfn + slot->npages);
> }
> + read_unlock(&kvm->mmu_lock);

Same remark as before.

> return spte_set;
> }
> @@ -1397,7 +1442,8 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
> rcu_read_lock();
>
> tdp_root_for_each_pte(iter, root, start, end) {
> - if (tdp_mmu_iter_cond_resched(kvm, &iter, false, false))
> +retry:
> + if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
> continue;
>
> if (!is_shadow_present_pte(iter.old_spte) ||
> @@ -1406,7 +1452,14 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
>
> new_spte = iter.old_spte | shadow_dirty_mask;
>
> - tdp_mmu_set_spte(kvm, &iter, new_spte);
> + if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) {
> + /*
> + * The iter must explicitly re-read the SPTE because
> + * the atomic cmpxchg failed.
> + */
> + iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
> + goto retry;
> + }
> spte_set = true;

Yep, looks like that spte_set assignment should not have been removed. :)

> }
>
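For reference, the loop body with the assignment kept would then read
roughly as follows (a sketch only, using the names from the quoted patch):

		new_spte = iter.old_spte | shadow_dirty_mask;

		if (!tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) {
			/* The cmpxchg lost a race; re-read and retry. */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
		spte_set = true;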
> @@ -1417,15 +1470,15 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
> /*
> * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
> * only used for PML, and so will involve setting the dirty bit on each SPTE.
> - * Returns true if an SPTE has been changed and the TLBs need to be flushed.
> */
> -bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
> +void kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
> {
> struct kvm_mmu_page *root;
> int root_as_id;
> bool spte_set = false;
>
> - for_each_tdp_mmu_root_yield_safe(kvm, root, false) {
> + read_lock(&kvm->mmu_lock);

And again here.

Paolo

> + for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
> root_as_id = kvm_mmu_page_as_id(root);
> if (root_as_id != slot->as_id)
> continue;
> @@ -1433,7 +1486,11 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
> spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
> slot->base_gfn + slot->npages);
> }
> - return spte_set;
> +
> + if (spte_set)
> + kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
> +
> + read_unlock(&kvm->mmu_lock);
> }
>
> /*
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
> index 10ada884270b..848b41b20985 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.h
> +++ b/arch/x86/kvm/mmu/tdp_mmu.h
> @@ -38,7 +38,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
> struct kvm_memory_slot *slot,
> gfn_t gfn, unsigned long mask,
> bool wrprot);
> -bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot);
> +void kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot);
> void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
> const struct kvm_memory_slot *slot);
>
>
