Date: 21 Dec 2022
Subject: [RFC 11/14] KVM: x86/MMU: Factor Shadow MMU wrprot / clear dirty ops out of mmu.c
From: Ben Gardon <bgardon@google.com>
There are several functions in mmu.c which bifurcate to the Shadow
and/or TDP MMU implementations. In most of these, the Shadow MMU
implementation is open-coded. Wrap these instances in a nice function
which just needs kvm and slot arguments or similar. This matches the TDP
MMU interface and will allow for some nice cleanups in a following
commit.
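
For reference, each of the open-coded walks being wrapped steps through
the dirty/write-protect mask with the usual lowest-set-bit idiom. A
minimal standalone sketch of that idiom (not part of the patch;
userspace __builtin_ctzl() stands in for the kernel's __ffs()):

    #include <stdio.h>

    int main(void)
    {
            unsigned long mask = 0x2c;      /* bits 2, 3, and 5 set */

            while (mask) {
                    /* index of the lowest set bit, as __ffs(mask)
                     * would return */
                    unsigned long bit = __builtin_ctzl(mask);

                    printf("visit 4K page at gfn_offset + %lu\n", bit);

                    /* clear the lowest set bit */
                    mask &= mask - 1;
            }
            return 0;
    }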

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
arch/x86/kvm/mmu/mmu.c | 52 ++++++----------------------
arch/x86/kvm/mmu/shadow_mmu.c | 64 +++++++++++++++++++++++++++++++++++
arch/x86/kvm/mmu/shadow_mmu.h | 15 ++++++++
3 files changed, 90 insertions(+), 41 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 160dd143a814..ce2a6dd38c67 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -417,23 +417,13 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
- struct kvm_rmap_head *rmap_head;
-
if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, true);

- if (!kvm_memslots_have_rmaps(kvm))
- return;
-
- while (mask) {
- rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
- PG_LEVEL_4K, slot);
- rmap_write_protect(rmap_head, false);
-
- /* clear the first set bit */
- mask &= mask - 1;
- }
+ if (kvm_memslots_have_rmaps(kvm))
+ kvm_shadow_mmu_write_protect_pt_masked(kvm, slot, gfn_offset,
+ mask);
}

/**
@@ -450,23 +440,13 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
- struct kvm_rmap_head *rmap_head;
-
if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, false);

- if (!kvm_memslots_have_rmaps(kvm))
- return;
-
- while (mask) {
- rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
- PG_LEVEL_4K, slot);
- __rmap_clear_dirty(kvm, rmap_head, slot);
-
- /* clear the first set bit */
- mask &= mask - 1;
- }
+ if (kvm_memslots_have_rmaps(kvm))
+ kvm_shadow_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset,
+ mask);
}

/**
@@ -524,16 +504,11 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn,
int min_level)
{
- struct kvm_rmap_head *rmap_head;
- int i;
bool write_protected = false;

- if (kvm_memslots_have_rmaps(kvm)) {
- for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
- rmap_head = gfn_to_rmap(gfn, i, slot);
- write_protected |= rmap_write_protect(rmap_head, true);
- }
- }
+ if (kvm_memslots_have_rmaps(kvm))
+ write_protected |=
+ kvm_shadow_mmu_write_protect_gfn(kvm, slot, gfn, min_level);

if (is_tdp_mmu_enabled(kvm))
write_protected |=
@@ -2917,8 +2892,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
{
if (kvm_memslots_have_rmaps(kvm)) {
write_lock(&kvm->mmu_lock);
- slot_handle_level(kvm, memslot, slot_rmap_write_protect,
- start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
+ kvm_shadow_mmu_wrprot_slot(kvm, memslot, start_level);
write_unlock(&kvm->mmu_lock);
}

@@ -3069,11 +3043,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
{
if (kvm_memslots_have_rmaps(kvm)) {
write_lock(&kvm->mmu_lock);
- /*
- * Clear dirty bits only on 4k SPTEs since the legacy MMU only
- * support dirty logging at a 4k granularity.
- */
- slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
+ kvm_shadow_mmu_clear_dirty_slot(kvm, memslot);
write_unlock(&kvm->mmu_lock);
}

diff --git a/arch/x86/kvm/mmu/shadow_mmu.c b/arch/x86/kvm/mmu/shadow_mmu.c
index 2d1a4026cf00..80b8c78daaeb 100644
--- a/arch/x86/kvm/mmu/shadow_mmu.c
+++ b/arch/x86/kvm/mmu/shadow_mmu.c
@@ -3440,3 +3440,67 @@ unsigned long kvm_shadow_mmu_shrink_scan(struct kvm *kvm, int pages_to_free)

return freed;
}
+
+void kvm_shadow_mmu_write_protect_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask)
+{
+ struct kvm_rmap_head *rmap_head;
+
+ while (mask) {
+ rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+ PG_LEVEL_4K, slot);
+ rmap_write_protect(rmap_head, false);
+
+ /* clear the first set bit */
+ mask &= mask - 1;
+ }
+}
+
+void kvm_shadow_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask)
+{
+ struct kvm_rmap_head *rmap_head;
+
+ while (mask) {
+ rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+ PG_LEVEL_4K, slot);
+ __rmap_clear_dirty(kvm, rmap_head, slot);
+
+ /* clear the first set bit */
+ mask &= mask - 1;
+ }
+}
+
+bool kvm_shadow_mmu_write_protect_gfn(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ u64 gfn, int min_level)
+{
+ struct kvm_rmap_head *rmap_head;
+ int i;
+ bool write_protected = false;
+
+ if (kvm_memslots_have_rmaps(kvm)) {
+ for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
+ rmap_head = gfn_to_rmap(gfn, i, slot);
+ write_protected |= rmap_write_protect(rmap_head, true);
+ }
+ }
+
+ return write_protected;
+}
+
+void kvm_shadow_mmu_clear_dirty_slot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot)
+{
+ slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
+}
+
+void kvm_shadow_mmu_wrprot_slot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot,
+ int start_level)
+{
+ slot_handle_level(kvm, memslot, slot_rmap_write_protect,
+ start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
+}
diff --git a/arch/x86/kvm/mmu/shadow_mmu.h b/arch/x86/kvm/mmu/shadow_mmu.h
index af201d34d0b2..c322eeaa0688 100644
--- a/arch/x86/kvm/mmu/shadow_mmu.h
+++ b/arch/x86/kvm/mmu/shadow_mmu.h
@@ -104,6 +104,21 @@ void kvm_shadow_mmu_zap_collapsible_sptes(struct kvm *kvm,
bool kvm_shadow_mmu_has_zapped_obsolete_pages(struct kvm *kvm);
unsigned long kvm_shadow_mmu_shrink_scan(struct kvm *kvm, int pages_to_free);

+void kvm_shadow_mmu_write_protect_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask);
+void kvm_shadow_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask);
+bool kvm_shadow_mmu_write_protect_gfn(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ u64 gfn, int min_level);
+void kvm_shadow_mmu_clear_dirty_slot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
+void kvm_shadow_mmu_wrprot_slot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot,
+ int start_level);
+
/* Exports from paging_tmpl.h */
gpa_t paging32_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gpa_t vaddr, u64 access,
--
2.39.0.314.g84b9a713c41-goog