Subject: [PATCH 2/5] KVM: x86/mmu: Rename rmap zap helpers to better show relationships
From: Sean Christopherson <seanjc@google.com>

Rename the helpers that zap rmaps to use consistent naming and better
show the relationships between the various helpers. E.g. rename
pte_list_remove() to kvm_zap_one_rmap(), use "zap" universally instead of
a mix of "zap" and "unmap", etc...

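For reference, the resulting call chains look roughly like this (an
informal sketch for illustration only, not literal code; old names in
parentheses):

  kvm_unmap_gfn_range()
    -> kvm_handle_gfn_range(..., kvm_zap_rmaps)      (was kvm_unmap_rmapp)
         -> __kvm_zap_rmaps()                        (was kvm_zap_rmapp)
              -> ____kvm_zap_rmaps()                 (was pte_list_destroy)

  kvm_zap_gfn_range()
    -> __kvm_zap_gfn_range()                         (was __kvm_zap_rmaps)
         -> slot_handle_level_range(..., __kvm_zap_rmaps, ...)
              -> ____kvm_zap_rmaps()

  kvm_zap_one_rmap()                                 (was pte_list_remove)
    zaps a single SPTE and its rmap entry, e.g. from kvm_set_pte_rmapp()
    and kvm_mmu_zap_collapsible_spte().
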
No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
arch/x86/kvm/mmu/mmu.c | 37 ++++++++++++++++++-------------------
1 file changed, 18 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 2605d6ebc193..32f9427f3334 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -957,15 +957,15 @@ static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
}
}

-static void pte_list_remove(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- u64 *sptep)
+static void kvm_zap_one_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ u64 *sptep)
{
mmu_spte_clear_track_bits(kvm, sptep);
__pte_list_remove(sptep, rmap_head);
}

-/* Return true if rmap existed, false otherwise */
-static bool pte_list_destroy(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
+/* Return true if at least one rmap was zapped, false otherwise */
+static bool ____kvm_zap_rmaps(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
struct pte_list_desc *desc, *next;
int i;
@@ -1383,17 +1383,17 @@ static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
}

-static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- const struct kvm_memory_slot *slot)
+static bool __kvm_zap_rmaps(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ const struct kvm_memory_slot *slot)
{
- return pte_list_destroy(kvm, rmap_head);
+ return ____kvm_zap_rmaps(kvm, rmap_head);
}

-static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level,
- pte_t unused)
+static bool kvm_zap_rmaps(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot, gfn_t gfn, int level,
+ pte_t unused)
{
- return kvm_zap_rmapp(kvm, rmap_head, slot);
+ return __kvm_zap_rmaps(kvm, rmap_head, slot);
}

static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
@@ -1417,7 +1417,7 @@ static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
need_flush = true;

if (pte_write(pte)) {
- pte_list_remove(kvm, rmap_head, sptep);
+ kvm_zap_one_rmap(kvm, rmap_head, sptep);
goto restart;
} else {
new_spte = kvm_mmu_changed_pte_notifier_make_spte(
@@ -1529,7 +1529,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
bool flush = false;

if (kvm_memslots_have_rmaps(kvm))
- flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
+ flush = kvm_handle_gfn_range(kvm, range, kvm_zap_rmaps);

if (is_tdp_mmu_enabled(kvm))
flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
@@ -1596,7 +1596,7 @@ static void __rmap_add(struct kvm *kvm,
rmap_count = pte_list_add(cache, spte, rmap_head);

if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
- kvm_unmap_rmapp(kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
+ kvm_zap_rmaps(kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
kvm_flush_remote_tlbs_with_address(
kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
}
@@ -5977,7 +5977,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
mmu_free_vm_memory_caches(kvm);
}

-static bool __kvm_zap_rmaps(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+static bool __kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
const struct kvm_memory_slot *memslot;
struct kvm_memslots *slots;
@@ -5999,8 +5999,7 @@ static bool __kvm_zap_rmaps(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
if (WARN_ON_ONCE(start >= end))
continue;

- flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
-
+ flush = slot_handle_level_range(kvm, memslot, __kvm_zap_rmaps,
PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
start, end - 1, true, flush);
}
@@ -6025,7 +6024,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)

kvm_inc_notifier_count(kvm, gfn_start, gfn_end);

- flush = __kvm_zap_rmaps(kvm, gfn_start, gfn_end);
+ flush = __kvm_zap_gfn_range(kvm, gfn_start, gfn_end);

if (is_tdp_mmu_enabled(kvm)) {
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
@@ -6401,7 +6400,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
if (sp->role.direct &&
sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
pfn, PG_LEVEL_NUM)) {
- pte_list_remove(kvm, rmap_head, sptep);
+ kvm_zap_one_rmap(kvm, rmap_head, sptep);

if (kvm_available_flush_tlb_with_range())
kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
--
2.37.0.144.g8ac04bfd2-goog