Subject: [PATCH v2 02/13] KVM: x86/mmu: Move kvm_mmu_(get|put)_root to TDP MMU
From: Ben Gardon <bgardon@google.com>

The TDP MMU is almost the only user of kvm_mmu_get_root and
kvm_mmu_put_root; the only remaining use of put_root in mmu.c is for the
legacy / shadow MMU. Open-code that one use and move the get / put
functions to the TDP MMU so they can be extended in future commits.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c          | 10 ++++------
 arch/x86/kvm/mmu/mmu_internal.h | 16 ----------------
 arch/x86/kvm/mmu/tdp_mmu.c      |  6 +++---
 arch/x86/kvm/mmu/tdp_mmu.h      | 18 ++++++++++++++++++
 4 files changed, 25 insertions(+), 25 deletions(-)
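
A short illustrative note on the mmu.c hunk (a sketch, not part of the
change itself): for a legacy / shadow MMU root, the removed
kvm_mmu_put_root() only decremented root_count and returned whether it
reached zero, so the old path behaved as:

	/* old behaviour, assuming !is_tdp_mmu_page(sp) */
	if (kvm_mmu_put_root(kvm, sp)) {	/* --sp->root_count == 0 */
		if (sp->role.invalid)
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
	}

which is exactly what the open-coded

	else if (!--sp->root_count && sp->role.invalid)
		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);

does. TDP MMU roots instead go through the new kvm_tdp_mmu_put_root()
helper, so their reference counting can be extended in later patches
without touching the shadow MMU path.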

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 617809529987..9c7ef7ca8bf6 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3153,12 +3153,10 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 
 	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
 
-	if (kvm_mmu_put_root(kvm, sp)) {
-		if (is_tdp_mmu_page(sp))
-			kvm_tdp_mmu_free_root(kvm, sp);
-		else if (sp->role.invalid)
-			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
-	}
+	if (is_tdp_mmu_page(sp) && kvm_tdp_mmu_put_root(kvm, sp))
+		kvm_tdp_mmu_free_root(kvm, sp);
+	else if (!--sp->root_count && sp->role.invalid)
+		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 
 	*root_hpa = INVALID_PAGE;
 }
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index d44fe8a43a19..9347d73996b5 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -113,22 +113,6 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);
 
-static inline void kvm_mmu_get_root(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-	BUG_ON(!sp->root_count);
-	lockdep_assert_held(&kvm->mmu_lock);
-
-	++sp->root_count;
-}
-
-static inline bool kvm_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-	lockdep_assert_held(&kvm->mmu_lock);
-	--sp->root_count;
-
-	return !sp->root_count;
-}
-
 static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
 {
	return role.smm ? 1 : 0;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index d5210a212c59..6f612ac755a0 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -43,7 +43,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 
 static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
-	if (kvm_mmu_put_root(kvm, root))
+	if (kvm_tdp_mmu_put_root(kvm, root))
		kvm_tdp_mmu_free_root(kvm, root);
 }
 
@@ -55,7 +55,7 @@ static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
		return false;
 
-	kvm_mmu_get_root(kvm, root);
+	kvm_tdp_mmu_get_root(kvm, root);
	return true;
 
 }
@@ -154,7 +154,7 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
	/* Check for an existing root before allocating a new one. */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word) {
-			kvm_mmu_get_root(kvm, root);
+			kvm_tdp_mmu_get_root(kvm, root);
			goto out;
		}
	}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index d7007480b3d2..c9a081c786a5 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -8,6 +8,24 @@
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
+static inline void kvm_tdp_mmu_get_root(struct kvm *kvm,
+					struct kvm_mmu_page *root)
+{
+	BUG_ON(!root->root_count);
+	lockdep_assert_held(&kvm->mmu_lock);
+
+	++root->root_count;
+}
+
+static inline bool kvm_tdp_mmu_put_root(struct kvm *kvm,
+					struct kvm_mmu_page *root)
+{
+	lockdep_assert_held(&kvm->mmu_lock);
+	--root->root_count;
+
+	return !root->root_count;
+}
+
 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush);
 static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
--
2.31.0.208.g409f899ff0-goog