Subject: [PATCH] KVM: x86/mmu: Only allocate shadowed translation cache for sp->role.level <= KVM_MAX_HUGEPAGE_LEVEL
Only an indirect SP with sp->role.level <= KVM_MAX_HUGEPAGE_LEVEL can have
leaf gptes, so the shadowed translation cache needs to be allocated only
for such SPs. Additionally, check sp->shadowed_translation (rather than
sp->role.direct or sp_has_gptes()) to decide whether the information in
the shadowed translation cache can be used.
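
The allocation is gated by the new sp_might_have_leaf_gptes() helper
introduced below, which boils down to:

	/* Direct SPs do not shadow a guest page table, so they have no gptes. */
	if (sp->role.direct)
		return false;

	/* Above the max hugepage level, gptes can only point to lower-level page tables. */
	if (sp->role.level > KVM_MAX_HUGEPAGE_LEVEL)
		return false;

	return true;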

Suggested-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
---
arch/x86/kvm/mmu/mmu.c | 27 +++++++++++++++++++--------
1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index fc3b59b59ee1..8be987d0f05e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -716,12 +716,12 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp);
 
 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
 {
+	if (sp->shadowed_translation)
+		return sp->shadowed_translation[index] >> PAGE_SHIFT;
+
 	if (sp->role.passthrough)
 		return sp->gfn;
 
-	if (!sp->role.direct)
-		return sp->shadowed_translation[index] >> PAGE_SHIFT;
-
 	return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
 }

@@ -733,7 +733,7 @@ static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
  */
 static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
 {
-	if (sp_has_gptes(sp))
+	if (sp->shadowed_translation)
 		return sp->shadowed_translation[index] & ACC_ALL;
 
 	/*
@@ -754,7 +754,7 @@ static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
 static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
 					 gfn_t gfn, unsigned int access)
 {
-	if (sp_has_gptes(sp)) {
+	if (sp->shadowed_translation) {
 		sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
 		return;
 	}
@@ -1697,7 +1697,7 @@ static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
 	hlist_del(&sp->hash_link);
 	list_del(&sp->link);
 	free_page((unsigned long)sp->spt);
-	if (!sp->role.direct)
+	if (sp->shadowed_translation)
 		free_page((unsigned long)sp->shadowed_translation);
 	kmem_cache_free(mmu_page_header_cache, sp);
 }
@@ -1850,6 +1850,17 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
+static bool sp_might_have_leaf_gptes(struct kvm_mmu_page *sp)
+{
+	if (sp->role.direct)
+		return false;
+
+	if (sp->role.level > KVM_MAX_HUGEPAGE_LEVEL)
+		return false;
+
+	return true;
+}
+
 static bool sp_has_gptes(struct kvm_mmu_page *sp)
 {
 	if (sp->role.direct)
@@ -2199,7 +2210,8 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
 
 	sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
 	sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
-	if (!role.direct)
+	sp->role = role;
+	if (sp_might_have_leaf_gptes(sp))
 		sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
 
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
@@ -2216,7 +2228,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
 	kvm_account_mmu_page(kvm, sp);
 
 	sp->gfn = gfn;
-	sp->role = role;
 	hlist_add_head(&sp->hash_link, sp_list);
 	if (sp_has_gptes(sp))
 		account_shadowed(kvm, sp);
--
2.31.1
