Subject: [PATCH v7 12/23] KVM: x86/mmu: Pass kvm pointer separately from vcpu to kvm_mmu_find_shadow_page()
From: David Matlack <dmatlack@google.com>

Get the kvm pointer from the caller, rather than deriving it from
vcpu->kvm, and plumb the kvm pointer all the way from
kvm_mmu_get_shadow_page(). With this change in place, the vcpu pointer
is only needed to sync indirect shadow pages. In other words,
__kvm_mmu_get_shadow_page() can now be used to get *direct* shadow pages
without a vcpu pointer. This enables eager page splitting, which needs
to allocate direct shadow pages during VM ioctls.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220516232138.1783324-13-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
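For illustration only, a hypothetical sketch (not part of this patch,
and assuming a later change permits a NULL @vcpu in
kvm_mmu_find_shadow_page()): with the kvm pointer plumbed separately,
a VM ioctl path such as eager page splitting could obtain direct
shadow pages with only a kvm pointer and its own caches, roughly like
this; the helper name below is made up:

/* Hypothetical caller, for illustration; not applied by this patch. */
static struct kvm_mmu_page *
eager_split_get_direct_sp(struct kvm *kvm, struct shadow_page_caches *caches,
                          gfn_t gfn, union kvm_mmu_page_role role)
{
        /* Direct shadow pages are never unsync, so no vcpu is needed. */
        WARN_ON_ONCE(!role.direct);

        return __kvm_mmu_get_shadow_page(kvm, NULL, caches, gfn, role);
}
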
 arch/x86/kvm/mmu/mmu.c | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index c5a88e8d1b53..88b3f3c2c8b1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1975,7 +1975,8 @@ static void clear_sp_write_flooding_count(u64 *spte)
         __clear_sp_write_flooding_count(sptep_to_sp(spte));
 }

-static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,
+static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
+                                                     struct kvm_vcpu *vcpu,
                                                      gfn_t gfn,
                                                      struct hlist_head *sp_list,
                                                      union kvm_mmu_page_role role)
@@ -1985,7 +1986,7 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,
         int collisions = 0;
         LIST_HEAD(invalid_list);

-        for_each_valid_sp(vcpu->kvm, sp, sp_list) {
+        for_each_valid_sp(kvm, sp, sp_list) {
                 if (sp->gfn != gfn) {
                         collisions++;
                         continue;
@@ -2002,7 +2003,7 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,
                          * upper-level page will be write-protected.
                          */
                         if (role.level > PG_LEVEL_4K && sp->unsync)
-                                kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
+                                kvm_mmu_prepare_zap_page(kvm, sp,
                                                          &invalid_list);
                         continue;
                 }
@@ -2030,7 +2031,7 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,

                         WARN_ON(!list_empty(&invalid_list));
                         if (ret > 0)
-                                kvm_flush_remote_tlbs(vcpu->kvm);
+                                kvm_flush_remote_tlbs(kvm);
                 }

                 __clear_sp_write_flooding_count(sp);
@@ -2039,13 +2040,13 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,
         }

         sp = NULL;
-        ++vcpu->kvm->stat.mmu_cache_miss;
+        ++kvm->stat.mmu_cache_miss;

 out:
-        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+        kvm_mmu_commit_zap_page(kvm, &invalid_list);

-        if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
-                vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
+        if (collisions > kvm->stat.max_mmu_page_hash_collisions)
+                kvm->stat.max_mmu_page_hash_collisions = collisions;
         return sp;
 }

@@ -2089,7 +2090,8 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
         return sp;
 }

-static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
+static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
+                                                      struct kvm_vcpu *vcpu,
                                                       struct shadow_page_caches *caches,
                                                       gfn_t gfn,
                                                       union kvm_mmu_page_role role)
@@ -2098,12 +2100,12 @@ static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
         struct kvm_mmu_page *sp;
         bool created = false;

-        sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
+        sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];

-        sp = kvm_mmu_find_shadow_page(vcpu, gfn, sp_list, role);
+        sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
         if (!sp) {
                 created = true;
-                sp = kvm_mmu_alloc_shadow_page(vcpu->kvm, caches, gfn, sp_list, role);
+                sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
         }

         trace_kvm_mmu_get_page(sp, created);
@@ -2120,7 +2122,7 @@ static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
                 .gfn_array_cache = &vcpu->arch.mmu_gfn_array_cache,
         };

-        return __kvm_mmu_get_shadow_page(vcpu, &caches, gfn, role);
+        return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
 }

 static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct, unsigned int access)
--
2.31.1
