From: Ben Gardon <bgardon@google.com>
Date: Wed, 10 Jun 2020
Subject: Re: [PATCH 12/21] KVM: x86/mmu: Skip filling the gfn cache for guaranteed direct MMU topups

On Fri, Jun 5, 2020 at 2:39 PM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> Don't bother filling the gfn array cache when the caller is a fully
> direct MMU, i.e. won't need a gfn array for shadow pages.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
> ---
>  arch/x86/kvm/mmu/mmu.c         | 18 ++++++++++--------
>  arch/x86/kvm/mmu/paging_tmpl.h |  4 ++--
>  2 files changed, 12 insertions(+), 10 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index a8f8eebf67df..8d66cf558f1b 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1101,7 +1101,7 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
>  	}
>  }
>
> -static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
> +static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
>  {
>  	int r;
>
> @@ -1114,10 +1114,12 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
>  				   PT64_ROOT_MAX_LEVEL);
>  	if (r)
>  		return r;
> -	r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
> -				   PT64_ROOT_MAX_LEVEL);
> -	if (r)
> -		return r;
> +	if (maybe_indirect) {
> +		r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
> +					   PT64_ROOT_MAX_LEVEL);
> +		if (r)
> +			return r;
> +	}
>  	return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
>  				      PT64_ROOT_MAX_LEVEL);
>  }
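
The hunk above is the heart of the patch. For readers who don't have the
cache setup paged in, here is a minimal, self-contained sketch of the same
gating pattern; obj_cache, cache_topup() and topup_caches() are hypothetical
stand-ins, with plain calloc() in place of the kernel's allocators:

#include <stdbool.h>
#include <stdlib.h>

#define CACHE_MIN 5			/* stand-in for PT64_ROOT_MAX_LEVEL */

/* Hypothetical stand-in for struct kvm_mmu_memory_cache. */
struct obj_cache {
	int nobjs;
	void *objs[40];
};

/* Fill the cache up to @min objects; this is the step that allocates. */
static int cache_topup(struct obj_cache *mc, int min)
{
	while (mc->nobjs < min) {
		void *obj = calloc(1, 4096);
		if (!obj)
			return -1;	/* -ENOMEM in the real code */
		mc->objs[mc->nobjs++] = obj;
	}
	return 0;
}

/*
 * Same shape as the patched mmu_topup_memory_caches(): the gfn-array
 * cache is only topped up when a shadow (indirect) MMU might consume it.
 */
static int topup_caches(struct obj_cache *shadow, struct obj_cache *gfn_array,
			struct obj_cache *page_header, bool maybe_indirect)
{
	int r;

	r = cache_topup(shadow, CACHE_MIN);
	if (r)
		return r;
	if (maybe_indirect) {
		r = cache_topup(gfn_array, CACHE_MIN);
		if (r)
			return r;
	}
	return cache_topup(page_header, CACHE_MIN);
}

int main(void)
{
	struct obj_cache shadow = {0}, gfn_array = {0}, page_header = {0};

	/* A fully direct MMU never needs the gfn-array cache. */
	return topup_caches(&shadow, &gfn_array, &page_header, false);
}

The point is simply that a fully direct MMU never consumes from the
gfn-array cache, so filling it would be pure overhead; the other caches are
needed on every path.
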
> @@ -4107,7 +4109,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>  	if (fast_page_fault(vcpu, gpa, error_code))
>  		return RET_PF_RETRY;
>
> -	r = mmu_topup_memory_caches(vcpu);
> +	r = mmu_topup_memory_caches(vcpu, false);
>  	if (r)
>  		return r;
>
> @@ -5147,7 +5149,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
>  {
>  	int r;
>
> -	r = mmu_topup_memory_caches(vcpu);
> +	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
>  	if (r)
>  		goto out;
>  	r = mmu_alloc_roots(vcpu);
> @@ -5341,7 +5343,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>  	 * or not since pte prefetch is skiped if it does not have
>  	 * enough objects in the cache.
>  	 */
> -	mmu_topup_memory_caches(vcpu);
> +	mmu_topup_memory_caches(vcpu, true);
>
>  	spin_lock(&vcpu->kvm->mmu_lock);
>
> diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
> index 3de32122f601..ac39710d0594 100644
> --- a/arch/x86/kvm/mmu/paging_tmpl.h
> +++ b/arch/x86/kvm/mmu/paging_tmpl.h
> @@ -818,7 +818,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
>  		return RET_PF_EMULATE;
>  	}
>
> -	r = mmu_topup_memory_caches(vcpu);
> +	r = mmu_topup_memory_caches(vcpu, true);
>  	if (r)
>  		return r;
>
> @@ -905,7 +905,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
>  	 * No need to check return value here, rmap_can_add() can
>  	 * help us to skip pte prefetch later.
>  	 */
> -	mmu_topup_memory_caches(vcpu);
> +	mmu_topup_memory_caches(vcpu, true);
>
>  	if (!VALID_PAGE(root_hpa)) {
>  		WARN_ON(1);
> --
> 2.26.0
>
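
One pattern worth spelling out, since every call site in this patch follows
it: the caches are topped up before mmu_lock is taken, because topping up
allocates (and may sleep), while consuming from a cache under the lock must
not. That is also why kvm_mmu_pte_write() and FNAME(invlpg) can ignore the
topup return value, as the quoted comments note: the prefetch paths simply
skip their optional work when the cache runs short. A rough sketch of that
"fill outside the lock, pop inside" discipline, with hypothetical names and
a pthread mutex standing in for mmu_lock:

#include <pthread.h>
#include <stdlib.h>

struct obj_cache {
	int nobjs;
	void *objs[40];
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* mmu_lock stand-in */

/* May allocate (in the kernel, may sleep): call only with the lock NOT held. */
static int cache_topup(struct obj_cache *mc, int min)
{
	while (mc->nobjs < min) {
		void *obj = calloc(1, 4096);
		if (!obj)
			return -1;
		mc->objs[mc->nobjs++] = obj;
	}
	return 0;
}

/* Never allocates: safe to call with the lock held. */
static void *cache_pop(struct obj_cache *mc)
{
	return mc->nobjs ? mc->objs[--mc->nobjs] : NULL;
}

static void write_path(struct obj_cache *mc)
{
	/* Best effort; return value deliberately ignored, as in the patch. */
	cache_topup(mc, 5);

	pthread_mutex_lock(&lock);
	void *obj = cache_pop(mc);
	if (obj) {
		/* ...optional work that needs a preallocated object... */
		free(obj);
	}
	/* else: cache ran short, skip the work, exactly like pte prefetch */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct obj_cache mc = { 0 };

	write_path(&mc);
	return 0;
}

The maybe_indirect argument plugs into the same split: the decision about
which caches to fill is made up front, outside the lock, where an allocation
failure can still be handled gracefully.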
