Subject: Re: [PATCH 18/21] KVM: arm64: Use common KVM implementation of MMU memory caches
On 2020-06-05 22:38, Sean Christopherson wrote:
> Move to the common MMU memory cache implementation now that the common
> code and arm64's existing code are semantically compatible.
>
> No functional change intended.
>
> Suggested-by: Christoffer Dall <christoffer.dall@arm.com>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
> arch/arm64/include/asm/kvm_host.h  | 12 -------
> arch/arm64/include/asm/kvm_types.h |  2 ++
> arch/arm64/kvm/mmu.c               | 51 ++++++------------------------
> 3 files changed, 12 insertions(+), 53 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 2385dede96e0..d221b6b129fd 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -97,18 +97,6 @@ struct kvm_arch {
> bool return_nisv_io_abort_to_user;
> };
>
> -#define KVM_NR_MEM_OBJS 40
> -
> -/*
> - * We don't want allocation failures within the mmu code, so we preallocate
> - * enough memory for a single page fault in a cache.
> - */
> -struct kvm_mmu_memory_cache {
> - int nobjs;
> - gfp_t gfp_zero;
> - void *objects[KVM_NR_MEM_OBJS];
> -};
> -
> struct kvm_vcpu_fault_info {
> u32 esr_el2; /* Hyp Syndrom Register */
> u64 far_el2; /* Hyp Fault Address Register */
> diff --git a/arch/arm64/include/asm/kvm_types.h b/arch/arm64/include/asm/kvm_types.h
> index d0987007d581..9a126b9e2d7c 100644
> --- a/arch/arm64/include/asm/kvm_types.h
> +++ b/arch/arm64/include/asm/kvm_types.h
> @@ -2,5 +2,7 @@
> #ifndef _ASM_ARM64_KVM_TYPES_H
> #define _ASM_ARM64_KVM_TYPES_H
>
> +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
> +
> #endif /* _ASM_ARM64_KVM_TYPES_H */
>
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 688213ef34f0..976405e2fbb2 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -124,37 +124,6 @@ static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
> put_page(virt_to_page(pudp));
> }
>
> -static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
> -{
> - void *page;
> -
> - if (cache->nobjs >= min)
> - return 0;
> - while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
> - page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT |
> - cache->gfp_zero);
> - if (!page)
> - return -ENOMEM;
> - cache->objects[cache->nobjs++] = page;
> - }
> - return 0;
> -}
> -
> -static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
> -{
> - while (mc->nobjs)
> - free_page((unsigned long)mc->objects[--mc->nobjs]);
> -}
> -
> -static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
> -{
> - void *p;
> -
> - BUG_ON(!mc || !mc->nobjs);
> - p = mc->objects[--mc->nobjs];
> - return p;
> -}
> -
> static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
> {
> pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
> @@ -1024,7 +993,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
> if (stage2_pgd_none(kvm, *pgd)) {
> if (!cache)
> return NULL;
> - pud = mmu_memory_cache_alloc(cache);
> + pud = kvm_mmu_memory_cache_alloc(cache);
> stage2_pgd_populate(kvm, pgd, pud);
> get_page(virt_to_page(pgd));
> }
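
For anyone reading along: the removed helpers aren't lost, they are replaced
by the generic cache API that the earlier patches in this series move into
common code. A rough sketch of what the arm64 fault path ends up relying on;
only kvm_mmu_memory_cache_alloc() is visible in the hunk above, so the
topup/free helper names and the mmu_page_cache field are written from memory
and may not match the series exactly:

	/*
	 * Sketch only, not the series code verbatim: top up outside the
	 * mmu_lock, then carve page table pages out of the cache while
	 * holding it.
	 */
	static int stage2_map_sketch(struct kvm_vcpu *vcpu, phys_addr_t addr)
	{
		struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_page_cache;
		int ret;

		/* May sleep; fills the cache up to the worst-case walk depth. */
		ret = kvm_mmu_topup_memory_cache(cache,
						 kvm_mmu_cache_min_pages(vcpu->kvm));
		if (ret)
			return ret;

		/*
		 * Missing table levels are then taken from the cache, as in
		 * stage2_get_pud() above:
		 *
		 *	pud = kvm_mmu_memory_cache_alloc(cache);
		 *
		 * and kvm_mmu_free_memory_cache() drains the leftovers on
		 * vcpu destroy.
		 */
		return 0;
	}

The nobjs/objects[] bookkeeping then lives in one place instead of being
open-coded per architecture, which is the whole point of the patch.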

Quick note: this patch (as it is) breaks on arm64 due to Mike Rapoport's
P4D rework. I've fixed it locally in order to test the series.

Thanks,

M.
--
Jazz is not dead. It just smells funny...
