Subject: Re: [PATCH v2 14/18] KVM: x86/mmu: avoid indirect call for get_cr3
From: Maxim Levitsky <mlevitsk@redhat.com>
Date: Thu, 24 Feb 2022
On Thu, 2022-02-17 at 16:03 -0500, Paolo Bonzini wrote:
> Most of the time, calls to get_guest_pgd result in calling
> kvm_read_cr3 (the only exception is nested TDP). Hardcode
> the default instead of using the get_cr3 function, avoiding
> a retpoline if retpolines are enabled.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/mmu.h             | 13 +++++++++++++
>  arch/x86/kvm/mmu/mmu.c         | 15 +++++----------
>  arch/x86/kvm/mmu/paging_tmpl.h |  2 +-
>  arch/x86/kvm/x86.c             |  2 +-
>  4 files changed, 20 insertions(+), 12 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index 1d0c1904d69a..1808d6814ddb 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -116,6 +116,19 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
>                                            vcpu->arch.mmu->shadow_root_level);
>  }
>
> +static inline gpa_t __kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
> +{
> +        if (!mmu->get_guest_pgd)
> +                return kvm_read_cr3(vcpu);
> +        else
> +                return mmu->get_guest_pgd(vcpu);
> +}
> +
> +static inline gpa_t kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu)
> +{
> +        return __kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
> +}
> +
>  struct kvm_page_fault {
>          /* arguments to kvm_mmu_do_page_fault. */
>          const gpa_t addr;
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 4e8e3e9530ca..d422d0d2adf8 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3451,7 +3451,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
>          unsigned i;
>          int r;
>
> -        root_pgd = mmu->get_guest_pgd(vcpu);
> +        root_pgd = kvm_mmu_get_guest_pgd(vcpu);
>          root_gfn = root_pgd >> PAGE_SHIFT;
>
>          if (mmu_check_root(vcpu, root_gfn))
> @@ -3881,7 +3881,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
>          arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
>          arch.gfn = gfn;
>          arch.direct_map = vcpu->arch.mmu->direct_map;
> -        arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
> +        arch.cr3 = kvm_mmu_get_guest_pgd(vcpu);
>
>          return kvm_setup_async_pf(vcpu, cr2_or_gpa,
>                                    kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
> @@ -4230,11 +4230,6 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
>  }
>  EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
>
> -static unsigned long get_cr3(struct kvm_vcpu *vcpu)
> -{
> -        return kvm_read_cr3(vcpu);
> -}
> -
>  static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
>                             unsigned int access)
>  {
> @@ -4789,7 +4784,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
>          context->invlpg = NULL;
>          context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
>          context->direct_map = true;
> -        context->get_guest_pgd = get_cr3;
> +        context->get_guest_pgd = NULL; /* use kvm_read_cr3 */
>          context->get_pdptr = kvm_pdptr_read;
>          context->inject_page_fault = kvm_inject_page_fault;
>          context->root_level = role_regs_to_root_level(&regs);
> @@ -4964,7 +4959,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
>
>          kvm_init_shadow_mmu(vcpu, &regs);
>
> -        context->get_guest_pgd = get_cr3;
> +        context->get_guest_pgd = NULL; /* use kvm_read_cr3 */
>          context->get_pdptr = kvm_pdptr_read;
>          context->inject_page_fault = kvm_inject_page_fault;
>  }
> @@ -4996,7 +4991,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
>                  return;
>
>          g_context->mmu_role.as_u64 = new_role.as_u64;
> -        g_context->get_guest_pgd = get_cr3;
> +        g_context->get_guest_pgd = NULL; /* use kvm_read_cr3 */
>          g_context->get_pdptr = kvm_pdptr_read;
>          g_context->inject_page_fault = kvm_inject_page_fault;
>          g_context->root_level = new_role.base.level;
> diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
> index 346f3bad3cb9..1a85aba837b2 100644
> --- a/arch/x86/kvm/mmu/paging_tmpl.h
> +++ b/arch/x86/kvm/mmu/paging_tmpl.h
> @@ -362,7 +362,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
>          trace_kvm_mmu_pagetable_walk(addr, access);
>  retry_walk:
>          walker->level = mmu->root_level;
> -        pte = mmu->get_guest_pgd(vcpu);
> +        pte = __kvm_mmu_get_guest_pgd(vcpu, mmu);
>          have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);
>
>  #if PTTYPE == 64
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index f10878aa5b20..adcee7c305ca 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -12161,7 +12161,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
>                  return;
>
>          if (!vcpu->arch.mmu->direct_map &&
> -            work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
> +            work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu))
>                  return;
>
>          kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
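
To restate the trick for anyone skimming the diff: with retpolines
enabled, every mmu->get_guest_pgd(vcpu) indirect call goes through a
retpoline thunk, while the new helper compiles the common case into a
direct call to kvm_read_cr3() and only takes the indirect call for
nested TDP. A standalone sketch of the pattern (illustrative only, not
code from the patch):

    /* A NULL callback means "use the default": the common path is then
     * a direct call, which needs no retpoline thunk. */
    struct ops {
            unsigned long (*get)(void *ctx);        /* NULL => default */
    };

    static unsigned long default_get(void *ctx)
    {
            return 0;       /* stands in for kvm_read_cr3() */
    }

    static inline unsigned long do_get(struct ops *ops, void *ctx)
    {
            if (!ops->get)
                    return default_get(ctx);        /* direct call */
            return ops->get(ctx);                   /* indirect, rare path */
    }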


Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>

I am not sure this is worth it, though. IMHO it would be better to convert the mmu callbacks
(and the nested ops callbacks, etc.) to static calls; see the sketch below.
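
For reference, a conversion along those lines could look roughly like
this (an untested sketch using the <linux/static_call.h> API; the
kvm_x86_get_guest_pgd name is made up for illustration, and a static
call has a single global target, so per-mmu-context differences would
need extra care):

    #include <linux/static_call.h>

    /* Default target: the common non-nested case. */
    static unsigned long kvm_get_guest_pgd_cr3(struct kvm_vcpu *vcpu)
    {
            return kvm_read_cr3(vcpu);
    }

    DEFINE_STATIC_CALL(kvm_x86_get_guest_pgd, kvm_get_guest_pgd_cr3);

    /* Callers get a patched direct call instead of an indirect one: */
    root_pgd = static_call(kvm_x86_get_guest_pgd)(vcpu);

    /* Nested setup retargets the call instead of writing a function
     * pointer, e.g. to SVM's existing nested_svm_get_tdp_cr3(): */
    static_call_update(kvm_x86_get_guest_pgd, nested_svm_get_tdp_cr3);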

Best regards,
Maxim Levitsky


