Subject: Re: [RFC PATCH v5 046/104] KVM: x86/tdp_mmu: refactor kvm_tdp_mmu_map()
From: Paolo Bonzini <pbonzini@redhat.com>
Date: Tue, 5 Apr 2022
    On 3/4/22 20:49, isaku.yamahata@intel.com wrote:
    > From: Isaku Yamahata <isaku.yamahata@intel.com>
    >
    > Factor out the non-leaf SPTE population logic from kvm_tdp_mmu_map(). The
    > MapGPA hypercall needs to populate non-leaf SPTEs to record which GPA,
    > private or shared, is allowed in the leaf EPT entry.
    >
    > Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>

    Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>

    and feel free to rebase/resubmit this one, with subject "KVM:
    x86/tdp_mmu: extract tdp_mmu_populate_nonleaf()".

    Paolo

    > ---
    > arch/x86/kvm/mmu/tdp_mmu.c | 48 ++++++++++++++++++++++++--------------
    > 1 file changed, 30 insertions(+), 18 deletions(-)
    >
    > diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
    > index b6ec2f112c26..8db262440d5c 100644
    > --- a/arch/x86/kvm/mmu/tdp_mmu.c
    > +++ b/arch/x86/kvm/mmu/tdp_mmu.c
    > @@ -955,6 +955,31 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
    >          return ret;
    >  }
    > 
    > +static bool tdp_mmu_populate_nonleaf(
    > +        struct kvm_vcpu *vcpu, struct tdp_iter *iter, bool account_nx)
    > +{
    > +        struct kvm_mmu_page *sp;
    > +        u64 *child_pt;
    > +        u64 new_spte;
    > +
    > +        WARN_ON(is_shadow_present_pte(iter->old_spte));
    > +        WARN_ON(is_removed_spte(iter->old_spte));
    > +
    > +        sp = alloc_tdp_mmu_page(vcpu, iter->gfn, iter->level - 1);
    > +        child_pt = sp->spt;
    > +
    > +        new_spte = make_nonleaf_spte(child_pt, !shadow_accessed_mask);
    > +
    > +        if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) {
    > +                tdp_mmu_free_sp(sp);
    > +                return false;
    > +        }
    > +
    > +        tdp_mmu_link_page(vcpu->kvm, sp, account_nx);
    > +        trace_kvm_mmu_get_page(sp, true);
    > +        return true;
    > +}
    > +
    >  /*
    >   * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
    >   * page tables and SPTEs to translate the faulting guest physical address.
    > @@ -963,9 +988,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
    >  {
    >          struct kvm_mmu *mmu = vcpu->arch.mmu;
    >          struct tdp_iter iter;
    > -        struct kvm_mmu_page *sp;
    > -        u64 *child_pt;
    > -        u64 new_spte;
    >          int ret;
    > 
    >          kvm_mmu_hugepage_adjust(vcpu, fault);
    > @@ -1000,6 +1022,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
    >                  }
    > 
    >                  if (!is_shadow_present_pte(iter.old_spte)) {
    > +                        bool account_nx;
    > +
    >                          /*
    >                           * If SPTE has been frozen by another thread, just
    >                           * give up and retry, avoiding unnecessary page table
    > @@ -1008,22 +1032,10 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
    >                          if (is_removed_spte(iter.old_spte))
    >                                  break;
    > 
    > -                        sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
    > -                        child_pt = sp->spt;
    > -
    > -                        new_spte = make_nonleaf_spte(child_pt,
    > -                                                     !shadow_accessed_mask);
    > -
    > -                        if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, new_spte)) {
    > -                                tdp_mmu_link_page(vcpu->kvm, sp,
    > -                                                  fault->huge_page_disallowed &&
    > -                                                  fault->req_level >= iter.level);
    > -
    > -                                trace_kvm_mmu_get_page(sp, true);
    > -                        } else {
    > -                                tdp_mmu_free_sp(sp);
    > +                        account_nx = fault->huge_page_disallowed &&
    > +                                     fault->req_level >= iter.level;
    > +                        if (!tdp_mmu_populate_nonleaf(vcpu, &iter, account_nx))
    >                                  break;
    > -                        }
    >                  }
    >          }
    > 
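    As an aside, for the MapGPA use mentioned in the commit message: a minimal
    sketch of how the extracted helper might be reused to populate the non-leaf
    levels of a GPA range. This is illustrative only and not part of this
    series; kvm_tdp_mmu_map_gpa() is a hypothetical name, and the loop
    structure is an assumption:

    /* Hypothetical sketch, not part of this series. */
    static int kvm_tdp_mmu_map_gpa(struct kvm_vcpu *vcpu, gfn_t start, gfn_t end)
    {
            struct kvm_mmu *mmu = vcpu->arch.mmu;
            struct tdp_iter iter;
            int ret = 0;

            rcu_read_lock();
            tdp_mmu_for_each_pte(iter, mmu, start, end) {
                    /* Leaf level: recording private/shared would happen here. */
                    if (iter.level == PG_LEVEL_4K)
                            continue;

                    /* Non-leaf SPTE already populated, keep walking down. */
                    if (is_shadow_present_pte(iter.old_spte))
                            continue;

                    /* Frozen by another thread: give up, let the caller retry. */
                    if (is_removed_spte(iter.old_spte)) {
                            ret = -EAGAIN;
                            break;
                    }

                    /* No NX huge page accounting is needed on this path. */
                    if (!tdp_mmu_populate_nonleaf(vcpu, &iter, false)) {
                            ret = -EAGAIN;
                            break;
                    }
            }
            rcu_read_unlock();

            return ret;
    }

    The error handling mirrors kvm_tdp_mmu_map(): losing the cmpxchg race in
    tdp_mmu_populate_nonleaf() just means another thread is populating the
    same subtree, so the caller retries.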
