Subject: [PATCH v12 052/106] KVM: x86/mmu: Introduce kvm_mmu_map_tdp_page() for use by TDX
Date: 27 Feb 2023
From: Sean Christopherson <sean.j.christopherson@intel.com>

Introduce a helper to directly (pun intended) fault-in a TDP page
without having to go through the full page fault path. This allows
TDX to get the resulting pfn and also allows the RET_PF_* enums to
stay in mmu.c where they belong.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
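Note (illustration only, not part of the patch): a minimal sketch of how a
TDX-side caller might use the new helper to pre-fault a guest page and get
the resulting pfn back. The caller name tdx_map_private_gpa() and the choice
of PFERR_* bits are assumptions made for the example; only the helper's
signature and the error-pfn handling come from this patch and existing KVM
code (is_error_noslot_pfn(), KVM_PFN_ERR_FAULT).

/*
 * Hypothetical caller, for illustration: pre-fault a private GPA as a
 * writable 4K mapping and return the pfn, or an error pfn on failure.
 */
static kvm_pfn_t tdx_map_private_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        kvm_pfn_t pfn;

        /* The PFERR_* bits used here are an assumption for the example. */
        pfn = kvm_mmu_map_tdp_page(vcpu, gpa,
                                   PFERR_WRITE_MASK | PFERR_USER_MASK,
                                   PG_LEVEL_4K);

        /* The helper returns an error pfn iff the fault could not be fixed. */
        if (is_error_noslot_pfn(pfn))
                return KVM_PFN_ERR_FAULT;

        return pfn;
}
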
 arch/x86/kvm/mmu.h     |  3 +++
 arch/x86/kvm/mmu/mmu.c | 49 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 52 insertions(+)

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 0234201d5e63..6944f78c4401 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -154,6 +154,9 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
                                           vcpu->arch.mmu->root_role.level);
 }
 
+kvm_pfn_t kvm_mmu_map_tdp_page(struct kvm_vcpu *vcpu, gpa_t gpa,
+                               u32 error_code, int max_level);
+
 /*
  * Check if a given access (described through the I/D, W/R and U/S bits of a
  * page fault error code pfec) causes a permission fault with the given PTE
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0c852517c0e7..6fef584c92c3 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4570,6 +4570,55 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
         return direct_page_fault(vcpu, fault);
 }
 
+kvm_pfn_t kvm_mmu_map_tdp_page(struct kvm_vcpu *vcpu, gpa_t gpa,
+                               u32 error_code, int max_level)
+{
+        int r;
+        struct kvm_page_fault fault = (struct kvm_page_fault) {
+                .addr = gpa,
+                .error_code = error_code,
+                .exec = error_code & PFERR_FETCH_MASK,
+                .write = error_code & PFERR_WRITE_MASK,
+                .present = error_code & PFERR_PRESENT_MASK,
+                .rsvd = error_code & PFERR_RSVD_MASK,
+                .user = error_code & PFERR_USER_MASK,
+                .prefetch = false,
+                .is_tdp = true,
+                .nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(vcpu->kvm),
+                .is_private = kvm_is_private_gpa(vcpu->kvm, gpa),
+        };
+
+        WARN_ON_ONCE(!vcpu->arch.mmu->root_role.direct);
+        fault.gfn = gpa_to_gfn(fault.addr) & ~kvm_gfn_shared_mask(vcpu->kvm);
+        fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
+
+        if (mmu_topup_memory_caches(vcpu, false))
+                return KVM_PFN_ERR_FAULT;
+
+        /*
+         * Loop on the page fault path to handle the case where an mmu_notifier
+         * invalidation triggers RET_PF_RETRY. In the normal page fault path,
+         * KVM needs to resume the guest in case the invalidation changed any
+         * of the page fault properties, i.e. the gpa or error code. For this
+         * path, the gpa and error code are fixed by the caller, and the caller
+         * expects failure if and only if the page fault can't be fixed.
+         */
+        do {
+                fault.max_level = max_level;
+                fault.req_level = PG_LEVEL_4K;
+                fault.goal_level = PG_LEVEL_4K;
+
+#ifdef CONFIG_X86_64
+                if (tdp_mmu_enabled)
+                        r = kvm_tdp_mmu_page_fault(vcpu, &fault);
+                else
+#endif
+                        r = direct_page_fault(vcpu, &fault);
+        } while (r == RET_PF_RETRY && !is_error_noslot_pfn(fault.pfn));
+        return fault.pfn;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_map_tdp_page);
+
 static void nonpaging_init_context(struct kvm_mmu *context)
 {
         context->page_fault = nonpaging_page_fault;
-- 
2.25.1