From: Isaku Yamahata <isaku.yamahata@intel.com>
Date: Tue, 7 Nov 2023
Subject: [PATCH v17 042/116] KVM: x86/mmu: Add a new is_private member for union kvm_mmu_page_role

Because TDX support introduces private mappings, add a new member to union
kvm_mmu_page_role, along with accessor functions to check the member.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
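As a quick illustration of the pattern this patch adds, here is a minimal,
self-contained C sketch (not kernel code): the reduced union, the placeholder
level bit-field, and the main() driver are assumptions for the example only,
and is_private_sptep() is omitted because it depends on sptep_to_sp(). The
real definitions are in the hunks below.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the Kconfig symbol; comment this out to try the fallback. */
#define CONFIG_KVM_MMU_PRIVATE 1

/* Reduced stand-in for union kvm_mmu_page_role: with the config enabled,
 * one bit is carved out of the previously anonymous 5-bit padding. */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;	/* placeholder for the existing role bits */
#ifdef CONFIG_KVM_MMU_PRIVATE
		unsigned is_private:1;
		unsigned :4;
#else
		unsigned :5;
#endif
	};
};

static inline bool kvm_mmu_page_role_is_private(union kvm_mmu_page_role role)
{
#ifdef CONFIG_KVM_MMU_PRIVATE
	return !!role.is_private;
#else
	return false;	/* compile-time false when the config is off */
#endif
}

static inline void kvm_mmu_page_role_set_private(union kvm_mmu_page_role *role)
{
#ifdef CONFIG_KVM_MMU_PRIVATE
	role->is_private = 1;
#else
	(void)role;	/* the real helper fires WARN_ON_ONCE(1) here */
#endif
}

/* Mirrors the is_private_sp() helper added to mmu_internal.h. */
struct kvm_mmu_page {
	union kvm_mmu_page_role role;
};

static inline bool is_private_sp(const struct kvm_mmu_page *sp)
{
	return kvm_mmu_page_role_is_private(sp->role);
}

int main(void)
{
	struct kvm_mmu_page sp = { .role = { .word = 0 } };

	kvm_mmu_page_role_set_private(&sp.role);
	printf("is_private_sp: %d\n", is_private_sp(&sp));	/* prints 1 */
	return 0;
}

Gating the bit behind CONFIG_KVM_MMU_PRIVATE leaves the role word untouched
for builds without TDX support, while the accessor pair gives callers one
spelling that compiles either way.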
 arch/x86/include/asm/kvm_host.h | 27 +++++++++++++++++++++++++++
 arch/x86/kvm/mmu/mmu_internal.h |  5 +++++
 arch/x86/kvm/mmu/spte.h         |  6 ++++++
 3 files changed, 38 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9490ba898233..1c6121d06320 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -348,7 +348,12 @@ union kvm_mmu_page_role {
 		unsigned ad_disabled:1;
 		unsigned guest_mode:1;
 		unsigned passthrough:1;
+#ifdef CONFIG_KVM_MMU_PRIVATE
+		unsigned is_private:1;
+		unsigned :4;
+#else
 		unsigned :5;
+#endif
 
 		/*
 		 * This is left at the top of the word so that
@@ -360,6 +365,28 @@ union kvm_mmu_page_role {
 	};
 };
 
+#ifdef CONFIG_KVM_MMU_PRIVATE
+static inline bool kvm_mmu_page_role_is_private(union kvm_mmu_page_role role)
+{
+	return !!role.is_private;
+}
+
+static inline void kvm_mmu_page_role_set_private(union kvm_mmu_page_role *role)
+{
+	role->is_private = 1;
+}
+#else
+static inline bool kvm_mmu_page_role_is_private(union kvm_mmu_page_role role)
+{
+	return false;
+}
+
+static inline void kvm_mmu_page_role_set_private(union kvm_mmu_page_role *role)
+{
+	WARN_ON_ONCE(1);
+}
+#endif
+
 /*
  * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
  * relevant to the current MMU configuration. When loading CR0, CR4, or EFER,
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index e2df38457dae..f4451c67810a 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -144,6 +144,11 @@ static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
 	return kvm_mmu_role_as_id(sp->role);
 }
 
+static inline bool is_private_sp(const struct kvm_mmu_page *sp)
+{
+	return kvm_mmu_page_role_is_private(sp->role);
+}
+
 static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
 {
 	/*
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 1a163aee9ec6..88db32cba0fd 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -264,6 +264,12 @@ static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
 	return spte_to_child_sp(root);
 }
 
+static inline bool is_private_sptep(u64 *sptep)
+{
+	WARN_ON_ONCE(!sptep);
+	return is_private_sp(sptep_to_sp(sptep));
+}
+
 static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
 {
 	return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&
--
2.25.1