Subject: [PATCH 47/54] KVM: x86/mmu: Add helpers to do full reserved SPTE checks w/ generic MMU
From: Sean Christopherson <seanjc@google.com>
Date: 2021-06-22
Extract the reserved SPTE check and print helpers in get_mmio_spte() to
new helpers so that KVM can also WARN on reserved badness when making a
SPTE.
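
As a sketch of the intended follow-up use (hypothetical call site, not part
of this patch), a SPTE-creation path could then do something along these
lines:

        /* Hypothetical caller: WARN if a new SPTE has reserved bits set. */
        WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
                  "reserved bits set on new SPTE, spte = 0x%llx", spte);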

Tag the checking helper with __always_inline to improve the probability
of the compiler generating optimal code for the checking loop, e.g. gcc
appears to avoid using %rbp when the helper is tagged with a vanilla
"inline".

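For reference, vanilla "inline" is only a hint that the compiler is free
to ignore, whereas __always_inline forces inlining (per the definition in
include/linux/compiler_attributes.h):

        #define __always_inline inline __attribute__((__always_inline__))
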
No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/mmu.c  | 23 ++---------------------
 arch/x86/kvm/mmu/spte.h | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+), 21 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 92260cf48d5e..34e7a489e71b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3594,19 +3594,6 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
         return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
 }
 
-static bool
-__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
-{
-        int bit7 = (pte >> 7) & 1;
-
-        return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
-}
-
-static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
-{
-        return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
-}
-
 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
         /*
@@ -3684,13 +3671,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
         rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
 
         for (level = root; level >= leaf; level--)
-                /*
-                 * Use a bitwise-OR instead of a logical-OR to aggregate the
-                 * reserved bit and EPT's invalid memtype/XWR checks to avoid
-                 * adding a Jcc in the loop.
-                 */
-                reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
-                            __is_rsvd_bits_set(rsvd_check, sptes[level], level);
+                reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
 
         if (reserved) {
                 pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
@@ -3698,7 +3679,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
                 for (level = root; level >= leaf; level--)
                         pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
                                sptes[level], level,
-                               rsvd_check->rsvd_bits_mask[(sptes[level] >> 7) & 1][level-1]);
+                               get_rsvd_bits(rsvd_check, sptes[level], level));
         }
 
         return reserved;
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index bca0ba11cccf..47e10dd9352d 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -293,6 +293,38 @@ static inline bool is_dirty_spte(u64 spte)
         return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
 }
 
+static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
+                                int level)
+{
+        int bit7 = (pte >> 7) & 1;
+
+        return rsvd_check->rsvd_bits_mask[bit7][level-1];
+}
+
+static inline bool __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check,
+                                      u64 pte, int level)
+{
+        return pte & get_rsvd_bits(rsvd_check, pte, level);
+}
+
+static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check,
+                                   u64 pte)
+{
+        return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
+}
+
+static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
+                                         u64 spte, int level)
+{
+        /*
+         * Use a bitwise-OR instead of a logical-OR to aggregate the reserved
+         * bits and EPT's invalid memtype/XWR checks to avoid an extra Jcc
+         * (this is used in hot paths).
+         */
+        return __is_bad_mt_xwr(rsvd_check, spte) |
+               __is_rsvd_bits_set(rsvd_check, spte, level);
+}
+
 static inline bool spte_can_locklessly_be_made_writable(u64 spte)
 {
         return (spte & shadow_host_writable_mask) &&
-- 
2.32.0.288.g62a8d224e6-goog