From: Quentin Perret <qperret@google.com>
Date: Wed, 10 Mar 2021
Subject: [PATCH v4 27/34] KVM: arm64: Always zero invalid PTEs
kvm_set_invalid_pte() currently only clears bit 0 from a PTE because
stage2_map_walk_table_post() needs to be able to follow the anchor. In
preparation for re-using bits 63-01 of invalid PTEs, make sure to zero
them entirely, caching the anchor's child upfront so that it can still
be followed.
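
For illustration only (this sketch is not part of the patch), here is a
minimal, self-contained rendering of the semantics the change relies
on. The types and helpers are simplified stand-ins for those in
arch/arm64/kvm/hyp/pgtable.c: pte_follow() and struct map_data are
hypothetical, and the kernel uses WRITE_ONCE() where plain assignments
appear below.

	#include <stdint.h>

	typedef uint64_t kvm_pte_t;
	#define KVM_PTE_VALID	((kvm_pte_t)1)	/* bit 0 */

	/* Old helper: only bit 0 is cleared, so the table address bits
	 * survive and the anchor's child can still be followed later. */
	static void kvm_set_invalid_pte(kvm_pte_t *ptep)
	{
		*ptep &= ~KVM_PTE_VALID;
	}

	/* New helper: the PTE is zeroed outright, freeing bits 63-01
	 * of invalid PTEs for other uses. */
	static void kvm_clear_pte(kvm_pte_t *ptep)
	{
		*ptep = 0;
	}

	/* Hypothetical stand-in for kvm_pte_follow(), which turns the
	 * PTE's physical address bits into a usable pointer. */
	static kvm_pte_t *pte_follow(kvm_pte_t pte)
	{
		return (kvm_pte_t *)(uintptr_t)(pte & ~KVM_PTE_VALID);
	}

	struct map_data {
		kvm_pte_t *anchor;
		kvm_pte_t *childp;	/* mirrors the field this patch adds */
	};

	static void walk_table_pre(kvm_pte_t *ptep, struct map_data *data)
	{
		/* Cache the child before zeroing makes it unreachable. */
		data->childp = pte_follow(*ptep);
		kvm_clear_pte(ptep);
		data->anchor = ptep;
	}

	static void walk_table_post(kvm_pte_t *ptep, struct map_data *data)
	{
		/* At the anchor, *ptep is now 0 and cannot be followed,
		 * so consume the pointer cached by walk_table_pre(). */
		kvm_pte_t *childp = (data->anchor == ptep) ? data->childp
							   : pte_follow(*ptep);
		(void)childp;	/* the real walker calls mm_ops->put_page(childp) */
	}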

Suggested-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
---
 arch/arm64/kvm/hyp/pgtable.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)

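Before the diff, a schematic of the break-before-make sequence that
several hunks below touch, to show why zeroing the old entry (rather
than merely clearing its valid bit) is safe there: any entry with bit 0
clear is invalid, so the CPU treats a zeroed PTE and a bit-0-cleared
PTE identically during the "break" window. This is a simplified
sketch, not kernel code; tlb_flush_ipa() stands in for
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, ...).

	typedef unsigned long long kvm_pte_t;

	static void kvm_clear_pte(kvm_pte_t *ptep)
	{
		*ptep = 0;	/* zeroing also clears bit 0, the valid bit */
	}

	static void tlb_flush_ipa(void)
	{
		/* stand-in for kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, ...) */
	}

	static void break_before_make(kvm_pte_t *ptep, kvm_pte_t new)
	{
		kvm_clear_pte(ptep);	/* break: entry becomes invalid */
		tlb_flush_ipa();	/* flush stale TLB translations */
		*ptep = new;		/* make: install the replacement */
	}
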
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index bdd6e3d4eeb6..f37b4179b880 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -156,10 +156,9 @@ static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_op
 	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
 }
 
-static void kvm_set_invalid_pte(kvm_pte_t *ptep)
+static void kvm_clear_pte(kvm_pte_t *ptep)
 {
-	kvm_pte_t pte = *ptep;
-	WRITE_ONCE(*ptep, pte & ~KVM_PTE_VALID);
+	WRITE_ONCE(*ptep, 0);
 }
 
 static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
@@ -443,6 +442,7 @@ struct stage2_map_data {
 	kvm_pte_t			attr;
 
 	kvm_pte_t			*anchor;
+	kvm_pte_t			*childp;
 
 	struct kvm_s2_mmu		*mmu;
 	void				*memcache;
@@ -532,7 +532,7 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
 		 * There's an existing different valid leaf entry, so perform
 		 * break-before-make.
 		 */
-		kvm_set_invalid_pte(ptep);
+		kvm_clear_pte(ptep);
 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
 		mm_ops->put_page(ptep);
 	}
@@ -553,7 +553,8 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
 	if (!kvm_block_mapping_supported(addr, end, data->phys, level))
 		return 0;
 
-	kvm_set_invalid_pte(ptep);
+	data->childp = kvm_pte_follow(*ptep, data->mm_ops);
+	kvm_clear_pte(ptep);
 
 	/*
 	 * Invalidate the whole stage-2, as we may have numerous leaf
@@ -599,7 +600,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	 * will be mapped lazily.
 	 */
 	if (kvm_pte_valid(pte)) {
-		kvm_set_invalid_pte(ptep);
+		kvm_clear_pte(ptep);
 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
 		mm_ops->put_page(ptep);
 	}
@@ -615,19 +616,24 @@ static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level,
 				      struct stage2_map_data *data)
 {
 	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+	kvm_pte_t *childp;
 	int ret = 0;
 
 	if (!data->anchor)
 		return 0;
 
-	mm_ops->put_page(kvm_pte_follow(*ptep, mm_ops));
-	mm_ops->put_page(ptep);
-
 	if (data->anchor == ptep) {
+		childp = data->childp;
 		data->anchor = NULL;
+		data->childp = NULL;
 		ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
+	} else {
+		childp = kvm_pte_follow(*ptep, mm_ops);
 	}
 
+	mm_ops->put_page(childp);
+	mm_ops->put_page(ptep);
+
 	return ret;
 }
 
@@ -736,7 +742,7 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	 * block entry and rely on the remaining portions being faulted
 	 * back lazily.
 	 */
-	kvm_set_invalid_pte(ptep);
+	kvm_clear_pte(ptep);
 	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
 	mm_ops->put_page(ptep);
 
--
2.30.1.766.gb4fecdf3b7-goog