Subject: [PATCH 3.12 21/63] arm64: KVM: flush VM pages before letting the guest enable caches
    From: Marc Zyngier <marc.zyngier@arm.com>

    3.12-stable review patch. If anyone has any objections, please let me know.

    ===============

    commit 9d218a1fcf4c6b759d442ef702842fae92e1ea61 upstream.

    When the guest runs with caches disabled (like in an early boot
    sequence, for example), all the writes are directly going to RAM,
    bypassing the caches altogether.

    Once the MMU and caches are enabled, whatever sits in the cache
    becomes suddenly visible, which isn't what the guest expects.

    A way to avoid this potential disaster is to invalidate the cache
    when the MMU is being turned on. For this, we hook into the SCTLR_EL1
    trapping code, and scan the stage-2 page tables, invalidating the
    pages/sections that have already been mapped in.

    Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
    Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
    Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
    Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
    Signed-off-by: Jiri Slaby <jslaby@suse.cz>
    ---
     arch/arm/include/asm/kvm_mmu.h   |  2 +
     arch/arm/kvm/mmu.c               | 83 ++++++++++++++++++++++++++++++++++++++++
     arch/arm64/include/asm/kvm_mmu.h |  2 +
     arch/arm64/kvm/sys_regs.c        |  5 ++-
     4 files changed, 91 insertions(+), 1 deletion(-)

    diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
    index 5c946dfdcb94..0de650faf1af 100644
    --- a/arch/arm/include/asm/kvm_mmu.h
    +++ b/arch/arm/include/asm/kvm_mmu.h
    @@ -143,6 +143,8 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
     
     #define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
     
    +void stage2_flush_vm(struct kvm *kvm);
    +
     #endif /* !__ASSEMBLY__ */
     
     #endif /* __ARM_KVM_MMU_H__ */
    diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
    index e747dc10c033..61c5a92f6d9d 100644
    --- a/arch/arm/kvm/mmu.c
    +++ b/arch/arm/kvm/mmu.c
    @@ -162,6 +162,89 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
     	}
     }
     
    +static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
    +			      phys_addr_t addr, phys_addr_t end)
    +{
    +	pte_t *pte;
    +
    +	pte = pte_offset_kernel(pmd, addr);
    +	do {
    +		if (!pte_none(*pte)) {
    +			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
    +			kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
    +		}
    +	} while (pte++, addr += PAGE_SIZE, addr != end);
    +}
    +
    +static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
    +			      phys_addr_t addr, phys_addr_t end)
    +{
    +	pmd_t *pmd;
    +	phys_addr_t next;
    +
    +	pmd = pmd_offset(pud, addr);
    +	do {
    +		next = kvm_pmd_addr_end(addr, end);
    +		if (!pmd_none(*pmd)) {
    +			stage2_flush_ptes(kvm, pmd, addr, next);
    +		}
    +	} while (pmd++, addr = next, addr != end);
    +}
    +
    +static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
    +			      phys_addr_t addr, phys_addr_t end)
    +{
    +	pud_t *pud;
    +	phys_addr_t next;
    +
    +	pud = pud_offset(pgd, addr);
    +	do {
    +		next = kvm_pud_addr_end(addr, end);
    +		if (!pud_none(*pud)) {
    +			stage2_flush_pmds(kvm, pud, addr, next);
    +		}
    +	} while (pud++, addr = next, addr != end);
    +}
    +
    +static void stage2_flush_memslot(struct kvm *kvm,
    +				 struct kvm_memory_slot *memslot)
    +{
    +	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
    +	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
    +	phys_addr_t next;
    +	pgd_t *pgd;
    +
    +	pgd = kvm->arch.pgd + pgd_index(addr);
    +	do {
    +		next = kvm_pgd_addr_end(addr, end);
    +		stage2_flush_puds(kvm, pgd, addr, next);
    +	} while (pgd++, addr = next, addr != end);
    +}
    +
    +/**
    + * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
    + * @kvm: The struct kvm pointer
    + *
    + * Go through the stage 2 page tables and invalidate any cache lines
    + * backing memory already mapped to the VM.
    + */
    +void stage2_flush_vm(struct kvm *kvm)
    +{
    +	struct kvm_memslots *slots;
    +	struct kvm_memory_slot *memslot;
    +	int idx;
    +
    +	idx = srcu_read_lock(&kvm->srcu);
    +	spin_lock(&kvm->mmu_lock);
    +
    +	slots = kvm_memslots(kvm);
    +	kvm_for_each_memslot(memslot, slots)
    +		stage2_flush_memslot(kvm, memslot);
    +
    +	spin_unlock(&kvm->mmu_lock);
    +	srcu_read_unlock(&kvm->srcu, idx);
    +}
    +
     /**
      * free_boot_hyp_pgd - free HYP boot page tables
      *
    diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
    index 802bd971f1de..3b038b39ba9b 100644
    --- a/arch/arm64/include/asm/kvm_mmu.h
    +++ b/arch/arm64/include/asm/kvm_mmu.h
    @@ -142,5 +142,7 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
     	}
     }
     
    +void stage2_flush_vm(struct kvm *kvm);
    +
     #endif /* __ASSEMBLY__ */
     #endif /* __ARM64_KVM_MMU_H__ */
    diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
    index 2097e5ecba42..03244582bc55 100644
    --- a/arch/arm64/kvm/sys_regs.c
    +++ b/arch/arm64/kvm/sys_regs.c
    @@ -27,6 +27,7 @@
     #include <asm/kvm_host.h>
     #include <asm/kvm_emulate.h>
     #include <asm/kvm_coproc.h>
    +#include <asm/kvm_mmu.h>
     #include <asm/cacheflush.h>
     #include <asm/cputype.h>
     #include <trace/events/kvm.h>
    @@ -154,8 +155,10 @@ static bool access_sctlr(struct kvm_vcpu *vcpu,
     {
     	access_vm_reg(vcpu, p, r);
     
    -	if (vcpu_has_cache_enabled(vcpu))	/* MMU+Caches enabled? */
    +	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
     		vcpu->arch.hcr_el2 &= ~HCR_TVM;
    +		stage2_flush_vm(vcpu->kvm);
    +	}
     
     	return true;
     }
    --
    2.3.5
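
    [Editorial note, not part of the patch] The core idea above is simply: walk every
    page already mapped at stage 2 and clean/invalidate the cache lines of the host
    alias backing it, so no stale lines become visible once the guest turns its caches
    on. The minimal userspace sketch below illustrates that walk in isolation;
    guest_frame_to_host() and flush_dcache_to_poc() are hypothetical stand-ins for
    gfn_to_hva() and kvm_flush_dcache_to_poc(), and a flat page loop stands in for the
    pgd/pud/pmd/pte traversal.

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	/* Hypothetical stand-in for gfn_to_hva(): guest frame -> host address. */
	static void *guest_frame_to_host(uint64_t gfn)
	{
		static uint8_t backing[16 * PAGE_SIZE];

		return &backing[(gfn % 16) * PAGE_SIZE];
	}

	/* Hypothetical stand-in for kvm_flush_dcache_to_poc(). */
	static void flush_dcache_to_poc(void *va, size_t len)
	{
		printf("clean+invalidate %p (%zu bytes) to PoC\n", va, len);
	}

	/* Visit a guest-physical range page by page and flush each mapped page. */
	static void flush_guest_range(uint64_t gpa, uint64_t end)
	{
		while (gpa != end) {
			flush_dcache_to_poc(guest_frame_to_host(gpa >> PAGE_SHIFT),
					    PAGE_SIZE);
			gpa += PAGE_SIZE;
		}
	}

	int main(void)
	{
		/* e.g. a four-page "memslot" starting at guest physical 0x80000000 */
		flush_guest_range(0x80000000UL, 0x80000000UL + 4 * PAGE_SIZE);

		return 0;
	}

    The real implementation additionally holds kvm->mmu_lock under an SRCU read-side
    section and only flushes pages that are actually present in the stage-2 tables,
    which is what the pgd/pud/pmd/pte helpers in the patch do.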

