    From: Raghavendra Rao Ananta <rananta@google.com>
    Date: Wed, 1 Sep 2021
    Subject: [PATCH v3 01/12] KVM: arm64: selftests: Add MMIO readl/writel support
    Define the readl() and writel() functions for guests to perform
    4-byte accesses to the MMIO region.

    The routines, and the helpers they depend on, are inspired by the kernel's
    arch/arm64/include/asm/io.h and arch/arm64/include/asm/barrier.h.
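
    For illustration only (not part of the patch), here is a minimal sketch
    of how guest code in a selftest might use these accessors. The
    GUEST_MMIO_BASE address and guest_code() function are hypothetical
    names, and the values observed depend entirely on how the host handles
    the resulting MMIO exits:

    /* Hypothetical MMIO address, used only for this sketch. */
    #define GUEST_MMIO_BASE	((volatile void *)0x10000000UL)

    static void guest_code(void)
    {
    	u32 val;

    	/* Ordered 4-byte read: relaxed load followed by __iormb(). */
    	val = readl(GUEST_MMIO_BASE);

    	/* Ordered 4-byte write: __iowmb() followed by a relaxed store. */
    	writel(val + 1, GUEST_MMIO_BASE);
    }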

    Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
    ---
    .../selftests/kvm/include/aarch64/processor.h | 45 ++++++++++++++++++-
    1 file changed, 44 insertions(+), 1 deletion(-)

    diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
    index c0273aefa63d..3cbaf5c1e26b 100644
    --- a/tools/testing/selftests/kvm/include/aarch64/processor.h
    +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
    @@ -130,6 +130,49 @@ void vm_install_sync_handler(struct kvm_vm *vm,
    	val; \
    })

    -#define isb()	asm volatile("isb" : : : "memory")
    +#define isb()		asm volatile("isb" : : : "memory")
    +#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
    +#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
    +
    +#define dma_wmb()	dmb(oshst)
    +#define __iowmb()	dma_wmb()
    +
    +#define dma_rmb()	dmb(oshld)
    +
    +#define __iormb(v) \
    +({ \
    +	unsigned long tmp; \
    + \
    +	dma_rmb(); \
    + \
    +	/* \
    +	 * Courtesy of arch/arm64/include/asm/io.h: \
    +	 * Create a dummy control dependency from the IO read to any \
    +	 * later instructions. This ensures that a subsequent call \
    +	 * to udelay() will be ordered due to the ISB in __delay(). \
    +	 */ \
    +	asm volatile("eor %0, %1, %1\n" \
    +		     "cbnz %0, ." \
    +		     : "=r" (tmp) : "r" ((unsigned long)(v)) \
    +		     : "memory"); \
    +})
    +
    +static __always_inline void __raw_writel(u32 val, volatile void *addr)
    +{
    +	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
    +}
    +
    +static __always_inline u32 __raw_readl(const volatile void *addr)
    +{
    +	u32 val;
    +	asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
    +	return val;
    +}
    +
    +#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
    +#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
    +
    +#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c));})
    +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })

    #endif /* SELFTEST_KVM_PROCESSOR_H */
    --
    2.33.0.153.gba50c8fa24-goog