From: Guo Ren <guoren@kernel.org>
Subject: Re: csky: smp_mb__after_spinlock
Acked-by: Guo Ren <guoren@kernel.org>

On Thu, Aug 6, 2020 at 3:55 AM <peterz@infradead.org> wrote:
>
> On Wed, Aug 05, 2020 at 12:41:46PM +0200, peterz@infradead.org wrote:
> > Hi,
> >
> > While doing an audit of smp_mb__after_spinlock, I found that csky
> > defines it. Why?
> >
> > CSKY only has smp_mb(); it doesn't override __atomic_acquire_fence or
> > otherwise special-case its atomic*_acquire() primitives. It has an
> > explicit smp_mb() in its arch_spin_lock().
>
> Also, why have two implementations of all the locking?
>
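
For context: smp_mb__after_spinlock() exists so that callers such as the
scheduler can upgrade the ACQUIRE ordering of a just-taken spinlock to a
full barrier; __atomic_acquire_fence is the analogous hook for the
atomic_*_acquire() fallbacks. The generic definition is a no-op unless an
architecture overrides it, so an override is only needed when taking the
lock is weaker than smp_mb(). Since csky's arch_spin_lock() already ends
in smp_mb(), its override is redundant rather than wrong. A minimal
sketch of the pattern (names below are illustrative, not from the csky
code; the fallback shown is roughly the v5.8-era definition):

#include <linux/spinlock.h>

/*
 * Roughly the generic fallback in include/linux/spinlock.h at the time:
 * a no-op unless the architecture provides something stronger.
 *
 *	#ifndef smp_mb__after_spinlock
 *	#define smp_mb__after_spinlock()	do { } while (0)
 *	#endif
 */
static DEFINE_SPINLOCK(demo_lock);
static int demo_flag;

/* Hypothetical caller that needs full (RCsc) ordering around the lock. */
static void demo_take_lock_with_full_barrier(void)
{
	spin_lock(&demo_lock);		/* ACQUIRE ordering only, in general */
	smp_mb__after_spinlock();	/* upgrade to a full barrier */
	WRITE_ONCE(demo_flag, 1);
	spin_unlock(&demo_lock);
}
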
> ---
> diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
> index bd31ab12f77d..332738e93e57 100644
> --- a/arch/csky/Kconfig
> +++ b/arch/csky/Kconfig
> @@ -7,7 +7,7 @@ config CSKY
> select ARCH_HAS_SYNC_DMA_FOR_CPU
> select ARCH_HAS_SYNC_DMA_FOR_DEVICE
> select ARCH_USE_BUILTIN_BSWAP
> - select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2
> + select ARCH_USE_QUEUED_RWLOCKS
> select ARCH_WANT_FRAME_POINTERS if !CPU_CK610
> select COMMON_CLK
> select CLKSRC_MMIO
> diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h
> index 7cf3f2b34cea..69f5aa249c5f 100644
> --- a/arch/csky/include/asm/spinlock.h
> +++ b/arch/csky/include/asm/spinlock.h
> @@ -6,8 +6,6 @@
> #include <linux/spinlock_types.h>
> #include <asm/barrier.h>
>
> -#ifdef CONFIG_QUEUED_RWLOCKS
> -
> /*
> * Ticket-based spin-locking.
> */
> @@ -88,169 +86,4 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
>
> #include <asm/qrwlock.h>
>
> -/* See include/linux/spinlock.h */
> -#define smp_mb__after_spinlock() smp_mb()
> -
> -#else /* CONFIG_QUEUED_RWLOCKS */
> -
> -/*
> - * Test-and-set spin-locking.
> - */
> -static inline void arch_spin_lock(arch_spinlock_t *lock)
> -{
> - u32 *p = &lock->lock;
> - u32 tmp;
> -
> - asm volatile (
> - "1: ldex.w %0, (%1) \n"
> - " bnez %0, 1b \n"
> - " movi %0, 1 \n"
> - " stex.w %0, (%1) \n"
> - " bez %0, 1b \n"
> - : "=&r" (tmp)
> - : "r"(p)
> - : "cc");
> - smp_mb();
> -}
> -
> -static inline void arch_spin_unlock(arch_spinlock_t *lock)
> -{
> - smp_mb();
> - WRITE_ONCE(lock->lock, 0);
> -}
> -
> -static inline int arch_spin_trylock(arch_spinlock_t *lock)
> -{
> - u32 *p = &lock->lock;
> - u32 tmp;
> -
> - asm volatile (
> - "1: ldex.w %0, (%1) \n"
> - " bnez %0, 2f \n"
> - " movi %0, 1 \n"
> - " stex.w %0, (%1) \n"
> - " bez %0, 1b \n"
> - " movi %0, 0 \n"
> - "2: \n"
> - : "=&r" (tmp)
> - : "r"(p)
> - : "cc");
> -
> - if (!tmp)
> - smp_mb();
> -
> - return !tmp;
> -}
> -
> -#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)
> -
> -/*
> - * read lock/unlock/trylock
> - */
> -static inline void arch_read_lock(arch_rwlock_t *lock)
> -{
> - u32 *p = &lock->lock;
> - u32 tmp;
> -
> - asm volatile (
> - "1: ldex.w %0, (%1) \n"
> - " blz %0, 1b \n"
> - " addi %0, 1 \n"
> - " stex.w %0, (%1) \n"
> - " bez %0, 1b \n"
> - : "=&r" (tmp)
> - : "r"(p)
> - : "cc");
> - smp_mb();
> -}
> -
> -static inline void arch_read_unlock(arch_rwlock_t *lock)
> -{
> - u32 *p = &lock->lock;
> - u32 tmp;
> -
> - smp_mb();
> - asm volatile (
> - "1: ldex.w %0, (%1) \n"
> - " subi %0, 1 \n"
> - " stex.w %0, (%1) \n"
> - " bez %0, 1b \n"
> - : "=&r" (tmp)
> - : "r"(p)
> - : "cc");
> -}
> -
> -static inline int arch_read_trylock(arch_rwlock_t *lock)
> -{
> - u32 *p = &lock->lock;
> - u32 tmp;
> -
> - asm volatile (
> - "1: ldex.w %0, (%1) \n"
> - " blz %0, 2f \n"
> - " addi %0, 1 \n"
> - " stex.w %0, (%1) \n"
> - " bez %0, 1b \n"
> - " movi %0, 0 \n"
> - "2: \n"
> - : "=&r" (tmp)
> - : "r"(p)
> - : "cc");
> -
> - if (!tmp)
> - smp_mb();
> -
> - return !tmp;
> -}
> -
> -/*
> - * write lock/unlock/trylock
> - */
> -static inline void arch_write_lock(arch_rwlock_t *lock)
> -{
> - u32 *p = &lock->lock;
> - u32 tmp;
> -
> - asm volatile (
> - "1: ldex.w %0, (%1) \n"
> - " bnez %0, 1b \n"
> - " subi %0, 1 \n"
> - " stex.w %0, (%1) \n"
> - " bez %0, 1b \n"
> - : "=&r" (tmp)
> - : "r"(p)
> - : "cc");
> - smp_mb();
> -}
> -
> -static inline void arch_write_unlock(arch_rwlock_t *lock)
> -{
> - smp_mb();
> - WRITE_ONCE(lock->lock, 0);
> -}
> -
> -static inline int arch_write_trylock(arch_rwlock_t *lock)
> -{
> - u32 *p = &lock->lock;
> - u32 tmp;
> -
> - asm volatile (
> - "1: ldex.w %0, (%1) \n"
> - " bnez %0, 2f \n"
> - " subi %0, 1 \n"
> - " stex.w %0, (%1) \n"
> - " bez %0, 1b \n"
> - " movi %0, 0 \n"
> - "2: \n"
> - : "=&r" (tmp)
> - : "r"(p)
> - : "cc");
> -
> - if (!tmp)
> - smp_mb();
> -
> - return !tmp;
> -}
> -
> -#endif /* CONFIG_QUEUED_RWLOCKS */
> #endif /* __ASM_CSKY_SPINLOCK_H */
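
On the second question: with ARCH_USE_QUEUED_RWLOCKS selected
unconditionally, csky is left with a single spinlock implementation (the
ticket lock) plus the generic queued rwlocks, so the unfair test-and-set
variant deleted above has no remaining user. For readers unfamiliar with
the distinction, here is a conceptual C sketch of a ticket lock
(illustrative only, written with C11 atomics rather than the csky
ldex.w/stex.w assembly): acquirers take a ticket and spin until it is
served, which gives FIFO fairness that a plain test-and-set lock cannot.

#include <stdatomic.h>

/*
 * Conceptual ticket lock -- not the csky implementation. Each acquirer
 * takes the next ticket and waits until "owner" reaches it, so waiters
 * are served strictly in arrival order.
 */
typedef struct {
	atomic_ushort next;	/* next ticket to hand out */
	atomic_ushort owner;	/* ticket currently allowed to run */
} ticket_lock_t;

static inline void ticket_lock(ticket_lock_t *lock)
{
	unsigned short ticket =
		atomic_fetch_add_explicit(&lock->next, 1, memory_order_relaxed);

	/* Spin until our ticket is served; acquire pairs with the unlock. */
	while (atomic_load_explicit(&lock->owner, memory_order_acquire) != ticket)
		;
}

static inline void ticket_unlock(ticket_lock_t *lock)
{
	unsigned short next_owner =
		atomic_load_explicit(&lock->owner, memory_order_relaxed) + 1;

	/* Release publishes the critical section's stores to the next waiter. */
	atomic_store_explicit(&lock->owner, next_owner, memory_order_release);
}

The test-and-set code removed by the patch is the simpler but unfair
scheme: every waiter spins on one flag until its store-exclusive wins.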



--
Best Regards
Guo Ren

ML: https://lore.kernel.org/linux-csky/
