From: Nicholas Piggin <>
Subject: [PATCH 08/13] locking/qspinlock: stop renaming queued_spin_lock_slowpath to native_queued_spin_lock_slowpath
Date: Tue, 5 Jul 2022 00:38:15 +1000
The native version can simply be queued_spin_lock_slowpath, and the
paravirt version __pv_queued_spin_lock_slowpath, matching how they are
named in the C code.
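
To make the resulting calling scheme concrete, here is an editor's
sketch (not kernel code: C11 atomics stand in for the kernel's
arch_atomic helpers, and use_pv_slowpath() is a made-up placeholder
for the is_shared_processor()/paravirt test) of the shape the arch
headers end up with:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdint.h>

  #define _Q_LOCKED_VAL	1U

  struct qspinlock {
  	_Atomic uint32_t val;
  };

  /* Both slowpaths keep their real C names; no renaming via #define. */
  void queued_spin_lock_slowpath(struct qspinlock *lock, uint32_t val);
  void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, uint32_t val);

  /* Made-up placeholder for the is_shared_processor()/paravirt test. */
  static bool use_pv_slowpath(void)
  {
  	return false;
  }

  static inline void queued_spin_lock(struct qspinlock *lock)
  {
  	uint32_t val = 0;

  	/* Fastpath: uncontended 0 -> _Q_LOCKED_VAL transition. */
  	if (atomic_compare_exchange_strong_explicit(&lock->val, &val,
  			_Q_LOCKED_VAL, memory_order_acquire,
  			memory_order_relaxed))
  		return;

  	/* Contended: dispatch to a slowpath under its own name. */
  	if (!use_pv_slowpath())
  		queued_spin_lock_slowpath(lock, val);
  	else
  		__pv_queued_spin_lock_slowpath(lock, val);
  }

  /*
   * Stub bodies so the sketch builds; the real versions queue on MCS
   * nodes (native) and additionally park/kick vCPUs (paravirt).
   */
  void queued_spin_lock_slowpath(struct qspinlock *lock, uint32_t val)
  {
  	do {
  		val = 0;
  	} while (!atomic_compare_exchange_weak_explicit(&lock->val, &val,
  			_Q_LOCKED_VAL, memory_order_acquire,
  			memory_order_relaxed));
  }

  void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, uint32_t val)
  {
  	queued_spin_lock_slowpath(lock, val);	/* stub: same as native */
  }

  int main(void)
  {
  	struct qspinlock lock = { 0 };

  	queued_spin_lock(&lock);	/* uncontended: takes the fastpath */
  	return 0;
  }

Both slowpaths are reached by the names they are defined with in the
C code, which is what lets the #ifdef/#define rename in
kernel/locking/qspinlock.c go away.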
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/include/asm/qspinlock.h | 38 ++++++++++------------------
 arch/x86/include/asm/qspinlock.h     | 14 +++++++---
 arch/x86/kernel/paravirt.c           |  2 +-
 kernel/locking/qspinlock.c           |  8 +-----
 4 files changed, 26 insertions(+), 36 deletions(-)
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index b676c4fb90fd..dd231c756233 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -7,42 +7,32 @@
 
 #define _Q_PENDING_LOOPS	(1 << 9) /* not tuned */
 
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+void __pv_queued_spin_unlock(struct qspinlock *lock);
 
-static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
-	if (!is_shared_processor())
-		native_queued_spin_lock_slowpath(lock, val);
+	u32 val = 0;
+
+	if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
+		return;
+
+	if (!IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) || !is_shared_processor())
+		queued_spin_lock_slowpath(lock, val);
 	else
 		__pv_queued_spin_lock_slowpath(lock, val);
 }
+#define queued_spin_lock queued_spin_lock
 
-#define queued_spin_unlock queued_spin_unlock
 static inline void queued_spin_unlock(struct qspinlock *lock)
 {
-	if (!is_shared_processor())
+	if (!IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) || !is_shared_processor())
 		smp_store_release(&lock->locked, 0);
 	else
 		__pv_queued_spin_unlock(lock);
 }
-
-#else
-extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-#endif
-
-static __always_inline void queued_spin_lock(struct qspinlock *lock)
-{
-	u32 val = 0;
-
-	if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
-		return;
-
-	queued_spin_lock_slowpath(lock, val);
-}
-#define queued_spin_lock queued_spin_lock
+#define queued_spin_unlock queued_spin_unlock
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 #define SPIN_THRESHOLD (1<<15) /* not tuned */
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 7f914fe7bc30..603ad61e9dfe 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -28,7 +28,7 @@ static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
 }
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
-extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 extern void __pv_init_lock_hash(void);
 extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
@@ -38,7 +38,6 @@ extern bool nopvspin;
 #define __pv_queued_spin_unlock __pv_queued_spin_unlock
 #endif
 
-#define queued_spin_unlock queued_spin_unlock
 /**
  * queued_spin_unlock - release a queued spinlock
  * @lock : Pointer to queued spinlock structure
@@ -50,22 +49,29 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
 	smp_store_release(&lock->locked, 0);
 }
 
-static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+static inline void queued_spin_lock(struct qspinlock *lock)
 {
+	int val = 0;
+
+	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
+		return;
+
 	pv_queued_spin_lock_slowpath(lock, val);
 }
+#define queued_spin_lock queued_spin_lock
 
 static inline void queued_spin_unlock(struct qspinlock *lock)
 {
 	kcsan_release();
 	pv_queued_spin_unlock(lock);
 }
+#define queued_spin_unlock queued_spin_unlock
 
-#define vcpu_is_preempted vcpu_is_preempted
 static inline bool vcpu_is_preempted(long cpu)
 {
 	return pv_vcpu_is_preempted(cpu);
 }
+#define vcpu_is_preempted vcpu_is_preempted
 #endif
 
 #ifdef CONFIG_PARAVIRT
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 7ca2d46c08cc..f03e2962afa8 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -384,7 +384,7 @@ struct paravirt_patch_template pv_ops = {
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 	/* Lock ops. */
 #ifdef CONFIG_SMP
-	.lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+	.lock.queued_spin_lock_slowpath = queued_spin_lock_slowpath,
 	.lock.queued_spin_unlock =
 				PV_CALLEE_SAVE(__native_queued_spin_unlock),
 	.lock.wait = paravirt_nop,
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 4045b5683ecb..412b83040bac 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -295,8 +295,7 @@ static __always_inline void set_locked(struct qspinlock *lock)
  * pv_kick(cpu)	-- wakes a suspended vcpu
  *
  * Using these we implement __pv_queued_spin_lock_slowpath() and
- * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
- * native_queued_spin_unlock().
+ * __pv_queued_spin_unlock().
  */
 
 #define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)
@@ -986,10 +985,6 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt)
  * contended	 : (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
  *   queue	 :         ^--'                          :
  */
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
-#endif
-
 void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
 	if (virt_spin_lock(lock))
@@ -1070,7 +1065,6 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 EXPORT_SYMBOL(queued_spin_lock_slowpath);
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
-#undef queued_spin_lock_slowpath
 void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
 	queued_spin_lock_mcs_queue(lock, true);
--
2.35.1