Subject: [PATCH V7 4/5] asm-generic: spinlock: Add combo spinlock (ticket & queued)
From: Guo Ren <guoren@linux.alibaba.com>

Some architectures have a flexible requirement on the type of spinlock.
Some LL/SC ISAs don't force the micro-architecture to give a strong
forward progress guarantee, so micro-architectures with different memory
models can implement the same ISA. The ticket lock suits LL/SC
micro-architectures built around an exclusive monitor, with a limited
number of cores and no NUMA. The queued spinlock handles NUMA/large-scale
scenarios on LL/SC micro-architectures designed with a strong forward
progress guarantee.

So, make the generic spinlock a combo of both, selected by a static key
that is fixed after init.
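For illustration, a minimal sketch of how an architecture that selects
CONFIG_ARCH_USE_QUEUED_SPINLOCKS might fall back to the ticket lock during
early boot when the hardware lacks a strong forward progress guarantee.
The init function and the cpu_has_strong_llsc_forward_progress() probe
below are hypothetical and not part of this patch; the static key itself
is declared in asm-generic/spinlock.h, defaults to true, and is read-only
after init:

#include <linux/init.h>
#include <linux/jump_label.h>
#include <asm/spinlock.h>	/* pulls in asm-generic/spinlock.h */

static void __init arch_spinlock_init(void)
{
	/*
	 * use_qspinlock_key defaults to true (DEFINE_STATIC_KEY_TRUE_RO),
	 * so the queued spinlock is used unless the key is disabled here,
	 * before the key becomes read-only.
	 */
	if (!cpu_has_strong_llsc_forward_progress())	/* hypothetical probe */
		static_branch_disable(&use_qspinlock_key);
}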

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Palmer Dabbelt <palmer@rivosinc.com>
---
 include/asm-generic/spinlock.h | 43 ++++++++++++++++++++++++++++++++--
 kernel/locking/qspinlock.c     |  2 ++
 2 files changed, 43 insertions(+), 2 deletions(-)

diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
index f41dc7c2b900..a9b43089bf99 100644
--- a/include/asm-generic/spinlock.h
+++ b/include/asm-generic/spinlock.h
@@ -28,34 +28,73 @@
 #define __ASM_GENERIC_SPINLOCK_H
 
 #include <asm-generic/ticket_spinlock.h>
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+#include <linux/jump_label.h>
+#include <asm-generic/qspinlock.h>
+
+DECLARE_STATIC_KEY_TRUE(use_qspinlock_key);
+#endif
+
+#undef arch_spin_is_locked
+#undef arch_spin_is_contended
+#undef arch_spin_value_unlocked
+#undef arch_spin_lock
+#undef arch_spin_trylock
+#undef arch_spin_unlock
 
 static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-	ticket_spin_lock(lock);
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+	if (static_branch_likely(&use_qspinlock_key))
+		queued_spin_lock(lock);
+	else
+#endif
+	ticket_spin_lock(lock);
 }
 
 static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
 {
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+	if (static_branch_likely(&use_qspinlock_key))
+		return queued_spin_trylock(lock);
+#endif
 	return ticket_spin_trylock(lock);
 }
 
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	ticket_spin_unlock(lock);
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+	if (static_branch_likely(&use_qspinlock_key))
+		queued_spin_unlock(lock);
+	else
+#endif
+	ticket_spin_unlock(lock);
 }
 
 static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+	if (static_branch_likely(&use_qspinlock_key))
+		return queued_spin_is_locked(lock);
+#endif
 	return ticket_spin_is_locked(lock);
 }
 
 static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+	if (static_branch_likely(&use_qspinlock_key))
+		return queued_spin_is_contended(lock);
+#endif
 	return ticket_spin_is_contended(lock);
 }
 
 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+	if (static_branch_likely(&use_qspinlock_key))
+		return queued_spin_value_unlocked(lock);
+#endif
 	return ticket_spin_value_unlocked(lock);
 }
 
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 65a9a10caa6f..b7f7436f42f6 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -566,6 +566,8 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 }
 EXPORT_SYMBOL(queued_spin_lock_slowpath);
 
+DEFINE_STATIC_KEY_TRUE_RO(use_qspinlock_key);
+
 /*
  * Generate the paravirt code for queued_spin_unlock_slowpath().
  */
--
2.36.1