Subject: Re: [PATCH 00/13] locking/qspinlock: simplify code generation
Excerpts from Peter Zijlstra's message of July 6, 2022 3:59 am:
> On Tue, Jul 05, 2022 at 12:38:07AM +1000, Nicholas Piggin wrote:
>> Hi,
>>
>> Been looking a bit closer at the queued spinlock code recently, and
>> found it's a little tricky to follow, especially the pv generation.
>> This series tries to improve the situation. It's not well tested
>> outside powerpc, and it's really the x86 pv code, the other major
>> source of complexity, that needs review and testing.
>> Opinions?
>
> perhaps something like so on top/instead? This would still allow
> slotting in other implementations with relative ease and the compilers
> should constant fold all this.

Yeah, that could be a bit neater... I don't know. It all has to be
inlined and compiled together, so it's a matter of taste in syntactic
sugar. Doing it with C is probably better than doing it with CPP,
all else being equal.
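
For the archive, here is a rough userspace sketch of the pattern we're
talking about (the names slowpath / pv_prepare / pv_wait are made up,
not the kernel hooks): an always-inline helper takes a const ops
pointer, so when it is called with NULL or &pv_ops from the same
translation unit, the compiler can fold away the ops tests and the
indirection, which is the property both approaches rely on.

#include <stdio.h>

struct ops {
	void (*prepare)(int *v);	/* stand-in for e.g. init_node */
	int  (*wait)(int *v);		/* stand-in for e.g. wait_head_or_lock */
};

static void pv_prepare(int *v) { *v += 1; }
static int  pv_wait(int *v)    { return *v * 2; }

static const struct ops pv_ops = {
	.prepare = pv_prepare,
	.wait    = pv_wait,
};

static inline __attribute__((always_inline))
int slowpath(int *v, const struct ops *ops)
{
	if (ops && ops->prepare)	/* folds to nothing when ops == NULL */
		ops->prepare(v);
	if (ops && ops->wait)		/* folds to an inlined call for &pv_ops */
		return ops->wait(v);
	return *v;			/* "native" path */
}

int native_path(int *v) { return slowpath(v, NULL);    }
int pv_path(int *v)     { return slowpath(v, &pv_ops); }

int main(void)
{
	int a = 1, b = 1;
	printf("%d %d\n", native_path(&a), pv_path(&b));	/* prints "1 4" */
	return 0;
}

With -O2, gcc and clang should turn both callers into straight-line
code with no indirect calls; whether they actually do is worth checking
with objdump, which is the same caveat that applies to the kernel
version. CPP gets the same result by textual substitution, this just
gets there through the type system.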

At the moment I'm not planning to replace the PV functions on powerpc,
though. If/when it comes to that, I'd say more changes would be needed.

Thanks,
Nick

>
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -609,7 +609,7 @@ static void pv_kick_node(struct qspinloc
> *
> * The current value of the lock will be returned for additional processing.
> */
> -static void pv_wait_head_or_lock(struct qspinlock *lock, struct qnode *node)
> +static u32 pv_wait_head_or_lock(struct qspinlock *lock, struct qnode *node)
> {
> struct qspinlock **lp = NULL;
> int waitcnt = 0;
> @@ -641,7 +641,7 @@ static void pv_wait_head_or_lock(struct
> set_pending(lock);
> for (loop = SPIN_THRESHOLD; loop; loop--) {
> if (trylock_clear_pending(lock))
> - return; /* got lock */
> + goto out; /* got lock */
> cpu_relax();
> }
> clear_pending(lock);
> @@ -669,7 +669,7 @@ static void pv_wait_head_or_lock(struct
> */
> WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
> WRITE_ONCE(*lp, NULL);
> - return; /* got lock */
> + goto out; /* got lock */
> }
> }
> WRITE_ONCE(node->state, vcpu_hashed);
> @@ -683,12 +683,22 @@ static void pv_wait_head_or_lock(struct
> */
> }
>
> +out:
> /*
> * The cmpxchg() or xchg() call before coming here provides the
> * acquire semantics for locking.
> */
> + return atomic_read(&lock->val);
> }
>
> +static const struct queue_ops pv_ops = {
> + .init_node = pv_init_node,
> + .trylock = pv_hybrid_queued_unfair_trylock,
> + .wait_node = pv_wait_node,
> + .wait_head_or_lock = pv_wait_head_or_lock,
> + .kick_node = pv_kick_node,
> +};
> +
> /*
> * PV versions of the unlock fastpath and slowpath functions to be used
> * instead of queued_spin_unlock().
> @@ -756,18 +766,18 @@ __visible void __pv_queued_spin_unlock(s
> EXPORT_SYMBOL(__pv_queued_spin_unlock);
> #endif
>
> -#else /* CONFIG_PARAVIRT_SPINLOCKS */
> -static __always_inline void pv_init_node(struct qnode *node) { }
> -static __always_inline void pv_wait_node(struct qnode *node,
> - struct qnode *prev) { }
> -static __always_inline void pv_kick_node(struct qspinlock *lock,
> - struct qnode *node) { }
> -static __always_inline void pv_wait_head_or_lock(struct qspinlock *lock,
> - struct qnode *node) { }
> -static __always_inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock) { BUILD_BUG(); }
> #endif /* CONFIG_PARAVIRT_SPINLOCKS */
>
> -static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt)
> +struct queue_ops {
> + void (*init_node)(struct qnode *node);
> + bool (*trylock)(struct qspinlock *lock);
> + void (*wait_node)(struct qnode *node, struct qnode *prev);
> + u32 (*wait_head_or_lock)(struct qspinlock *lock, struct qnode *node);
> + void (*kick_node)(struct qspinlock *lock, struct qnode *node);
> +};
> +
> +static __always_inline
> +void queued_spin_lock_mcs_queue(struct qspinlock *lock, const struct queue_ops *ops)
> {
> struct qnode *prev, *next, *node;
> u32 val, old, tail;
> @@ -813,16 +823,16 @@ static inline void queued_spin_lock_mcs_
>
> node->locked = 0;
> node->next = NULL;
> - if (paravirt)
> - pv_init_node(node);
> + if (ops && ops->init_node)
> + ops->init_node(node);
>
> /*
> * We touched a (possibly) cold cacheline in the per-cpu queue node;
> * attempt the trylock once more in the hope someone let go while we
> * weren't watching.
> */
> - if (paravirt) {
> - if (pv_hybrid_queued_unfair_trylock(lock))
> + if (ops && ops->trylock) {
> + if (ops->trylock(lock))
> goto release;
> } else {
> if (queued_spin_trylock(lock))
> @@ -857,8 +867,8 @@ static inline void queued_spin_lock_mcs_
> WRITE_ONCE(prev->next, node);
>
> /* Wait for mcs node lock to be released */
> - if (paravirt)
> - pv_wait_node(node, prev);
> + if (ops && ops->wait_node)
> + ops->wait_node(node, prev);
> else
> smp_cond_load_acquire(&node->locked, VAL);
>
> @@ -893,12 +903,11 @@ static inline void queued_spin_lock_mcs_
> * If PV isn't active, 0 will be returned instead.
> *
> */
> - if (paravirt) {
> - pv_wait_head_or_lock(lock, node);
> - val = atomic_read(&lock->val);
> + if (ops && ops->wait_head_or_lock) {
> + val = ops->wait_head_or_lock(lock, node);
> } else {
> val = atomic_cond_read_acquire(&lock->val,
> - !(VAL & _Q_LOCKED_PENDING_MASK));
> + !(VAL & _Q_LOCKED_PENDING_MASK));
> }
>
> /*
> @@ -1049,14 +1058,14 @@ void queued_spin_lock_slowpath(struct qs
> */
> queue:
> lockevent_inc(lock_slowpath);
> - queued_spin_lock_mcs_queue(lock, false);
> + queued_spin_lock_mcs_queue(lock, NULL);
> }
> EXPORT_SYMBOL(queued_spin_lock_slowpath);
>
> #ifdef CONFIG_PARAVIRT_SPINLOCKS
> void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
> {
> - queued_spin_lock_mcs_queue(lock, true);
> + queued_spin_lock_mcs_queue(lock, &pv_ops);
> }
> EXPORT_SYMBOL(__pv_queued_spin_lock_slowpath);
>
>
