    Subject: Re: [PATCH 05/13] locking/qspinlock: be less clever with the preprocessor
    Date: 2022-07-05
    On 7/4/22 10:38, Nicholas Piggin wrote:
    > Stop qspinlock.c including itself and avoid most of the function
    > renaming with the preprocessor.
    >
    > This is mostly done by having the common slowpath code take a 'bool
    > paravirt' argument and adjusting code based on that.
    >
    > Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
    > ---
    > kernel/locking/qspinlock.c          | 116 ++++++++++++----------------
    > kernel/locking/qspinlock_paravirt.h |  10 +--
    > 2 files changed, 52 insertions(+), 74 deletions(-)
    >
    > diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
    > index 8f2173e22479..b96c58ca51de 100644
    > --- a/kernel/locking/qspinlock.c
    > +++ b/kernel/locking/qspinlock.c
    > @@ -11,8 +11,6 @@
    >   * Peter Zijlstra <peterz@infradead.org>
    >   */
    >
    > -#ifndef _GEN_PV_LOCK_SLOWPATH
    > -
    >  #include <linux/smp.h>
    >  #include <linux/bug.h>
    >  #include <linux/cpumask.h>
    > @@ -285,35 +283,21 @@ static __always_inline void set_locked(struct qspinlock *lock)
    >          WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
    >  }
    >
    > -
    > -/*
    > - * Generate the native code for queued_spin_unlock_slowpath(); provide NOPs for
    > - * all the PV callbacks.
    > - */
    > -
    > -static __always_inline void __pv_init_node(struct qnode *node) { }
    > -static __always_inline void __pv_wait_node(struct qnode *node,
    > -                                           struct qnode *prev) { }
    > -static __always_inline void __pv_kick_node(struct qspinlock *lock,
    > -                                           struct qnode *node) { }
    > -static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
    > -                                                  struct qnode *node)
    > -                                                  { return 0; }
    > -
    > -#define pv_enabled()            false
    > -
    > -#define pv_init_node            __pv_init_node
    > -#define pv_wait_node            __pv_wait_node
    > -#define pv_kick_node            __pv_kick_node
    > -#define pv_wait_head_or_lock    __pv_wait_head_or_lock
    > -
    >  #ifdef CONFIG_PARAVIRT_SPINLOCKS
    > -#define queued_spin_lock_slowpath       native_queued_spin_lock_slowpath
    > -#endif
    > -
    > -#endif /* _GEN_PV_LOCK_SLOWPATH */
    > +#include "qspinlock_paravirt.h"
    > +#else /* CONFIG_PARAVIRT_SPINLOCKS */
    > +static __always_inline void pv_init_node(struct qnode *node) { }
    > +static __always_inline void pv_wait_node(struct qnode *node,
    > +                                         struct qnode *prev) { }
    > +static __always_inline void pv_kick_node(struct qspinlock *lock,
    > +                                         struct qnode *node) { }
    > +static __always_inline u32 pv_wait_head_or_lock(struct qspinlock *lock,
    > +                                                struct qnode *node)
    > +                                                { return 0; }
    > +static __always_inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock) { BUILD_BUG(); }
    > +#endif /* CONFIG_PARAVIRT_SPINLOCKS */
    >
    > -static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
    > +static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt)

    Using "const bool paravirt" may help the compiler generating better code
    by eliminating dead one, if it is not doing that already.
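
    For illustration, here is a minimal standalone sketch (hypothetical
    names, not the kernel code) of why a constant bool argument lets the
    compiler drop the dead branch once the function is inlined:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the two trylock flavours; both names are made up. */
    static inline bool native_try(void) { return true; }
    static inline bool pv_try(void)     { return false; }

    /*
     * Each call site passes a literal constant, so after inlining the
     * compiler can constant-fold the branch and emit only one path per
     * specialization; 'const' makes that intent explicit.
     */
    static inline bool trylock_common(const bool paravirt)
    {
            if (paravirt)
                    return pv_try();        /* dead when paravirt == false */
            return native_try();            /* dead when paravirt == true */
    }

    int main(void)
    {
            printf("native:   %d\n", trylock_common(false));
            printf("paravirt: %d\n", trylock_common(true));
            return 0;
    }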

    >  {
    >          struct qnode *prev, *next, *node;
    >          u32 val, old, tail;
    > @@ -338,8 +322,13 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
    >           */
    >          if (unlikely(idx >= MAX_NODES)) {
    >                  lockevent_inc(lock_no_node);
    > -                while (!queued_spin_trylock(lock))
    > -                        cpu_relax();
    > +                if (paravirt) {
    > +                        while (!pv_hybrid_queued_unfair_trylock(lock))
    > +                                cpu_relax();
    > +                } else {
    > +                        while (!queued_spin_trylock(lock))
    > +                                cpu_relax();
    > +                }

    The code will look a bit better if you add the following helper function
    and use it instead.

    static inline bool queued_spin_trylock_common(struct qspinlock *lock,
                                                  const bool paravirt)
    {
            if (paravirt)
                    return pv_hybrid_queued_unfair_trylock(lock);
            else
                    return queued_spin_trylock(lock);
    }
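
    With that helper, the call site in queued_spin_lock_mcs_queue() above
    would collapse to something like:

            if (unlikely(idx >= MAX_NODES)) {
                    lockevent_inc(lock_no_node);
                    while (!queued_spin_trylock_common(lock, paravirt))
                            cpu_relax();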

    Cheers,
    Longman
