From: Alex Kogan <alex.kogan@oracle.com>
Date: 3 Apr 2020
Subject: [PATCH v10 2/5] locking/qspinlock: Refactor the qspinlock slow path

Move some of the code manipulating the spin lock into separate functions.
This allows easier integration of alternative ways to manipulate the lock.

Signed-off-by: Alex Kogan <alex.kogan@oracle.com>
Reviewed-by: Steve Sistare <steven.sistare@oracle.com>
Reviewed-by: Waiman Long <longman@redhat.com>
---
 kernel/locking/qspinlock.c | 38 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 36 insertions(+), 2 deletions(-)
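
Note for reviewers, not part of the commit: the point of the two hooks added
below is that try_clear_tail() and mcs_lock_handoff() become override points.
An alternative slow-path implementation could, hypothetically, redirect them
to its own helpers before the slow path is compiled, much like the file
already redirects queued_spin_lock_slowpath for the paravirt build. A minimal
sketch, with alt_try_clear_tail()/alt_mcs_lock_handoff() made up purely for
illustration:

static __always_inline bool alt_try_clear_tail(struct qspinlock *lock,
					       u32 val,
					       struct mcs_spinlock *node)
{
	/*
	 * This sketch keeps the default behaviour; an alternative could
	 * consult extra queue state reachable from @node before deciding
	 * whether the tail may be cleared.
	 */
	return atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL);
}

static __always_inline void alt_mcs_lock_handoff(struct mcs_spinlock *node,
						 struct mcs_spinlock *next)
{
	/*
	 * Again the default behaviour; an alternative could use @node to
	 * choose a successor other than @next before doing the handoff.
	 */
	arch_mcs_lock_handoff(&next->locked, 1);
}

#undef try_clear_tail
#define try_clear_tail		alt_try_clear_tail

#undef mcs_lock_handoff
#define mcs_lock_handoff	alt_mcs_lock_handoff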

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index ac1dedbe0237..6e63c72e3fbd 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -289,6 +289,34 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
 #define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * __try_clear_tail - try to clear tail by setting the lock value to
+ * _Q_LOCKED_VAL.
+ * @lock: Pointer to the queued spinlock structure
+ * @val: Current value of the lock
+ * @node: Pointer to the MCS node of the lock holder
+ */
+static __always_inline bool __try_clear_tail(struct qspinlock *lock,
+					     u32 val,
+					     struct mcs_spinlock *node)
+{
+	return atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL);
+}
+
+/*
+ * __mcs_lock_handoff - pass the MCS lock to the next waiter
+ * @node: Pointer to the MCS node of the lock holder
+ * @next: Pointer to the MCS node of the first waiter in the MCS queue
+ */
+static __always_inline void __mcs_lock_handoff(struct mcs_spinlock *node,
+					       struct mcs_spinlock *next)
+{
+	arch_mcs_lock_handoff(&next->locked, 1);
+}
+
+#define try_clear_tail		__try_clear_tail
+#define mcs_lock_handoff	__mcs_lock_handoff
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
 
 /**
@@ -533,7 +561,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 *       PENDING will make the uncontended transition fail.
 	 */
 	if ((val & _Q_TAIL_MASK) == tail) {
-		if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
+		if (try_clear_tail(lock, val, node))
 			goto release; /* No contention */
 	}
 
@@ -550,7 +578,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	if (!next)
 		next = smp_cond_load_relaxed(&node->next, (VAL));
 
-	arch_mcs_lock_handoff(&next->locked, 1);
+	mcs_lock_handoff(node, next);
 	pv_kick_node(lock, next);
 
 release:
@@ -575,6 +603,12 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath);
 #undef pv_kick_node
 #undef pv_wait_head_or_lock
 
+#undef try_clear_tail
+#define try_clear_tail		__try_clear_tail
+
+#undef mcs_lock_handoff
+#define mcs_lock_handoff	__mcs_lock_handoff
+
 #undef queued_spin_lock_slowpath
 #define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath
 
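For context, not part of the diff: the last hunk sits in the block at the
bottom of qspinlock.c where the file includes itself to generate the paravirt
slow path. Re-pointing the hooks at the __-prefixed helpers there keeps the
paravirt variant on the native tail-clear and handoff code even if a later
patch redirects the primary slow path elsewhere. A condensed sketch of that
surrounding, pre-existing pattern, with the lines added by this patch marked:

#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

/* ... #undef the pv_* stubs so qspinlock_paravirt.h can redefine them ... */

#undef try_clear_tail				/* added by this patch */
#define try_clear_tail		__try_clear_tail

#undef mcs_lock_handoff				/* added by this patch */
#define mcs_lock_handoff	__mcs_lock_handoff

#undef queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"	/* compile the slow path again as the PV variant */
#endif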
--
2.21.1 (Apple Git-122.3)