From: Alex Kogan <alex.kogan@oracle.com>
Subject: [PATCH v3 2/5] locking/qspinlock: Refactor the qspinlock slow path
Date: 15 Jul 2019
Move some of the code that manipulates the spinlock into separate
functions. This will allow easier integration of alternative ways to
manipulate the lock.

Signed-off-by: Alex Kogan <alex.kogan@oracle.com>
Reviewed-by: Steve Sistare <steven.sistare@oracle.com>
---
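For illustration only (not intended for the kernel tree): below is a
minimal userspace sketch of the hook pattern this refactoring enables.
The struct definitions are simplified stand-ins for the kernel types,
and C11 atomics replace atomic_try_cmpxchg_relaxed(); an alternative
lock implementation would plug in by redefining the macro, the same
way the PV build redefines queued_spin_lock_slowpath.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct qspinlock { atomic_uint val; };	/* stand-in for the kernel type */
struct mcs_spinlock { int locked; };	/* stand-in, fields elided */
#define _Q_LOCKED_VAL	1U

/* Default hook, mirroring __set_locked_empty_mcs() in the patch. */
static inline bool __set_locked_empty_mcs(struct qspinlock *lock,
					  unsigned int val,
					  struct mcs_spinlock *node)
{
	(void)node;	/* unused by the default implementation */
	return atomic_compare_exchange_strong_explicit(&lock->val, &val,
			_Q_LOCKED_VAL, memory_order_relaxed,
			memory_order_relaxed);
}

/*
 * The slow path calls only the macro name, so an alternative lock
 * implementation can be slotted in by redefining the macro before
 * the slow path is compiled.
 */
#define set_locked_empty_mcs	__set_locked_empty_mcs

int main(void)
{
	struct qspinlock lock = { 0x100U | _Q_LOCKED_VAL };	/* tail set, locked */
	unsigned int val = atomic_load_explicit(&lock.val, memory_order_relaxed);

	if (set_locked_empty_mcs(&lock, val, NULL))
		printf("queue empty: lock value reset to _Q_LOCKED_VAL\n");
	return 0;
}
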
kernel/locking/qspinlock.c | 40 ++++++++++++++++++++++++++++++++++++++--
1 file changed, 38 insertions(+), 2 deletions(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 961781624638..5668466b3006 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -297,6 +297,36 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
 #define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * set_locked_empty_mcs - Try to set the spinlock value to _Q_LOCKED_VAL,
+ * thereby releasing the MCS lock when its waiting queue is empty
+ * @lock: Pointer to queued spinlock structure
+ * @val: Current value of the lock
+ * @node: Pointer to the MCS node of the lock holder
+ *
+ * *,*,* -> 0,0,1
+ */
+static __always_inline bool __set_locked_empty_mcs(struct qspinlock *lock,
+						    u32 val,
+						    struct mcs_spinlock *node)
+{
+	return atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL);
+}
+
+/*
+ * pass_mcs_lock - pass the MCS lock to the next waiter
+ * @node: Pointer to the MCS node of the lock holder
+ * @next: Pointer to the MCS node of the first waiter in the MCS queue
+ */
+static __always_inline void __pass_mcs_lock(struct mcs_spinlock *node,
+					    struct mcs_spinlock *next)
+{
+	arch_mcs_spin_unlock_contended(&next->locked, 1);
+}
+
+#define set_locked_empty_mcs __set_locked_empty_mcs
+#define pass_mcs_lock __pass_mcs_lock
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
 
 /**
@@ -541,7 +571,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 *       PENDING will make the uncontended transition fail.
 	 */
 	if ((val & _Q_TAIL_MASK) == tail) {
-		if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
+		if (set_locked_empty_mcs(lock, val, node))
 			goto release; /* No contention */
 	}

@@ -558,7 +588,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	if (!next)
 		next = smp_cond_load_relaxed(&node->next, (VAL));
 
-	arch_mcs_spin_unlock_contended(&next->locked, 1);
+	pass_mcs_lock(node, next);
 	pv_kick_node(lock, next);
 
 release:
@@ -583,6 +613,12 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath);
 #undef pv_kick_node
 #undef pv_wait_head_or_lock
 
+#undef set_locked_empty_mcs
+#define set_locked_empty_mcs __set_locked_empty_mcs
+
+#undef pass_mcs_lock
+#define pass_mcs_lock __pass_mcs_lock
+
 #undef queued_spin_lock_slowpath
 #define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath
 
--
2.11.0 (Apple Git-81)
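
For illustration only (not intended for the kernel tree): the handoff
wrapped by pass_mcs_lock() is a single release store of 1 to the next
waiter's ->locked field, on which that waiter spins with an acquire
load. A self-contained userspace analogue, with C11 atomics standing
in for the kernel's smp_store_release() / smp_cond_load_acquire():

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

struct mcs_spinlock { atomic_int locked; };	/* stand-in, fields elided */

static struct mcs_spinlock next_node;	/* the first waiter's queue node */

static void *waiter(void *arg)
{
	(void)arg;
	/* Like arch_mcs_spin_lock_contended(): spin until handed the lock. */
	while (!atomic_load_explicit(&next_node.locked, memory_order_acquire))
		;
	printf("waiter: MCS lock acquired\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	/* Holder side: this one release store is all pass_mcs_lock() does. */
	atomic_store_explicit(&next_node.locked, 1, memory_order_release);
	pthread_join(t, NULL);
	return 0;
}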