From: Nicholas Piggin <npiggin@gmail.com>
Subject: [PATCH 04/13] locking/qspinlock: move pv lock word helpers into qspinlock.c
Date: 4 Jul 2022
There is no real reason not to keep all the lock word bit manipulation helpers together, so move the paravirt ones into qspinlock.c.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
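For reference, a rough sketch of the lock word layout these helpers
operate on, assuming _Q_PENDING_BITS == 8 (i.e. NR_CPUS < 16K); see
qspinlock_types.h for the authoritative definitions. The (y,x,z)
annotations in the helper comments denote (queue tail, pending bit,
locked byte):

	/*
	 * Sketch of the 32-bit lock word, _Q_PENDING_BITS == 8 case:
	 *
	 *  bits  0- 7: locked byte
	 *  bits  8-15: pending byte (a single bit when NR_CPUS >= 16K)
	 *  bits 16-17: tail index (per-CPU qnode slot)
	 *  bits 18-31: tail CPU number + 1 (0 means no tail)
	 */
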
kernel/locking/qspinlock.c | 107 ++++++++++++++++------------
kernel/locking/qspinlock_paravirt.h | 51 -------------
2 files changed, 63 insertions(+), 95 deletions(-)
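
To show why the three pv helpers belong with the rest of the bit
manipulation, here is a simplified sketch (not the verbatim kernel
code) of how the paravirt queue head uses them in
pv_wait_head_or_lock(); SPIN_THRESHOLD, pv_wait() and _Q_SLOW_VAL are
the existing kernel symbols:

	for (;;) {
		set_pending(lock);	/* *,0,* -> *,1,*: forbid lock stealing */

		/* Spin a bounded number of times hoping the holder releases. */
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (trylock_clear_pending(lock))
				return;	/* 0,1,0 -> 0,0,1: lock acquired */
			cpu_relax();
		}

		clear_pending(lock);	/* *,1,* -> *,0,* before sleeping */
		pv_wait(&lock->locked, _Q_SLOW_VAL);	/* sleep until kicked */
	}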

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 7360d643de29..8f2173e22479 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -141,7 +141,24 @@ struct qnode *grab_qnode(struct qnode *base, int idx)

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

+/**
+ * set_pending - set the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,0,* -> *,1,*
+ *
+ * The pending bit is used by the queue head vCPU to indicate that it
+ * is actively spinning on the lock and no lock stealing is allowed.
+ */
+static __always_inline void set_pending(struct qspinlock *lock)
+{
#if _Q_PENDING_BITS == 8
+ WRITE_ONCE(lock->pending, 1);
+#else
+ atomic_or(_Q_PENDING_VAL, &lock->val);
+#endif
+}
+
/**
* clear_pending - clear the pending bit.
* @lock: Pointer to queued spinlock structure
@@ -150,7 +167,11 @@ struct qnode *grab_qnode(struct qnode *base, int idx)
*/
static __always_inline void clear_pending(struct qspinlock *lock)
{
+#if _Q_PENDING_BITS == 8
WRITE_ONCE(lock->pending, 0);
+#else
+ atomic_andnot(_Q_PENDING_VAL, &lock->val);
+#endif
}

/**
@@ -163,74 +184,72 @@ static __always_inline void clear_pending(struct qspinlock *lock)
*/
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
+#if _Q_PENDING_BITS == 8
WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
+#else
+ atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
+#endif
}

-/*
- * xchg_tail - Put in the new queue tail code word & retrieve previous one
- * @lock : Pointer to queued spinlock structure
- * @tail : The new queue tail code word
- * Return: The previous queue tail code word
- *
- * xchg(lock, tail), which heads an address dependency
- *
- * p,*,* -> n,*,* ; prev = xchg(lock, node)
- */
-static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
-{
- /*
- * We can use relaxed semantics since the caller ensures that the
- * MCS node is properly initialized before updating the tail.
- */
- return (u32)xchg_relaxed(&lock->tail,
- tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
-}
-
-#else /* _Q_PENDING_BITS == 8 */
-
/**
- * clear_pending - clear the pending bit.
+ * trylock_clear_pending - try to take ownership and clear the pending bit
* @lock: Pointer to queued spinlock structure
*
- * *,1,* -> *,0,*
+ * 0,1,0 -> 0,0,1
*/
-static __always_inline void clear_pending(struct qspinlock *lock)
+static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
- atomic_andnot(_Q_PENDING_VAL, &lock->val);
-}
+#if _Q_PENDING_BITS == 8
+ return !READ_ONCE(lock->locked) &&
+ (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
+ _Q_LOCKED_VAL) == _Q_PENDING_VAL);
+#else
+ int val = atomic_read(&lock->val);

-/**
- * clear_pending_set_locked - take ownership and clear the pending bit.
- * @lock: Pointer to queued spinlock structure
- *
- * *,1,0 -> *,0,1
- */
-static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
-{
- atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
+ for (;;) {
+ int old, new;
+
+ if (val & _Q_LOCKED_MASK)
+ break;
+
+ /*
+ * Try to clear pending bit & set locked bit
+ */
+ old = val;
+ new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
+ val = atomic_cmpxchg_acquire(&lock->val, old, new);
+
+ if (val == old)
+ return 1;
+ }
+ return 0;
+#endif
}

-/**
+/*
* xchg_tail - Put in the new queue tail code word & retrieve previous one
* @lock : Pointer to queued spinlock structure
* @tail : The new queue tail code word
* Return: The previous queue tail code word
*
- * xchg(lock, tail)
+ * xchg(lock, tail), which heads an address dependency
*
* p,*,* -> n,*,* ; prev = xchg(lock, node)
*/
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
+ /*
+ * We can use relaxed semantics since the caller ensures that the
+ * MCS node is properly initialized before updating the tail.
+ */
+#if _Q_PENDING_BITS == 8
+ return (u32)xchg_relaxed(&lock->tail,
+ tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
+#else
u32 old, new, val = atomic_read(&lock->val);

for (;;) {
new = (val & _Q_LOCKED_PENDING_MASK) | tail;
- /*
- * We can use relaxed semantics since the caller ensures that
- * the MCS node is properly initialized before updating the
- * tail.
- */
old = atomic_cmpxchg_relaxed(&lock->val, val, new);
if (old == val)
break;
@@ -238,8 +257,8 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
val = old;
}
return old;
+#endif
}
-#endif /* _Q_PENDING_BITS == 8 */

/**
* queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index cce3d3dde216..97385861adc2 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -95,57 +95,6 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
return false;
}

-/*
- * The pending bit is used by the queue head vCPU to indicate that it
- * is actively spinning on the lock and no lock stealing is allowed.
- */
-#if _Q_PENDING_BITS == 8
-static __always_inline void set_pending(struct qspinlock *lock)
-{
- WRITE_ONCE(lock->pending, 1);
-}
-
-/*
- * The pending bit check in pv_queued_spin_steal_lock() isn't a memory
- * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
- * lock just to be sure that it will get it.
- */
-static __always_inline int trylock_clear_pending(struct qspinlock *lock)
-{
- return !READ_ONCE(lock->locked) &&
- (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
- _Q_LOCKED_VAL) == _Q_PENDING_VAL);
-}
-#else /* _Q_PENDING_BITS == 8 */
-static __always_inline void set_pending(struct qspinlock *lock)
-{
- atomic_or(_Q_PENDING_VAL, &lock->val);
-}
-
-static __always_inline int trylock_clear_pending(struct qspinlock *lock)
-{
- int val = atomic_read(&lock->val);
-
- for (;;) {
- int old, new;
-
- if (val & _Q_LOCKED_MASK)
- break;
-
- /*
- * Try to clear pending bit & set locked bit
- */
- old = val;
- new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
- val = atomic_cmpxchg_acquire(&lock->val, old, new);
-
- if (val == old)
- return 1;
- }
- return 0;
-}
-#endif /* _Q_PENDING_BITS == 8 */
-
/*
* Lock and MCS node addresses hash table for fast lookup
*
--
2.35.1