    From: Will Deacon <will.deacon@arm.com>
    Subject: [PATCH v2 5/5] kernel/locking: Prevent slowpath writers getting held up by fastpath
    Date: 6 Oct 2017
    When a prospective writer takes the qrwlock locking slowpath due to the
    lock being held, it attempts to cmpxchg the wmode field from 0 to
    _QW_WAITING so that concurrent lockers also take the slowpath and queue
    on the spinlock accordingly, allowing the lockers ahead of it to drain.
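
    For reference, this is the pre-patch wait loop that the diff below
    removes from queued_write_lock_slowpath():

        /* A queued writer announces itself by claiming the wmode byte */
        for (;;) {
                if (!READ_ONCE(lock->wmode) &&
                    (cmpxchg_relaxed(&lock->wmode, 0, _QW_WAITING) == 0))
                        break;

                cpu_relax();
        }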

    Unfortunately, this isn't fair, because a fastpath writer that comes in
    after the lock is made available but before the _QW_WAITING flag is set
    can effectively jump the queue. If there is a steady stream of fastpath
    writers, then the waiter will be held off indefinitely.
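
    One possible interleaving (W1 is the queued slowpath writer; W2, W3 are
    fastpath writers calling queued_write_lock()):

        W1: finds the lock held, queues on the wait_lock and spins trying
            to cmpxchg the wmode byte from 0 to _QW_WAITING
        --: the current owner releases the lock; cnts == 0
        W2: fastpath cmpxchg of cnts from 0 to _QW_LOCKED succeeds, so W2
            takes the lock and W1's cmpxchg keeps failing
        W3: when W2 unlocks, another fastpath writer can win the same race,
            and so on, starving W1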

    This patch restores fairness by separating _QW_WAITING and _QW_LOCKED
    into two distinct fields: _QW_LOCKED continues to occupy the bottom byte
    of the lockword so that it can be cleared unconditionally when unlocking,
    but _QW_WAITING now occupies what used to be the bottom bit of the reader
    count. This then forces the slow-path for concurrent lockers.
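
    With the new definitions, the 32-bit lockword is laid out as sketched
    below (derived from the #defines in the diff, not quoted from the file):

         31                        9   8   7             0
        +--------------------------+---+-----------------+
        |       reader count       | W |     wlocked     |
        +--------------------------+---+-----------------+

        wlocked = _QW_LOCKED  (0x0ff): byte cleared unconditionally on unlock
        W       = _QW_WAITING (0x100): set by a queued writer to force the
                                       slowpath for subsequent lockers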

    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: Ingo Molnar <mingo@redhat.com>
    Cc: Waiman Long <longman@redhat.com>
    Cc: Boqun Feng <boqun.feng@gmail.com>
    Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
    Signed-off-by: Will Deacon <will.deacon@arm.com>
    ---
    include/asm-generic/qrwlock.h | 10 +++++-----
    include/asm-generic/qrwlock_types.h | 8 ++++----
    kernel/locking/qrwlock.c | 20 +++++---------------
    3 files changed, 14 insertions(+), 24 deletions(-)

    diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
    index 02c0a768e6b0..63cb7d347b25 100644
    --- a/include/asm-generic/qrwlock.h
    +++ b/include/asm-generic/qrwlock.h
    @@ -40,10 +40,10 @@
    * | rd | wr |
    * +----+----+----+----+
    */
    -#define _QW_WAITING 1 /* A writer is waiting */
    -#define _QW_LOCKED 0xff /* A writer holds the lock */
    -#define _QW_WMASK 0xff /* Writer mask */
    -#define _QR_SHIFT 8 /* Reader count shift */
    +#define _QW_WAITING 0x100 /* A writer is waiting */
    +#define _QW_LOCKED 0x0ff /* A writer holds the lock */
    +#define _QW_WMASK 0x1ff /* Writer mask */
    +#define _QR_SHIFT 9 /* Reader count shift */
    #define _QR_BIAS (1U << _QR_SHIFT)

    /*
    @@ -134,7 +134,7 @@ static inline void queued_read_unlock(struct qrwlock *lock)
    */
    static inline void queued_write_unlock(struct qrwlock *lock)
    {
    - smp_store_release(&lock->wmode, 0);
    + smp_store_release(&lock->wlocked, 0);
    }

    /*
    diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
    index 507f2dc51bba..8af752acbdc0 100644
    --- a/include/asm-generic/qrwlock_types.h
    +++ b/include/asm-generic/qrwlock_types.h
    @@ -13,11 +13,11 @@ typedef struct qrwlock {
    atomic_t cnts;
    struct {
    #ifdef __LITTLE_ENDIAN
    - u8 wmode; /* Writer mode */
    - u8 rcnts[3]; /* Reader counts */
    + u8 wlocked; /* Locked for write? */
    + u8 __lstate[3];
    #else
    - u8 rcnts[3]; /* Reader counts */
    - u8 wmode; /* Writer mode */
    + u8 __lstate[3];
    + u8 wlocked; /* Locked for write? */
    #endif
    };
    };
    diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
    index b7ea4647c74d..e940f2c2b4f2 100644
    --- a/kernel/locking/qrwlock.c
    +++ b/kernel/locking/qrwlock.c
    @@ -40,8 +40,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
    * so spin with ACQUIRE semantics until the lock is available
    * without waiting in the queue.
    */
    - atomic_cond_read_acquire(&lock->cnts, (VAL & _QW_WMASK)
    - != _QW_LOCKED);
    + atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
    return;
    }
    atomic_sub(_QR_BIAS, &lock->cnts);
    @@ -57,7 +56,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
    * that accesses can't leak upwards out of our subsequent critical
    * section in the case that the lock is currently held for write.
    */
    - atomic_cond_read_acquire(&lock->cnts, (VAL & _QW_WMASK) != _QW_LOCKED);
    + atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

    /*
    * Signal the next one in queue to become queue head
    @@ -80,19 +79,10 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
    goto unlock;

    - /*
    - * Set the waiting flag to notify readers that a writer is pending,
    - * or wait for a previous writer to go away.
    - */
    - for (;;) {
    - if (!READ_ONCE(lock->wmode) &&
    - (cmpxchg_relaxed(&lock->wmode, 0, _QW_WAITING) == 0))
    - break;
    -
    - cpu_relax();
    - }
    + /* Set the waiting flag to notify readers that a writer is pending */
    + atomic_add(_QW_WAITING, &lock->cnts);

    - /* When no more readers, set the locked flag */
    + /* When no more readers or writers, set the locked flag */
    do {
    atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
    } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
    --
    2.1.4