    From: Waiman Long <Waiman.Long@hp.com>
    Subject: [PATCH v9 4/5] qrwlock: Use smp_store_release() in write_unlock()
    This patch modifies the queue_write_unlock() function to use the new
    smp_store_release() function (currently in tip). It also removes the
    temporary implementations of smp_load_acquire() and smp_store_release()
    from qrwlock.c.
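
    For reference, the smp_store_release() now in tip has, as its generic
    fallback, essentially the shape of the temporary copy this patch removes
    from qrwlock.c; a sketch (modulo the exact details of the tip version):

        #define smp_store_release(p, v)                         \
        do {                                                    \
                compiletime_assert_atomic_type(*p);             \
                smp_mb();  /* order prior accesses first */     \
                ACCESS_ONCE(*p) = (v);                          \
        } while (0)

    On strongly ordered architectures such as x86, the smp_mb() relaxes to a
    plain compiler barrier(), as in the CONFIG_X86 branch of the removed copy.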

    If the writer field is not a native word that can be stored atomically,
    the patch falls back to an atomic subtraction to clear it.
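
    Note that __native_word() is a compile-time constant expression, so the
    if/else below folds to a single code path with no runtime test. A sketch
    of its definition in <linux/compiler.h> at the time (details may differ):

        /* Is this type a natively sized machine word? */
        #define __native_word(t)                                \
                (sizeof(t) == sizeof(char)  ||                  \
                 sizeof(t) == sizeof(short) ||                  \
                 sizeof(t) == sizeof(int)   ||                  \
                 sizeof(t) == sizeof(long))

    Since smp_store_release() asserts __native_word() on its argument at
    compile time, guarding the call with the same predicate lets the dead
    branch be discarded before that assertion could fire.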

    Signed-off-by: Waiman Long <Waiman.Long@hp.com>
    ---
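
    For context, the release store pairs with an acquire load on the reader
    side: once a waiter observes the writer byte as zero through
    smp_load_acquire(), everything the writer did inside its critical section
    is guaranteed to be visible. Schematically (a hypothetical sketch using
    only names visible in this patch, not the actual qrwlock.c spin loop):

        /* writer unlock (release): critical-section accesses cannot
         * be reordered past the clearing of the writer byte */
        smp_store_release(&lock->cnts.writer, 0);

        /* reader/waiter (acquire): later accesses cannot be reordered
         * before the load that observes writer == 0 */
        while (smp_load_acquire(&lock->cnts.writer) != 0)
                arch_mutex_cpu_relax();
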
     include/asm-generic/qrwlock.h |   10 ++++++----
     kernel/locking/qrwlock.c      |   34 ----------------------------------
     2 files changed, 6 insertions(+), 38 deletions(-)

    diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
    index 5abb6ca..68f488b 100644
    --- a/include/asm-generic/qrwlock.h
    +++ b/include/asm-generic/qrwlock.h
    @@ -181,11 +181,13 @@ static inline void queue_read_unlock(struct qrwlock *lock)
     static inline void queue_write_unlock(struct qrwlock *lock)
     {
     	/*
    -	 * Make sure that none of the critical section will be leaked out.
    +	 * If the writer field is atomic, it can be cleared directly.
    +	 * Otherwise, an atomic subtraction will be used to clear it.
     	 */
    -	smp_mb__before_clear_bit();
    -	ACCESS_ONCE(lock->cnts.writer) = 0;
    -	smp_mb__after_clear_bit();
    +	if (__native_word(lock->cnts.writer))
    +		smp_store_release(&lock->cnts.writer, 0);
    +	else
    +		atomic_sub(_QW_LOCKED, &lock->cnts.rwa);
     }

     /*
    diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
    index 053be4d..2727188 100644
    --- a/kernel/locking/qrwlock.c
    +++ b/kernel/locking/qrwlock.c
    @@ -47,40 +47,6 @@
     # define arch_mutex_cpu_relax()	cpu_relax()
     #endif

    -#ifndef smp_load_acquire
    -# ifdef CONFIG_X86
    -#  define smp_load_acquire(p)				\
    -	({						\
    -		typeof(*p) ___p1 = ACCESS_ONCE(*p);	\
    -		barrier();				\
    -		___p1;					\
    -	})
    -# else
    -#  define smp_load_acquire(p)				\
    -	({						\
    -		typeof(*p) ___p1 = ACCESS_ONCE(*p);	\
    -		smp_mb();				\
    -		___p1;					\
    -	})
    -# endif
    -#endif
    -
    -#ifndef smp_store_release
    -# ifdef CONFIG_X86
    -#  define smp_store_release(p, v)			\
    -	do {						\
    -		barrier();				\
    -		ACCESS_ONCE(*p) = v;			\
    -	} while (0)
    -# else
    -#  define smp_store_release(p, v)			\
    -	do {						\
    -		smp_mb();				\
    -		ACCESS_ONCE(*p) = v;			\
    -	} while (0)
    -# endif
    -#endif
    -
     /*
      * If an xadd (exchange-add) macro isn't available, simulate one with
      * the atomic_add_return() function.
    --
    1.7.1

