 
Date: 2014-01-24
Subject: Re: [PATCH v11 1/4] qrwlock: A queue read/write lock implementation
On Thu, Jan 23, 2014 at 11:28:48PM -0500, Waiman Long wrote:
> +/**
> + * queue_read_trylock - try to acquire read lock of a queue rwlock
> + * @lock : Pointer to queue rwlock structure
> + * Return: 1 if lock acquired, 0 if failed
> + */
> +static inline int queue_read_trylock(struct qrwlock *lock)
> +{
> +	union qrwcnts cnts;
> +
> +	cnts.rwc = ACCESS_ONCE(lock->cnts.rwc);
> +	if (likely(!cnts.writer)) {
> +		cnts.rwc = (u32)atomic_add_return(_QR_BIAS, &lock->cnts.rwa);
> +		if (likely(!cnts.writer)) {
> +			smp_mb__after_atomic_inc();

That's superfluous, as atomic_add_return() is documented as being a full
barrier.

> +			return 1;
> +		}
> +		atomic_sub(_QR_BIAS, &lock->cnts.rwa);
> +	}
> +	return 0;
> +}
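
With that barrier dropped (atomic_add_return() already provides the
full ordering), the trylock could shrink to something like this;
untested sketch, same types and fields as above:

static inline int queue_read_trylock(struct qrwlock *lock)
{
	union qrwcnts cnts;

	cnts.rwc = ACCESS_ONCE(lock->cnts.rwc);
	if (likely(!cnts.writer)) {
		/* atomic_add_return() is a full barrier; no extra smp_mb */
		cnts.rwc = (u32)atomic_add_return(_QR_BIAS, &lock->cnts.rwa);
		if (likely(!cnts.writer))
			return 1;
		atomic_sub(_QR_BIAS, &lock->cnts.rwa);
	}
	return 0;
}
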
> +
> +/**
> + * queue_write_trylock - try to acquire write lock of a queue rwlock
> + * @lock : Pointer to queue rwlock structure
> + * Return: 1 if lock acquired, 0 if failed
> + */
> +static inline int queue_write_trylock(struct qrwlock *lock)
> +{
> +	union qrwcnts old, new;
> +
> +	old.rwc = ACCESS_ONCE(lock->cnts.rwc);
> +	if (likely(!old.rwc)) {
> +		new.rwc = old.rwc;
> +		new.writer = _QW_LOCKED;
> +		if (likely(cmpxchg(&lock->cnts.rwc, old.rwc, new.rwc)
> +			   == old.rwc))

One could actually use atomic_cmpxchg() and avoid one (ab)use of that
union :-)

> +			return 1;
> +	}
> +	return 0;
> +}
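
Untested sketch of that; it takes the snapshot with atomic_read()
instead of ACCESS_ONCE() on the union, and relies on the 0 ->
_QW_LOCKED transition covering the whole word, exactly like the
cmpxchg in queue_write_lock() below already does:

static inline int queue_write_trylock(struct qrwlock *lock)
{
	u32 old;

	old = (u32)atomic_read(&lock->cnts.rwa);
	if (unlikely(old))
		return 0;

	/* atomic_cmpxchg() works on the atomic_t directly */
	return likely(atomic_cmpxchg(&lock->cnts.rwa, 0, _QW_LOCKED) == 0);
}
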
> +/**
> + * queue_read_lock - acquire read lock of a queue rwlock
> + * @lock: Pointer to queue rwlock structure
> + */
> +static inline void queue_read_lock(struct qrwlock *lock)
> +{
> +	union qrwcnts cnts;
> +
> +	cnts.rwc = atomic_add_return(_QR_BIAS, &lock->cnts.rwa);
> +	if (likely(!cnts.writer)) {
> +		smp_mb__after_atomic_inc();

Superfluous again.

> +		return;
> +	}
> +	/*
> +	 * Slowpath will decrement the reader count, if necessary
> +	 */
> +	queue_read_lock_slowpath(lock);
> +}
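
I.e. (untested), the whole thing reduces to:

static inline void queue_read_lock(struct qrwlock *lock)
{
	union qrwcnts cnts;

	/* atomic_add_return() already implies a full barrier */
	cnts.rwc = atomic_add_return(_QR_BIAS, &lock->cnts.rwa);
	if (likely(!cnts.writer))
		return;
	/* Slowpath will decrement the reader count, if necessary */
	queue_read_lock_slowpath(lock);
}
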
> +
> +/**
> + * queue_write_lock - acquire write lock of a queue rwlock
> + * @lock : Pointer to queue rwlock structure
> + */
> +static inline void queue_write_lock(struct qrwlock *lock)
> +{
> +	/*
> +	 * Optimize for the unfair lock case where the fair flag is 0.
> +	 */
> +	if (cmpxchg(&lock->cnts.rwc, 0, _QW_LOCKED) == 0)

Could equally be atomic_cmpxchg() again.

> +		return;
> +	queue_write_lock_slowpath(lock);
> +}
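
Which would make the fastpath (untested):

static inline void queue_write_lock(struct qrwlock *lock)
{
	/*
	 * Optimize for the unfair lock case where the fair flag is 0;
	 * atomic_cmpxchg() avoids the cast through the union.
	 */
	if (atomic_cmpxchg(&lock->cnts.rwa, 0, _QW_LOCKED) == 0)
		return;
	queue_write_lock_slowpath(lock);
}
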
> +
> +/**
> + * queue_read_unlock - release read lock of a queue rwlock
> + * @lock : Pointer to queue rwlock structure
> + */
> +static inline void queue_read_unlock(struct qrwlock *lock)
> +{
> +	/*
> +	 * Atomically decrement the reader count
> +	 */
> +	smp_mb__before_atomic_dec();
> +	atomic_sub(_QR_BIAS, &lock->cnts.rwa);
> +}
> +
> +/**
> + * queue_write_unlock - release write lock of a queue rwlock
> + * @lock : Pointer to queue rwlock structure
> + */
> +static inline void queue_write_unlock(struct qrwlock *lock)
> +{
> +	/*
> +	 * If the writer field is atomic, it can be cleared directly.
> +	 * Otherwise, an atomic subtraction will be used to clear it.
> +	 */
> +	if (__native_word(lock->cnts.writer))
> +		smp_store_release(&lock->cnts.writer, 0);
> +	else {
> +		smp_mb__before_atomic_dec();
> +		atomic_sub(_QW_LOCKED, &lock->cnts.rwa);
> +	}

Missing {}; see Documentation/CodingStyle, Chapter 3, near the very end.

> +}
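
With the braces balanced on both branches, per CodingStyle (untested):

static inline void queue_write_unlock(struct qrwlock *lock)
{
	/*
	 * If the writer field is atomic, it can be cleared directly.
	 * Otherwise, an atomic subtraction will be used to clear it.
	 */
	if (__native_word(lock->cnts.writer)) {
		smp_store_release(&lock->cnts.writer, 0);
	} else {
		smp_mb__before_atomic_dec();
		atomic_sub(_QW_LOCKED, &lock->cnts.rwa);
	}
}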

