From: Waiman Long <longman@redhat.com>
Subject: [PATCH v6 3/6] locking/rwsem: Disable preemption at all down_write*() and up_write() code paths
Date: 17 Nov 2022
The previous patch disabled preemption at all the down_read() and
up_read() code paths. For symmetry, this patch extends commit
48dfb5d2560d ("locking/rwsem: Disable preemption while trying for rwsem
lock") to have preemption disabled at all the down_write() and
up_write() code paths, including downgrade_write().
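
For illustration only (this sketch is not part of the patch, and the
name writer_lock_common is hypothetical): after this change, every
writer-side entry point takes roughly the following shape, with
preemption held off across both the lock-free fast path and the
sleeping slowpath instead of being toggled inside the helpers:

	/*
	 * Minimal sketch of the pattern applied here; it mirrors
	 * __down_write_common() as modified below.
	 */
	static inline int writer_lock_common(struct rw_semaphore *sem, int state)
	{
		int ret = 0;

		preempt_disable();	/* covers fast path and slowpath alike */
		if (unlikely(!rwsem_write_trylock(sem))) {
			if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
				ret = -EINTR;	/* killable sleep interrupted */
		}
		preempt_enable();

		return ret;
	}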

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Waiman Long <longman@redhat.com>
---
 kernel/locking/rwsem.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index ebaff8a87e1d..8159a69b5de8 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -256,16 +256,13 @@ static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
 {
 	long tmp = RWSEM_UNLOCKED_VALUE;
-	bool ret = false;
 
-	preempt_disable();
 	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
 		rwsem_set_owner(sem);
-		ret = true;
+		return true;
 	}
 
-	preempt_enable();
-	return ret;
+	return false;
 }
 
 /*
@@ -716,7 +713,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 		return false;
 	}
 
-	preempt_disable();
 	/*
 	 * Disabling preemption is equivalent to an RCU read-side critical
 	 * section, thus the task_struct structure won't go away.
@@ -728,7 +724,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	if ((flags & RWSEM_NONSPINNABLE) ||
 	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
 		ret = false;
-	preempt_enable();
 
 	lockevent_cond_inc(rwsem_opt_fail, !ret);
 	return ret;
@@ -828,8 +823,6 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	int loop = 0;
 	u64 rspin_threshold = 0;
 
-	preempt_disable();
-
 	/* sem->wait_lock should not be held when doing optimistic spinning */
 	if (!osq_lock(&sem->osq))
 		goto done;
@@ -937,7 +930,6 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	}
 	osq_unlock(&sem->osq);
 done:
-	preempt_enable();
 	lockevent_cond_inc(rwsem_opt_fail, !taken);
 	return taken;
 }
@@ -1178,15 +1170,12 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 		if (waiter.handoff_set) {
 			enum owner_state owner_state;
 
-			preempt_disable();
 			owner_state = rwsem_spin_on_owner(sem);
-			preempt_enable();
-
 			if (owner_state == OWNER_NULL)
 				goto trylock_again;
 		}
 
-		schedule();
+		schedule_preempt_disabled();
 		lockevent_inc(rwsem_sleep_writer);
 		set_current_state(state);
 trylock_again:
@@ -1310,12 +1299,15 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline int __down_write_common(struct rw_semaphore *sem, int state)
 {
+	int ret = 0;
+
+	preempt_disable();
 	if (unlikely(!rwsem_write_trylock(sem))) {
 		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
-			return -EINTR;
+			ret = -EINTR;
 	}
-
-	return 0;
+	preempt_enable();
+	return ret;
 }
 
 static inline void __down_write(struct rw_semaphore *sem)
@@ -1330,8 +1322,14 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
+	int ret;
+
+	preempt_disable();
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
-	return rwsem_write_trylock(sem);
+	ret = rwsem_write_trylock(sem);
+	preempt_enable();
+
+	return ret;
 }
 
 /*
@@ -1374,9 +1372,9 @@ static inline void __up_write(struct rw_semaphore *sem)
 	preempt_disable();
 	rwsem_clear_owner(sem);
 	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
-	preempt_enable();
 	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
 		rwsem_wake(sem);
+	preempt_enable();
 }
 
 /*
@@ -1394,11 +1392,13 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 	 * write side. As such, rely on RELEASE semantics.
 	 */
 	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
+	preempt_disable();
 	tmp = atomic_long_fetch_add_release(
 		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
 	rwsem_set_reader_owned(sem);
 	if (tmp & RWSEM_FLAG_WAITERS)
 		rwsem_downgrade_wake(sem);
+	preempt_enable();
 }
 
 #else /* !CONFIG_PREEMPT_RT */
-- 
2.31.1
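
As additional context (a hypothetical example, not part of the patch):
a caller exercising the writer-side entry points whose preemption
coverage changes in this series. Only existing rwsem interfaces are
used; the names example_sem, example_data, and example_writer are made
up for illustration.

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(example_sem);	/* statically initialized rwsem */
	static int example_data;

	static void example_writer(int v)
	{
		down_write(&example_sem);	/* exclusive writer lock */
		example_data = v;		/* update under the write lock */
		downgrade_write(&example_sem);	/* atomically become a reader */
		/* ... read-side work on example_data ... */
		up_read(&example_sem);		/* release the read lock */
	}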