From: Waiman Long <longman@redhat.com>
Subject: [PATCH v2 1/8] locking/rwsem: Minor code refactoring in rwsem_mark_wake()
Date: Mon, 27 Mar 2023 16:24:06 -0400
Rename "oldcount" to "count" as it is not always old count value. Also make some minor code refactoring to reduce indentation. There is no functional change.
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20230216210933.1169097-2-longman@redhat.com
---
 kernel/locking/rwsem.c | 44 +++++++++++++++++++++---------------------
 1 file changed, 22 insertions(+), 22 deletions(-)
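For readers unfamiliar with the shape of this refactoring: the second hunk
below replaces a nested writer branch with a goto-based early exit, so the
longer reader path no longer sits inside an else block. A minimal standalone
sketch of that pattern follows; every name in it is invented for illustration
and none of it is kernel code:

#include <stdio.h>

/* Invented, simplified stand-ins; none of these names exist in the kernel. */
enum waiter_type { WAITING_FOR_READ, WAITING_FOR_WRITE };
enum wake_type { WAKE_ANY, WAKE_READERS };

struct waiter {
	enum waiter_type type;
	const char *name;
};

/*
 * Early-exit shape: dispose of the writer case first and jump past it,
 * so the reader path below runs at the top indentation level.
 */
static void mark_wake(const struct waiter *w, enum wake_type wake)
{
	if (w->type != WAITING_FOR_WRITE)
		goto wake_readers;

	if (wake == WAKE_ANY)
		printf("queue writer %s for wakeup\n", w->name);
	return;

wake_readers:
	printf("wake readers starting at %s\n", w->name);
}

int main(void)
{
	const struct waiter writer = { WAITING_FOR_WRITE, "w1" };
	const struct waiter reader = { WAITING_FOR_READ, "r1" };

	mark_wake(&writer, WAKE_ANY);
	mark_wake(&reader, WAKE_ANY);
	return 0;
}

The actual patch achieves the same effect with the wake_readers label in
rwsem_mark_wake(), trading one level of nesting for a forward goto.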
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index acb5a50309a1..e589f69793df 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -40,7 +40,7 @@
  *
  * When the rwsem is reader-owned and a spinning writer has timed out,
  * the nonspinnable bit will be set to disable optimistic spinning.
-
+ *
  * When a writer acquires a rwsem, it puts its task_struct pointer
  * into the owner field. It is cleared after an unlock.
  *
@@ -413,7 +413,7 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
 			    struct wake_q_head *wake_q)
 {
 	struct rwsem_waiter *waiter, *tmp;
-	long oldcount, woken = 0, adjustment = 0;
+	long count, woken = 0, adjustment = 0;
 	struct list_head wlist;
 
 	lockdep_assert_held(&sem->wait_lock);
@@ -424,22 +424,23 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
 	 */
 	waiter = rwsem_first_waiter(sem);
 
-	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
-		if (wake_type == RWSEM_WAKE_ANY) {
-			/*
-			 * Mark writer at the front of the queue for wakeup.
-			 * Until the task is actually later awoken later by
-			 * the caller, other writers are able to steal it.
-			 * Readers, on the other hand, will block as they
-			 * will notice the queued writer.
-			 */
-			wake_q_add(wake_q, waiter->task);
-			lockevent_inc(rwsem_wake_writer);
-		}
+	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
+		goto wake_readers;
 
-		return;
+	if (wake_type == RWSEM_WAKE_ANY) {
+		/*
+		 * Mark writer at the front of the queue for wakeup.
+		 * Until the task is actually later awoken later by
+		 * the caller, other writers are able to steal it.
+		 * Readers, on the other hand, will block as they
+		 * will notice the queued writer.
+		 */
+		wake_q_add(wake_q, waiter->task);
+		lockevent_inc(rwsem_wake_writer);
 	}
+	return;
 
+wake_readers:
 	/*
 	 * No reader wakeup if there are too many of them already.
 	 */
@@ -455,15 +456,15 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
 		struct task_struct *owner;
 
 		adjustment = RWSEM_READER_BIAS;
-		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
-		if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
+		count = atomic_long_fetch_add(adjustment, &sem->count);
+		if (unlikely(count & RWSEM_WRITER_MASK)) {
 			/*
 			 * When we've been waiting "too" long (for writers
 			 * to give up the lock), request a HANDOFF to
 			 * force the issue.
 			 */
 			if (time_after(jiffies, waiter->timeout)) {
-				if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
+				if (!(count & RWSEM_FLAG_HANDOFF)) {
 					adjustment -= RWSEM_FLAG_HANDOFF;
 					lockevent_inc(rwsem_rlock_handoff);
 				}
@@ -524,21 +525,21 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
 	adjustment = woken * RWSEM_READER_BIAS - adjustment;
 	lockevent_cond_inc(rwsem_wake_reader, woken);
 
-	oldcount = atomic_long_read(&sem->count);
+	count = atomic_long_read(&sem->count);
 	if (list_empty(&sem->wait_list)) {
 		/*
 		 * Combined with list_move_tail() above, this implies
 		 * rwsem_del_waiter().
 		 */
 		adjustment -= RWSEM_FLAG_WAITERS;
-		if (oldcount & RWSEM_FLAG_HANDOFF)
+		if (count & RWSEM_FLAG_HANDOFF)
 			adjustment -= RWSEM_FLAG_HANDOFF;
 	} else if (woken) {
 		/*
 		 * When we've woken a reader, we no longer need to force
 		 * writers to give up the lock and we can clear HANDOFF.
 		 */
-		if (oldcount & RWSEM_FLAG_HANDOFF)
+		if (count & RWSEM_FLAG_HANDOFF)
 			adjustment -= RWSEM_FLAG_HANDOFF;
 	}
 
@@ -844,7 +845,6 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		 * Try to acquire the lock
 		 */
 		taken = rwsem_try_write_lock_unqueued(sem);
-
 		if (taken)
 			break;
 
-- 
2.31.1