Date: Fri, 30 Jul 2021
From: Thomas Gleixner <tglx@linutronix.de>
Subject: [patch 29/63] locking/rtmutex: Provide the spin/rwlock core lock function

Provide a simplified version of the rtmutex slowlock function which handles
neither signals nor timeouts and is careful to preserve the state of the
blocked task across the lock operation.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/locking/rtmutex.c        |   60 ++++++++++++++++++++++++++++++++++++++++
 kernel/locking/rtmutex_common.h |    2 +-
 2 files changed, 61 insertions(+), 1 deletion(-)
---
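Note (not part of the applied diff): the slow path below is only entered once
the lock-less fast path has failed. A minimal sketch of the expected caller
side, assuming a cmpxchg based fast path like the one the existing rtmutex
code already uses; the rtlock_lock() helper name is illustrative only and the
real wiring of the RT spinlock/rwlock front ends happens in later patches of
this series. Only rt_mutex_cmpxchg_acquire() and rtlock_slowlock() come from
the existing code respectively this patch:

/* Sketch only: illustrative fast path in front of rtlock_slowlock(). */
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
	/*
	 * Try to cmpxchg the owner from NULL to current. If that fails
	 * the lock is contended and the slow path added by this patch
	 * takes over under lock->wait_lock.
	 */
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
}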
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1410,3 +1410,63 @@ static __always_inline int __rt_mutex_lo
 	return rt_mutex_slowlock(lock, state);
 }
 #endif /* RT_MUTEX_BUILD_MUTEX */
+
+#ifdef RT_MUTEX_BUILD_SPINLOCKS
+/*
+ * Functions required for spin/rw_lock substitution on RT kernels
+ */
+
+/**
+ * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
+ * @lock:	The underlying rt mutex
+ */
+static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
+{
+	struct rt_mutex_waiter waiter;
+
+	lockdep_assert_held(&lock->wait_lock);
+
+	if (try_to_take_rt_mutex(lock, current, NULL))
+		return;
+
+	rt_mutex_init_rtlock_waiter(&waiter);
+
+	/* Save current state and set state to TASK_RTLOCK_WAIT */
+	current_save_and_set_rtlock_wait_state();
+
+	task_blocks_on_rt_mutex(lock, &waiter, current, RT_MUTEX_MIN_CHAINWALK);
+
+	for (;;) {
+		/* Try to acquire the lock again. */
+		if (try_to_take_rt_mutex(lock, current, &waiter))
+			break;
+
+		raw_spin_unlock_irq(&lock->wait_lock);
+
+		schedule_rtlock();
+
+		raw_spin_lock_irq(&lock->wait_lock);
+		set_current_state(TASK_RTLOCK_WAIT);
+	}
+
+	/* Restore the task state */
+	current_restore_rtlock_saved_state();
+
+	/*
+	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We
+	 * might have to fix that up:
+	 */
+	fixup_rt_mutex_waiters(lock);
+	debug_rt_mutex_free_waiter(&waiter);
+}
+
+static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+	rtlock_slowlock_locked(lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+}
+
+#endif /* RT_MUTEX_BUILD_SPINLOCKS */
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -181,7 +181,7 @@ static inline void rt_mutex_init_waiter(
 	waiter->task = NULL;
 }
 
-static inline void rtlock_init_rtmutex_waiter(struct rt_mutex_waiter *waiter)
+static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
 {
 	rt_mutex_init_waiter(waiter);
 	waiter->wake_state = TASK_RTLOCK_WAIT;
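
A note on the save/restore pair in rtlock_slowlock_locked() above: on
PREEMPT_RT a spinlock_t or rwlock_t acquisition can happen while the caller
has already set its own sleep state for a surrounding wait loop, and that
state has to survive blocking on the substituted rtmutex. Below is a minimal
sketch of such a caller, assuming a PREEMPT_RT kernel where spin_lock() ends
up in this slow path; struct example_dev and example_wait_for_event() are
made-up names for illustration only:

#include <linux/sched.h>
#include <linux/spinlock.h>

/* Illustrative only: made-up device structure. */
struct example_dev {
	spinlock_t	lock;
	bool		event_pending;
};

/*
 * Classic wait loop which sets TASK_UNINTERRUPTIBLE before taking a
 * spinlock_t. On PREEMPT_RT the spin_lock() below can block in the rtlock
 * slow path; current_save_and_set_rtlock_wait_state() and
 * current_restore_rtlock_saved_state() make sure TASK_UNINTERRUPTIBLE is
 * still set when spin_lock() returns, so the schedule() at the bottom of
 * the loop sleeps as intended.
 */
static void example_wait_for_event(struct example_dev *dev)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_lock(&dev->lock);
		if (dev->event_pending) {
			dev->event_pending = false;
			spin_unlock(&dev->lock);
			break;
		}
		spin_unlock(&dev->lock);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

Without the save/restore the blocked task would return from spin_lock() in
TASK_RUNNING state and the wait loop above would degrade into a busy loop.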