Subject: [tip: sched/core] sched: Provide raw_spin_rq_*lock*() helpers
The following commit has been merged into the sched/core branch of tip:

Commit-ID: 39d371b7c0c299d489041884d005aacc4bba8c15
Gitweb: https://git.kernel.org/tip/39d371b7c0c299d489041884d005aacc4bba8c15
Author: Peter Zijlstra <peterz@infradead.org>
AuthorDate: Tue, 02 Mar 2021 12:13:13 +01:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 12 May 2021 11:43:26 +02:00

sched: Provide raw_spin_rq_*lock*() helpers

In preparation for playing games with rq->lock, add some rq_lock
wrappers.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.075967879@infradead.org
---
kernel/sched/core.c  | 15 +++++++++++++++
kernel/sched/sched.h | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 65 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 660120d..5568018 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -184,6 +184,21 @@ int sysctl_sched_rt_runtime = 950000;
*
*/

+void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
+{
+	raw_spin_lock_nested(rq_lockp(rq), subclass);
+}
+
+bool raw_spin_rq_trylock(struct rq *rq)
+{
+	return raw_spin_trylock(rq_lockp(rq));
+}
+
+void raw_spin_rq_unlock(struct rq *rq)
+{
+	raw_spin_unlock(rq_lockp(rq));
+}
+
/*
* __task_rq_lock - lock the rq @p resides on.
*/
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a189bec..f654587 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1113,6 +1113,56 @@ static inline bool is_migration_disabled(struct task_struct *p)
#endif
}

+static inline raw_spinlock_t *rq_lockp(struct rq *rq)
+{
+	return &rq->lock;
+}
+
+static inline void lockdep_assert_rq_held(struct rq *rq)
+{
+	lockdep_assert_held(rq_lockp(rq));
+}
+
+extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
+extern bool raw_spin_rq_trylock(struct rq *rq);
+extern void raw_spin_rq_unlock(struct rq *rq);
+
+static inline void raw_spin_rq_lock(struct rq *rq)
+{
+	raw_spin_rq_lock_nested(rq, 0);
+}
+
+static inline void raw_spin_rq_lock_irq(struct rq *rq)
+{
+	local_irq_disable();
+	raw_spin_rq_lock(rq);
+}
+
+static inline void raw_spin_rq_unlock_irq(struct rq *rq)
+{
+	raw_spin_rq_unlock(rq);
+	local_irq_enable();
+}
+
+static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	raw_spin_rq_lock(rq);
+	return flags;
+}
+
+static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
+{
+	raw_spin_rq_unlock(rq);
+	local_irq_restore(flags);
+}
+
+#define raw_spin_rq_lock_irqsave(rq, flags)	\
+do {						\
+	flags = _raw_spin_rq_lock_irqsave(rq);	\
+} while (0)
+
#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);

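For illustration, a minimal usage sketch of the new helpers; it is not part
of the patch. The function example_update_rq() and its body are hypothetical,
only raw_spin_rq_lock_irqsave()/raw_spin_rq_unlock_irqrestore(),
lockdep_assert_rq_held() and rq_lockp() come from the hunks above. A caller
that previously operated on &rq->lock directly would go through the wrappers
instead:

static void example_update_rq(struct rq *rq)
{
	unsigned long flags;

	/* Acquires rq_lockp(rq) with interrupts disabled, saving IRQ state. */
	raw_spin_rq_lock_irqsave(rq, flags);

	/* The wrapper asserts on the indirected lock, not on &rq->lock. */
	lockdep_assert_rq_held(rq);

	/* ... update rq state while holding the lock ... */

	/* Releases rq_lockp(rq) and restores the saved IRQ state. */
	raw_spin_rq_unlock_irqrestore(rq, flags);
}

Because every helper resolves the lock through rq_lockp(rq) rather than
taking &rq->lock directly, a later change can make rq_lockp() return a
different lock without touching the call sites, which is presumably what
the changelog means by playing games with rq->lock.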