From: "Joel Fernandes (Google)" <>
Subject: [PATCH -tip 04/32] sched: Core-wide rq->lock
Date: Tue, 17 Nov 2020 18:19:34 -0500

From: Peter Zijlstra <peterz@infradead.org>
Introduce the basic infrastructure to have a core-wide rq->lock.
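
As an illustration only (not part of this patch), callers take the run-queue
lock through the rq_lockp() indirection, which this patch extends to return
the core rq's lock when core scheduling is enabled, so that all SMT siblings
of an enabled core serialize on the same lock:

	struct rq *rq = cpu_rq(cpu);

	/*
	 * With core scheduling enabled, rq_lockp() returns the core rq's
	 * __lock for every sibling CPU; otherwise it returns this rq's own
	 * __lock. Either way, the lock/unlock pair below operates on the
	 * same raw_spinlock_t.
	 */
	raw_spin_lock(rq_lockp(rq));
	/* ... update rq state ... */
	raw_spin_unlock(rq_lockp(rq));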
Tested-by: Julien Desfossez <jdesfossez@digitalocean.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Julien Desfossez <jdesfossez@digitalocean.com>
Signed-off-by: Vineeth Remanan Pillai <viremana@linux.microsoft.com>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
---
 kernel/Kconfig.preempt |   5 ++
 kernel/sched/core.c    | 108 +++++++++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h   |  31 ++++++++++++
 3 files changed, 144 insertions(+)
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index bf82259cff96..6d8be4630bd6 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -80,3 +80,8 @@ config PREEMPT_COUNT
 config PREEMPTION
 	bool
 	select PREEMPT_COUNT
+
+config SCHED_CORE
+	bool "Core Scheduling for SMT"
+	default y
+	depends on SCHED_SMT
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index db5cc05a68bc..6d88bc9a6818 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -74,6 +74,70 @@ unsigned int sysctl_sched_rt_period = 1000000;
 
 __read_mostly int scheduler_running;
 
+#ifdef CONFIG_SCHED_CORE
+
+DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
+
+/*
+ * The static-key + stop-machine variable are needed such that:
+ *
+ *	spin_lock(rq_lockp(rq));
+ *	...
+ *	spin_unlock(rq_lockp(rq));
+ *
+ * ends up locking and unlocking the _same_ lock, and all CPUs
+ * always agree on what rq has what lock.
+ *
+ * XXX entirely possible to selectively enable cores, don't bother for now.
+ */
+static int __sched_core_stopper(void *data)
+{
+	bool enabled = !!(unsigned long)data;
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		cpu_rq(cpu)->core_enabled = enabled;
+
+	return 0;
+}
+
+static DEFINE_MUTEX(sched_core_mutex);
+static int sched_core_count;
+
+static void __sched_core_enable(void)
+{
+	// XXX verify there are no cookie tasks (yet)
+
+	static_branch_enable(&__sched_core_enabled);
+	stop_machine(__sched_core_stopper, (void *)true, NULL);
+}
+
+static void __sched_core_disable(void)
+{
+	// XXX verify there are no cookie tasks (left)
+
+	stop_machine(__sched_core_stopper, (void *)false, NULL);
+	static_branch_disable(&__sched_core_enabled);
+}
+
+void sched_core_get(void)
+{
+	mutex_lock(&sched_core_mutex);
+	if (!sched_core_count++)
+		__sched_core_enable();
+	mutex_unlock(&sched_core_mutex);
+}
+
+void sched_core_put(void)
+{
+	mutex_lock(&sched_core_mutex);
+	if (!--sched_core_count)
+		__sched_core_disable();
+	mutex_unlock(&sched_core_mutex);
+}
+
+#endif /* CONFIG_SCHED_CORE */
+
 /*
  * part of the period that we allow rt tasks to run in us.
  * default: 0.95s
@@ -4859,6 +4923,42 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	BUG();
 }
 
+#ifdef CONFIG_SCHED_CORE
+
+static inline void sched_core_cpu_starting(unsigned int cpu)
+{
+	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+	struct rq *rq, *core_rq = NULL;
+	int i;
+
+	core_rq = cpu_rq(cpu)->core;
+
+	if (!core_rq) {
+		for_each_cpu(i, smt_mask) {
+			rq = cpu_rq(i);
+			if (rq->core && rq->core == rq)
+				core_rq = rq;
+		}
+
+		if (!core_rq)
+			core_rq = cpu_rq(cpu);
+
+		for_each_cpu(i, smt_mask) {
+			rq = cpu_rq(i);
+
+			WARN_ON_ONCE(rq->core && rq->core != core_rq);
+			rq->core = core_rq;
+		}
+	}
+
+	printk("core: %d -> %d\n", cpu, cpu_of(core_rq));
+}
+#else /* !CONFIG_SCHED_CORE */
+
+static inline void sched_core_cpu_starting(unsigned int cpu) {}
+
+#endif /* CONFIG_SCHED_CORE */
+
 /*
  * __schedule() is the main scheduler function.
  *
@@ -7484,6 +7584,9 @@ static void sched_rq_cpu_starting(unsigned int cpu)
 
 int sched_cpu_starting(unsigned int cpu)
 {
+
+	sched_core_cpu_starting(cpu);
+
 	sched_rq_cpu_starting(cpu);
 	sched_tick_start(cpu);
 	return 0;
@@ -7747,6 +7850,11 @@ void __init sched_init(void)
 #endif /* CONFIG_SMP */
 		hrtick_rq_init(rq);
 		atomic_set(&rq->nr_iowait, 0);
+
+#ifdef CONFIG_SCHED_CORE
+		rq->core = NULL;
+		rq->core_enabled = 0;
+#endif
 	}
 
 	set_load_weight(&init_task, false);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5a0dd2b312aa..0dfccf988998 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1061,6 +1061,12 @@ struct rq {
 #endif
 	unsigned int		push_busy;
 	struct cpu_stop_work	push_work;
+
+#ifdef CONFIG_SCHED_CORE
+	/* per rq */
+	struct rq		*core;
+	unsigned int		core_enabled;
+#endif
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1099,11 +1105,36 @@ static inline bool is_migration_disabled(struct task_struct *p)
 #endif
 }
 
+#ifdef CONFIG_SCHED_CORE
+DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);
+
+static inline bool sched_core_enabled(struct rq *rq)
+{
+	return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
+}
+
+static inline raw_spinlock_t *rq_lockp(struct rq *rq)
+{
+	if (sched_core_enabled(rq))
+		return &rq->core->__lock;
+
+	return &rq->__lock;
+}
+
+#else /* !CONFIG_SCHED_CORE */
+
+static inline bool sched_core_enabled(struct rq *rq)
+{
+	return false;
+}
+
 static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 {
 	return &rq->__lock;
 }
 
+#endif /* CONFIG_SCHED_CORE */
+
 #ifdef CONFIG_SCHED_SMT
 extern void __update_idle_core(struct rq *rq);
-- 
2.29.2.299.gdc1121823c-goog