Date: Fri, 05 Apr 2024 12:28:00 +0200
From: Peter Zijlstra <>
Subject: [RFC][PATCH 06/10] sched: Allow sched_class::dequeue_task() to fail
Change the function signature of sched_class::dequeue_task() to return a boolean, allowing future patches to 'fail' dequeue.
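To make the intent concrete, a future caller could consume the new return value along the lines of the sketch below. This is a minimal, hypothetical illustration only; the try_to_block_task() name and its bookkeeping are assumptions, not part of this patch:

  static bool try_to_block_task(struct rq *rq, struct task_struct *p, int flags)
  {
          /*
           * Per the comment added to dequeue_task() in core.c, it may
           * only return false for a DEQUEUE_SLEEP dequeue; in that case
           * the class kept @p enqueued and the caller must treat the
           * task as still runnable instead of blocking it.
           */
          if (!dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
                  return false;

          /* The task really left the runqueue and may block. */
          return true;
  }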
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/sched/core.c      | 7 +++++--
 kernel/sched/deadline.c  | 4 +++-
 kernel/sched/fair.c      | 4 +++-
 kernel/sched/idle.c      | 3 ++-
 kernel/sched/rt.c        | 4 +++-
 kernel/sched/sched.h     | 2 +-
 kernel/sched/stop_task.c | 3 ++-
 7 files changed, 19 insertions(+), 8 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2119,7 +2119,10 @@ static inline void enqueue_task(struct r
 		sched_core_enqueue(rq, p);
 }
 
-static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
+/*
+ * Must only return false when DEQUEUE_SLEEP.
+ */
+static inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (sched_core_enabled(rq))
 		sched_core_dequeue(rq, p, flags);
@@ -2133,7 +2136,7 @@ static inline void dequeue_task(struct r
 	}
 
 	uclamp_rq_dec(rq, p);
-	p->sched_class->dequeue_task(rq, p, flags);
+	return p->sched_class->dequeue_task(rq, p, flags);
 }
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1842,7 +1842,7 @@ static void enqueue_task_dl(struct rq *r
 		enqueue_pushable_dl_task(rq, p);
 }
 
-static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
+static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_curr_dl(rq);
 
@@ -1852,6 +1852,8 @@ static void dequeue_task_dl(struct rq *r
 	dequeue_dl_entity(&p->dl, flags);
 	if (!p->dl.dl_throttled && !dl_server(&p->dl))
 		dequeue_pushable_dl_task(rq, p);
+
+	return true;
 }
 
 /*
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6828,7 +6828,7 @@ static void set_next_buddy(struct sched_
  * decreased. We remove the task from the rbtree and
  * update the fair scheduling stats:
  */
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
@@ -6896,6 +6896,8 @@ static void dequeue_task_fair(struct rq
 dequeue_throttle:
 	util_est_update(&rq->cfs, p, task_sleep);
 	hrtick_update(rq);
+
+	return true;
 }
 
 #ifdef CONFIG_SMP
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -486,13 +486,14 @@ struct task_struct *pick_next_task_idle(
  * It is not legal to sleep in the idle task - print a warning
  * message if some code attempts to do it:
  */
-static void
+static bool
 dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 {
 	raw_spin_rq_unlock_irq(rq);
 	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
 	dump_stack();
 	raw_spin_rq_lock_irq(rq);
+	return true;
 }
 
 /*
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1493,7 +1493,7 @@ enqueue_task_rt(struct rq *rq, struct ta
 		enqueue_pushable_task(rq, p);
 }
 
-static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
+static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
@@ -1501,6 +1501,8 @@ static void dequeue_task_rt(struct rq *r
 	dequeue_rt_entity(rt_se, flags);
 
 	dequeue_pushable_task(rq, p);
+
+	return true;
 }
 
 /*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2278,7 +2278,7 @@ struct sched_class {
 #endif
 
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
+	bool (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 
 	void (*yield_task)   (struct rq *rq);
 	bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -57,10 +57,11 @@ enqueue_task_stop(struct rq *rq, struct
 	add_nr_running(rq, 1);
 }
 
-static void
+static bool
 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
 	sub_nr_running(rq, 1);
+	return true;
 }
 
 static void yield_task_stop(struct rq *rq)