Subject: [RFC 56/60] cosched: Adjust wakeup preemption rules for coscheduling
Modify check_preempt_wakeup() to work correctly with coscheduled sets.

On the one hand, that means not preempting blindly when the woken
task potentially belongs to a different coscheduled set and we are
not allowed to switch sets. Instead, we have to notify the correct
leader to follow up.

On the other hand, we need to handle additional idle cases, as CPUs
can now be idle *within* certain coscheduled sets, and woken tasks
may no longer blindly preempt the idle task.
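
The following is a rough, self-contained model of the resulting
decision flow; it is illustration only, not kernel code. The struct,
the enum, and wakeup_decision() are made up for this sketch, while
same_se, level_idle and the leader comparison correspond to the
se == pse, !cfs_rq_of(se)->curr and leader_of(se) != cpu_of(rq)
checks in the patch below.

#include <stdbool.h>
#include <stdio.h>

struct wakeup_ctx {
	int this_cpu;        /* CPU running check_preempt_wakeup() */
	int leader_cpu;      /* leader_of(se): leader of the matched level */
	bool same_se;        /* se == pse after find_matching_se() */
	bool level_idle;     /* !cfs_rq_of(se)->curr: nothing runs there */
	bool should_preempt; /* wakeup_preempt_entity(se, pse) == 1 */
};

enum wakeup_action {
	WAKEUP_NOTHING,
	WAKEUP_PREEMPT_LOCAL,
	WAKEUP_NOTIFY_LEADER,
};

static enum wakeup_action wakeup_decision(const struct wakeup_ctx *c)
{
	/* Woken task is in our set and we are idle within it: preempt here. */
	if (c->same_se)
		return WAKEUP_PREEMPT_LOCAL;

	/* The matched level runs nothing: its leader gets a reschedule IPI. */
	if (c->level_idle)
		return WAKEUP_NOTIFY_LEADER;

	/* Usual vruntime-based check; no preemption means nothing to do. */
	if (!c->should_preempt)
		return WAKEUP_NOTHING;

	/* Only the leader of the matched level may switch coscheduled sets. */
	if (c->leader_cpu != c->this_cpu)
		return WAKEUP_NOTIFY_LEADER;

	return WAKEUP_PREEMPT_LOCAL;
}

int main(void)
{
	struct wakeup_ctx c = {
		.this_cpu = 0, .leader_cpu = 2,
		.same_se = false, .level_idle = false, .should_preempt = true,
	};

	/* Expect WAKEUP_NOTIFY_LEADER: CPU 2 decides whether to switch sets. */
	printf("action = %d\n", wakeup_decision(&c));
	return 0;
}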

Signed-off-by: Jan H. Schönherr <jschoenh@amazon.de>
---
kernel/sched/fair.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 83 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 07fd9dd5561d..0c1d9334ea8e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6882,6 +6882,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
int next_buddy_marked = 0;
struct cfs_rq *cfs_rq;
int scale;
+#ifdef CONFIG_COSCHEDULING
+ struct rq_flags rf;
+#endif

/* FIXME: locking may be off after fetching the idle_se */
if (cosched_is_idle(rq, curr))
@@ -6908,6 +6911,13 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
}

/*
+ * FIXME: Check whether this can be re-enabled with coscheduling
+ *
+ * We might want to send a reschedule IPI to the leader, which is only
+ * checked further below.
+ */
+#ifndef CONFIG_COSCHEDULING
+ /*
* We can come here with TIF_NEED_RESCHED already set from new task
* wake up path.
*
@@ -6919,11 +6929,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
*/
if (test_tsk_need_resched(curr))
return;
+#endif

+ /*
+ * FIXME: Check whether this can be re-enabled with coscheduling
+ *
+ * curr and p could belong to different coscheduled sets,
+ * in which case the decision is not straightforward. Additionally,
+ * the preempt code needs to know the CPU it has to send an IPI
+ * to. This is not yet known here.
+ */
+#ifndef CONFIG_COSCHEDULING
/* Idle tasks are by definition preempted by non-idle tasks. */
if (unlikely(curr->policy == SCHED_IDLE) &&
likely(p->policy != SCHED_IDLE))
goto preempt;
+#endif

/*
* Batch and idle tasks do not preempt non-idle tasks (their preemption
@@ -6932,7 +6953,55 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
return;

+ /*
+ * FIXME: find_matching_se() might end up at SEs where a different CPU
+ * is leader. While we do get the locks *afterwards*, the question is
+ * whether anything bad can happen due to the lock-free traversal.
+ */
find_matching_se(&se, &pse);
+
+#ifdef CONFIG_COSCHEDULING
+ if (se == pse) {
+ /*
+ * There is nothing to do on this CPU within the current
+ * coscheduled set and the newly woken task belongs to this
+ * coscheduled set. Hence, it is a welcome distraction.
+ *
+ * [find_matching_se() walks up the hierarchy for se and pse
+ * until they are within the same CFS runqueue. As equality
+ * was eliminated at the beginning, equality now means that
+ * se was rq->idle_se from the start and pse approached it
+ * from within a child runqueue.]
+ */
+ SCHED_WARN_ON(!cosched_is_idle(rq, curr));
+ SCHED_WARN_ON(cosched_get_idle_se(rq) != se);
+ goto preempt;
+ }
+
+ if (hrq_of(cfs_rq_of(se))->sdrq_data.level) {
+ rq_lock(hrq_of(cfs_rq_of(se)), &rf);
+ update_rq_clock(hrq_of(cfs_rq_of(se)));
+ }
+
+ if (!cfs_rq_of(se)->curr) {
+ /*
+ * There is nothing to do at a higher level within the current
+ * coscheduled set and the newly woken task belongs to a
+ * different coscheduled set. Hence, it is a welcome
+ * distraction for the leader of that higher level.
+ *
+ * [If a leader does not find a SE in its top_cfs_rq, it does
+ * not record anything as current. Still, it tells its
+ * children within which coscheduled set they are idle.
+ * find_matching_se() now ended at such an idle leader. As
+ * we checked for se==pse earlier, we cannot be this leader.]
+ */
+ SCHED_WARN_ON(leader_of(se) == cpu_of(rq));
+ resched_cpu_locked(leader_of(se));
+ goto out;
+ }
+#endif
+
update_curr(cfs_rq_of(se));
BUG_ON(!pse);
if (wakeup_preempt_entity(se, pse) == 1) {
@@ -6942,18 +7011,30 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
*/
if (!next_buddy_marked)
set_next_buddy(pse);
+#ifdef CONFIG_COSCHEDULING
+ if (leader_of(se) != cpu_of(rq)) {
+ resched_cpu_locked(leader_of(se));
+ goto out;
+ }
+ if (hrq_of(cfs_rq_of(se))->sdrq_data.level)
+ rq_unlock(hrq_of(cfs_rq_of(se)), &rf);
+#endif
goto preempt;
}

+#ifdef CONFIG_COSCHEDULING
+out:
+ if (hrq_of(cfs_rq_of(se))->sdrq_data.level)
+ rq_unlock(hrq_of(cfs_rq_of(se)), &rf);
+#endif
return;
-
preempt:
resched_curr(rq);
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
* with schedule on the ->pre_schedule() or idle_balance()
- * point, either of which can * drop the rq lock.
+ * point, either of which can drop the rq lock.
*
* Also, during early boot the idle thread is in the fair class,
* for obvious reasons its a bad idea to schedule back to it.
--
2.9.3.1.gcba166c.dirty