From: Yuyang Du <yuyang.du@intel.com>
Subject: [RFC PATCH 11/12 v1] Intercept periodic load balance
Date: Mon, 5 May 2014

Signed-off-by: Yuyang Du <yuyang.du@intel.com>
---
kernel/sched/fair.c | 33 ++++++++++++++++++++++++++++++++-
1 file changed, 32 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 374c86b..6cbf6c5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7260,6 +7260,36 @@ static void run_rebalance_domains(struct softirq_action *h)
 	enum cpu_idle_type idle = this_rq->idle_balance ?
 						CPU_IDLE : CPU_NOT_IDLE;
 
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	struct cpumask *nonshielded = __get_cpu_var(local_cpu_mask);
+	int this_cpu = cpu_of(this_rq);
+
+	/*
+	 * If we encounter shielded cpus here, don't balance on them.
+	 */
+	cpumask_copy(nonshielded, cpu_active_mask);
+
+	rcu_read_lock();
+	workload_consolidation_nonshielded_mask(this_cpu, nonshielded);
+	rcu_read_unlock();
+
+	/*
+	 * Aggressively unload tasks from the shielded cpus to the
+	 * unshielded cpus.
+	 */
+	workload_consolidation_unload(nonshielded);
+
+	if (cpumask_test_cpu(this_cpu, nonshielded)) {
+		rebalance_domains(this_rq, idle);
+
+		/*
+		 * If this cpu has a pending nohz_balance_kick, then do the
+		 * balancing on behalf of the other idle cpus whose ticks are
+		 * stopped.
+		 */
+		cpumask_and(nonshielded, nonshielded, nohz.idle_cpus_mask);
+		nohz_idle_balance(this_rq, idle, nonshielded);
+	}
+#else
 	rebalance_domains(this_rq, idle);
 
 	/*
@@ -7267,7 +7297,8 @@ static void run_rebalance_domains(struct softirq_action *h)
 	 * balancing on behalf of the other idle cpus whose ticks are
 	 * stopped.
 	 */
-	nohz_idle_balance(this_rq, idle);
+	nohz_idle_balance(this_rq, idle, nohz.idle_cpus_mask);
+#endif
 }
 
 /*
--
1.7.9.5
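
For readers outside the kernel tree, the standalone sketch below mirrors the
control flow the hunk above adds to run_rebalance_domains(): build a
"nonshielded" mask from the active CPUs, let the consolidation policy clear
the shielded ones, push load off the shielded CPUs, and only do periodic and
nohz balancing on a CPU that is still in the mask. Everything in it is a
stand-in: the flat bitmask, the shield-the-upper-half policy, and the printf
stubs replace the kernel's cpumask API, workload_consolidation_nonshielded_mask(),
workload_consolidation_unload(), rebalance_domains() and nohz_idle_balance();
only the branching structure follows the patch.

/* Minimal, self-contained sketch -- not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 8

typedef uint32_t cpumask_t;		/* one bit per CPU */

static const cpumask_t cpu_active_mask = (1u << NR_CPUS) - 1;
static const cpumask_t nohz_idle_cpus_mask = 0x0C;	/* pretend CPUs 2-3 are tickless idle */

/* Hypothetical policy: shield the upper half of the CPUs. */
static void consolidation_nonshielded_mask(int this_cpu, cpumask_t *mask)
{
	(void)this_cpu;
	*mask &= (1u << (NR_CPUS / 2)) - 1;
}

/* Stub for pushing tasks off the shielded CPUs. */
static void consolidation_unload(cpumask_t nonshielded)
{
	printf("unload shielded cpus onto target mask 0x%02x\n", nonshielded);
}

static void rebalance_domains(int cpu)
{
	printf("cpu%d: periodic rebalance\n", cpu);
}

static void nohz_idle_balance(int cpu, cpumask_t idle_mask)
{
	printf("cpu%d: nohz balance for idle mask 0x%02x\n", cpu, idle_mask);
}

/* Mirrors the patched run_rebalance_domains() flow. */
static void run_rebalance(int this_cpu)
{
	cpumask_t nonshielded = cpu_active_mask;

	/* Clear the shielded CPUs from the active mask. */
	consolidation_nonshielded_mask(this_cpu, &nonshielded);

	consolidation_unload(nonshielded);

	/* A shielded CPU skips both periodic and nohz balancing. */
	if (nonshielded & (1u << this_cpu)) {
		rebalance_domains(this_cpu);
		nohz_idle_balance(this_cpu, nonshielded & nohz_idle_cpus_mask);
	}
}

int main(void)
{
	run_rebalance(1);	/* nonshielded CPU: balances */
	run_rebalance(6);	/* shielded CPU: skips balancing */
	return 0;
}

Built with any C99 compiler, it prints the balance decisions for one
nonshielded and one shielded CPU, which is exactly the distinction the
#ifdef CONFIG_WORKLOAD_CONSOLIDATION branch introduces.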

