From: Yuyang Du <>
Subject: [RFC PATCH 7/9 v4] Implement Workload Consolidation in idle_balance
Date: Wed, 25 Jun 2014 08:36:06 +0800
1) Skip pulling tasks to idle non-consolidated CPUs.
2) In addition, for an idle consolidated CPU, aggressively pull tasks from non-consolidated CPUs.
Signed-off-by: Yuyang Du <yuyang.du@intel.com>
---
 kernel/sched/fair.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 008cbc9..bf65fde 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2608,6 +2608,9 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
 static inline void update_cpu_concurrency(struct rq *rq);
 static struct sched_group *wc_find_group(struct sched_domain *sd,
 	struct task_struct *p, int this_cpu);
+static void wc_unload(struct cpumask *nonshielded, struct sched_domain *sd);
+static void wc_nonshielded_mask(int cpu, struct sched_domain *sd,
+	struct cpumask *mask);
 static int cpu_cc_capable(int cpu);
 
 /*
@@ -6808,6 +6811,22 @@ static int idle_balance(struct rq *this_rq)
 
 	update_blocked_averages(this_cpu);
 	rcu_read_lock();
+
+	sd = per_cpu(sd_wc, this_cpu);
+	if (sd) {
+		struct cpumask *nonshielded_cpus = __get_cpu_var(load_balance_mask);
+
+		cpumask_copy(nonshielded_cpus, cpu_active_mask);
+
+		/*
+		 * if we encounter shielded cpus here, don't do balance on them
+		 */
+		wc_nonshielded_mask(this_cpu, sd, nonshielded_cpus);
+		if (!cpumask_test_cpu(this_cpu, nonshielded_cpus))
+			goto unlock;
+		wc_unload(nonshielded_cpus, sd);
+	}
+
 	for_each_domain(this_cpu, sd) {
 		int continue_balancing = 1;
 		u64 t0, domain_cost;
@@ -6843,6 +6862,7 @@ static int idle_balance(struct rq *this_rq)
 		if (pulled_task || this_rq->nr_running > 0)
 			break;
 	}
+unlock:
 	rcu_read_unlock();
 
 	raw_spin_lock(&this_rq->lock);
-- 
1.7.9.5
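
[Editor's note, not part of the posted patch.] wc_nonshielded_mask() and wc_unload() are only forward-declared here; their definitions come from other patches in the series. As a rough sketch of the semantics the changelog describes -- "shielded" CPUs are the non-consolidated ones that idle_balance() should leave idle, and "unload" means migrating their remaining work toward the consolidated CPUs -- the following illustration uses the hypothetical helpers wc_cpu_shielded() and wc_offload_cpu() and is not the series' actual implementation:

/*
 * Illustrative sketch only -- NOT the code from this patch set.
 * wc_cpu_shielded() and wc_offload_cpu() are hypothetical placeholders
 * for the concurrency-based policy and migration step defined elsewhere
 * in the series.
 */
static void wc_nonshielded_mask(int cpu, struct sched_domain *sd,
				struct cpumask *mask)
{
	int i;

	/* Clear every CPU the consolidation policy wants to keep idle. */
	for_each_cpu(i, sched_domain_span(sd))
		if (wc_cpu_shielded(i))			/* hypothetical */
			cpumask_clear_cpu(i, mask);
}

static void wc_unload(struct cpumask *nonshielded, struct sched_domain *sd)
{
	int i;

	/*
	 * Push work off the shielded (non-consolidated) CPUs so the
	 * consolidated CPUs, including the one running idle_balance(),
	 * pick it up.
	 */
	for_each_cpu(i, sched_domain_span(sd))
		if (!cpumask_test_cpu(i, nonshielded))
			wc_offload_cpu(i, nonshielded);	/* hypothetical */
}

Under this reading, the "goto unlock" short-circuit in the hunk above implements point 1): a shielded (non-consolidated) idle CPU skips the whole for_each_domain() pass, so it never pulls a task and can stay in a deep idle state, while a consolidated idle CPU additionally runs wc_unload() to drain the shielded CPUs (point 2).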