From: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Subject: [RFC PATCH v1 4/5] Small imbalance corrections
Date: 24 Sep 2008
Add helper functions that bump up a small imbalance so that a task move
is eventually initiated and load is balanced across groups.

Signed-off-by: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
---

kernel/sched.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 72 insertions(+), 0 deletions(-)
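
As a rough standalone illustration (not part of the patch, and using made-up
stand-in structures instead of the real sd_loads/group_loads introduced
earlier in this series), the sketch below shows the condition under which
small_imbalance_one_task() rounds a sub-task-sized imbalance up to one full
task:

#include <stdio.h>

/*
 * Hypothetical stand-ins for the group/domain load summaries used by this
 * series; only the fields referenced below are modelled.
 */
struct group_loads_example {
        unsigned long load;                     /* total load of the group */
        unsigned long avg_load_per_task;        /* load / nr_running */
        unsigned int nr_running;
};

struct sd_loads_example {
        struct group_loads_example local;
        struct group_loads_example busiest;
        unsigned long max_load;                 /* load of the busiest group */
};

/* Same check as small_imbalance_one_task() in the patch below. */
static int bump_to_one_task(struct sd_loads_example *sdl,
                            unsigned long *imbalance)
{
        unsigned int imbn = 2;

        if (sdl->local.nr_running &&
            sdl->busiest.avg_load_per_task > sdl->local.avg_load_per_task)
                imbn = 1;

        if (sdl->max_load - sdl->local.load +
            2 * sdl->busiest.avg_load_per_task >=
            sdl->busiest.avg_load_per_task * imbn) {
                *imbalance = sdl->busiest.avg_load_per_task;
                return 1;
        }
        return 0;
}

int main(void)
{
        /*
         * Made-up numbers: the busiest group runs two tasks of weight 1024
         * while the local group is idle, so the condition holds and the
         * imbalance is set to one task's worth of load (1024).
         */
        struct sd_loads_example sdl = {
                .local   = { .load = 0, .avg_load_per_task = 0, .nr_running = 0 },
                .busiest = { .load = 2048, .avg_load_per_task = 1024, .nr_running = 2 },
                .max_load = 2048,
        };
        unsigned long imbalance = 0;

        if (bump_to_one_task(&sdl, &imbalance))
                printf("bump imbalance to %lu (one task)\n", imbalance);
        else
                printf("leave imbalance at %lu\n", imbalance);
        return 0;
}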

diff --git a/kernel/sched.c b/kernel/sched.c
index dd87061..1f38e2c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3219,6 +3219,78 @@ void update_sd_loads(struct sd_loads *sdl, struct group_loads *gl)
 
 }
 
+/* Bump up imbalance to one task so that some task movement can happen */
+
+int small_imbalance_one_task(struct sd_loads *sdl, unsigned long *imbalance)
+{
+        unsigned int imbn;
+        imbn = 2;
+        if (sdl->local.nr_running) {
+                if (sdl->busiest.avg_load_per_task >
+                                sdl->local.avg_load_per_task)
+                        imbn = 1;
+        }
+
+        if (sdl->max_load - sdl->local.load +
+                2*sdl->busiest.avg_load_per_task >=
+                        sdl->busiest.avg_load_per_task * imbn) {
+                *imbalance = sdl->busiest.avg_load_per_task;
+                return 1;
+        }
+        return 0;
+}
+
+/*
+ * Adjust imbalance to move task if the result of the move will
+ * yield better use of cpu power
+ */
+
+void small_imbalance_optimize_cpu_power(struct sd_loads *sdl,
+                                        unsigned long *imbalance)
+{
+        unsigned long tmp, pwr_now, pwr_move;
+        pwr_move = pwr_now = 0;
+
+        /*
+         * OK, we don't have enough imbalance to justify moving tasks,
+         * however we may be able to increase total CPU power used by
+         * moving them.
+         */
+
+        pwr_now += sdl->busiest.group->__cpu_power *
+                        min(sdl->busiest.avg_load_per_task, sdl->max_load);
+        pwr_now += sdl->local.group->__cpu_power *
+                        min(sdl->local.avg_load_per_task, sdl->local.load);
+        pwr_now /= SCHED_LOAD_SCALE;
+
+        /* Amount of load we'd subtract */
+        tmp = sg_div_cpu_power(sdl->busiest.group,
+                        sdl->busiest.avg_load_per_task * SCHED_LOAD_SCALE);
+        if (sdl->max_load > tmp)
+                pwr_move += sdl->busiest.group->__cpu_power *
+                        min(sdl->busiest.avg_load_per_task,
+                                        sdl->max_load - tmp);
+
+        /* Amount of load we'd add */
+        if (sdl->max_load * sdl->busiest.group->__cpu_power <
+                sdl->busiest.avg_load_per_task * SCHED_LOAD_SCALE)
+                tmp = sg_div_cpu_power(sdl->local.group,
+                        sdl->max_load *
+                        sdl->busiest.group->__cpu_power);
+        else
+                tmp = sg_div_cpu_power(sdl->local.group,
+                        sdl->busiest.avg_load_per_task * SCHED_LOAD_SCALE);
+        pwr_move += sdl->local.group->__cpu_power *
+                        min(sdl->local.avg_load_per_task,
+                                        sdl->local.load + tmp);
+        pwr_move /= SCHED_LOAD_SCALE;
+
+        /* Move if we gain throughput */
+        if (pwr_move > pwr_now)
+                *imbalance = sdl->busiest.avg_load_per_task;
+
+}
+
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
 void update_powersavings_group_loads(struct sd_loads *sdl,
                                      struct group_loads *gl,
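
For context, this is roughly how the two helpers are presumably meant to be
wired into find_busiest_group() by a later patch in the series. This is a
sketch only; the call site and the sdl variable below are assumptions, not
part of this patch:

        /*
         * Hypothetical call site: only bother with the small-imbalance
         * corrections when the computed imbalance is less than one task's
         * worth of load on the busiest group.
         */
        if (*imbalance < sdl.busiest.avg_load_per_task) {
                /* First try to round the imbalance up to one whole task. */
                if (small_imbalance_one_task(&sdl, imbalance))
                        return sdl.busiest.group;

                /*
                 * Otherwise check whether moving a task would still improve
                 * total CPU power utilization; if so, the helper sets the
                 * imbalance to one task so load_balance() pulls it.
                 */
                small_imbalance_optimize_cpu_power(&sdl, imbalance);
        }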

