Date: Mon, 15 Nov 2010 15:47:08 -0800
From: Paul Turner <>
Subject: [tg_shares_up rewrite v4 09/11] sched: demand based update_cfs_load()
When the system is busy, dilation of rq->next_balance makes lb->update_shares() insufficiently frequent for threads which don't sleep (no dequeue/enqueue updates). Adjust for this with demand-based updates, triggered whenever the accumulated execution time is sufficient to wrap our averaging window.
Signed-off-by: Paul Turner <pjt@google.com>
---
 kernel/sched.c      |    9 ++++++++-
 kernel/sched_fair.c |   12 ++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)
Index: tip/kernel/sched.c
===================================================================
--- tip.orig/kernel/sched.c
+++ tip/kernel/sched.c
@@ -353,9 +353,16 @@ struct cfs_rq {
          */
         unsigned long h_load;

+        /*
+         * Maintaining per-cpu shares distribution for group scheduling
+         *
+         * load_stamp is the last time we updated the load average
+         * load_last is the last time we updated the load average and saw load
+         * load_unacc_exec_time is currently unaccounted execution time
+         */
         u64 load_avg;
         u64 load_period;
-        u64 load_stamp, load_last;
+        u64 load_stamp, load_last, load_unacc_exec_time;

         unsigned long load_contribution;
 #endif
Index: tip/kernel/sched_fair.c
===================================================================
--- tip.orig/kernel/sched_fair.c
+++ tip/kernel/sched_fair.c
@@ -539,6 +539,9 @@ static u64 sched_vslice(struct cfs_rq *c
         return calc_delta_fair(sched_slice(cfs_rq, se), se);
 }

+static void update_cfs_load(struct cfs_rq *cfs_rq);
+static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
+
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
@@ -558,6 +561,14 @@ __update_curr(struct cfs_rq *cfs_rq, str

         curr->vruntime += delta_exec_weighted;
         update_min_vruntime(cfs_rq);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+        cfs_rq->load_unacc_exec_time += delta_exec;
+        if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+                update_cfs_load(cfs_rq);
+                update_cfs_shares(cfs_rq, 0);
+        }
+#endif
 }

 static void update_curr(struct cfs_rq *cfs_rq)
@@ -713,6 +724,7 @@ static void update_cfs_load(struct cfs_r
         }

         cfs_rq->load_stamp = now;
+        cfs_rq->load_unacc_exec_time = 0;
         cfs_rq->load_period += delta;
         if (load) {
                 cfs_rq->load_last = now;
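
The effect of the added hunks can be seen with a small stand-alone model (a minimal sketch, not kernel code: the 1ms tick, the 10ms value standing in for sysctl_sched_shares_window, and the always-running task are assumptions made purely for illustration; the other names mirror the patch):

/*
 * Stand-alone model of the demand-based update added by this patch.
 * Not kernel code; tick length, window size and the simulated task
 * that never sleeps are illustrative assumptions.
 */
#include <stdio.h>

typedef unsigned long long u64;

/* stands in for sysctl_sched_shares_window; 10ms in ns assumed here */
static u64 shares_window_ns = 10000000ULL;

struct cfs_rq_model {
        u64 load_unacc_exec_time;       /* unaccounted execution time */
        unsigned long nr_shares_updates;
};

/* mirrors the reset that update_cfs_load() now performs */
static void update_cfs_load_model(struct cfs_rq_model *cfs_rq)
{
        cfs_rq->load_unacc_exec_time = 0;
        cfs_rq->nr_shares_updates++;
}

/* mirrors the hunk added to __update_curr(): accumulate, then test */
static void account_exec_model(struct cfs_rq_model *cfs_rq, u64 delta_exec)
{
        cfs_rq->load_unacc_exec_time += delta_exec;
        if (cfs_rq->load_unacc_exec_time > shares_window_ns)
                update_cfs_load_model(cfs_rq);
}

int main(void)
{
        struct cfs_rq_model rq = { 0, 0 };
        u64 tick_ns = 1000000ULL;       /* 1ms "tick", assumed */
        u64 t;

        /* one second of a task that never sleeps: no dequeue/enqueue updates */
        for (t = 0; t < 1000000000ULL; t += tick_ns)
                account_exec_model(&rq, tick_ns);

        /* roughly one update per window: about 90 with these numbers */
        printf("shares updates over 1s: %lu\n", rq.nr_shares_updates);
        return 0;
}

With a thread that never blocks, the unaccounted execution time wraps the window roughly every 10ms, so the per-cpu shares are refreshed on that cadence instead of waiting for a dilated rq->next_balance to drive update_shares().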