From:	Yuyang Du <>
Subject:	[PATCH v7 3/4] sched: Init cfs_rq's sched_entity load average
Date:	Thu, 14 May 2015 10:22:14 +0800
The runnable load and utilization averages of a cfs_rq's sched_entity were not initialized. As is already done for a new task, give a new cfs_rq's sched_entity start values so that its load weighs heavily during its infant time.
Signed-off-by: Yuyang Du <yuyang.du@intel.com>
---
 kernel/sched/core.c  |  2 +-
 kernel/sched/fair.c  | 13 +++++++------
 kernel/sched/sched.h |  2 +-
 3 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b4e6c21..f46992f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2102,7 +2102,7 @@ void wake_up_new_task(struct task_struct *p)
 #endif
 
 	/* Initialize new task's runnable average */
-	init_task_runnable_average(p);
+	init_entity_runnable_average(&p->se);
 	rq = __task_rq_lock(p);
 	activate_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2a87012..962d40d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -672,10 +672,10 @@ static unsigned long task_h_load(struct task_struct *p);
 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */
 
-/* Give new task start runnable values to heavy its load in infant time */
-void init_task_runnable_average(struct task_struct *p)
+/* Give new sched_entity start runnable values to heavy its load in infant time */
+void init_entity_runnable_average(struct sched_entity *se)
 {
-	struct sched_avg *sa = &p->se.avg;
+	struct sched_avg *sa = &se->avg;
 
 	sa->last_update_time = 0;
 	/*
@@ -684,14 +684,14 @@ void init_task_runnable_average(struct task_struct *p)
 	 * will definitely be update (after enqueue).
 	 */
 	sa->period_contrib = 1023;
-	sa->load_avg = p->se.load.weight;
-	sa->load_sum = p->se.load.weight * LOAD_AVG_MAX;
+	sa->load_avg = se->load.weight;
+	sa->load_sum = se->load.weight * LOAD_AVG_MAX;
 	sa->util_avg = SCHED_LOAD_SCALE;
 	sa->util_sum = SCHED_LOAD_SCALE * LOAD_AVG_MAX;
 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
 #else
-void init_task_runnable_average(struct task_struct *p)
+void init_entity_runnable_average(struct sched_entity *se)
 {
 }
 #endif
@@ -7854,6 +7854,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 
 		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
+		init_entity_runnable_average(se);
 	}
 
 	return 1;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 13ff009..08c9647 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1288,7 +1288,7 @@ unsigned long to_ratio(u64 period, u64 runtime);
 
 extern void update_idle_cpu_load(struct rq *this_rq);
 
-extern void init_task_runnable_average(struct task_struct *p);
+extern void init_entity_runnable_average(struct sched_entity *se);
 
 static inline void add_nr_running(struct rq *rq, unsigned count)
 {
-- 
1.7.9.5
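[Editorial note, not part of the patch: the standalone sketch below illustrates why the paired initial values above are mutually consistent. The constants LOAD_AVG_MAX == 47742 and LOAD_AVG_MAX_N == 345 come from the patch itself; the floating-point series is only an approximation of the kernel's 32-bit fixed-point decay, and the relation load_avg == load_sum / LOAD_AVG_MAX is the consistency implied by the initializations, not a quote of kernel code.]

/*
 * Standalone sketch (editorial illustration, not kernel code).
 * PELT decays history by y per 1024us period, with y chosen so that
 * y^32 == 1/2, so an entity that is runnable forever accumulates at
 * most the geometric series 1024 * (1 + y + y^2 + ...) = 1024 / (1 - y).
 * Build with: cc sketch.c -lm
 */
#include <math.h>
#include <stdio.h>

#define LOAD_AVG_MAX	47742	/* saturated sum, from the patch */

int main(void)
{
	double y = pow(0.5, 1.0 / 32.0);	/* y^32 == 1/2 */
	unsigned long weight = 1024;		/* e.g. nice-0 load.weight */

	/*
	 * The float limit is ~47788; the kernel's 32-bit fixed-point
	 * decay truncates slightly each period, so it saturates lower,
	 * at LOAD_AVG_MAX == 47742 after LOAD_AVG_MAX_N == 345 periods.
	 */
	printf("series limit (float): %.1f\n", 1024.0 / (1.0 - y));

	/*
	 * The patch pairs load_sum = weight * LOAD_AVG_MAX with
	 * load_avg = weight, i.e. load_avg == load_sum / LOAD_AVG_MAX:
	 * the new entity starts as if it had been runnable forever.
	 */
	unsigned long load_sum = weight * LOAD_AVG_MAX;
	printf("initial load_avg: %lu (== weight)\n",
	       load_sum / LOAD_AVG_MAX);
	return 0;
}

The same reasoning applies to the utilization pair: util_avg = SCHED_LOAD_SCALE with util_sum = SCHED_LOAD_SCALE * LOAD_AVG_MAX starts a new entity as if it were fully utilizing a CPU, until real history accumulates and decays that assumption.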