sched: Consider runnable load average in move_tasks()
Aside from using the runnable load average in the background, move_tasks is also the key function in load balance. We need to consider the runnable load average in it in order to make it an apples-to-apples load comparison. Morten had caught a div u64 bug on ARM, thanks! Thanks-to: Morten Rasmussen <morten.rasmussen@arm.com> Signed-off-by: Alex Shi <alex.shi@intel.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1371694737-29336-8-git-send-email-alex.shi@intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
b92486cbf2
commit
a003a25b22
1 changed file with 9 additions and 9 deletions
|
@@ -4179,11 +4179,14 @@ static int tg_load_down(struct task_group *tg, void *data)
|
||||||
long cpu = (long)data;
|
long cpu = (long)data;
|
||||||
|
|
||||||
if (!tg->parent) {
|
if (!tg->parent) {
|
||||||
load = cpu_rq(cpu)->load.weight;
|
load = cpu_rq(cpu)->avg.load_avg_contrib;
|
||||||
} else {
|
} else {
|
||||||
|
unsigned long tmp_rla;
|
||||||
|
tmp_rla = tg->parent->cfs_rq[cpu]->runnable_load_avg + 1;
|
||||||
|
|
||||||
load = tg->parent->cfs_rq[cpu]->h_load;
|
load = tg->parent->cfs_rq[cpu]->h_load;
|
||||||
load *= tg->se[cpu]->load.weight;
|
load *= tg->se[cpu]->avg.load_avg_contrib;
|
||||||
load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
|
load /= tmp_rla;
|
||||||
}
|
}
|
||||||
|
|
||||||
tg->cfs_rq[cpu]->h_load = load;
|
tg->cfs_rq[cpu]->h_load = load;
|
||||||
|
@@ -4209,12 +4212,9 @@ static void update_h_load(long cpu)
|
||||||
static unsigned long task_h_load(struct task_struct *p)
|
static unsigned long task_h_load(struct task_struct *p)
|
||||||
{
|
{
|
||||||
struct cfs_rq *cfs_rq = task_cfs_rq(p);
|
struct cfs_rq *cfs_rq = task_cfs_rq(p);
|
||||||
unsigned long load;
|
|
||||||
|
|
||||||
load = p->se.load.weight;
|
return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
|
||||||
load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1);
|
cfs_rq->runnable_load_avg + 1);
|
||||||
|
|
||||||
return load;
|
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
static inline void update_blocked_averages(int cpu)
|
static inline void update_blocked_averages(int cpu)
|
||||||
|
@@ -4227,7 +4227,7 @@ static inline void update_h_load(long cpu)
|
||||||
|
|
||||||
static unsigned long task_h_load(struct task_struct *p)
|
static unsigned long task_h_load(struct task_struct *p)
|
||||||
{
|
{
|
||||||
return p->se.load.weight;
|
return p->se.avg.load_avg_contrib;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue