author     Peter Zijlstra <peterz@infradead.org>	2014-07-11 16:01:53 +0200
committer  Ingo Molnar <mingo@kernel.org>	2014-07-16 13:38:23 +0200
commit     e720fff6341fe4b95e5a93c939bd3c77fa55ced4 (patch)
tree       31ccde4e945d6f51dbe0c788800f0a5f111bdcc2 /kernel
parent     5cd08fbfdb6baa9fe98f530b76898fc5725a6289 (diff)
sched/numa: Revert "Use effective_load() to balance NUMA loads"
Due to divergent trees, Rik found that this patch is no longer
required.
Requested-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-u6odkgkw8wz3m7orgsjfo5pi@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	20
1 file changed, 6 insertions(+), 14 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f5f0cc91518c..45943b2fa82b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1151,7 +1151,6 @@ static void task_numa_compare(struct task_numa_env *env,
 	struct rq *src_rq = cpu_rq(env->src_cpu);
 	struct rq *dst_rq = cpu_rq(env->dst_cpu);
 	struct task_struct *cur;
-	struct task_group *tg;
 	long src_load, dst_load;
 	long load;
 	long imp = env->p->numa_group ? groupimp : taskimp;
@@ -1223,14 +1222,9 @@ static void task_numa_compare(struct task_numa_env *env,
 	 * In the overloaded case, try and keep the load balanced.
 	 */
 balance:
-	src_load = env->src_stats.load;
-	dst_load = env->dst_stats.load;
-
-	/* Calculate the effect of moving env->p from src to dst. */
-	load = env->p->se.load.weight;
-	tg = task_group(env->p);
-	src_load += effective_load(tg, env->src_cpu, -load, -load);
-	dst_load += effective_load(tg, env->dst_cpu, load, load);
+	load = task_h_load(env->p);
+	dst_load = env->dst_stats.load + load;
+	src_load = env->src_stats.load - load;
 
 	if (moveimp > imp && moveimp > env->best_imp) {
 		/*
@@ -1250,11 +1244,9 @@ balance:
 		goto unlock;
 
 	if (cur) {
-		/* Cur moves in the opposite direction. */
-		load = cur->se.load.weight;
-		tg = task_group(cur);
-		src_load += effective_load(tg, env->src_cpu, load, load);
-		dst_load += effective_load(tg, env->dst_cpu, -load, -load);
+		load = task_h_load(cur);
+		dst_load -= load;
+		src_load += load;
 	}
 
 	if (load_too_imbalanced(src_load, dst_load, env))
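
For readers skimming the hunks above: after this revert, task_numa_compare() estimates the effect of a move by simply shifting the task's hierarchical load (task_h_load()) from the source stats to the destination stats, rather than consulting effective_load() per CPU; a task moving the opposite way (cur) is accounted for symmetrically. A minimal, self-contained sketch of that arithmetic follows. The struct and all numbers are invented for illustration; only the add/subtract pattern comes from the patch, with "load" standing in for task_h_load().

	/*
	 * Illustrative only: mirrors the load-balance arithmetic this
	 * revert restores in task_numa_compare(). The struct and the
	 * constants are invented; in the kernel, "load" would come from
	 * task_h_load(env->p) and the stats from struct numa_stats.
	 */
	#include <stdio.h>

	struct numa_stats { long load; };

	int main(void)
	{
		struct numa_stats src_stats = { .load = 2048 };
		struct numa_stats dst_stats = { .load = 1024 };
		long load = 512;	/* stand-in for task_h_load(env->p) */

		/* Moving the task shifts its load from src to dst. */
		long dst_load = dst_stats.load + load;
		long src_load = src_stats.load - load;

		/* A task moving the other way (cur) is undone symmetrically. */
		long cur_load = 256;	/* stand-in for task_h_load(cur) */
		dst_load -= cur_load;
		src_load += cur_load;

		/* These two totals would then be fed to load_too_imbalanced(). */
		printf("src_load=%ld dst_load=%ld\n", src_load, dst_load);
		return 0;
	}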