summary refs log tree commit diff
path: root/kernel
diff options
context:
space:
mode:
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-06-27 13:41:33 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-06-27 14:31:44 +0200
commit	93b75217df39e6d75889cc6f8050343286aff4a5 (patch)
tree	d3091afd2b88aa9732ff63b3a839bdec358aef7d /kernel
parent	cb5ef42a03a13f95a9ea94e6cda4f7a47497871f (diff)
sched: disable source/target_load bias
The bias given by source/target_load functions can be very large, disable
it by default to get faster convergence.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	4
-rw-r--r--	kernel/sched_features.h	1
2 files changed, 3 insertions, 2 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 10d43f5bf0fc..6c5eb3bc37e0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2000,7 +2000,7 @@ static unsigned long source_load(int cpu, int type)
struct rq *rq = cpu_rq(cpu);
unsigned long total = weighted_cpuload(cpu);
- if (type == 0)
+ if (type == 0 || !sched_feat(LB_BIAS))
return total;
return min(rq->cpu_load[type-1], total);
@@ -2015,7 +2015,7 @@ static unsigned long target_load(int cpu, int type)
struct rq *rq = cpu_rq(cpu);
unsigned long total = weighted_cpuload(cpu);
- if (type == 0)
+ if (type == 0 || !sched_feat(LB_BIAS))
return total;
return max(rq->cpu_load[type-1], total);
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 04123af2e678..d56e3053e746 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -8,3 +8,4 @@ SCHED_FEAT(SYNC_WAKEUPS, 1)
SCHED_FEAT(HRTICK, 1)
SCHED_FEAT(DOUBLE_TICK, 0)
SCHED_FEAT(ASYM_GRAN, 1)
+SCHED_FEAT(LB_BIAS, 0)
\ No newline at end of file