-rw-r--r--   kernel/sched/fair.c  | 58
-rw-r--r--   kernel/sched/sched.h |  7
2 files changed, 30 insertions, 35 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bb456f44b7b1..765d87acdf05 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4171,47 +4171,48 @@ static void update_blocked_averages(int cpu)
}
/*
- * Compute the cpu's hierarchical load factor for each task group.
+ * Compute the hierarchical load factor for cfs_rq and all its ascendants.
* This needs to be done in a top-down fashion because the load of a child
* group is a fraction of its parents load.
*/
-static int tg_load_down(struct task_group *tg, void *data)
+static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
{
- unsigned long load;
- long cpu = (long)data;
-
- if (!tg->parent) {
- load = cpu_rq(cpu)->avg.load_avg_contrib;
- } else {
- load = tg->parent->cfs_rq[cpu]->h_load;
- load = div64_ul(load * tg->se[cpu]->avg.load_avg_contrib,
- tg->parent->cfs_rq[cpu]->runnable_load_avg + 1);
- }
-
- tg->cfs_rq[cpu]->h_load = load;
-
- return 0;
-}
-
-static void update_h_load(long cpu)
-{
- struct rq *rq = cpu_rq(cpu);
+ struct rq *rq = rq_of(cfs_rq);
+ struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
unsigned long now = jiffies;
+ unsigned long load;
- if (rq->h_load_throttle == now)
+ if (cfs_rq->last_h_load_update == now)
return;
- rq->h_load_throttle = now;
+ cfs_rq->h_load_next = NULL;
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+ cfs_rq->h_load_next = se;
+ if (cfs_rq->last_h_load_update == now)
+ break;
+ }
- rcu_read_lock();
- walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
- rcu_read_unlock();
+ if (!se) {
+ cfs_rq->h_load = rq->avg.load_avg_contrib;
+ cfs_rq->last_h_load_update = now;
+ }
+
+ while ((se = cfs_rq->h_load_next) != NULL) {
+ load = cfs_rq->h_load;
+ load = div64_ul(load * se->avg.load_avg_contrib,
+ cfs_rq->runnable_load_avg + 1);
+ cfs_rq = group_cfs_rq(se);
+ cfs_rq->h_load = load;
+ cfs_rq->last_h_load_update = now;
+ }
}
static unsigned long task_h_load(struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
+ update_cfs_rq_h_load(cfs_rq);
return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
cfs_rq->runnable_load_avg + 1);
}
@@ -4220,10 +4221,6 @@ static inline void update_blocked_averages(int cpu)
{
}
-static inline void update_h_load(long cpu)
-{
-}
-
static unsigned long task_h_load(struct task_struct *p)
{
return p->se.avg.load_avg_contrib;
@@ -5108,7 +5105,6 @@ redo:
env.src_rq = busiest;
env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
- update_h_load(env.src_cpu);
more_balance:
local_irq_save(flags);
double_rq_lock(env.dst_rq, busiest);
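
Note: the comment above update_cfs_rq_h_load() in the first hunk describes why h_load must be computed top-down: each group's hierarchical load is its parent's h_load scaled by the group's share of the parent's runnable load. Below is a minimal standalone sketch of the two-pass pattern the new function uses (walk up recording the path in h_load_next, then walk back down scaling at each level). The types, field names, and numbers are simplified stand-ins for illustration only, and the jiffies-based caching via last_h_load_update is omitted.

/*
 * Standalone sketch (not kernel code) of the h_load propagation pattern:
 * pass 1 records the path from the target group up to the root, pass 2
 * walks back down, scaling each parent's h_load by the child's share of
 * the parent's runnable load.
 */
#include <stdio.h>

struct group {
	struct group *parent;        /* NULL for the root */
	unsigned long my_load;       /* this group's entity load in its parent */
	unsigned long runnable_load; /* runnable load queued in this group */
	unsigned long h_load;        /* hierarchical load factor, filled in below */
	struct group *h_load_next;   /* next step on the path toward the target */
};

static void update_h_load(struct group *grp, unsigned long root_load)
{
	struct group *g;

	/* Pass 1: walk up, remembering how to get back down to 'grp'. */
	grp->h_load_next = NULL;
	for (g = grp; g->parent; g = g->parent)
		g->parent->h_load_next = g;

	/* The root's h_load is simply the cpu's total load. */
	g->h_load = root_load;

	/* Pass 2: walk down, scaling by each level's share of its parent. */
	for (; g->h_load_next; g = g->h_load_next)
		g->h_load_next->h_load = g->h_load * g->h_load_next->my_load /
					 (g->runnable_load + 1);
}

int main(void)
{
	struct group root  = { .parent = NULL,  .runnable_load = 2048 };
	struct group child = { .parent = &root, .my_load = 512,
			       .runnable_load = 1024 };

	update_h_load(&child, 2048);
	/* child gets 2048 * 512 / 2049 = 511 of the cpu's load */
	printf("child h_load = %lu\n", child.h_load);
	return 0;
}
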
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ef0a7b2439dd..5e129efb84ce 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -285,7 +285,6 @@ struct cfs_rq {
/* Required to track per-cpu representation of a task_group */
u32 tg_runnable_contrib;
unsigned long tg_load_contrib;
-#endif /* CONFIG_FAIR_GROUP_SCHED */
/*
* h_load = weight * f(tg)
@@ -294,6 +293,9 @@ struct cfs_rq {
* this group.
*/
unsigned long h_load;
+ u64 last_h_load_update;
+ struct sched_entity *h_load_next;
+#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -429,9 +431,6 @@ struct rq {
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
-#ifdef CONFIG_SMP
- unsigned long h_load_throttle;
-#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
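
On the sched.h side, h_load keeps its meaning from the existing comment (h_load = weight * f(tg), where f(tg) is the recursive weight fraction assigned to the group); what changes is that the caching state (last_h_load_update, h_load_next) now lives per cfs_rq instead of rq->h_load_throttle. Below is a rough worked example, with assumed numbers, of how task_h_load() in the first fair.c hunk turns a group's h_load into the per-task figure used for load balancing; the variable names and values are illustrative only, not taken from the kernel.

/*
 * Worked example (assumed numbers) of the task_h_load() calculation:
 *
 *   task_h_load(p) = p->se.avg.load_avg_contrib * cfs_rq->h_load
 *                    / (cfs_rq->runnable_load_avg + 1)
 */
#include <stdio.h>

int main(void)
{
	unsigned long h_load = 511;              /* group's hierarchical load factor */
	unsigned long runnable_load_avg = 1024;  /* runnable load queued in the group */
	unsigned long task_load_contrib = 256;   /* the task's own load contribution */

	/* The task is credited with its fraction of the group's h_load. */
	unsigned long task_h_load =
		task_load_contrib * h_load / (runnable_load_avg + 1);

	printf("task_h_load = %lu\n", task_h_load); /* 256 * 511 / 1025 = 127 */
	return 0;
}
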