author		Waiman Long <longman@redhat.com>	2018-11-08 10:08:40 -0500
committer	Tejun Heo <tj@kernel.org>	2018-11-08 12:27:29 -0800
commit		4716909cc5c566e946a3acc884bf5dc469812007
tree		c70f38e1cbfcee11095bc04615ca8114a2702593 /kernel/cgroup
parent		3881b86128d0be22e8947ac1fca0429c74bf055e
cpuset: Track cpusets that use parent's effective_cpus
In the default hierarchy, a cpuset will use its parent's effective_cpus
if none of its requested CPUs can be granted by the parent. That can
be a problem when the parent is a partition root with child partition
roots. Changes to the parent's effective_cpus list caused by changes in
a child partition root may not be properly reflected in another child
cpuset that uses the parent's effective_cpus, because the cpu_exclusive
rule of a partition root does not guard against that.
To avoid this mismatch, two new tracking variables are added to the
cpuset structure: a flag recording whether a cpuset uses its parent's
effective_cpus, and a count of the child cpusets that use this cpuset's
effective_cpus. Whenever cpumask changes are made to a parent, it now
also checks whether it has other child cpusets that use its
effective_cpus and, if so, calls update_cpumasks_hier() on them.
Signed-off-by: Waiman Long <longman@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
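The bookkeeping the patch introduces is easiest to see in isolation. Below is a minimal userspace model of the use_parent_ecpus / child_ecpus_count pairing; it is a sketch, not kernel code. The struct node type and the two helper functions are invented for illustration, but they mirror the rule in the patch that the per-child flag and the parent's counter are always updated together.

	/* Standalone model of the accounting added by this patch (illustrative only). */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct node {
		struct node *parent;
		bool use_parent_ecpus;	/* this node inherits the parent's effective_cpus */
		int child_ecpus_count;	/* # of children with use_parent_ecpus set */
	};

	/* Called when a node's own request becomes empty and it falls back to the
	 * parent's effective_cpus (mirrors the new branch in update_cpumasks_hier()). */
	static void start_using_parent_ecpus(struct node *n)
	{
		if (!n->use_parent_ecpus) {
			n->use_parent_ecpus = true;
			n->parent->child_ecpus_count++;
		}
	}

	/* Called when the node gets CPUs of its own again, or goes offline. */
	static void stop_using_parent_ecpus(struct node *n)
	{
		if (n->use_parent_ecpus) {
			n->use_parent_ecpus = false;
			assert(n->parent->child_ecpus_count > 0);
			n->parent->child_ecpus_count--;
		}
	}

	int main(void)
	{
		struct node parent = { 0 };
		struct node c1 = { .parent = &parent };
		struct node c2 = { .parent = &parent };

		start_using_parent_ecpus(&c1);
		start_using_parent_ecpus(&c2);
		printf("children using parent's effective_cpus: %d\n",
		       parent.child_ecpus_count);	/* 2 */

		/* c1 now has CPUs of its own; only c2 still needs sibling updates. */
		stop_using_parent_ecpus(&c1);
		printf("children using parent's effective_cpus: %d\n",
		       parent.child_ecpus_count);	/* 1 */
		return 0;
	}

The counter on the parent is what lets update_cpumask() and update_prstate() skip the sibling walk entirely when no child is borrowing the parent's effective_cpus.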
Diffstat (limited to 'kernel/cgroup')
-rw-r--r--	kernel/cgroup/cpuset.c	71
1 file changed, 70 insertions(+), 1 deletion(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index ef41f58d7cdf..21eaa896c1a9 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -147,6 +147,14 @@ struct cpuset {
 
 	/* partition root state */
 	int partition_root_state;
+
+	/*
+	 * Default hierarchy only:
+	 * use_parent_ecpus - set if using parent's effective_cpus
+	 * child_ecpus_count - # of children with use_parent_ecpus set
+	 */
+	int use_parent_ecpus;
+	int child_ecpus_count;
 };
 
 /*
@@ -1227,8 +1235,17 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
 		 * If it becomes empty, inherit the effective mask of the
 		 * parent, which is guaranteed to have some CPUs.
 		 */
-		if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus))
+		if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
 			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
+			if (!cp->use_parent_ecpus) {
+				cp->use_parent_ecpus = true;
+				parent->child_ecpus_count++;
+			}
+		} else if (cp->use_parent_ecpus) {
+			cp->use_parent_ecpus = false;
+			WARN_ON_ONCE(!parent->child_ecpus_count);
+			parent->child_ecpus_count--;
+		}
 
 		/*
 		 * Skip the whole subtree if the cpumask remains the same
@@ -1346,6 +1363,35 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
 }
 
 /**
+ * update_sibling_cpumasks - Update siblings cpumasks
+ * @parent: Parent cpuset
+ * @cs: Current cpuset
+ * @tmp: Temp variables
+ */
+static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
+				    struct tmpmasks *tmp)
+{
+	struct cpuset *sibling;
+	struct cgroup_subsys_state *pos_css;
+
+	/*
+	 * Check all its siblings and call update_cpumasks_hier()
+	 * if their use_parent_ecpus flag is set in order for them
+	 * to use the right effective_cpus value.
+	 */
+	rcu_read_lock();
+	cpuset_for_each_child(sibling, pos_css, parent) {
+		if (sibling == cs)
+			continue;
+		if (!sibling->use_parent_ecpus)
+			continue;
+
+		update_cpumasks_hier(sibling, tmp);
+	}
+	rcu_read_unlock();
+}
+
+/**
  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
  * @cs: the cpuset to consider
  * @trialcs: trial cpuset
@@ -1420,6 +1466,17 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	spin_unlock_irq(&callback_lock);
 
 	update_cpumasks_hier(cs, &tmp);
+
+	if (cs->partition_root_state) {
+		struct cpuset *parent = parent_cs(cs);
+
+		/*
+		 * For partition root, update the cpumasks of sibling
+		 * cpusets if they use parent's effective_cpus.
+		 */
+		if (parent->child_ecpus_count)
+			update_sibling_cpumasks(parent, cs, &tmp);
+	}
 	return 0;
 }
 
@@ -1856,6 +1913,9 @@ static int update_prstate(struct cpuset *cs, int val)
 	if (parent != &top_cpuset)
 		update_tasks_cpumask(parent);
 
+	if (parent->child_ecpus_count)
+		update_sibling_cpumasks(parent, cs, &tmp);
+
 	rebuild_sched_domains_locked();
 out:
 	free_cpumasks(NULL, &tmp);
@@ -2550,6 +2610,8 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 	if (is_in_v2_mode()) {
 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
 		cs->effective_mems = parent->effective_mems;
+		cs->use_parent_ecpus = true;
+		parent->child_ecpus_count++;
 	}
 
 	spin_unlock_irq(&callback_lock);
@@ -2613,6 +2675,13 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
 	    is_sched_load_balance(cs))
 		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 
+	if (cs->use_parent_ecpus) {
+		struct cpuset *parent = parent_cs(cs);
+
+		cs->use_parent_ecpus = false;
+		parent->child_ecpus_count--;
+	}
+
 	cpuset_dec();
 	clear_bit(CS_ONLINE, &cs->flags);
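For reference, a hypothetical userspace check of the behaviour this patch affects could look like the sketch below. It is not taken from the commit: it assumes a cgroup v2 hierarchy P with children c1 and c2 already created under an assumed /sys/fs/cgroup mount, with the cpuset controller enabled in P's cgroup.subtree_control; the name of the partition control file and the value written to it are assumptions and vary across kernel versions. c1 is made a partition root while c2 requests no CPUs, so c2 falls back to P's effective_cpus; with this patch, changing c1's CPUs is also reflected in c2's cpuset.cpus.effective.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	#define CGROOT    "/sys/fs/cgroup"          /* assumed cgroup2 mount point */
	#define PART_FILE "cpuset.cpus.partition"   /* assumed name of the partition file */

	static int write_str(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0 || write(fd, val, strlen(val)) < 0) {
			perror(path);
			if (fd >= 0)
				close(fd);
			return -1;
		}
		return close(fd);
	}

	static void show(const char *path)
	{
		char buf[256];
		FILE *f = fopen(path, "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("%s: %s", path, buf);
		if (f)
			fclose(f);
	}

	int main(void)
	{
		/* Make c1 a partition root over CPUs 0-1 ("root" value assumed). */
		write_str(CGROOT "/P/c1/cpuset.cpus", "0-1");
		write_str(CGROOT "/P/c1/" PART_FILE, "root");

		/* c2 requests nothing, so it uses P's effective_cpus. */
		show(CGROOT "/P/c2/cpuset.cpus.effective");

		/* Growing the partition root shrinks P's effective_cpus further;
		 * with this patch the sibling c2 is updated as well. */
		write_str(CGROOT "/P/c1/cpuset.cpus", "0-3");
		show(CGROOT "/P/c2/cpuset.cpus.effective");
		return 0;
	}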