Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/cgroup.c            |  17
 -rw-r--r--  kernel/panic.c             |   6
 -rw-r--r--  kernel/rcutree.c           |   2
 -rw-r--r--  kernel/rcutree.h           |  14
 -rw-r--r--  kernel/rcutree_plugin.h    | 165
 -rw-r--r--  kernel/sched/core.c        | 187
 -rw-r--r--  kernel/sched/fair.c        |  29
 -rw-r--r--  kernel/sched/rt.c          |   2
 -rw-r--r--  kernel/sched/sched.h       |   2
 -rw-r--r--  kernel/sys.c               |  60
 -rw-r--r--  kernel/time/tick-sched.c   |   7
 -rw-r--r--  kernel/time/timekeeping.c  |   2
 12 files changed, 327 insertions(+), 166 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0f3527d6184a..72fcd3069a90 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -896,10 +896,13 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
mutex_unlock(&cgroup_mutex);
/*
- * Drop the active superblock reference that we took when we
- * created the cgroup
+ * We want to drop the active superblock reference from the
+ * cgroup creation after all the dentry refs are gone -
+ * kill_sb gets mighty unhappy otherwise. Mark
+ * dentry->d_fsdata with cgroup_diput() to tell
+ * cgroup_d_release() to call deactivate_super().
*/
- deactivate_super(cgrp->root->sb);
+ dentry->d_fsdata = cgroup_diput;
/*
* if we're getting rid of the cgroup, refcount should ensure
@@ -925,6 +928,13 @@ static int cgroup_delete(const struct dentry *d)
return 1;
}
+static void cgroup_d_release(struct dentry *dentry)
+{
+ /* did cgroup_diput() tell me to deactivate super? */
+ if (dentry->d_fsdata == cgroup_diput)
+ deactivate_super(dentry->d_sb);
+}
+
static void remove_dir(struct dentry *d)
{
struct dentry *parent = dget(d->d_parent);
@@ -1532,6 +1542,7 @@ static int cgroup_get_rootdir(struct super_block *sb)
static const struct dentry_operations cgroup_dops = {
.d_iput = cgroup_diput,
.d_delete = cgroup_delete,
+ .d_release = cgroup_d_release,
};
struct inode *inode =
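The cgroup.c change above defers deactivate_super() from ->d_iput to ->d_release by stashing a marker in dentry->d_fsdata, so the superblock is only dropped once the last dentry reference is gone. A minimal userspace sketch of that handoff pattern, using mock stand-ins rather than the kernel's dentry/super_block API:

#include <stdio.h>

/* Mock types; not the kernel's structures. */
struct super_block { const char *name; };
struct dentry {
	void *d_fsdata;			/* marker slot, as in the patch */
	struct super_block *d_sb;
};

static void deactivate_super(struct super_block *sb)
{
	printf("deactivate_super(%s)\n", sb->name);
}

static void cgroup_diput(struct dentry *dentry)
{
	/* Too early to drop the superblock: leave a marker for ->d_release. */
	dentry->d_fsdata = (void *)cgroup_diput;
}

static void cgroup_d_release(struct dentry *dentry)
{
	/* Did the diput step ask us to deactivate the superblock? */
	if (dentry->d_fsdata == (void *)cgroup_diput)
		deactivate_super(dentry->d_sb);
}

int main(void)
{
	struct super_block sb = { "cgroup" };
	struct dentry d = { NULL, &sb };

	cgroup_diput(&d);	/* last inode reference dropped */
	cgroup_d_release(&d);	/* last dentry reference dropped */
	return 0;
}

Using the function's own address as the marker value avoids inventing a new flag while still being unambiguous to compare against.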
diff --git a/kernel/panic.c b/kernel/panic.c
index 8ed89a175d79..d2a5f4ecc6dd 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,7 +27,7 @@
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
-int panic_on_oops;
+int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
@@ -108,8 +108,6 @@ void panic(const char *fmt, ...)
*/
crash_kexec(NULL);
- kmsg_dump(KMSG_DUMP_PANIC);
-
/*
* Note smp_send_stop is the usual smp shutdown function, which
* unfortunately means it may not be hardened to work in a panic
@@ -117,6 +115,8 @@ void panic(const char *fmt, ...)
*/
smp_send_stop();
+ kmsg_dump(KMSG_DUMP_PANIC);
+
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
bust_spinlocks(0);
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 0da7b88d92d0..3b0f1337f75b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1397,6 +1397,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
rdp->qlen_lazy += rsp->qlen_lazy;
rdp->qlen += rsp->qlen;
rdp->n_cbs_adopted += rsp->qlen;
+ if (rsp->qlen_lazy != rsp->qlen)
+ rcu_idle_count_callbacks_posted();
rsp->qlen_lazy = 0;
rsp->qlen = 0;
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 7f5d138dedf5..ea056495783e 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -84,6 +84,20 @@ struct rcu_dynticks {
/* Process level is worth LLONG_MAX/2. */
int dynticks_nmi_nesting; /* Track NMI nesting level. */
atomic_t dynticks; /* Even value for idle, else odd. */
+#ifdef CONFIG_RCU_FAST_NO_HZ
+ int dyntick_drain; /* Prepare-for-idle state variable. */
+ unsigned long dyntick_holdoff;
+ /* No retries for the jiffy of failure. */
+ struct timer_list idle_gp_timer;
+ /* Wake up CPU sleeping with callbacks. */
+ unsigned long idle_gp_timer_expires;
+ /* When to wake up CPU (for repost). */
+ bool idle_first_pass; /* First pass of attempt to go idle? */
+ unsigned long nonlazy_posted;
+ /* # times non-lazy CBs posted to CPU. */
+ unsigned long nonlazy_posted_snap;
+ /* idle-period nonlazy_posted snapshot. */
+#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};
/* RCU's kthread states for tracing. */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 2411000d9869..5271a020887e 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1886,8 +1886,9 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
* Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
* any flavor of RCU.
*/
-int rcu_needs_cpu(int cpu)
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
+ *delta_jiffies = ULONG_MAX;
return rcu_cpu_has_callbacks(cpu);
}
@@ -1962,41 +1963,6 @@ static void rcu_idle_count_callbacks_posted(void)
#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
-/* Loop counter for rcu_prepare_for_idle(). */
-static DEFINE_PER_CPU(int, rcu_dyntick_drain);
-/* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */
-static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
-/* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */
-static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
-/* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */
-static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
-/* Enable special processing on first attempt to enter dyntick-idle mode. */
-static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
-/* Running count of non-lazy callbacks posted, never decremented. */
-static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
-/* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */
-static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
-
-/*
- * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
- * callbacks on this CPU, (2) this CPU has not yet attempted to enter
- * dyntick-idle mode, or (3) this CPU is in the process of attempting to
- * enter dyntick-idle mode. Otherwise, if we have recently tried and failed
- * to enter dyntick-idle mode, we refuse to try to enter it. After all,
- * it is better to incur scheduling-clock interrupts than to spin
- * continuously for the same time duration!
- */
-int rcu_needs_cpu(int cpu)
-{
- /* Flag a new idle sojourn to the idle-entry state machine. */
- per_cpu(rcu_idle_first_pass, cpu) = 1;
- /* If no callbacks, RCU doesn't need the CPU. */
- if (!rcu_cpu_has_callbacks(cpu))
- return 0;
- /* Otherwise, RCU needs the CPU only if it recently tried and failed. */
- return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
-}
-
/*
* Does the specified flavor of RCU have non-lazy callbacks pending on
* the specified CPU? Both RCU flavor and CPU are specified by the
@@ -2040,6 +2006,47 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
}
/*
+ * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+ * dyntick-idle mode, or (3) this CPU is in the process of attempting to
+ * enter dyntick-idle mode. Otherwise, if we have recently tried and failed
+ * to enter dyntick-idle mode, we refuse to try to enter it. After all,
+ * it is better to incur scheduling-clock interrupts than to spin
+ * continuously for the same time duration!
+ *
+ * The delta_jiffies argument is used to store the time when RCU is
+ * going to need the CPU again if it still has callbacks. The reason
+ * for this is that rcu_prepare_for_idle() might need to post a timer,
+ * but if so, it will do so after tick_nohz_stop_sched_tick() has set
+ * the wakeup time for this CPU. This means that RCU's timer can be
+ * delayed until the wakeup time, which defeats the purpose of posting
+ * a timer.
+ */
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+{
+ struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+ /* Flag a new idle sojourn to the idle-entry state machine. */
+ rdtp->idle_first_pass = 1;
+ /* If no callbacks, RCU doesn't need the CPU. */
+ if (!rcu_cpu_has_callbacks(cpu)) {
+ *delta_jiffies = ULONG_MAX;
+ return 0;
+ }
+ if (rdtp->dyntick_holdoff == jiffies) {
+ /* RCU recently tried and failed, so don't try again. */
+ *delta_jiffies = 1;
+ return 1;
+ }
+ /* Set up for the possibility that RCU will post a timer. */
+ if (rcu_cpu_has_nonlazy_callbacks(cpu))
+ *delta_jiffies = RCU_IDLE_GP_DELAY;
+ else
+ *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+ return 0;
+}
+
+/*
* Handler for smp_call_function_single(). The only point of this
* handler is to wake the CPU up, so the handler does only tracing.
*/
@@ -2075,21 +2082,24 @@ static void rcu_idle_gp_timer_func(unsigned long cpu_in)
*/
static void rcu_prepare_for_idle_init(int cpu)
{
- per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
- setup_timer(&per_cpu(rcu_idle_gp_timer, cpu),
- rcu_idle_gp_timer_func, cpu);
- per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1;
- per_cpu(rcu_idle_first_pass, cpu) = 1;
+ struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+ rdtp->dyntick_holdoff = jiffies - 1;
+ setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
+ rdtp->idle_gp_timer_expires = jiffies - 1;
+ rdtp->idle_first_pass = 1;
}
/*
* Clean up for exit from idle. Because we are exiting from idle, there
- * is no longer any point to rcu_idle_gp_timer, so cancel it. This will
+ * is no longer any point to ->idle_gp_timer, so cancel it. This will
* do nothing if this timer is not active, so just cancel it unconditionally.
*/
static void rcu_cleanup_after_idle(int cpu)
{
- del_timer(&per_cpu(rcu_idle_gp_timer, cpu));
+ struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+ del_timer(&rdtp->idle_gp_timer);
trace_rcu_prep_idle("Cleanup after idle");
}
@@ -2108,42 +2118,41 @@ static void rcu_cleanup_after_idle(int cpu)
* Because it is not legal to invoke rcu_process_callbacks() with irqs
* disabled, we do one pass of force_quiescent_state(), then do an
* invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
- * later. The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ * later. The ->dyntick_drain field controls the sequencing.
*
* The caller must have disabled interrupts.
*/
static void rcu_prepare_for_idle(int cpu)
{
struct timer_list *tp;
+ struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
/*
* If this is an idle re-entry, for example, due to use of
* RCU_NONIDLE() or the new idle-loop tracing API within the idle
* loop, then don't take any state-machine actions, unless the
* momentary exit from idle queued additional non-lazy callbacks.
- * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks
+ * Instead, repost the ->idle_gp_timer if this CPU has callbacks
* pending.
*/
- if (!per_cpu(rcu_idle_first_pass, cpu) &&
- (per_cpu(rcu_nonlazy_posted, cpu) ==
- per_cpu(rcu_nonlazy_posted_snap, cpu))) {
+ if (!rdtp->idle_first_pass &&
+ (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
if (rcu_cpu_has_callbacks(cpu)) {
- tp = &per_cpu(rcu_idle_gp_timer, cpu);
- mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
+ tp = &rdtp->idle_gp_timer;
+ mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
}
return;
}
- per_cpu(rcu_idle_first_pass, cpu) = 0;
- per_cpu(rcu_nonlazy_posted_snap, cpu) =
- per_cpu(rcu_nonlazy_posted, cpu) - 1;
+ rdtp->idle_first_pass = 0;
+ rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
/*
* If there are no callbacks on this CPU, enter dyntick-idle mode.
* Also reset state to avoid prejudicing later attempts.
*/
if (!rcu_cpu_has_callbacks(cpu)) {
- per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
- per_cpu(rcu_dyntick_drain, cpu) = 0;
+ rdtp->dyntick_holdoff = jiffies - 1;
+ rdtp->dyntick_drain = 0;
trace_rcu_prep_idle("No callbacks");
return;
}
@@ -2152,36 +2161,37 @@ static void rcu_prepare_for_idle(int cpu)
* If in holdoff mode, just return. We will presumably have
* refrained from disabling the scheduling-clock tick.
*/
- if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+ if (rdtp->dyntick_holdoff == jiffies) {
trace_rcu_prep_idle("In holdoff");
return;
}
- /* Check and update the rcu_dyntick_drain sequencing. */
- if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+ /* Check and update the ->dyntick_drain sequencing. */
+ if (rdtp->dyntick_drain <= 0) {
/* First time through, initialize the counter. */
- per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
- } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+ rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
+ } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
!rcu_pending(cpu) &&
!local_softirq_pending()) {
/* Can we go dyntick-idle despite still having callbacks? */
- trace_rcu_prep_idle("Dyntick with callbacks");
- per_cpu(rcu_dyntick_drain, cpu) = 0;
- per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
- if (rcu_cpu_has_nonlazy_callbacks(cpu))
- per_cpu(rcu_idle_gp_timer_expires, cpu) =
+ rdtp->dyntick_drain = 0;
+ rdtp->dyntick_holdoff = jiffies;
+ if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+ trace_rcu_prep_idle("Dyntick with callbacks");
+ rdtp->idle_gp_timer_expires =
jiffies + RCU_IDLE_GP_DELAY;
- else
- per_cpu(rcu_idle_gp_timer_expires, cpu) =
+ } else {
+ rdtp->idle_gp_timer_expires =
jiffies + RCU_IDLE_LAZY_GP_DELAY;
- tp = &per_cpu(rcu_idle_gp_timer, cpu);
- mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
- per_cpu(rcu_nonlazy_posted_snap, cpu) =
- per_cpu(rcu_nonlazy_posted, cpu);
+ trace_rcu_prep_idle("Dyntick with lazy callbacks");
+ }
+ tp = &rdtp->idle_gp_timer;
+ mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
+ rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
return; /* Nothing more to do immediately. */
- } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+ } else if (--(rdtp->dyntick_drain) <= 0) {
/* We have hit the limit, so time to give up. */
- per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
+ rdtp->dyntick_holdoff = jiffies;
trace_rcu_prep_idle("Begin holdoff");
invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
return;
@@ -2227,7 +2237,7 @@ static void rcu_prepare_for_idle(int cpu)
*/
static void rcu_idle_count_callbacks_posted(void)
{
- __this_cpu_add(rcu_nonlazy_posted, 1);
+ __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
}
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
@@ -2238,11 +2248,12 @@ static void rcu_idle_count_callbacks_posted(void)
static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
- struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu);
+ struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+ struct timer_list *tltp = &rdtp->idle_gp_timer;
sprintf(cp, "drain=%d %c timer=%lu",
- per_cpu(rcu_dyntick_drain, cpu),
- per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
+ rdtp->dyntick_drain,
+ rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
timer_pending(tltp) ? tltp->expires - jiffies : -1);
}
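The reworked rcu_needs_cpu() above reports not only whether RCU needs the CPU but also, via *delta_jiffies, how long the CPU may sleep before RCU's timer would have fired, so that tick_nohz_stop_sched_tick() can bound its wakeup accordingly. A standalone sketch of that decision, with a hypothetical cpu_state structure standing in for the per-CPU rcu_dynticks fields and callback queries:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define RCU_IDLE_GP_DELAY	6		/* roughly one grace period, in jiffies */
#define RCU_IDLE_LAZY_GP_DELAY	(6 * 100)	/* roughly six seconds at HZ=100 */

/* Hypothetical inputs; the kernel reads these from rcu_dynticks and rdp. */
struct cpu_state {
	bool has_callbacks;
	bool has_nonlazy_callbacks;
	unsigned long dyntick_holdoff;
};

static int rcu_needs_cpu_model(const struct cpu_state *s, unsigned long jiffies,
			       unsigned long *delta_jiffies)
{
	if (!s->has_callbacks) {
		*delta_jiffies = ULONG_MAX;	/* RCU never needs this CPU */
		return 0;
	}
	if (s->dyntick_holdoff == jiffies) {
		*delta_jiffies = 1;		/* recently failed: retry next tick */
		return 1;
	}
	/* Idle is fine, but cap the sleep so a posted timer is not delayed. */
	*delta_jiffies = s->has_nonlazy_callbacks ? RCU_IDLE_GP_DELAY
						  : RCU_IDLE_LAZY_GP_DELAY;
	return 0;
}

int main(void)
{
	struct cpu_state s = { true, false, 0 };
	unsigned long delta;
	int need = rcu_needs_cpu_model(&s, 100, &delta);

	printf("needs_cpu=%d delta=%lu\n", need, delta);
	return 0;
}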
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c46958e26121..d5594a4268d4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5556,15 +5556,20 @@ static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
#ifdef CONFIG_SCHED_DEBUG
-static __read_mostly int sched_domain_debug_enabled;
+static __read_mostly int sched_debug_enabled;
-static int __init sched_domain_debug_setup(char *str)
+static int __init sched_debug_setup(char *str)
{
- sched_domain_debug_enabled = 1;
+ sched_debug_enabled = 1;
return 0;
}
-early_param("sched_debug", sched_domain_debug_setup);
+early_param("sched_debug", sched_debug_setup);
+
+static inline bool sched_debug(void)
+{
+ return sched_debug_enabled;
+}
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
struct cpumask *groupmask)
@@ -5604,7 +5609,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
break;
}
- if (!group->sgp->power) {
+ /*
+ * Even though we initialize ->power to something semi-sane,
+ * we leave power_orig unset. This allows us to detect if
+ * domain iteration is still funny without causing /0 traps.
+ */
+ if (!group->sgp->power_orig) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: domain->cpu_power not "
"set\n");
@@ -5652,7 +5662,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
int level = 0;
- if (!sched_domain_debug_enabled)
+ if (!sched_debug_enabled)
return;
if (!sd) {
@@ -5673,6 +5683,10 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
}
#else /* !CONFIG_SCHED_DEBUG */
# define sched_domain_debug(sd, cpu) do { } while (0)
+static inline bool sched_debug(void)
+{
+ return false;
+}
#endif /* CONFIG_SCHED_DEBUG */
static int sd_degenerate(struct sched_domain *sd)
@@ -5994,6 +6008,44 @@ struct sched_domain_topology_level {
struct sd_data data;
};
+/*
+ * Build an iteration mask that can exclude certain CPUs from the upwards
+ * domain traversal.
+ *
+ * Asymmetric node setups can result in situations where the domain tree is of
+ * unequal depth, make sure to skip domains that already cover the entire
+ * range.
+ *
+ * In that case build_sched_domains() will have terminated the iteration early
+ * and our sibling sd spans will be empty. Domains should always include the
+ * cpu they're built on, so check that.
+ *
+ */
+static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
+{
+ const struct cpumask *span = sched_domain_span(sd);
+ struct sd_data *sdd = sd->private;
+ struct sched_domain *sibling;
+ int i;
+
+ for_each_cpu(i, span) {
+ sibling = *per_cpu_ptr(sdd->sd, i);
+ if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+ continue;
+
+ cpumask_set_cpu(i, sched_group_mask(sg));
+ }
+}
+
+/*
+ * Return the canonical balance cpu for this group, this is the first cpu
+ * of this group that's also in the iteration mask.
+ */
+int group_balance_cpu(struct sched_group *sg)
+{
+ return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
+}
+
static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
@@ -6012,6 +6064,12 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
if (cpumask_test_cpu(i, covered))
continue;
+ child = *per_cpu_ptr(sdd->sd, i);
+
+ /* See the comment near build_group_mask(). */
+ if (!cpumask_test_cpu(i, sched_domain_span(child)))
+ continue;
+
sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
GFP_KERNEL, cpu_to_node(cpu));
@@ -6019,8 +6077,6 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
goto fail;
sg_span = sched_group_cpus(sg);
-
- child = *per_cpu_ptr(sdd->sd, i);
if (child->child) {
child = child->child;
cpumask_copy(sg_span, sched_domain_span(child));
@@ -6030,13 +6086,24 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
cpumask_or(covered, covered, sg_span);
sg->sgp = *per_cpu_ptr(sdd->sgp, i);
- atomic_inc(&sg->sgp->ref);
+ if (atomic_inc_return(&sg->sgp->ref) == 1)
+ build_group_mask(sd, sg);
+ /*
+ * Initialize sgp->power such that even if we mess up the
+ * domains and no possible iteration will get us here, we won't
+ * die on a /0 trap.
+ */
+ sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+
+ /*
+ * Make sure the first group of this domain contains the
+ * canonical balance cpu. Otherwise the sched_domain iteration
+ * breaks. See update_sg_lb_stats().
+ */
if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
- cpumask_first(sg_span) == cpu) {
- WARN_ON_ONCE(!cpumask_test_cpu(cpu, sg_span));
+ group_balance_cpu(sg) == cpu)
groups = sg;
- }
if (!first)
first = sg;
@@ -6109,6 +6176,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
cpumask_clear(sched_group_cpus(sg));
sg->sgp->power = 0;
+ cpumask_setall(sched_group_mask(sg));
for_each_cpu(j, span) {
if (get_group(j, sdd, NULL) != group)
@@ -6150,7 +6218,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
sg = sg->next;
} while (sg != sd->groups);
- if (cpu != group_first_cpu(sg))
+ if (cpu != group_balance_cpu(sg))
return;
update_group_power(sd, cpu);
@@ -6200,11 +6268,8 @@ int sched_domain_level_max;
static int __init setup_relax_domain_level(char *str)
{
- unsigned long val;
-
- val = simple_strtoul(str, NULL, 0);
- if (val < sched_domain_level_max)
- default_relax_domain_level = val;
+ if (kstrtoint(str, 0, &default_relax_domain_level))
+ pr_warn("Unable to set relax_domain_level\n");
return 1;
}
@@ -6314,14 +6379,13 @@ static struct sched_domain_topology_level *sched_domain_topology = default_topol
#ifdef CONFIG_NUMA
static int sched_domains_numa_levels;
-static int sched_domains_numa_scale;
static int *sched_domains_numa_distance;
static struct cpumask ***sched_domains_numa_masks;
static int sched_domains_curr_level;
static inline int sd_local_flags(int level)
{
- if (sched_domains_numa_distance[level] > REMOTE_DISTANCE)
+ if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
return 0;
return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
@@ -6379,6 +6443,42 @@ static const struct cpumask *sd_numa_mask(int cpu)
return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}
+static void sched_numa_warn(const char *str)
+{
+ static int done = false;
+ int i,j;
+
+ if (done)
+ return;
+
+ done = true;
+
+ printk(KERN_WARNING "ERROR: %s\n\n", str);
+
+ for (i = 0; i < nr_node_ids; i++) {
+ printk(KERN_WARNING " ");
+ for (j = 0; j < nr_node_ids; j++)
+ printk(KERN_CONT "%02d ", node_distance(i,j));
+ printk(KERN_CONT "\n");
+ }
+ printk(KERN_WARNING "\n");
+}
+
+static bool find_numa_distance(int distance)
+{
+ int i;
+
+ if (distance == node_distance(0, 0))
+ return true;
+
+ for (i = 0; i < sched_domains_numa_levels; i++) {
+ if (sched_domains_numa_distance[i] == distance)
+ return true;
+ }
+
+ return false;
+}
+
static void sched_init_numa(void)
{
int next_distance, curr_distance = node_distance(0, 0);
@@ -6386,7 +6486,6 @@ static void sched_init_numa(void)
int level = 0;
int i, j, k;
- sched_domains_numa_scale = curr_distance;
sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
if (!sched_domains_numa_distance)
return;
@@ -6397,23 +6496,41 @@ static void sched_init_numa(void)
*
* Assumes node_distance(0,j) includes all distances in
* node_distance(i,j) in order to avoid cubic time.
- *
- * XXX: could be optimized to O(n log n) by using sort()
*/
next_distance = curr_distance;
for (i = 0; i < nr_node_ids; i++) {
for (j = 0; j < nr_node_ids; j++) {
- int distance = node_distance(0, j);
- if (distance > curr_distance &&
- (distance < next_distance ||
- next_distance == curr_distance))
- next_distance = distance;
+ for (k = 0; k < nr_node_ids; k++) {
+ int distance = node_distance(i, k);
+
+ if (distance > curr_distance &&
+ (distance < next_distance ||
+ next_distance == curr_distance))
+ next_distance = distance;
+
+ /*
+ * While not a strong assumption it would be nice to know
+ * about cases where if node A is connected to B, B is not
+ * equally connected to A.
+ */
+ if (sched_debug() && node_distance(k, i) != distance)
+ sched_numa_warn("Node-distance not symmetric");
+
+ if (sched_debug() && i && !find_numa_distance(distance))
+ sched_numa_warn("Node-0 not representative");
+ }
+ if (next_distance != curr_distance) {
+ sched_domains_numa_distance[level++] = next_distance;
+ sched_domains_numa_levels = level;
+ curr_distance = next_distance;
+ } else break;
}
- if (next_distance != curr_distance) {
- sched_domains_numa_distance[level++] = next_distance;
- sched_domains_numa_levels = level;
- curr_distance = next_distance;
- } else break;
+
+ /*
+ * In case of sched_debug() we verify the above assumption.
+ */
+ if (!sched_debug())
+ break;
}
/*
* 'level' contains the number of unique distances, excluding the
@@ -6525,7 +6642,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
*per_cpu_ptr(sdd->sg, j) = sg;
- sgp = kzalloc_node(sizeof(struct sched_group_power),
+ sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
if (!sgp)
return -ENOMEM;
@@ -6578,7 +6695,6 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
if (!sd)
return child;
- set_domain_attribute(sd, attr);
cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
if (child) {
sd->level = child->level + 1;
@@ -6586,6 +6702,7 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
child->parent = sd;
}
sd->child = child;
+ set_domain_attribute(sd, attr);
return sd;
}
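The sched_init_numa() hunks above replace the node-0-only scan with a full pass over node_distance(i, k), collecting the unique distance values (the NUMA levels) and warning when the table is asymmetric or node 0 is not representative. A simplified, self-contained model of the unique-distance collection and symmetry check over a hard-coded distance table (the kernel structures its loop differently and gates the extra checks on sched_debug()):

#include <stdio.h>

#define NR_NODES 3

/* Hypothetical SLIT-style table; replace with real node distances. */
static const int dist[NR_NODES][NR_NODES] = {
	{ 10, 20, 30 },
	{ 20, 10, 20 },
	{ 30, 20, 10 },
};

int main(void)
{
	int levels[NR_NODES * NR_NODES];
	int nlevels = 0;
	int curr = dist[0][0], next;

	for (;;) {
		/* Find the smallest distance strictly greater than curr. */
		next = curr;
		for (int i = 0; i < NR_NODES; i++) {
			for (int k = 0; k < NR_NODES; k++) {
				int d = dist[i][k];

				if (d > curr && (d < next || next == curr))
					next = d;
				if (dist[k][i] != d)
					printf("warning: node distance not symmetric\n");
			}
		}
		if (next == curr)
			break;			/* no larger distance left */
		levels[nlevels++] = next;
		curr = next;
	}

	printf("%d unique distances above local:", nlevels);
	for (int i = 0; i < nlevels; i++)
		printf(" %d", levels[i]);
	printf("\n");
	return 0;
}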
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b2a2d236f27b..c099cc6eebe3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3602,7 +3602,7 @@ void update_group_power(struct sched_domain *sd, int cpu)
} while (group != child->groups);
}
- sdg->sgp->power = power;
+ sdg->sgp->power_orig = sdg->sgp->power = power;
}
/*
@@ -3632,7 +3632,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
- * @sd: The sched_domain whose statistics are to be updated.
+ * @env: The load balancing environment.
* @group: sched_group whose statistics are to be updated.
* @load_idx: Load index of sched_domain of this_cpu for load calc.
* @local_group: Does group contain this_cpu.
@@ -3652,7 +3652,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
int i;
if (local_group)
- balance_cpu = group_first_cpu(group);
+ balance_cpu = group_balance_cpu(group);
/* Tally up the load of all CPUs in the group */
max_cpu_load = 0;
@@ -3667,7 +3667,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
/* Bias balancing toward cpus of our domain */
if (local_group) {
- if (idle_cpu(i) && !first_idle_cpu) {
+ if (idle_cpu(i) && !first_idle_cpu &&
+ cpumask_test_cpu(i, sched_group_mask(group))) {
first_idle_cpu = 1;
balance_cpu = i;
}
@@ -3741,11 +3742,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
/**
* update_sd_pick_busiest - return 1 on busiest group
- * @sd: sched_domain whose statistics are to be checked
+ * @env: The load balancing environment.
* @sds: sched_domain statistics
* @sg: sched_group candidate to be checked for being the busiest
* @sgs: sched_group statistics
- * @this_cpu: the current cpu
*
* Determine if @sg is a busier group than the previously selected
* busiest group.
@@ -3783,9 +3783,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
/**
* update_sd_lb_stats - Update sched_domain's statistics for load balancing.
- * @sd: sched_domain whose statistics are to be updated.
- * @this_cpu: Cpu for which load balance is currently performed.
- * @idle: Idle status of this_cpu
+ * @env: The load balancing environment.
* @cpus: Set of cpus considered for load balancing.
* @balance: Should we balance.
* @sds: variable to hold the statistics for this sched_domain.
@@ -3874,10 +3872,8 @@ static inline void update_sd_lb_stats(struct lb_env *env,
* Returns 1 when packing is required and a task should be moved to
* this CPU. The amount of the imbalance is returned in *imbalance.
*
- * @sd: The sched_domain whose packing is to be checked.
+ * @env: The load balancing environment.
* @sds: Statistics of the sched_domain which is to be packed
- * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
- * @imbalance: returns amount of imbalanced due to packing.
*/
static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
{
@@ -3903,9 +3899,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
* fix_small_imbalance - Calculate the minor imbalance that exists
* amongst the groups of a sched_domain, during
* load balancing.
+ * @env: The load balancing environment.
* @sds: Statistics of the sched_domain whose imbalance is to be calculated.
- * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
- * @imbalance: Variable to store the imbalance.
*/
static inline
void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
@@ -4048,11 +4043,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* Also calculates the amount of weighted load which should be moved
* to restore balance.
*
- * @sd: The sched_domain whose busiest group is to be returned.
- * @this_cpu: The cpu for which load balancing is currently being performed.
- * @imbalance: Variable which stores amount of weighted load which should
- * be moved to restore balance/put a group to idle.
- * @idle: The idle status of this_cpu.
+ * @env: The load balancing environment.
* @cpus: The set of CPUs under consideration for load-balancing.
* @balance: Pointer to a variable indicating if this_cpu
* is the appropriate cpu to perform load balancing at this_level.
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2a4e8dffbd6b..573e1ca01102 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1562,7 +1562,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
task_running(rq, task) ||
!task->on_rq)) {
- raw_spin_unlock(&lowest_rq->lock);
+ double_unlock_balance(rq, lowest_rq);
lowest_rq = NULL;
break;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ba9dccfd24ce..6d52cea7f33d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -526,6 +526,8 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_id);
+extern int group_balance_cpu(struct sched_group *sg);
+
#endif /* CONFIG_SMP */
#include "stats.h"
diff --git a/kernel/sys.c b/kernel/sys.c
index 9ff89cb9657a..f0ec44dcd415 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1786,27 +1786,13 @@ SYSCALL_DEFINE1(umask, int, mask)
}
#ifdef CONFIG_CHECKPOINT_RESTORE
-static bool vma_flags_mismatch(struct vm_area_struct *vma,
- unsigned long required,
- unsigned long banned)
-{
- return (vma->vm_flags & required) != required ||
- (vma->vm_flags & banned);
-}
-
static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
+ struct vm_area_struct *vma;
struct file *exe_file;
struct dentry *dentry;
int err;
- /*
- * Setting new mm::exe_file is only allowed when no VM_EXECUTABLE vma's
- * remain. So perform a quick test first.
- */
- if (mm->num_exe_file_vmas)
- return -EBUSY;
-
exe_file = fget(fd);
if (!exe_file)
return -EBADF;
@@ -1827,17 +1813,30 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
if (err)
goto exit;
+ down_write(&mm->mmap_sem);
+
+ /*
+ * Forbid mm->exe_file change if there are mapped other files.
+ */
+ err = -EBUSY;
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (vma->vm_file && !path_equal(&vma->vm_file->f_path,
+ &exe_file->f_path))
+ goto exit_unlock;
+ }
+
/*
* The symlink can be changed only once, just to disallow arbitrary
* transitions malicious software might bring in. This means one
* could make a snapshot over all processes running and monitor
* /proc/pid/exe changes to notice unusual activity if needed.
*/
- down_write(&mm->mmap_sem);
- if (likely(!mm->exe_file))
- set_mm_exe_file(mm, exe_file);
- else
- err = -EBUSY;
+ err = -EPERM;
+ if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
+ goto exit_unlock;
+
+ set_mm_exe_file(mm, exe_file);
+exit_unlock:
up_write(&mm->mmap_sem);
exit:
@@ -1862,7 +1861,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
if (opt == PR_SET_MM_EXE_FILE)
return prctl_set_mm_exe_file(mm, (unsigned int)addr);
- if (addr >= TASK_SIZE)
+ if (addr >= TASK_SIZE || addr < mmap_min_addr)
return -EINVAL;
error = -EINVAL;
@@ -1924,12 +1923,6 @@ static int prctl_set_mm(int opt, unsigned long addr,
error = -EFAULT;
goto out;
}
-#ifdef CONFIG_STACK_GROWSUP
- if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSUP, 0))
-#else
- if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSDOWN, 0))
-#endif
- goto out;
if (opt == PR_SET_MM_START_STACK)
mm->start_stack = addr;
else if (opt == PR_SET_MM_ARG_START)
@@ -1981,12 +1974,22 @@ out:
up_read(&mm->mmap_sem);
return error;
}
+
+static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
+{
+ return put_user(me->clear_child_tid, tid_addr);
+}
+
#else /* CONFIG_CHECKPOINT_RESTORE */
static int prctl_set_mm(int opt, unsigned long addr,
unsigned long arg4, unsigned long arg5)
{
return -EINVAL;
}
+static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
+{
+ return -EINVAL;
+}
#endif
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
@@ -2124,6 +2127,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
else
return -EINVAL;
break;
+ case PR_GET_TID_ADDRESS:
+ error = prctl_get_tid_address(me, (int __user **)arg2);
+ break;
default:
return -EINVAL;
}
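The sys.c change above enforces that /proc/pid/exe can be redirected at most once per mm by replacing the "exe_file must still be NULL" test with test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags), after first refusing if any other file is still mapped. A sketch of the one-shot semantics using C11 atomics in place of the kernel's bitops (names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

/* Userspace stand-in for the MMF_EXE_FILE_CHANGED bit in mm->flags. */
static atomic_flag exe_file_changed = ATOMIC_FLAG_INIT;

/* Returns 0 on the first successful change, -1 (think -EPERM) afterwards. */
static int try_change_exe_file(const char *path)
{
	if (atomic_flag_test_and_set(&exe_file_changed))
		return -1;		/* the symlink may be changed only once */
	printf("exe file set to %s\n", path);
	return 0;
}

int main(void)
{
	printf("%d\n", try_change_exe_file("/restored/binary"));	/* 0 */
	printf("%d\n", try_change_exe_file("/another/binary"));		/* -1 */
	return 0;
}

Because the test and the set are a single atomic operation, two racing callers cannot both succeed, which is the property the kernel relies on here.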
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index da70c6db496c..869997833928 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -274,6 +274,7 @@ EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
+ unsigned long rcu_delta_jiffies;
ktime_t last_update, expires, now;
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
u64 time_delta;
@@ -322,7 +323,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
time_delta = timekeeping_max_deferment();
} while (read_seqretry(&xtime_lock, seq));
- if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
+ if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
arch_needs_cpu(cpu)) {
next_jiffies = last_jiffies + 1;
delta_jiffies = 1;
@@ -330,6 +331,10 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
/* Get the next timer wheel timer */
next_jiffies = get_next_timer_interrupt(last_jiffies);
delta_jiffies = next_jiffies - last_jiffies;
+ if (rcu_delta_jiffies < delta_jiffies) {
+ next_jiffies = last_jiffies + rcu_delta_jiffies;
+ delta_jiffies = rcu_delta_jiffies;
+ }
}
/*
* Do not stop the tick, if we are only one off
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6e46cacf5969..6f46a00a1e8a 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -962,6 +962,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
timekeeper.xtime.tv_sec++;
leap = second_overflow(timekeeper.xtime.tv_sec);
timekeeper.xtime.tv_sec += leap;
+ timekeeper.wall_to_monotonic.tv_sec -= leap;
}
/* Accumulate raw time */
@@ -1077,6 +1078,7 @@ static void update_wall_time(void)
timekeeper.xtime.tv_sec++;
leap = second_overflow(timekeeper.xtime.tv_sec);
timekeeper.xtime.tv_sec += leap;
+ timekeeper.wall_to_monotonic.tv_sec -= leap;
}
timekeeping_update(false);
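Both timekeeping hunks above keep CLOCK_MONOTONIC continuous across a leap second: whatever second_overflow() adds to xtime is subtracted from wall_to_monotonic, so the sum that defines the monotonic clock never jumps. A small arithmetic check of that invariant with illustrative values (seconds only, not real clock state):

#include <stdio.h>

int main(void)
{
	long xtime = 1341100799;		/* wall-clock seconds near a leap edge */
	long wall_to_monotonic = -1341000000;	/* offset so mono = xtime + w2m */
	long mono_before = xtime + wall_to_monotonic;

	int leap = -1;		/* e.g. an inserted leap second; +1 for a deleted one */
	xtime += leap;			/* wall clock adjusted by the leap */
	wall_to_monotonic -= leap;	/* offset adjusted the opposite way */

	long mono_after = xtime + wall_to_monotonic;
	printf("monotonic before=%ld after=%ld (unchanged)\n",
	       mono_before, mono_after);
	return 0;
}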