| author | Matt Evans <matt@ozlabs.org> | 2010-07-29 18:47:17 +0000 |
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2010-07-31 15:05:21 +1000 |
| commit | e2f7f73717c0a2927bbe7551d90b1ec47a094361 (patch) | |
| tree | e07dac6b21235a92ba6c456e3c471749a2396191 | |
| parent | 2c48a7d615b82e030196e8b61ab0c7933be16dff (diff) | |
powerpc/kexec: Add to and tidy debug/comments in machine_kexec64.c
Tidies some typos, KERN_INFO-ises an info msg, and adds a debug msg showing
when the final sequence starts.

Also adds a comment to kexec_prepare_cpus_wait() noting a possible problem:
the need for kexec to deal with CPUs that failed to start up originally.
Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
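The message and FIXME described above concern the notify-once busy-wait in kexec_prepare_cpus_wait(): the CPU driving the kexec polls every other online CPU's kexec_state and prints a single line for each CPU it is still waiting on. Below is a minimal user-space sketch of that pattern; NR_CPUS, cpu_state[], WAIT_STATE and wait_for_cpus() are illustrative stand-ins, not the kernel's symbols.

```c
/*
 * Minimal user-space analogue of the notify-once busy-wait in
 * kexec_prepare_cpus_wait().  NR_CPUS, cpu_state[], WAIT_STATE and
 * wait_for_cpus() are illustrative stand-ins, not kernel symbols.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS     4
#define WAIT_STATE  2   /* e.g. "entered real mode" */

static _Atomic int cpu_state[NR_CPUS];

static void wait_for_cpus(int my_cpu, int wait_state)
{
    int notified = -1;

    for (int i = 0; i < NR_CPUS; i++) {
        if (i == my_cpu)
            continue;

        /* Spin until this CPU reports the state we need... */
        while (atomic_load(&cpu_state[i]) < wait_state) {
            /* ...but only announce each straggler once. */
            if (i != notified) {
                printf("waiting for cpu %d to enter state %d\n",
                       i, wait_state);
                notified = i;
            }
        }
    }
}

int main(void)
{
    /* Pretend every other "CPU" already reached the target state. */
    for (int i = 0; i < NR_CPUS; i++)
        atomic_store(&cpu_state[i], WAIT_STATE);

    wait_for_cpus(0, WAIT_STATE);
    printf("all cpus reached state %d\n", WAIT_STATE);
    return 0;
}
```

The real loop iterates only over online CPUs, which is exactly what the FIXME worries about: a CPU that should be online but is not would never be polled here and would have to be chaperoned into kexec_smp_wait() separately.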
-rw-r--r-- | arch/powerpc/kernel/machine_kexec_64.c | 29 |
1 file changed, 24 insertions, 5 deletions
```diff
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 022d2f613b7b..a0bcec3614e1 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -15,6 +15,7 @@
 #include <linux/thread_info.h>
 #include <linux/init_task.h>
 #include <linux/errno.h>
+#include <linux/kernel.h>
 
 #include <asm/page.h>
 #include <asm/current.h>
@@ -184,7 +185,20 @@ static void kexec_prepare_cpus_wait(int wait_state)
 	hw_breakpoint_disable();
 	my_cpu = get_cpu();
 
-	/* Make sure each CPU has atleast made it to the state we need */
+	/* Make sure each CPU has at least made it to the state we need.
+	 *
+	 * FIXME: There is a (slim) chance of a problem if not all of the CPUs
+	 * are correctly onlined.  If somehow we start a CPU on boot with RTAS
+	 * start-cpu, but somehow that CPU doesn't write callin_cpu_map[] in
+	 * time, the boot CPU will timeout.  If it does eventually execute
+	 * stuff, the secondary will start up (paca[].cpu_start was written)
+	 * and get into a peculiar state.  If the platform supports
+	 * smp_ops->take_timebase(), the secondary CPU will probably be
+	 * spinning in there.  If not (i.e. pseries), the secondary will
+	 * continue on and try to online itself/idle/etc.  If it survives that,
+	 * we need to find these possible-but-not-online-but-should-be CPUs
+	 * and chaperone them into kexec_smp_wait().
+	 */
 	for_each_online_cpu(i) {
 		if (i == my_cpu)
 			continue;
@@ -192,9 +206,9 @@ static void kexec_prepare_cpus_wait(int wait_state)
 		while (paca[i].kexec_state < wait_state) {
 			barrier();
 			if (i != notified) {
-				printk( "kexec: waiting for cpu %d (physical"
-						" %d) to enter %i state\n",
-					i, paca[i].hw_cpu_id, wait_state);
+				printk(KERN_INFO "kexec: waiting for cpu %d "
+				       "(physical %d) to enter %i state\n",
+				       i, paca[i].hw_cpu_id, wait_state);
 				notified = i;
 			}
 		}
@@ -218,7 +232,10 @@ static void kexec_prepare_cpus(void)
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(0, 0);
 
-	/* Before removing MMU mapings make sure all CPUs have entered real mode */
+	/*
+	 * Before removing MMU mappings make sure all CPUs have entered real
+	 * mode:
+	 */
 	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);
 
 	put_cpu();
@@ -287,6 +304,8 @@ void default_machine_kexec(struct kimage *image)
 	if (crashing_cpu == -1)
 		kexec_prepare_cpus();
 
+	pr_debug("kexec: Starting switchover sequence.\n");
+
 	/* switch to a staticly allocated stack. Based on irq stack code.
 	 * XXX: the task struct will likely be invalid once we do the copy!
 	 */
```
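The pr_debug() added in the last hunk is compiled out of ordinary builds; depending on kernel configuration it becomes a dynamic-debug call, a printk(KERN_DEBUG ...), or nothing at all, which is presumably also why the patch adds the <linux/kernel.h> include for it. Below is a rough user-space sketch of that kind of compile-time gating; the dbg() macro and the DEBUG switch are illustrative only, not the kernel's actual pr_debug() implementation.

```c
/*
 * Rough sketch of pr_debug()-style compile-time gating.  The dbg()
 * macro and the DEBUG switch are illustrative only; the kernel's real
 * pr_debug() also supports CONFIG_DYNAMIC_DEBUG for run-time control.
 */
#include <stdio.h>

#ifdef DEBUG
#define dbg(fmt, ...)   fprintf(stderr, "debug: " fmt, ##__VA_ARGS__)
#else
#define dbg(fmt, ...)   do { } while (0)    /* compiled out entirely */
#endif

int main(void)
{
    /* Prints only when built with -DDEBUG. */
    dbg("Starting switchover sequence.\n");
    return 0;
}
```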