Diffstat (limited to 'arch/arm/kernel')

 arch/arm/kernel/entry-armv.S     | 106
 arch/arm/kernel/entry-v7m.S      |   2
 arch/arm/kernel/fiq.c            |  24
 arch/arm/kernel/head-common.S    |   1
 arch/arm/kernel/head-nommu.S     |   2
 arch/arm/kernel/head.S           |   2
 arch/arm/kernel/hw_breakpoint.c  |   4
 arch/arm/kernel/hyp-stub.S       |   4
 arch/arm/kernel/machine_kexec.c  |  21
 arch/arm/kernel/perf_event.c     |  10
 arch/arm/kernel/perf_event_cpu.c |   6
 arch/arm/kernel/process.c        |  49
 arch/arm/kernel/psci_smp.c       |   3
 arch/arm/kernel/setup.c          |   3
 arch/arm/kernel/signal.c         |  56
 arch/arm/kernel/signal.h         |  12
 arch/arm/kernel/smp.c            |  28
 arch/arm/kernel/smp_tlb.c        |  17
 arch/arm/kernel/smp_twd.c        |   6
 arch/arm/kernel/traps.c          |  46
 arch/arm/kernel/vmlinux.lds.S    |  17

 21 files changed, 268 insertions(+), 151 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a39cfc2a1f90..9cbe70c8b0ef 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
         .endm

         .macro  kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
@@ -742,6 +743,18 @@ ENDPROC(__switch_to)
 #endif
         .endm

+        .macro  kuser_pad, sym, size
+        .if     (. - \sym) & 3
+        .rept   4 - (. - \sym) & 3
+        .byte   0
+        .endr
+        .endif
+        .rept   (\size - (. - \sym)) / 4
+        .word   0xe7fddef1
+        .endr
+        .endm
+
+#ifdef CONFIG_KUSER_HELPERS
         .align  5
         .globl  __kuser_helper_start
 __kuser_helper_start:
@@ -832,18 +845,13 @@ kuser_cmpxchg64_fixup:
 #error "incoherent kernel configuration"
 #endif

-        /* pad to next slot */
-        .rept   (16 - (. - __kuser_cmpxchg64)/4)
-        .word   0
-        .endr
-
-        .align  5
+        kuser_pad __kuser_cmpxchg64, 64

 __kuser_memory_barrier:                         @ 0xffff0fa0
         smp_dmb arm
         usr_ret lr

-        .align  5
+        kuser_pad __kuser_memory_barrier, 32

 __kuser_cmpxchg:                                @ 0xffff0fc0
@@ -916,13 +924,14 @@ kuser_cmpxchg32_fixup:
 #endif

-        .align  5
+        kuser_pad __kuser_cmpxchg, 32

 __kuser_get_tls:                                @ 0xffff0fe0
         ldr     r0, [pc, #(16 - 8)]     @ read TLS, set in kuser_get_tls_init
         usr_ret lr
         mrc     p15, 0, r0, c13, c0, 3  @ 0xffff0fe8 hardware TLS code
-        .rep    4
+        kuser_pad __kuser_get_tls, 16
+        .rep    3
         .word   0                       @ 0xffff0ff0 software TLS value, then
         .endr                           @ pad up to __kuser_helper_version
@@ -932,14 +941,16 @@ __kuser_helper_version:                 @ 0xffff0ffc
         .globl  __kuser_helper_end
 __kuser_helper_end:

+#endif
+
         THUMB(  .thumb  )

 /*
  * Vector stubs.
  *
- * This code is copied to 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's.  Note that this code must not
- * exceed 0x300 bytes.
+ * This code is copied to 0xffff1000 so we can use branches in the
+ * vectors, rather than ldr's.  Note that this code must not exceed
+ * a page size.
  *
  * Common stub entry macro:
  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -986,8 +997,17 @@ ENDPROC(vector_\name)
 1:
         .endm

-        .globl  __stubs_start
+        .section .stubs, "ax", %progbits
 __stubs_start:
+        @ This must be the first word
+        .word   vector_swi
+
+vector_rst:
+ ARM(   swi     SYS_ERROR0      )
+ THUMB( svc     #0              )
+ THUMB( nop                     )
+        b       vector_und
+
 /*
  * Interrupt dispatcher
  */
@@ -1082,6 +1102,16 @@ __stubs_start:
         .align  5

 /*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+        b       vector_addrexcptn
+
+/*=============================================================================
  * Undefined FIQs
  *-----------------------------------------------------------------------------
  * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -1094,45 +1124,19 @@ __stubs_start:
 vector_fiq:
         subs    pc, lr, #4

-/*=============================================================================
- * Address exception handler
- *-----------------------------------------------------------------------------
- * These aren't too critical.
- * (they're not supposed to happen, and won't happen in 32-bit data mode).
- */
-
-vector_addrexcptn:
-        b       vector_addrexcptn
-
-/*
- * We group all the following data together to optimise
- * for CPUs with separate I & D caches.
- */
-        .align  5
-
-.LCvswi:
-        .word   vector_swi
-
-        .globl  __stubs_end
-__stubs_end:
-
-        .equ    stubs_offset, __vectors_start + 0x200 - __stubs_start
+        .globl  vector_fiq_offset
+        .equ    vector_fiq_offset, vector_fiq

-        .globl  __vectors_start
+        .section .vectors, "ax", %progbits
 __vectors_start:
- ARM(   swi     SYS_ERROR0      )
- THUMB( svc     #0              )
- THUMB( nop                     )
-        W(b)    vector_und + stubs_offset
-        W(ldr)  pc, .LCvswi + stubs_offset
-        W(b)    vector_pabt + stubs_offset
-        W(b)    vector_dabt + stubs_offset
-        W(b)    vector_addrexcptn + stubs_offset
-        W(b)    vector_irq + stubs_offset
-        W(b)    vector_fiq + stubs_offset
-
-        .globl  __vectors_end
-__vectors_end:
+        W(b)    vector_rst
+        W(b)    vector_und
+        W(ldr)  pc, __vectors_start + 0x1000
+        W(b)    vector_pabt
+        W(b)    vector_dabt
+        W(b)    vector_addrexcptn
+        W(b)    vector_irq
+        W(b)    vector_fiq

         .data
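Note: the kuser helpers that CONFIG_KUSER_HELPERS makes optional above are a long-standing fixed-address userspace ABI (0xffff0fa0 memory barrier, 0xffff0fc0 cmpxchg, 0xffff0fe0 get_tls; see Documentation/arm/kernel_user_helpers.txt). With the option disabled, calls into the page now land on the 0xe7fddef1 padding emitted by kuser_pad rather than stale helper code. A minimal, hypothetical userspace sketch of the helper calling convention (not part of the patch):

    /* Hypothetical demo: calling a kuser helper at its fixed ABI
     * address; 32-bit ARM userspace only. */
    typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);

    #define __kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

    /* Atomically add val to *ptr; __kuser_cmpxchg returns 0 on success. */
    int atomic_add(volatile int *ptr, int val)
    {
            int old;
            do {
                    old = *ptr;
            } while (__kuser_cmpxchg(old, old + val, ptr));
            return old + val;
    }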
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index e00621f1403f..52b26432c9a9 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -49,7 +49,7 @@ __irq_entry:
         mov     r1, sp
         stmdb   sp!, {lr}
         @ routine called with r0 = irq number, r1 = struct pt_regs *
-        bl      nvic_do_IRQ
+        bl      nvic_handle_irq

         pop     {lr}
         @
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 2adda11f712f..918875d96d5d 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -47,6 +47,11 @@
 #include <asm/irq.h>
 #include <asm/traps.h>

+#define FIQ_OFFSET ({                                   \
+                extern void *vector_fiq_offset;         \
+                (unsigned)&vector_fiq_offset;           \
+        })
+
 static unsigned long no_fiq_insn;

 /* Default reacquire function
@@ -79,14 +84,14 @@ int show_fiq_list(struct seq_file *p, int prec)

 void set_fiq_handler(void *start, unsigned int length)
 {
-#if defined(CONFIG_CPU_USE_DOMAINS)
-        memcpy((void *)0xffff001c, start, length);
-#else
-        memcpy(vectors_page + 0x1c, start, length);
-#endif
-        flush_icache_range(0xffff001c, 0xffff001c + length);
-        if (!vectors_high())
-                flush_icache_range(0x1c, 0x1c + length);
+        void *base = vectors_page;
+        unsigned offset = FIQ_OFFSET;
+
+        memcpy(base + offset, start, length);
+        if (!cache_is_vipt_nonaliasing())
+                flush_icache_range((unsigned long)base + offset, offset +
+                                   length);
+        flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
 }

 int claim_fiq(struct fiq_handler *f)
@@ -144,6 +149,7 @@ EXPORT_SYMBOL(disable_fiq);

 void __init init_FIQ(int start)
 {
-        no_fiq_insn = *(unsigned long *)0xffff001c;
+        unsigned offset = FIQ_OFFSET;
+        no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
         fiq_start = start;
 }
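Note: set_fiq_handler() above now derives the FIQ vector slot from the vector_fiq_offset symbol exported by entry-armv.S instead of the hardcoded 0x1c, and flushes the 0xffff0000 mapping. For context, a driver installs a handler through the existing <asm/fiq.h> API roughly as below; this is a hypothetical sketch, with my_fiq_start/my_fiq_end standing in for labels around hand-written FIQ assembly:

    #include <asm/fiq.h>

    extern unsigned char my_fiq_start[], my_fiq_end[];      /* assumed labels */

    static struct fiq_handler my_fh = { .name = "my-fiq" };

    static int my_fiq_install(void)
    {
            int ret = claim_fiq(&my_fh);    /* take ownership of the FIQ */

            if (ret)
                    return ret;
            /* copied into the relocated FIQ vector slot shown above */
            set_fiq_handler(my_fiq_start, my_fiq_end - my_fiq_start);
            return 0;
    }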
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 76ab5ca50610..47cd974e57ea 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -149,7 +149,6 @@ ENDPROC(lookup_processor_type)
  *      r5 = proc_info pointer in physical address space
  *      r9 = cpuid (preserved)
  */
-        __CPUINIT
 __lookup_processor_type:
         adr     r3, __lookup_processor_type_data
         ldmia   r3, {r4 - r6}
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 75f14cc3e073..14235ba64a90 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -87,7 +87,7 @@ ENTRY(stext)
 ENDPROC(stext)

 #ifdef CONFIG_SMP
-        __CPUINIT
+        .text
 ENTRY(secondary_startup)
         /*
          * Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 45e8935cae4e..2c7cc1e03473 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -343,7 +343,7 @@ __turn_mmu_on_loc:
         .long   __turn_mmu_on_end

 #if defined(CONFIG_SMP)
-        __CPUINIT
+        .text
 ENTRY(secondary_startup)
         /*
          * Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 1fd749ee4a1b..7b95de601357 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1020,7 +1020,7 @@ out_mdbgen:
                 cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
 }

-static int __cpuinit dbg_reset_notify(struct notifier_block *self,
+static int dbg_reset_notify(struct notifier_block *self,
                                       unsigned long action, void *cpu)
 {
         if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
@@ -1029,7 +1029,7 @@ static int dbg_reset_notify(struct notifier_block *self,
         return NOTIFY_OK;
 }

-static struct notifier_block __cpuinitdata dbg_reset_nb = {
+static struct notifier_block dbg_reset_nb = {
         .notifier_call = dbg_reset_notify,
 };
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 4910232c4833..797b1a6a4906 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -56,8 +56,8 @@ ENTRY(__boot_cpu_mode)
         ldr     \reg3, [\reg2]
         ldr     \reg1, [\reg2, \reg3]
         cmp     \mode, \reg1            @ matches primary CPU boot mode?
-        orrne   r7, r7, #BOOT_CPU_MODE_MISMATCH
-        strne   r7, [r5, r6]            @ record what happened and give up
+        orrne   \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
+        strne   \reg1, [\reg2, \reg3]   @ record what happened and give up
         .endm

 #else   /* ZIMAGE */
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c446bf..57221e349a7c 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/mach-types.h>
+#include <asm/smp_plat.h>
 #include <asm/system_misc.h>

 extern const unsigned char relocate_new_kernel[];
@@ -39,6 +40,14 @@ int machine_kexec_prepare(struct kimage *image)
         int i, err;

         /*
+         * Validate that if the current HW supports SMP, then the SW supports
+         * and implements CPU hotplug for the current HW. If not, we won't be
+         * able to kexec reliably, so fail the prepare operation.
+         */
+        if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+                return -EINVAL;
+
+        /*
          * No segment at default ATAGs address. try to locate
          * a dtb using magic.
          */
@@ -73,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
         crash_save_cpu(&regs, smp_processor_id());
         flush_cache_all();

+        set_cpu_online(smp_processor_id(), false);
         atomic_dec(&waiting_for_crash_ipi);
         while (1)
                 cpu_relax();
@@ -134,10 +144,13 @@ void machine_kexec(struct kimage *image)
         unsigned long reboot_code_buffer_phys;
         void *reboot_code_buffer;

-        if (num_online_cpus() > 1) {
-                pr_err("kexec: error: multiple CPUs still online\n");
-                return;
-        }
+        /*
+         * This can only happen if machine_shutdown() failed to disable some
+         * CPU, and that can only happen if the checks in
+         * machine_kexec_prepare() were not correct. If this fails, we can't
+         * reliably kexec anyway, so BUG_ON is appropriate.
+         */
+        BUG_ON(num_online_cpus() > 1);

         page_list = image->head & PAGE_MASK;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4e533f..e186ee1e63f6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-        int mapping = (*event_map)[config];
+        int mapping;
+
+        if (config >= PERF_COUNT_HW_MAX)
+                return -EINVAL;
+
+        mapping = (*event_map)[config];
         return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }

@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
         struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
         struct pmu *leader_pmu = event->group_leader->pmu;

+        if (is_software_event(event))
+                return 1;
+
         if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
                 return 1;
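Note: the armpmu_map_hw_event() bounds check above closes an out-of-range read — a generic hardware event number supplied from userspace was used to index a PERF_COUNT_HW_MAX-sized map without validation. A hypothetical userspace probe of the fixed behaviour (expects a clean error rather than whatever happened to sit past the map):

    /* Hypothetical test, not from the patch: request an out-of-range
     * generic hardware event and expect the kernel to reject it. */
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_MAX;        /* deliberately out of range */

            if (syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0) < 0)
                    perror("perf_event_open");      /* EINVAL with this patch */
            return 0;
    }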
+ "[sigpage]" : NULL; +} + +static struct page *signal_page; +extern struct page *get_signal_page(void); + +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +{ + struct mm_struct *mm = current->mm; + unsigned long addr; + int ret; + + if (!signal_page) + signal_page = get_signal_page(); + if (!signal_page) + return -ENOMEM; + + down_write(&mm->mmap_sem); + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + + ret = install_special_mapping(mm, addr, PAGE_SIZE, + VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, + &signal_page); + + if (ret == 0) + mm->context.sigpage = addr; + + up_fail: + up_write(&mm->mmap_sem); + return ret; } #endif diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c index 219f1d73572a..70ded3fb42d9 100644 --- a/arch/arm/kernel/psci_smp.c +++ b/arch/arm/kernel/psci_smp.c @@ -46,8 +46,7 @@ extern void secondary_startup(void); -static int __cpuinit psci_boot_secondary(unsigned int cpu, - struct task_struct *idle) +static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle) { if (psci_ops.cpu_on) return psci_ops.cpu_on(cpu_logical_map(cpu), diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 63af9a7ae512..afc2489ee13b 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -836,6 +836,8 @@ static int __init meminfo_cmp(const void *_a, const void *_b) void __init hyp_mode_check(void) { #ifdef CONFIG_ARM_VIRT_EXT + sync_boot_mode(); + if (is_hyp_mode_available()) { pr_info("CPU: All CPU(s) started in HYP mode.\n"); pr_info("CPU: Virtualization extensions available.\n"); @@ -971,6 +973,7 @@ static const char *hwcap_str[] = { "vfpv4", "idiva", "idivt", + "vfpd32", "lpae", NULL }; diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 1c16c35c271a..ab3304225272 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -8,6 +8,7 @@ * published by the Free Software Foundation. */ #include <linux/errno.h> +#include <linux/random.h> #include <linux/signal.h> #include <linux/personality.h> #include <linux/uaccess.h> @@ -15,12 +16,11 @@ #include <asm/elf.h> #include <asm/cacheflush.h> +#include <asm/traps.h> #include <asm/ucontext.h> #include <asm/unistd.h> #include <asm/vfp.h> -#include "signal.h" - /* * For ARM syscalls, we encode the syscall number into the instruction. 
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 219f1d73572a..70ded3fb42d9 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -46,8 +46,7 @@

 extern void secondary_startup(void);

-static int __cpuinit psci_boot_secondary(unsigned int cpu,
-                                         struct task_struct *idle)
+static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
         if (psci_ops.cpu_on)
                 return psci_ops.cpu_on(cpu_logical_map(cpu),
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 63af9a7ae512..afc2489ee13b 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -836,6 +836,8 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
 void __init hyp_mode_check(void)
 {
 #ifdef CONFIG_ARM_VIRT_EXT
+        sync_boot_mode();
+
         if (is_hyp_mode_available()) {
                 pr_info("CPU: All CPU(s) started in HYP mode.\n");
                 pr_info("CPU: Virtualization extensions available.\n");
@@ -971,6 +973,7 @@ static const char *hwcap_str[] = {
         "vfpv4",
         "idiva",
         "idivt",
+        "vfpd32",
         "lpae",
         NULL
 };
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 1c16c35c271a..ab3304225272 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/errno.h>
+#include <linux/random.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/uaccess.h>
@@ -15,12 +16,11 @@

 #include <asm/elf.h>
 #include <asm/cacheflush.h>
+#include <asm/traps.h>
 #include <asm/ucontext.h>
 #include <asm/unistd.h>
 #include <asm/vfp.h>

-#include "signal.h"
-
 /*
  * For ARM syscalls, we encode the syscall number into the instruction.
  */
@@ -40,11 +40,13 @@
 #define SWI_THUMB_SIGRETURN     (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
 #define SWI_THUMB_RT_SIGRETURN  (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

-const unsigned long sigreturn_codes[7] = {
+static const unsigned long sigreturn_codes[7] = {
         MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
         MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
 };

+static unsigned long signal_return_offset;
+
 #ifdef CONFIG_CRUNCH
 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
 {
@@ -400,14 +402,20 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
             __put_user(sigreturn_codes[idx+1], rc+1))
                 return 1;

-        if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
+#ifdef CONFIG_MMU
+        if (cpsr & MODE32_BIT) {
+                struct mm_struct *mm = current->mm;
+
                 /*
-                 * 32-bit code can use the new high-page
-                 * signal return code support except when the MPU has
-                 * protected the vectors page from PL0
+                 * 32-bit code can use the signal return page
+                 * except when the MPU has protected the vectors
+                 * page from PL0
                  */
-                retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
-        } else {
+                retcode = mm->context.sigpage + signal_return_offset +
+                          (idx << 2) + thumb;
+        } else
+#endif
+        {
                 /*
                  * Ensure that the instruction cache sees
                  * the return code written onto the stack.
@@ -608,3 +616,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
         } while (thread_flags & _TIF_WORK_MASK);
         return 0;
 }
+
+struct page *get_signal_page(void)
+{
+        unsigned long ptr;
+        unsigned offset;
+        struct page *page;
+        void *addr;
+
+        page = alloc_pages(GFP_KERNEL, 0);
+
+        if (!page)
+                return NULL;
+
+        addr = page_address(page);
+
+        /* Give the signal return code some randomness */
+        offset = 0x200 + (get_random_int() & 0x7fc);
+        signal_return_offset = offset;
+
+        /*
+         * Copy signal return handlers into the vector page, and
+         * set sigreturn to be a pointer to these.
+         */
+        memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+
+        ptr = (unsigned long)addr + offset;
+        flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+
+        return page;
+}
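Note on the randomisation in get_signal_page() above: masking get_random_int() with 0x7fc keeps the value word-aligned and below 0x800, so the sigreturn code starts somewhere in [0x200, 0x9fc] — 512 possible slots — and even the largest offset leaves the 28-byte sigreturn_codes array well inside the page. A tiny check of that arithmetic (0x200 and 0x7fc come from the patch, the rest is plain arithmetic):

    #include <stdio.h>

    int main(void)
    {
            unsigned lo = 0x200 + (0x00000000 & 0x7fc);     /* smallest offset */
            unsigned hi = 0x200 + (0xffffffff & 0x7fc);     /* largest offset  */

            printf("range 0x%x..0x%x, %u slots, code ends at 0x%x\n",
                   lo, hi, (hi - lo) / 4 + 1, hi + 28);
            return 0;
    }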
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
deleted file mode 100644
index 5ff067b7c752..000000000000
--- a/arch/arm/kernel/signal.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- *  linux/arch/arm/kernel/signal.h
- *
- *  Copyright (C) 2005-2009 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define KERN_SIGRETURN_CODE     (CONFIG_VECTORS_BASE + 0x00000500)
-
-extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c5fb5469054b..2dc19349eb19 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -58,7 +58,7 @@ struct secondary_data secondary_data;
  * control for which core is the next to come out of the secondary
  * boot "holding pen"
  */
-volatile int __cpuinitdata pen_release = -1;
+volatile int pen_release = -1;

 enum ipi_msg_type {
         IPI_WAKEUP,
@@ -86,7 +86,7 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
         return pgdir >> ARCH_PGD_SHIFT;
 }

-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
         int ret;

@@ -138,13 +138,23 @@ void __init smp_init_cpus(void)
                 smp_ops.smp_init_cpus();
 }

-int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+int boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
         if (smp_ops.smp_boot_secondary)
                 return smp_ops.smp_boot_secondary(cpu, idle);
         return -ENOSYS;
 }

+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+        if (smp_ops.cpu_kill)
+                return 1;
+#endif
+
+        return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);

@@ -170,7 +180,7 @@ static int platform_cpu_disable(unsigned int cpu)
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
-int __cpuinit __cpu_disable(void)
+int __cpu_disable(void)
 {
         unsigned int cpu = smp_processor_id();
         int ret;
@@ -216,7 +226,7 @@ static DECLARE_COMPLETION(cpu_died);
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
-void __cpuinit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
         if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                 pr_err("CPU%u: cpu didn't die\n", cpu);
@@ -306,7 +316,7 @@ void __ref cpu_die(void)
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
  */
-static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+static void smp_store_cpu_info(unsigned int cpuid)
 {
         struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

@@ -322,7 +332,7 @@ static void percpu_timer_setup(void);
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void __cpuinit secondary_start_kernel(void)
+asmlinkage void secondary_start_kernel(void)
 {
         struct mm_struct *mm = &init_mm;
         unsigned int cpu;
@@ -521,7 +531,7 @@ static void broadcast_timer_set_mode(enum clock_event_mode mode,
 {
 }

-static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
+static void broadcast_timer_setup(struct clock_event_device *evt)
 {
         evt->name       = "dummy_timer";
         evt->features   = CLOCK_EVT_FEAT_ONESHOT |
@@ -550,7 +560,7 @@ int local_timer_register(struct local_timer_ops *ops)
 }
 #endif

-static void __cpuinit percpu_timer_setup(void)
+static void percpu_timer_setup(void)
 {
         unsigned int cpu = smp_processor_id();
         struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
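Note: platform_can_cpu_hotplug() above keys off smp_ops.cpu_kill, so the machine_kexec_prepare() check added earlier passes only on platforms whose smp_operations actually implement hotplug. A rough sketch of what such a platform hook-up looks like — all names here are hypothetical, see arch/arm/include/asm/smp.h for the real structure:

    #include <linux/errno.h>
    #include <linux/smp.h>
    #include <asm/smp.h>

    static int my_boot_secondary(unsigned int cpu, struct task_struct *idle)
    {
            return -ENOSYS;                 /* placeholder: wake the CPU here */
    }

    static int my_cpu_kill(unsigned int cpu)
    {
            /* wait for the dying CPU, then gate its power/clocks */
            return 1;                       /* nonzero: CPU is dead */
    }

    struct smp_operations my_smp_ops __initdata = {
            .smp_boot_secondary     = my_boot_secondary,
    #ifdef CONFIG_HOTPLUG_CPU
            .cpu_kill               = my_cpu_kill,
    #endif
    };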
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index a98b62dca2fa..c2edfff573c2 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -70,23 +70,6 @@ static inline void ipi_flush_bp_all(void *ignored)
         local_flush_bp_all();
 }

-#ifdef CONFIG_ARM_ERRATA_798181
-static int erratum_a15_798181(void)
-{
-        unsigned int midr = read_cpuid_id();
-
-        /* Cortex-A15 r0p0..r3p2 affected */
-        if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
-                return 0;
-        return 1;
-}
-#else
-static int erratum_a15_798181(void)
-{
-        return 0;
-}
-#endif
-
 static void ipi_flush_tlb_a15_erratum(void *arg)
 {
         dmb();
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index f6fd1d4398c6..25956204ef23 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -187,7 +187,7 @@ core_initcall(twd_cpufreq_init);

 #endif

-static void __cpuinit twd_calibrate_rate(void)
+static void twd_calibrate_rate(void)
 {
         unsigned long count;
         u64 waitjiffies;
@@ -265,7 +265,7 @@ static void twd_get_clock(struct device_node *np)
 /*
  * Setup the local clock events for a CPU.
  */
-static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
+static int twd_timer_setup(struct clock_event_device *clk)
 {
         struct clock_event_device **this_cpu_clk;
         int cpu = smp_processor_id();
@@ -308,7 +308,7 @@ static int twd_timer_setup(struct clock_event_device *clk)
         return 0;
 }

-static struct local_timer_ops twd_lt_ops __cpuinitdata = {
+static struct local_timer_ops twd_lt_ops = {
         .setup  = twd_timer_setup,
         .stop   = twd_timer_stop,
 };
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cab094c234ee..ab517fcce21b 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -35,8 +35,6 @@
 #include <asm/tls.h>
 #include <asm/system_misc.h>

-#include "signal.h"
-
 static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };

 void *vectors_page;
@@ -800,15 +798,26 @@ void __init trap_init(void)
         return;
 }

-static void __init kuser_get_tls_init(unsigned long vectors)
+#ifdef CONFIG_KUSER_HELPERS
+static void __init kuser_init(void *vectors)
 {
+        extern char __kuser_helper_start[], __kuser_helper_end[];
+        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+
+        memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
         /*
          * vectors + 0xfe0 = __kuser_get_tls
          * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
          */
         if (tls_emu || has_tls_reg)
-                memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
+                memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
 }
+#else
+static void __init kuser_init(void *vectors)
+{
+}
+#endif

 void __init early_trap_init(void *vectors_base)
 {
@@ -816,33 +825,30 @@ void __init early_trap_init(void *vectors_base)
         unsigned long vectors = (unsigned long)vectors_base;
         extern char __stubs_start[], __stubs_end[];
         extern char __vectors_start[], __vectors_end[];
-        extern char __kuser_helper_start[], __kuser_helper_end[];
-        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+        unsigned i;

         vectors_page = vectors_base;

         /*
+         * Poison the vectors page with an undefined instruction.  This
+         * instruction is chosen to be undefined for both ARM and Thumb
+         * ISAs.  The Thumb version is an undefined instruction with a
+         * branch back to the undefined instruction.
+         */
+        for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+                ((u32 *)vectors_base)[i] = 0xe7fddef1;
+
+        /*
          * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
          * into the vector page, mapped at 0xffff0000, and ensure these
          * are visible to the instruction stream.
          */
         memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
-        memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
-        memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+        memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);

-        /*
-         * Do processor specific fixups for the kuser helpers
-         */
-        kuser_get_tls_init(vectors);
-
-        /*
-         * Copy signal return handlers into the vector page, and
-         * set sigreturn to be a pointer to these.
-         */
-        memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
-               sigreturn_codes, sizeof(sigreturn_codes));
+        kuser_init(vectors_base);

-        flush_icache_range(vectors, vectors + PAGE_SIZE);
+        flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
         modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
         /*
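Note: the poison value 0xe7fddef1 written across the vectors page above is undefined in both instruction sets. As a little-endian ARM word it falls in the architecturally undefined encoding space, and read as two Thumb halfwords it is 0xdef1 (an undefined instruction) followed by 0xe7fd (a branch back to that undefined instruction), exactly as the comment in the hunk describes. Splitting the word shows the two views:

    /* Demo only: the two halves of the poison word as Thumb sees them. */
    #include <stdio.h>

    int main(void)
    {
            unsigned poison = 0xe7fddef1;

            printf("ARM word:        0x%08x\n", poison);
            printf("Thumb halfwords: 0x%04x then 0x%04x\n",
                   poison & 0xffff, poison >> 16);
            return 0;
    }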
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index fa25e4e425f6..7bcee5c9b604 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -148,6 +148,23 @@ SECTIONS
         . = ALIGN(PAGE_SIZE);
         __init_begin = .;
 #endif
+        /*
+         * The vectors and stubs are relocatable code, and the
+         * only thing that matters is their relative offsets
+         */
+        __vectors_start = .;
+        .vectors 0 : AT(__vectors_start) {
+                *(.vectors)
+        }
+        . = __vectors_start + SIZEOF(.vectors);
+        __vectors_end = .;
+
+        __stubs_start = .;
+        .stubs 0x1000 : AT(__stubs_start) {
+                *(.stubs)
+        }
+        . = __stubs_start + SIZEOF(.stubs);
+        __stubs_end = .;

         INIT_TEXT_SECTION(8)

         .exit.text : {