From 33f663ff9186da1bcc88dc7830b3a632bd472da5 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 24 Mar 2010 16:46:52 +0100
Subject: ARM: 5993/1: ARM: Move the outer_cache definitions into a separate file (1/4)

To avoid #include collisions with subsequent patches in the series,
this patch moves the outer_cache definitions to a separate
asm/outercache.h file.

Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/include/asm/cacheflush.h | 38 +-----------------------
 arch/arm/include/asm/outercache.h | 61 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+), 37 deletions(-)
 create mode 100644 arch/arm/include/asm/outercache.h
(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 72da7e045c6b..0d08d4170b64 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -15,6 +15,7 @@
 #include <asm/glue.h>
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
+#include <asm/outercache.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
@@ -219,12 +220,6 @@ struct cpu_cache_fns {
 	void (*dma_flush_range)(const void *, const void *);
 };
 
-struct outer_cache_fns {
-	void (*inv_range)(unsigned long, unsigned long);
-	void (*clean_range)(unsigned long, unsigned long);
-	void (*flush_range)(unsigned long, unsigned long);
-};
-
 /*
  * Select the calling method
  */
@@ -281,37 +276,6 @@ extern void dmac_flush_range(const void *, const void *);
 
 #endif
 
-#ifdef CONFIG_OUTER_CACHE
-
-extern struct outer_cache_fns outer_cache;
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.inv_range)
-		outer_cache.inv_range(start, end);
-}
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.clean_range)
-		outer_cache.clean_range(start, end);
-}
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.flush_range)
-		outer_cache.flush_range(start, end);
-}
-
-#else
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{ }
-
-#endif
-
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space. Really, we want to allow our "user
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
new file mode 100644
index 000000000000..c8571cbb5574
--- /dev/null
+++ b/arch/arm/include/asm/outercache.h
@@ -0,0 +1,61 @@
+/*
+ * arch/arm/include/asm/outercache.h
+ *
+ * Copyright (C) 2010 ARM Ltd.
+ * Written by Catalin Marinas
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_OUTERCACHE_H
+#define __ASM_OUTERCACHE_H
+
+struct outer_cache_fns {
+	void (*inv_range)(unsigned long, unsigned long);
+	void (*clean_range)(unsigned long, unsigned long);
+	void (*flush_range)(unsigned long, unsigned long);
+};
+
+#ifdef CONFIG_OUTER_CACHE
+
+extern struct outer_cache_fns outer_cache;
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.inv_range)
+		outer_cache.inv_range(start, end);
+}
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.clean_range)
+		outer_cache.clean_range(start, end);
+}
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.flush_range)
+		outer_cache.flush_range(start, end);
+}
+
+#else
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{ }
+
+#endif
+
+#endif	/* __ASM_OUTERCACHE_H */
-- 
cgit v1.2.3

From 319f551a0a167b49b5bbb4a9ff4802046a572bc5 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 24 Mar 2010 16:47:53 +0100
Subject: ARM: 5994/1: ARM: Add outer_cache_fns.sync function pointer (2/4)

This patch introduces the outer_cache_fns.sync function pointer together
with the OUTER_CACHE_SYNC config option that can be used to drain the
write buffer of the outer cache.

Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/include/asm/outercache.h | 14 ++++++++++++++
 arch/arm/mm/Kconfig | 6 ++++++
 2 files changed, 20 insertions(+)
(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index c8571cbb5574..25f76bae57ab 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -25,6 +25,9 @@ struct outer_cache_fns {
 	void (*inv_range)(unsigned long, unsigned long);
 	void (*clean_range)(unsigned long, unsigned long);
 	void (*flush_range)(unsigned long, unsigned long);
+#ifdef CONFIG_OUTER_CACHE_SYNC
+	void (*sync)(void);
+#endif
 };
 
 #ifdef CONFIG_OUTER_CACHE
@@ -58,4 +61,15 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 
 #endif
 
+#ifdef CONFIG_OUTER_CACHE_SYNC
+static inline void outer_sync(void)
+{
+	if (outer_cache.sync)
+		outer_cache.sync();
+}
+#else
+static inline void outer_sync(void)
+{ }
+#endif
+
 #endif	/* __ASM_OUTERCACHE_H */
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c4ed9f93f646..88a24de55aaf 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -736,6 +736,12 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 config OUTER_CACHE
 	bool
 
+config OUTER_CACHE_SYNC
+	bool
+	help
+	  The outer cache has a outer_cache_fns.sync function pointer
+	  that can be used to drain the write buffer of the outer cache.
+
 config CACHE_FEROCEON_L2
 	bool "Enable the Feroceon L2 cache controller"
 	depends on ARCH_KIRKWOOD || ARCH_MV78XX0
-- 
cgit v1.2.3

From 23107c542068b2b94390aa333f6b330af64961e4 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 24 Mar 2010 16:48:53 +0100
Subject: ARM: 5995/1: ARM: Add L2x0 outer_sync() support (3/4)

The L2x0 cache controllers need to explicitly drain their write buffer
even for Normal Noncacheable memory accesses.
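
For context (an aside, not part of the patch): the cache_sync() call used by l2x0_cache_sync() in the diff below is an existing helper in cache-l2x0.c, not something added here. Roughly, it writes the controller's Cache Sync register and spins until the operation completes, which is what actually drains the write buffer. A simplified sketch of that idea, assuming the 0x730 offset that asm/hardware/cache-l2x0.h defines as L2X0_CACHE_SYNC (the function name is invented for illustration):

	#include <linux/io.h>
	#include <asm/processor.h>	/* cpu_relax() */

	/* Illustrative sketch only, not the kernel's implementation. */
	static inline void l2x0_sync_sketch(void __iomem *l2x0_base)
	{
		writel(0, l2x0_base + 0x730);		/* trigger a cache sync */
		while (readl(l2x0_base + 0x730) & 1)	/* wait for completion */
			cpu_relax();
	}
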
Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/mm/Kconfig | 1 +
 arch/arm/mm/cache-l2x0.c | 10 ++++++++++
 2 files changed, 11 insertions(+)
(limited to 'arch/arm')

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 88a24de55aaf..55a2a00db77f 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -763,6 +763,7 @@ config CACHE_L2X0
 		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \
 		   ARCH_NOMADIK || ARCH_OMAP4
 	default y
 	select OUTER_CACHE
+	select OUTER_CACHE_SYNC
 	help
 	  This option enables the L2x0 PrimeCell.
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 07334632d3e2..21ad68ba22ba 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -93,6 +93,15 @@ static inline void l2x0_flush_line(unsigned long addr)
 }
 #endif
 
+static void l2x0_cache_sync(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&l2x0_lock, flags);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 static inline void l2x0_inv_all(void)
 {
 	unsigned long flags;
@@ -225,6 +234,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.inv_range = l2x0_inv_range;
 	outer_cache.clean_range = l2x0_clean_range;
 	outer_cache.flush_range = l2x0_flush_range;
+	outer_cache.sync = l2x0_cache_sync;
 
 	printk(KERN_INFO "L2X0 cache controller enabled\n");
 }
-- 
cgit v1.2.3

From e7c5650f6067f65f8e961394f376d4862808d0d2 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 24 Mar 2010 16:49:54 +0100
Subject: ARM: 5996/1: ARM: Change the mandatory barriers implementation (4/4)

The mandatory barriers (mb, rmb, wmb) are used even on uniprocessor
systems for things like ordering Normal Non-cacheable memory accesses
with DMA transfer (via Device memory writes). The current implementation
uses dmb() for mb() and friends but this is not sufficient. The DMB only
ensures the relative ordering of the observability of accesses by other
processors or devices acting as masters. In case of DMA transfers
started by writes to device memory, the relative ordering is not ensured
because accesses to slave ports of a device are not considered
observable by the DMB definition.

A DSB is required for the data to reach the main memory (even if mapped
as Normal Non-cacheable) before the device receives the notification to
begin the transfer. Furthermore, some L2 cache controllers (like L2x0 or
PL310) buffer stores to Normal Non-cacheable memory and this would need
to be drained with the outer_sync() function call.

The patch also allows platforms to define their own mandatory barriers
implementation by selecting CONFIG_ARCH_HAS_BARRIERS and providing a
mach/barriers.h file.

Note that the SMP barriers are unchanged (being DMBs as before) since
they are only guaranteed to work with Normal Cacheable memory.
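
To make the ordering problem concrete (an illustration, not part of the patch): consider a driver that writes a DMA descriptor into Normal Non-cacheable memory and then starts the transfer by writing a device register. With this patch, wmb() expands to dsb() followed by outer_sync(), so the descriptor has reached main memory, and has been drained from the L2x0 write buffer, before the doorbell write is issued. The device, structure, register and bit names below are invented:

	#include <linux/io.h>
	#include <linux/types.h>
	#include <asm/system.h>			/* wmb() */

	struct my_desc {			/* hypothetical DMA descriptor */
		u32 src;
		u32 len;
	};

	struct my_dma_dev {			/* hypothetical device */
		struct my_desc *desc;		/* in Normal Non-cacheable memory */
		void __iomem *regs;
	};

	#define MY_CTRL		0x00		/* invented register offset */
	#define MY_DMA_START	0x1		/* invented start bit */

	static void my_start_dma(struct my_dma_dev *dev, u32 src, u32 len)
	{
		dev->desc->src = src;
		dev->desc->len = len;

		wmb();	/* dsb() + outer_sync(): descriptor visible to the device */

		writel(MY_DMA_START, dev->regs + MY_CTRL);	/* start the transfer */
	}
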
Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/include/asm/system.h | 16 ++++++++++------
 arch/arm/mm/Kconfig | 6 ++++++
 2 files changed, 16 insertions(+), 6 deletions(-)
(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index ca88e6a84707..4ace45ec3ef8 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -60,6 +60,8 @@
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
 
+#include <asm/outercache.h>
+
 #define __exception	__attribute__((section(".exception.text")))
 
 struct thread_info;
@@ -137,10 +139,12 @@ extern unsigned int user_debug;
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 #endif
 
-#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
-#define mb()		dmb()
+#ifdef CONFIG_ARCH_HAS_BARRIERS
+#include <mach/barriers.h>
+#elif __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#define mb()		do { dsb(); outer_sync(); } while (0)
 #define rmb()		dmb()
-#define wmb()		dmb()
+#define wmb()		mb()
 #else
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
@@ -152,9 +156,9 @@ extern unsigned int user_debug;
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #else
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
+#define smp_mb()	dmb()
+#define smp_rmb()	dmb()
+#define smp_wmb()	dmb()
 #endif
 
 #define read_barrier_depends()		do { } while(0)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 55a2a00db77f..5bd7c89a6045 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -788,3 +788,9 @@ config ARM_L1_CACHE_SHIFT
 	int
 	default 6 if ARM_L1_CACHE_SHIFT_6
 	default 5
+
+config ARCH_HAS_BARRIERS
+	bool
+	help
+	  This option allows the use of custom mandatory barriers
+	  included via the mach/barriers.h file.
-- 
cgit v1.2.3

From 325ffc3633f1c30ef89d98d619f7e1497366e77c Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 26 Mar 2010 15:44:57 +0100
Subject: ARM: 5997/1: ARM: Correct the VFPv3 detection

A CPU has VFPv3 hardware if the FPSID[19:16] bits are 2 or more. Linux
was previously only checking for 3 or more.

Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/vfp/vfpmodule.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/arm')

diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 7f3f59fcaa21..a420cb949328 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -545,7 +545,7 @@ static int __init vfp_init(void)
 	 */
 	elf_hwcap |= HWCAP_VFP;
 #ifdef CONFIG_VFPv3
-	if (VFP_arch >= 3) {
+	if (VFP_arch >= 2) {
 		elf_hwcap |= HWCAP_VFPv3;
 
 		/*
-- 
cgit v1.2.3

From aedceb2a490bae56f9d7e80be480421e1cf22ce0 Mon Sep 17 00:00:00 2001
From: viresh kumar
Date: Mon, 29 Mar 2010 05:29:57 +0100
Subject: ARM: 6000/1: removing compilation warning coming from irq.h

irq.h uses struct pt_regs * in a prototype, which causes a compilation
warning. Remove the warning by adding a declaration of struct pt_regs.
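
For context (an illustration, not the kernel header itself): GCC warns when a prototype mentions a pointer to a struct type that has not been declared at file scope, and an incomplete forward declaration is enough to silence it:

	/* Without the forward declaration below, GCC warns:
	 *   warning: 'struct pt_regs' declared inside parameter list
	 * because the struct tag would be scoped to that one prototype.
	 */
	struct pt_regs;				/* incomplete type: fine for pointers */

	extern void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
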
Signed-off-by: Viresh Kumar
Signed-off-by: Russell King
---
 arch/arm/include/asm/irq.h | 1 +
 1 file changed, 1 insertion(+)
(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 328f14a8b790..237282f7c762 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -17,6 +17,7 @@
 
 #ifndef __ASSEMBLY__
 struct irqaction;
+struct pt_regs;
 extern void migrate_irqs(void);
 
 extern void asm_do_IRQ(unsigned int, struct pt_regs *);
-- 
cgit v1.2.3

From 55a07517edbf15b83f323644edf346726eccd7e0 Mon Sep 17 00:00:00 2001
From: viresh kumar
Date: Mon, 29 Mar 2010 05:58:51 +0100
Subject: ARM: 6001/1: removing compilation warning coming from clkdev.h

clkdev.h uses struct device *, which causes a compilation warning.
Remove the warning by adding a declaration of struct device.

Signed-off-by: Viresh Kumar
Signed-off-by: Russell King
---
 arch/arm/include/asm/clkdev.h | 1 +
 1 file changed, 1 insertion(+)
(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index 7a0690da5e63..b56c1389b6fa 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -13,6 +13,7 @@
 #define __ASM_CLKDEV_H
 
 struct clk;
+struct device;
 
 struct clk_lookup {
 	struct list_head	node;
-- 
cgit v1.2.3

From 782a0fd16723bfc0e765d789e82853d5dc424e76 Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Mon, 29 Mar 2010 06:59:16 +0100
Subject: ARM: 6005/1: arm: kprobes: fix register corruption with jprobes

The current implementation of jprobes allocates an empty pt_regs on the
stack, which is then passed to kprobe_handler() and eventually to
singlestep(). When the instruction being simulated is STMFD (as in
normal function prologues without CONFIG_FRAME_POINTER), stores using SP
actually write over the top of the fabricated pt_regs structure.

This can be reproduced for example by using the LKDTM module:

	# modprobe lkdtm
	# mount -t debugfs none /sys/kernel/debug
	# echo PANIC > /sys/kernel/debug/provoke-crash/INT_HW_IRQ_EN

After this, it fails with corrupted registers (before the requested
crash would occur):

lkdtm: Crash point INT_HW_IRQ_EN of type PANIC hit, trigger in 9 rounds
lkdtm: Crash point INT_HW_IRQ_EN of type PANIC hit, trigger in 8 rounds
Internal error: Oops - undefined instruction: 0 [#1]
last sysfs file: /sys/devices/platform/serial8250.0/sleep_timeout
Modules linked in: lkdtm
CPU: 0 Not tainted (2.6.34-rc2 #69)
PC is at irq_desc+0x1638/0xeeb0
LR is at 0x25
pc : [] lr : [<00000025>] psr: c80a0013
sp : ce94bd60 ip : c050b3e8 fp : a0000013
r10: c0aa453c r9 : cf5d4000 r8 : ce9a1822
r7 : c050b424 r6 : 00000025 r5 : c039d8f8 r4 : c050b3e8
r3 : 00000001 r2 : cf4d0440 r1 : c039d8f8 r0 : 00000020
Flags: NZcv IRQs on FIQs on Mode SVC_32 ISA ARM Segment user
Control: 10c5387d Table: 8e804019 DAC: 00000015
Process sh (pid: 496, stack limit = 0xce94a2e8)
Stack: (0xce94bd60 to 0xce94c000)
[...]
Code: 000002cd 00000000 00000000 00000001 (dead4ead)
---[ end trace 2b46d5f2b682f370 ]---
Kernel panic - not syncing: Fatal exception in interrupt

This patch allocates enough space (2 * sizeof(struct pt_regs)) from the
stack to prevent such corruption.
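
A back-of-the-envelope check makes the chosen slack plausible (the numbers below are illustrative for classic 32-bit ARM and are not kernel code; the authoritative layout is the kernel's struct pt_regs):

	#include <stdio.h>

	/* Illustrative arithmetic only: pt_regs is 18 words (r0-r15, cpsr,
	 * ORIG_r0) and a simulated STMFD with all 16 registers stores 16
	 * words, so 2 * sizeof(struct pt_regs) of slack comfortably covers
	 * the worst-case store.
	 */
	int main(void)
	{
		const unsigned int word = 4;
		const unsigned int pt_regs_bytes = 18 * word;		/* 72 */
		const unsigned int stmfd_worst_bytes = 16 * word;	/* 64 */

		printf("pt_regs: %u bytes, worst-case STMFD: %u bytes, slack: %u bytes\n",
		       pt_regs_bytes, stmfd_worst_bytes, 2 * pt_regs_bytes);
		return 0;
	}
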
Signed-off-by: Mika Westerberg
Acked-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/kernel/kprobes.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
(limited to 'arch/arm')

diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 60c62c377fa9..610e0f561c32 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -393,6 +393,14 @@ void __kprobes jprobe_return(void)
 		/*
 		 * Setup an empty pt_regs. Fill SP and PC fields as
 		 * they're needed by longjmp_break_handler.
+		 *
+		 * We allocate some slack between the original SP and start of
+		 * our fabricated regs. To be precise we want to have worst case
+		 * covered which is STMFD with all 16 regs so we allocate 2 *
+		 * sizeof(struct_pt_regs)).
+		 *
+		 * This is to prevent any simulated instruction from writing
+		 * over the regs when they are accessing the stack.
 		 */
 		"sub	sp, %0, %1		\n\t"
 		"ldr	r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
@@ -410,7 +418,7 @@ void __kprobes jprobe_return(void)
 		"ldmia	sp, {r0 - pc}		\n\t"
 		:
 		: "r" (kcb->jprobe_saved_regs.ARM_sp),
-		  "I" (sizeof(struct pt_regs)),
+		  "I" (sizeof(struct pt_regs) * 2),
 		  "J" (offsetof(struct pt_regs, ARM_sp)),
 		  "J" (offsetof(struct pt_regs, ARM_pc)),
 		  "J" (offsetof(struct pt_regs, ARM_cpsr))
-- 
cgit v1.2.3

From fd522a8dec11a08b5fdd23982193808e268be19e Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Mon, 29 Mar 2010 10:29:46 +0100
Subject: ARM: 6006/1: ARM: Use the correct NOP size in memmove for Thumb-2 kernel builds

When compiling the kernel to Thumb-2, using a 16-bit NOP in the
memmove() implementation causes the preceding ADD PC instruction to
branch incorrectly in the middle of a 32-bit LDR or STR instruction. The
memmove() code is now similar to the memcpy() template.

Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/lib/memmove.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'arch/arm')

diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 5025c863713d..938fc14f962d 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -74,7 +74,7 @@ ENTRY(memmove)
 		rsb	ip, ip, #32
 		addne	pc, pc, ip		@ C is always clear here
 		b	7f
-6:		nop
+6:		W(nop)
 		W(ldr)	r3, [r1, #-4]!
 		W(ldr)	r4, [r1, #-4]!
 		W(ldr)	r5, [r1, #-4]!
@@ -85,7 +85,7 @@ ENTRY(memmove)
 
 		add	pc, pc, ip
 		nop
-		nop
+		W(nop)
 		W(str)	r3, [r0, #-4]!
 		W(str)	r4, [r0, #-4]!
 		W(str)	r5, [r0, #-4]!
-- 
cgit v1.2.3