author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-05 13:20:43 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-05 13:20:43 -0700
commit		2d1eb87ae1e6f3999e77989fd2f831b134270787
tree		adf505adb17c96929c797920613afa3dc5731650 /arch/arm/include/asm
parent		2f997759dffe5458446075a58734df39d8035e6e
parent		bce5669be3a8946952258a064ef26defeb887138
Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM changes from Russell King:
- Perf updates from Will Deacon:
- Support for Qualcomm Krait processors (run perf on your phone!)
- Support for Cortex-A12 (run perf stat on your FPGA!)
- Support for perf_sample_event_took, allowing us to automatically
  decrease the sample rate if we can't handle the PMU interrupts
  quickly enough (run perf record on your FPGA!). A sketch of the
  reporting pattern follows this list.
- Basic uprobes support from David Long:
  This patch series adds basic uprobes support to ARM. It is based on
  patches developed earlier by Rabin Vincent, whose approach of adding
  hooks into the kprobes instruction parsing code was not well received.
  This series instead separates the ARM instruction parsing code in
  kprobes out into a separate set of functions which can be used by both
  kprobes and uprobes; each then provides its own table of semantic
  actions to process the results of the parsing (see the sketch after
  this list).
- ARMv7M (microcontroller) updates from Uwe Kleine-König
- OMAP DMA updates (which recently gained Vinod's Ack, even though
  they have been sitting in linux-next for a few months) to reduce
  omap-dma's reliance on the code in arch/arm.
- SA11x0 changes from Dmitry Eremin-Solenikov and Alexander Shiyan
- Support for Cortex-A12 CPU
- Align support for ARMv6 with ARMv7 so they can cooperate better in a
single zImage.
- Addition of the first AT_HWCAP2 feature bits, for ARMv8 crypto
  support (a userspace sketch follows this list).
- Removal of IRQF_DISABLED from various ARM files
- Improved efficiency of virt_to_page() for single zImage
- A patch from Ulf Hansson making runtime PM callbacks available to
  AMBA devices for suspend/resume as well.
- Finally kill asm/system.h on ARM.
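
For the perf_sample_event_took item above: the throttling works because
the PMU interrupt handler times itself and reports the cost to the perf
core, which then lowers perf_event_max_sample_rate if handlers run too
long. A minimal sketch of that pattern; handle_pmu_counters() is a
hypothetical stand-in for a driver's real overflow handler, not the
actual ARM dispatch function:

	#include <linux/irqreturn.h>
	#include <linux/perf_event.h>
	#include <linux/sched.h>	/* sched_clock() on 3.15-era kernels */

	static irqreturn_t example_pmu_irq(int irq, void *dev)
	{
		u64 start_clock, finish_clock;
		irqreturn_t ret;

		start_clock = sched_clock();
		ret = handle_pmu_counters(dev);	/* hypothetical helper */
		finish_clock = sched_clock();

		/* Report how long servicing took; the perf core uses this
		 * to throttle the sample rate when handlers are too slow. */
		perf_sample_event_took(finish_clock - start_clock);
		return ret;
	}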
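For the uprobes item: the shared decoder fills the new struct
arch_probes_insn (see the new asm/probes.h in the diff below) with
handler pointers taken from the caller's action table. A rough
consumer-side sketch; decode_and_prepare() is a hypothetical name
standing in for the shared decode step, only the types come from this
series:

	#include <linux/errno.h>
	#include <asm/probes.h>
	#include <asm/ptrace.h>

	static int example_single_step(probes_opcode_t opcode,
				       struct arch_probes_insn *asi,
				       struct pt_regs *regs)
	{
		/* Hypothetical: decoder fills asi->insn_check_cc,
		 * asi->insn_singlestep, etc. from the action table. */
		if (decode_and_prepare(opcode, asi) < 0)
			return -EINVAL;

		/* Run the instruction only if its condition code passes. */
		if (asi->insn_check_cc(regs->ARM_cpsr))
			asi->insn_singlestep(opcode, asi, regs);
		return 0;
	}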
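For the AT_HWCAP2 item: userspace reads the new word the same way as
AT_HWCAP. A sketch, assuming the HWCAP2_AES bit value from the uapi
definitions that accompany this series (check your tree's asm/hwcap.h):

	#include <stdio.h>
	#include <sys/auxv.h>	/* getauxval(), glibc >= 2.16 */

	#ifndef HWCAP2_AES
	#define HWCAP2_AES	(1 << 0)	/* assumed uapi bit */
	#endif

	int main(void)
	{
		unsigned long hwcap2 = getauxval(AT_HWCAP2);

		/* Zero means either a kernel without ELF_HWCAP2 or a
		 * CPU with none of the new features. */
		printf("AT_HWCAP2 = %#lx, AES %s\n", hwcap2,
		       (hwcap2 & HWCAP2_AES) ? "yes" : "no");
		return 0;
	}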
* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (89 commits)
dmaengine: omap-dma: more consolidation of CCR register setup
dmaengine: omap-dma: move IRQ handling to omap-dma
dmaengine: omap-dma: move register read/writes into omap-dma.c
ARM: omap: dma: get rid of 'p' allocation and clean up
ARM: omap: move dma channel allocation into plat-omap code
ARM: omap: dma: get rid of errata global
ARM: omap: clean up DMA register accesses
ARM: omap: remove almost-const variables
ARM: omap: remove references to disable_irq_lch
dmaengine: omap-dma: cleanup errata 3.3 handling
dmaengine: omap-dma: provide register read/write functions
dmaengine: omap-dma: use cached CCR value when enabling DMA
dmaengine: omap-dma: move barrier to omap_dma_start_desc()
dmaengine: omap-dma: move clnk_ctrl setting to preparation functions
dmaengine: omap-dma: improve efficiency loading C.SA/C.EI/C.FI registers
dmaengine: omap-dma: consolidate clearing channel status register
dmaengine: omap-dma: move CCR buffering disable errata out of the fast path
dmaengine: omap-dma: provide register definitions
dmaengine: omap-dma: consolidate setup of CCR
dmaengine: omap-dma: consolidate setup of CSDP
...
Diffstat (limited to 'arch/arm/include/asm')
 arch/arm/include/asm/assembler.h      |  8
 arch/arm/include/asm/atomic.h         | 44
 arch/arm/include/asm/cmpxchg.h        |  6
 arch/arm/include/asm/cputype.h        |  1
 arch/arm/include/asm/floppy.h         |  2
 arch/arm/include/asm/futex.h          |  9
 arch/arm/include/asm/hw_breakpoint.h  |  1
 arch/arm/include/asm/hwcap.h          |  3
 arch/arm/include/asm/jump_label.h     |  1
 arch/arm/include/asm/kprobes.h        | 17
 arch/arm/include/asm/memory.h         | 41
 arch/arm/include/asm/pgtable-2level.h |  1
 arch/arm/include/asm/pgtable.h        |  7
 arch/arm/include/asm/pmu.h            |  2
 arch/arm/include/asm/probes.h         | 43
 arch/arm/include/asm/ptrace.h         | 14
 arch/arm/include/asm/sync_bitops.h    |  1
 arch/arm/include/asm/system.h         |  7
 arch/arm/include/asm/thread_info.h    |  5
 arch/arm/include/asm/uaccess.h        |  2
 arch/arm/include/asm/unistd.h         |  1
 arch/arm/include/asm/uprobes.h        | 45
 22 files changed, 198 insertions(+), 63 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5c2285160575..380ac4f20000 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -30,8 +30,8 @@
  * Endian independent macros for shifting bytes within registers.
  */
 #ifndef __ARMEB__
-#define pull		lsr
-#define push		lsl
+#define lspull		lsr
+#define lspush		lsl
 #define get_byte_0	lsl #0
 #define get_byte_1	lsr #8
 #define get_byte_2	lsr #16
@@ -41,8 +41,8 @@
 #define put_byte_2	lsl #16
 #define put_byte_3	lsl #24
 #else
-#define pull		lsl
-#define push		lsr
+#define lspull		lsl
+#define lspush		lsr
 #define get_byte_0	lsr #24
 #define get_byte_1	lsr #16
 #define get_byte_2	lsr #8
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 62d2cb53b069..9a92fd7864a8 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -60,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_add_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -99,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -121,6 +123,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -138,6 +141,33 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }
 
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int oldval, newval;
+	unsigned long tmp;
+
+	smp_mb();
+	prefetchw(&v->counter);
+
+	__asm__ __volatile__ ("@ atomic_add_unless\n"
+"1:	ldrex	%0, [%4]\n"
+"	teq	%0, %5\n"
+"	beq	2f\n"
+"	add	%1, %0, %6\n"
+"	strex	%2, %1, [%4]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (oldval != u)
+		smp_mb();
+
+	return oldval;
+}
+
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -186,10 +216,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-#endif /* __LINUX_ARM_ARCH__ */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
@@ -200,6 +226,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 #define atomic_inc(v)		atomic_add(1, v)
 #define atomic_dec(v)		atomic_sub(1, v)
 
@@ -299,6 +329,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -340,6 +371,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -364,6 +396,7 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -388,6 +421,7 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -409,6 +443,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -436,6 +471,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	int ret = 1;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_unless\n"
 "1:	ldrexd	%0, %H0, [%4]\n"
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index df2fbba7efc8..abb2c3769b01 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -2,6 +2,7 @@
 #define __ASM_ARM_CMPXCHG_H
 
 #include <linux/irqflags.h>
+#include <linux/prefetch.h>
 #include <asm/barrier.h>
 
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
@@ -35,6 +36,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #endif
 
 	smp_mb();
+	prefetchw((const void *)ptr);
 
 	switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
@@ -138,6 +140,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 {
 	unsigned long oldval, res;
 
+	prefetchw((const void *)ptr);
+
 	switch (size) {
 #ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
 	case 1:
@@ -230,6 +234,8 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
 	unsigned long long oldval;
 	unsigned long res;
 
+	prefetchw(ptr);
+
 	__asm__ __volatile__(
 "1:	ldrexd		%1, %H1, [%3]\n"
 "	teq		%1, %4\n"
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index acdde76b39bb..42f0889f0584 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -71,6 +71,7 @@
 #define ARM_CPU_PART_CORTEX_A5	0xC050
 #define ARM_CPU_PART_CORTEX_A15	0xC0F0
 #define ARM_CPU_PART_CORTEX_A7	0xC070
+#define ARM_CPU_PART_CORTEX_A12	0xC0D0
 
 #define ARM_CPU_XSCALE_ARCH_MASK 0xe000
 #define ARM_CPU_XSCALE_ARCH_V1	0x2000
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
index c9f03eccc9d8..f4882553fbb0 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -25,7 +25,7 @@
 #define fd_inb(port)		inb((port))
 #define fd_request_irq()	request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
-					    IRQF_DISABLED,"floppy",NULL)
+					    0,"floppy",NULL)
 #define fd_free_irq()		free_irq(IRQ_FLOPPYDISK,NULL)
 #define fd_disable_irq()	disable_irq(IRQ_FLOPPYDISK)
 #define fd_enable_irq()		enable_irq(IRQ_FLOPPYDISK)
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf597f6e6..53e69dae796f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -3,11 +3,6 @@
 
 #ifdef __KERNEL__
 
-#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
-/* ARM doesn't provide unprivileged exclusive memory accessors */
-#include <asm-generic/futex.h>
-#else
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <asm/errno.h>
@@ -28,6 +23,7 @@
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
 	smp_mb();							\
+	prefetchw(uaddr);						\
 	__asm__ __volatile__(						\
 	"1:	ldrex	%1, [%3]\n"					\
 	"	" insn "\n"						\
@@ -51,6 +47,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	smp_mb();
+	/* Prefetching cannot fault */
+	prefetchw(uaddr);
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -164,6 +162,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	return ret;
 }
 
-#endif /* !(CPU_USE_DOMAINS && SMP) */
 #endif /* __KERNEL__ */
 #endif /* _ASM_ARM_FUTEX_H */
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index eef55ea9ef00..8e427c7b4425 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -51,6 +51,7 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_DEBUG_ARCH_V7_ECP14	3
 #define ARM_DEBUG_ARCH_V7_MM	4
 #define ARM_DEBUG_ARCH_V7_1	5
+#define ARM_DEBUG_ARCH_V8	6
 
 /* Breakpoint */
 #define ARM_BREAKPOINT_EXECUTE	0
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index 6ff56eca3f1f..6e183fd269fb 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -9,6 +9,7 @@
  * instruction set this cpu supports.
  */
 #define ELF_HWCAP	(elf_hwcap)
-extern unsigned int elf_hwcap;
+#define ELF_HWCAP2	(elf_hwcap2)
+extern unsigned int elf_hwcap, elf_hwcap2;
 #endif
 #endif
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 863c892b4aaa..70f9b9bfb1f9 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -4,7 +4,6 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
-#include <asm/system.h>
 
 #define JUMP_LABEL_NOP_SIZE 4
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index f82ec22eeb11..49fa0dfaad33 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -18,7 +18,7 @@
 
 #include <linux/types.h>
 #include <linux/ptrace.h>
-#include <linux/percpu.h>
+#include <linux/notifier.h>
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
 #define MAX_INSN_SIZE			2
@@ -28,21 +28,10 @@
 #define kretprobe_blacklist_size	0
 
 typedef u32 kprobe_opcode_t;
-
 struct kprobe;
-typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *);
-typedef unsigned long (kprobe_check_cc)(unsigned long);
-typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *);
-typedef void (kprobe_insn_fn_t)(void);
+#include <asm/probes.h>
 
-/* Architecture specific copy of original instruction. */
-struct arch_specific_insn {
-	kprobe_opcode_t		*insn;
-	kprobe_insn_handler_t	*insn_handler;
-	kprobe_check_cc		*insn_check_cc;
-	kprobe_insn_singlestep_t *insn_singlestep;
-	kprobe_insn_fn_t	*insn_fn;
-};
+#define	arch_specific_insn	arch_probes_insn
 
 struct prev_kprobe {
 	struct kprobe *kp;
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 4afb376d9c7c..02fa2558f662 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -166,9 +166,17 @@
  * Physical vs virtual RAM address space conversion. These are
  * private definitions which should NOT be used outside memory.h
  * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ *
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
  */
-#ifndef __virt_to_phys
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#if defined(__virt_to_phys)
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+
+#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
 
 /*
  * Constants used to force the right instruction encodings and shifts
@@ -177,12 +185,17 @@
 #define __PV_BITS_31_24	0x81000000
 #define __PV_BITS_7_0	0x81
 
-extern u64 __pv_phys_offset;
+extern unsigned long __pv_phys_pfn_offset;
 extern u64 __pv_offset;
 extern void fixup_pv_table(const void *, unsigned long);
 extern const void *__pv_table_begin, *__pv_table_end;
 
-#define PHYS_OFFSET __pv_phys_offset
+#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
+#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)
+
+#define virt_to_pfn(kaddr) \
+	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+	 PHYS_PFN_OFFSET)
 
 #define __pv_stub(from,to,instr,type)			\
 	__asm__("@ __pv_stub\n"				\
@@ -243,6 +256,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 #else
 
 #define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
 
 static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
@@ -254,18 +268,11 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 	return x - PHYS_OFFSET + PAGE_OFFSET;
 }
 
-#endif
-#endif
+#define virt_to_pfn(kaddr) \
+	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+	 PHYS_PFN_OFFSET)
 
-/*
- * PFNs are used to describe any physical page; this means
- * PFN 0 == physical address 0.
- *
- * This is the PFN of the first RAM page in the kernel
- * direct-mapped view.  We assume this is the first page
- * of RAM in the mem_map as well.
- */
-#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+#endif
 
 /*
  * These are *only* valid on the kernel direct mapped RAM memory.
@@ -343,9 +350,9 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
  */
 #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
 
-#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
 #define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
-					&& pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
+					&& pfn_valid(virt_to_pfn(kaddr)))
 
 #endif
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index dfff709fda3c..219ac88a9542 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -140,6 +140,7 @@
 #define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */
 #define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */
 #define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
+#define L_PTE_MT_VECTORS	(_AT(pteval_t, 0x0f) << 2)	/* 1111 */
 #define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)
 
 #ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 7d59b524f2af..5478e5d6ad89 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -216,13 +216,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 
 #define pte_none(pte)		(!pte_val(pte))
 #define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
+#define pte_valid(pte)		(pte_val(pte) & L_PTE_VALID)
+#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? \
+				 pte_present(pte) : pte_valid(pte))
 #define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
 #define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
 #define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte)	(0)
 
-#define pte_present_user(pte)	(pte_present(pte) && (pte_val(pte) & L_PTE_USER))
+#define pte_valid_user(pte)	\
+	(pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -237,7 +240,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long ext = 0;
 
-	if (addr < TASK_SIZE && pte_present_user(pteval)) {
+	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
 		__sync_icache_dcache(pteval);
 		ext |= PTE_EXT_NG;
 	}
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index f24edad26c70..ae1919be8f98 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -71,6 +71,8 @@ struct arm_pmu {
 	void		(*disable)(struct perf_event *event);
 	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
 					 struct perf_event *event);
+	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
+					 struct perf_event *event);
 	int		(*set_event_filter)(struct hw_perf_event *evt,
 					    struct perf_event_attr *attr);
 	u32		(*read_counter)(struct perf_event *event);
diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h
new file mode 100644
index 000000000000..806cfe622a9e
--- /dev/null
+++ b/arch/arm/include/asm/probes.h
@@ -0,0 +1,43 @@
+/*
+ * arch/arm/include/asm/probes.h
+ *
+ * Original contents copied from arch/arm/include/asm/kprobes.h
+ * which contains the following notice...
+ *
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ASM_PROBES_H
+#define _ASM_PROBES_H
+
+typedef u32 probes_opcode_t;
+
+struct arch_probes_insn;
+typedef void (probes_insn_handler_t)(probes_opcode_t,
+				     struct arch_probes_insn *,
+				     struct pt_regs *);
+typedef unsigned long (probes_check_cc)(unsigned long);
+typedef void (probes_insn_singlestep_t)(probes_opcode_t,
+					struct arch_probes_insn *,
+					struct pt_regs *);
+typedef void (probes_insn_fn_t)(void);
+
+/* Architecture specific copy of original instruction. */
+struct arch_probes_insn {
+	probes_opcode_t			*insn;
+	probes_insn_handler_t		*insn_handler;
+	probes_check_cc			*insn_check_cc;
+	probes_insn_singlestep_t	*insn_singlestep;
+	probes_insn_fn_t		*insn_fn;
+};
+
+#endif
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 04c99f36ff7f..c877654fe3bf 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -27,9 +27,13 @@ struct pt_regs {
 #define thumb_mode(regs) (0)
 #endif
 
+#ifndef CONFIG_CPU_V7M
 #define isa_mode(regs) \
-	((((regs)->ARM_cpsr & PSR_J_BIT) >> 23) | \
-	 (((regs)->ARM_cpsr & PSR_T_BIT) >> 5))
+	((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \
+	 (((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT))))
+#else
+#define isa_mode(regs) 1 /* Thumb */
+#endif
 
 #define processor_mode(regs) \
 	((regs)->ARM_cpsr & MODE_MASK)
@@ -80,6 +84,12 @@ static inline long regs_return_value(struct pt_regs *regs)
 
 #define instruction_pointer(regs)	(regs)->ARM_pc
 
+static inline void instruction_pointer_set(struct pt_regs *regs,
+					   unsigned long val)
+{
+	instruction_pointer(regs) = val;
+}
+
 #ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
 #else
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 63479eecbf76..9732b8e11e63 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -2,7 +2,6 @@
 #define __ASM_SYNC_BITOPS_H__
 
 #include <asm/bitops.h>
-#include <asm/system.h>
 
 /* sync_bitops functions are equivalent to the SMP implementation of the
  * original functions, independently from CONFIG_SMP being defined.
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
deleted file mode 100644
index 368165e33c1c..000000000000
--- a/arch/arm/include/asm/system.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
-#include <asm/barrier.h>
-#include <asm/compiler.h>
-#include <asm/cmpxchg.h>
-#include <asm/switch_to.h>
-#include <asm/system_info.h>
-#include <asm/system_misc.h>
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 71a06b293489..f989d7c22dc5 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -153,6 +153,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_SIGPENDING		0
 #define TIF_NEED_RESCHED	1
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
+#define TIF_UPROBE		7
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
 #define TIF_SYSCALL_TRACEPOINT	10
@@ -165,6 +166,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
@@ -178,7 +180,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 /*
  * Change these and you break ASM code in entry-common.S
  */
-#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME)
+#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 72abdc541f38..12c3a5decc60 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,7 +19,7 @@
 #include <asm/unified.h>
 #include <asm/compiler.h>
 
-#if __LINUX_ARM_ARCH__ < 6
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 #include <asm-generic/uaccess-unaligned.h>
 #else
 #define __get_user_unaligned __get_user
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index acabef1a75df..43876245fc57 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -48,6 +48,5 @@
  */
 #define __IGNORE_fadvise64_64
 #define __IGNORE_migrate_pages
-#define __IGNORE_kcmp
 
 #endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/include/asm/uprobes.h b/arch/arm/include/asm/uprobes.h
new file mode 100644
index 000000000000..9472c20b7d49
--- /dev/null
+++ b/arch/arm/include/asm/uprobes.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 Rabin Vincent <rabin at rab.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+
+#include <asm/probes.h>
+#include <asm/opcodes.h>
+
+typedef u32 uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES		4
+#define UPROBE_XOL_SLOT_BYTES	64
+
+#define UPROBE_SWBP_ARM_INSN	0xe7f001f9
+#define UPROBE_SS_ARM_INSN	0xe7f001fa
+#define UPROBE_SWBP_INSN	__opcode_to_mem_arm(UPROBE_SWBP_ARM_INSN)
+#define UPROBE_SWBP_INSN_SIZE	4
+
+struct arch_uprobe_task {
+	u32 backup;
+	unsigned long	saved_trap_no;
+};
+
+struct arch_uprobe {
+	u8 insn[MAX_UINSN_BYTES];
+	unsigned long ixol[2];
+	uprobe_opcode_t bpinsn;
+	bool simulate;
+	u32 pcreg;
+	void (*prehandler)(struct arch_uprobe *auprobe,
+			   struct arch_uprobe_task *autask,
+			   struct pt_regs *regs);
+	void (*posthandler)(struct arch_uprobe *auprobe,
+			    struct arch_uprobe_task *autask,
+			    struct pt_regs *regs);
+	struct arch_probes_insn asi;
+};
+
+#endif
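
A recurring change in the diff above is the prefetchw() dropped in
front of the ldrex/strex loops in atomic.h, cmpxchg.h and futex.h: on
ARMv7 MP cores prefetchw() emits PLDW, which pulls the cache line in
ready for writing before the exclusive load, so the strex is less
likely to lose the line to another CPU and have to retry. A
stripped-down sketch of the pattern (mirroring the kernel's atomic
ops, not a drop-in replacement):

	#include <linux/prefetch.h>
	#include <linux/types.h>

	static inline void example_atomic_inc(atomic_t *v)
	{
		unsigned long tmp;
		int result;

		prefetchw(&v->counter);	/* warm the line for writing */

		__asm__ __volatile__("@ example_atomic_inc\n"
	"1:	ldrex	%0, [%3]\n"	/* load-exclusive */
	"	add	%0, %0, #1\n"
	"	strex	%1, %0, [%3]\n"	/* store-exclusive, %1 = 0 on success */
	"	teq	%1, #0\n"
	"	bne	1b"		/* exclusivity lost: retry */
		: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
		: "r" (&v->counter)
		: "cc");
	}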