Diffstat (limited to 'arch/arm64/include/asm')
30 files changed, 542 insertions, 225 deletions
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 0b3fcf86e6ba..dc770bd4f5a5 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -9,8 +9,9 @@ generic-y += current.h
 generic-y += delay.h
 generic-y += div64.h
 generic-y += dma.h
-generic-y += emergency-restart.h
+generic-y += dma-contiguous.h
 generic-y += early_ioremap.h
+generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += ftrace.h
 generic-y += hash.h
@@ -29,6 +30,7 @@ generic-y += mman.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += pci.h
+generic-y += pci-bridge.h
 generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 9400596a0f39..f19097134b02 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -104,37 +104,6 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
 	asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
 }
 
-static inline void arch_counter_set_user_access(void)
-{
-	u32 cntkctl = arch_timer_get_cntkctl();
-
-	/* Disable user access to the timers and the physical counter */
-	/* Also disable virtual event stream */
-	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
-			| ARCH_TIMER_USR_VT_ACCESS_EN
-			| ARCH_TIMER_VIRT_EVT_EN
-			| ARCH_TIMER_USR_PCT_ACCESS_EN);
-
-	/* Enable user access to the virtual counter */
-	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
-
-	arch_timer_set_cntkctl(cntkctl);
-}
-
-static inline void arch_timer_evtstrm_enable(int divider)
-{
-	u32 cntkctl = arch_timer_get_cntkctl();
-	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
-	/* Set the divider and enable virtual event stream */
-	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
-			| ARCH_TIMER_VIRT_EVT_EN;
-	arch_timer_set_cntkctl(cntkctl);
-	elf_hwcap |= HWCAP_EVTSTRM;
-#ifdef CONFIG_COMPAT
-	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
-#endif
-}
-
 static inline u64 arch_counter_get_cntvct(void)
 {
 	u64 cval;
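The atomic.h change just below folds four near-identical hand-written functions into preprocessor-generated ones via ATOMIC_OP()/ATOMIC_OP_RETURN(). A stand-alone sketch of the same token-pasting and stringification pattern (demo names only; a plain C body stands in for the kernel's ldxr/stxr loop):

/* Minimal model of the ATOMIC_OP generation pattern; not kernel code. */
#include <stdio.h>

typedef struct { int counter; } demo_atomic_t;

#define DEMO_OP(op, c_op)					\
static inline void demo_##op(int i, demo_atomic_t *v)		\
{								\
	/* the kernel builds "// atomic_" #op into the asm */	\
	v->counter = v->counter c_op i;				\
}

DEMO_OP(add, +)
DEMO_OP(sub, -)
#undef DEMO_OP

int main(void)
{
	demo_atomic_t v = { .counter = 40 };
	demo_add(2, &v);		/* expands from DEMO_OP(add, +) */
	printf("%d\n", v.counter);	/* prints 42 */
	return 0;
}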
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 65f1569ac96e..7047051ded40 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -35,7 +35,7 @@
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)	(((v)->counter) = (i))
 
 /*
@@ -43,69 +43,51 @@
  * store exclusive to ensure that these are atomic.  We may loop
  * to ensure that the update happens.
  */
-static inline void atomic_add(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	asm volatile("// atomic_add\n"
-"1:	ldxr	%w0, %2\n"
-"	add	%w0, %w0, %w3\n"
-"	stxr	%w1, %w0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i));
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	asm volatile("// atomic_add_return\n"
-"1:	ldxr	%w0, %2\n"
-"	add	%w0, %w0, %w3\n"
-"	stlxr	%w1, %w0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "memory");
-
-	smp_mb();
-	return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-	asm volatile("// atomic_sub\n"
-"1:	ldxr	%w0, %2\n"
-"	sub	%w0, %w0, %w3\n"
-"	stxr	%w1, %w0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i));
+#define ATOMIC_OP(op, asm_op)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	asm volatile("// atomic_" #op "\n"				\
+"1:	ldxr	%w0, %2\n"						\
+"	" #asm_op "	%w0, %w0, %w3\n"				\
+"	stxr	%w1, %w0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i));							\
+}									\
+
+#define ATOMIC_OP_RETURN(op, asm_op)					\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	asm volatile("// atomic_" #op "_return\n"			\
+"1:	ldxr	%w0, %2\n"						\
+"	" #asm_op "	%w0, %w0, %w3\n"				\
+"	stlxr	%w1, %w0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i)							\
+	: "memory");							\
+									\
+	smp_mb();							\
+	return result;							\
 }
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
+#define ATOMIC_OPS(op, asm_op)						\
+	ATOMIC_OP(op, asm_op)						\
+	ATOMIC_OP_RETURN(op, asm_op)
 
-	asm volatile("// atomic_sub_return\n"
-"1:	ldxr	%w0, %2\n"
-"	sub	%w0, %w0, %w3\n"
-"	stlxr	%w1, %w0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "memory");
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
 
-	smp_mb();
-	return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
@@ -157,72 +139,53 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
  */
 #define ATOMIC64_INIT(i) { (i) }
 
-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic64_set(v,i)	(((v)->counter) = (i))
 
-static inline void atomic64_add(u64 i, atomic64_t *v)
-{
-	long result;
-	unsigned long tmp;
-
-	asm volatile("// atomic64_add\n"
-"1:	ldxr	%0, %2\n"
-"	add	%0, %0, %3\n"
-"	stxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i));
+#define ATOMIC64_OP(op, asm_op)						\
+static inline void atomic64_##op(long i, atomic64_t *v)		\
+{									\
+	long result;							\
+	unsigned long tmp;						\
+									\
+	asm volatile("// atomic64_" #op "\n"				\
+"1:	ldxr	%0, %2\n"						\
+"	" #asm_op "	%0, %0, %3\n"					\
+"	stxr	%w1, %0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i));							\
+}									\
+
+#define ATOMIC64_OP_RETURN(op, asm_op)					\
+static inline long atomic64_##op##_return(long i, atomic64_t *v)	\
+{									\
+	long result;							\
+	unsigned long tmp;						\
+									\
+	asm volatile("// atomic64_" #op "_return\n"			\
+"1:	ldxr	%0, %2\n"						\
+"	" #asm_op "	%0, %0, %3\n"					\
+"	stlxr	%w1, %0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i)							\
+	: "memory");							\
+									\
+	smp_mb();							\
+	return result;							\
 }
 
-static inline long atomic64_add_return(long i, atomic64_t *v)
-{
-	long result;
-	unsigned long tmp;
+#define ATOMIC64_OPS(op, asm_op)					\
+	ATOMIC64_OP(op, asm_op)						\
+	ATOMIC64_OP_RETURN(op, asm_op)
 
-	asm volatile("// atomic64_add_return\n"
-"1:	ldxr	%0, %2\n"
-"	add	%0, %0, %3\n"
-"	stlxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "memory");
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, sub)
 
-	smp_mb();
-	return result;
-}
-
-static inline void atomic64_sub(u64 i, atomic64_t *v)
-{
-	long result;
-	unsigned long tmp;
-
-	asm volatile("// atomic64_sub\n"
-"1:	ldxr	%0, %2\n"
-"	sub	%0, %0, %3\n"
-"	stxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i));
-}
-
-static inline long atomic64_sub_return(long i, atomic64_t *v)
-{
-	long result;
-	unsigned long tmp;
-
-	asm volatile("// atomic64_sub_return\n"
-"1:	ldxr	%0, %2\n"
-"	sub	%0, %0, %3\n"
-"	stlxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "memory");
-
-	smp_mb();
-	return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 {
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index f2defe1c380c..689b6379188c 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -148,4 +148,8 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 }
 
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
 #endif
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
index 7a2e0762cb40..4c631a0a3609 100644
--- a/arch/arm64/include/asm/cachetype.h
+++ b/arch/arm64/include/asm/cachetype.h
@@ -39,6 +39,26 @@
 
 extern unsigned long __icache_flags;
 
+#define CCSIDR_EL1_LINESIZE_MASK	0x7
+#define CCSIDR_EL1_LINESIZE(x)		((x) & CCSIDR_EL1_LINESIZE_MASK)
+
+#define CCSIDR_EL1_NUMSETS_SHIFT	13
+#define CCSIDR_EL1_NUMSETS_MASK		(0x7fff << CCSIDR_EL1_NUMSETS_SHIFT)
+#define CCSIDR_EL1_NUMSETS(x) \
+	(((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT)
+
+extern u64 __attribute_const__ icache_get_ccsidr(void);
+
+static inline int icache_get_linesize(void)
+{
+	return 16 << CCSIDR_EL1_LINESIZE(icache_get_ccsidr());
+}
+
+static inline int icache_get_numsets(void)
+{
+	return 1 + CCSIDR_EL1_NUMSETS(icache_get_ccsidr());
+}
+
 /*
  * Whilst the D-side always behaves as PIPT on AArch64, aliasing is
  * permitted in the I-cache.
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index d7b4b38a8e86..6f8e2ef9094a 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -28,6 +28,8 @@ struct device_node;
  *		enable-method property.
  * @cpu_init:	Reads any data necessary for a specific enable-method from the
  *		devicetree, for a given cpu node and proposed logical id.
+ * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
+ *		devicetree, for a given cpu node and proposed logical id.
  * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
  *		mechanism for doing so, tests whether it is possible to boot
  *		the given CPU.
@@ -47,6 +49,7 @@ struct device_node;
 struct cpu_operations {
 	const char	*name;
 	int		(*cpu_init)(struct device_node *, unsigned int);
+	int		(*cpu_init_idle)(struct device_node *, unsigned int);
 	int		(*cpu_prepare)(unsigned int);
 	int		(*cpu_boot)(unsigned int);
 	void		(*cpu_postboot)(void);
@@ -61,7 +64,7 @@ struct cpu_operations {
 };
 
 extern const struct cpu_operations *cpu_ops[NR_CPUS];
-extern int __init cpu_read_ops(struct device_node *dn, int cpu);
-extern void __init cpu_read_bootcpu_ops(void);
+int __init cpu_read_ops(struct device_node *dn, int cpu);
+void __init cpu_read_bootcpu_ops(void);
 
 #endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
new file mode 100644
index 000000000000..b52a9932e2b1
--- /dev/null
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_CPUIDLE_H
+#define __ASM_CPUIDLE_H
+
+#ifdef CONFIG_CPU_IDLE
+extern int cpu_init_idle(unsigned int cpu);
+#else
+static inline int cpu_init_idle(unsigned int cpu)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 7fb343779498..40ec68aa6870 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -48,11 +48,13 @@
 /*
  * #imm16 values used for BRK instruction generation
  * Allowed values for kgbd are 0x400 - 0x7ff
+ * 0x100: for triggering a fault on purpose (reserved)
  * 0x400: for dynamic BRK instruction
  * 0x401: for compile time BRK instruction
  */
-#define KGDB_DYN_DGB_BRK_IMM		0x400
-#define KDBG_COMPILED_DBG_BRK_IMM	0x401
+#define FAULT_BRK_IMM			0x100
+#define KGDB_DYN_DBG_BRK_IMM		0x400
+#define KGDB_COMPILED_DBG_BRK_IMM	0x401
 
 /*
  * BRK instruction encoding
@@ -61,24 +63,30 @@
 #define AARCH64_BREAK_MON	0xd4200000
 
 /*
+ * BRK instruction for provoking a fault on purpose
+ * Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
+ */
+#define AARCH64_BREAK_FAULT	(AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
+
+/*
  * Extract byte from BRK instruction
  */
-#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
+#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \
 	((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
 
 /*
  * Extract byte from BRK #imm16
  */
-#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
-	(((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
+#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \
+	(((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
 
-#define KGDB_DYN_DGB_BRK_BYTE(x) \
-	(KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
+#define KGDB_DYN_DBG_BRK_BYTE(x) \
+	(KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x))
 
-#define  KGDB_DYN_BRK_INS_BYTE0  KGDB_DYN_DGB_BRK_BYTE(0)
-#define  KGDB_DYN_BRK_INS_BYTE1  KGDB_DYN_DGB_BRK_BYTE(1)
-#define  KGDB_DYN_BRK_INS_BYTE2  KGDB_DYN_DGB_BRK_BYTE(2)
-#define  KGDB_DYN_BRK_INS_BYTE3  KGDB_DYN_DGB_BRK_BYTE(3)
+#define  KGDB_DYN_BRK_INS_BYTE0  KGDB_DYN_DBG_BRK_BYTE(0)
+#define  KGDB_DYN_BRK_INS_BYTE1  KGDB_DYN_DBG_BRK_BYTE(1)
+#define  KGDB_DYN_BRK_INS_BYTE2  KGDB_DYN_DBG_BRK_BYTE(2)
+#define  KGDB_DYN_BRK_INS_BYTE3  KGDB_DYN_DBG_BRK_BYTE(3)
 
 #define CACHE_FLUSH_IS_SAFE	1
diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h
deleted file mode 100644
index 14c4c0ca7f2a..000000000000
--- a/arch/arm64/include/asm/dma-contiguous.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
-
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef _ASM_DMA_CONTIGUOUS_H
-#define _ASM_DMA_CONTIGUOUS_H
-
-#ifdef __KERNEL__
-#ifdef CONFIG_DMA_CMA
-
-#include <linux/types.h>
-
-static inline void
-dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
-
-#endif
-#endif
-
-#endif
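The AARCH64_BREAK_FAULT definition above is pure bit arithmetic: BRK's #imm16 field occupies bits [20:5] of the instruction word. A quick stand-alone check of that encoding (not kernel code):

/* Verify the BRK #imm16 placement used by AARCH64_BREAK_FAULT. */
#include <stdio.h>
#include <stdint.h>

#define AARCH64_BREAK_MON	0xd4200000u
#define FAULT_BRK_IMM		0x100

int main(void)
{
	/* #imm16 goes into bits [20:5] of the BRK opcode */
	uint32_t brk_fault = AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5);
	printf("BRK #0x%x -> 0x%08x\n", FAULT_BRK_IMM, brk_fault);
	/* prints 0xd4202000 */
	return 0;
}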
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index dc82e52acdb3..adeae3f6f0fc 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -52,6 +52,13 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 	dev->archdata.dma_ops = ops;
 }
 
+static inline int set_arch_dma_coherent_ops(struct device *dev)
+{
+	set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+	return 0;
+}
+#define set_arch_dma_coherent_ops	set_arch_dma_coherent_ops
+
 #include <asm-generic/dma-mapping-common.h>
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 0be67821f9ce..e8a3268a891c 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -47,8 +47,6 @@ static inline void ack_bad_irq(unsigned int irq)
 	irq_err_count++;
 }
 
-extern void handle_IRQ(unsigned int, struct pt_regs *);
-
 /*
  * No arch-specific IRQ flags.
  */
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index d064047612b1..52b484b6aa1a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg,
  */
 #define ARM_MAX_BRP		16
 #define ARM_MAX_WRP		16
-#define ARM_MAX_HBP_SLOTS	(ARM_MAX_BRP + ARM_MAX_WRP)
 
 /* Virtual debug register bases. */
 #define AARCH64_DBG_REG_BVR	0
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index dc1f73b13e74..56a9e63b6c33 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Huawei Ltd.
  * Author: Jiang Liu <liuj97@gmail.com>
  *
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -64,12 +66,155 @@
 	AARCH64_INSN_IMM_14,
 	AARCH64_INSN_IMM_12,
 	AARCH64_INSN_IMM_9,
+	AARCH64_INSN_IMM_7,
+	AARCH64_INSN_IMM_6,
+	AARCH64_INSN_IMM_S,
+	AARCH64_INSN_IMM_R,
 	AARCH64_INSN_IMM_MAX
 };
 
+enum aarch64_insn_register_type {
+	AARCH64_INSN_REGTYPE_RT,
+	AARCH64_INSN_REGTYPE_RN,
+	AARCH64_INSN_REGTYPE_RT2,
+	AARCH64_INSN_REGTYPE_RM,
+	AARCH64_INSN_REGTYPE_RD,
+	AARCH64_INSN_REGTYPE_RA,
+};
+
+enum aarch64_insn_register {
+	AARCH64_INSN_REG_0  = 0,
+	AARCH64_INSN_REG_1  = 1,
+	AARCH64_INSN_REG_2  = 2,
+	AARCH64_INSN_REG_3  = 3,
+	AARCH64_INSN_REG_4  = 4,
+	AARCH64_INSN_REG_5  = 5,
+	AARCH64_INSN_REG_6  = 6,
+	AARCH64_INSN_REG_7  = 7,
+	AARCH64_INSN_REG_8  = 8,
+	AARCH64_INSN_REG_9  = 9,
+	AARCH64_INSN_REG_10 = 10,
+	AARCH64_INSN_REG_11 = 11,
+	AARCH64_INSN_REG_12 = 12,
+	AARCH64_INSN_REG_13 = 13,
+	AARCH64_INSN_REG_14 = 14,
+	AARCH64_INSN_REG_15 = 15,
+	AARCH64_INSN_REG_16 = 16,
+	AARCH64_INSN_REG_17 = 17,
+	AARCH64_INSN_REG_18 = 18,
+	AARCH64_INSN_REG_19 = 19,
+	AARCH64_INSN_REG_20 = 20,
+	AARCH64_INSN_REG_21 = 21,
+	AARCH64_INSN_REG_22 = 22,
+	AARCH64_INSN_REG_23 = 23,
+	AARCH64_INSN_REG_24 = 24,
+	AARCH64_INSN_REG_25 = 25,
+	AARCH64_INSN_REG_26 = 26,
+	AARCH64_INSN_REG_27 = 27,
+	AARCH64_INSN_REG_28 = 28,
+	AARCH64_INSN_REG_29 = 29,
+	AARCH64_INSN_REG_FP = 29, /* Frame pointer */
+	AARCH64_INSN_REG_30 = 30,
+	AARCH64_INSN_REG_LR = 30, /* Link register */
+	AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
+	AARCH64_INSN_REG_SP = 31 /* Stack pointer: as load/store base reg */
+};
+
+enum aarch64_insn_variant {
+	AARCH64_INSN_VARIANT_32BIT,
+	AARCH64_INSN_VARIANT_64BIT
+};
+
+enum aarch64_insn_condition {
+	AARCH64_INSN_COND_EQ = 0x0, /* == */
+	AARCH64_INSN_COND_NE = 0x1, /* != */
+	AARCH64_INSN_COND_CS = 0x2, /* unsigned >= */
+	AARCH64_INSN_COND_CC = 0x3, /* unsigned < */
+	AARCH64_INSN_COND_MI = 0x4, /* < 0 */
+	AARCH64_INSN_COND_PL = 0x5, /* >= 0 */
+	AARCH64_INSN_COND_VS = 0x6, /* overflow */
+	AARCH64_INSN_COND_VC = 0x7, /* no overflow */
+	AARCH64_INSN_COND_HI = 0x8, /* unsigned > */
+	AARCH64_INSN_COND_LS = 0x9, /* unsigned <= */
+	AARCH64_INSN_COND_GE = 0xa, /* signed >= */
+	AARCH64_INSN_COND_LT = 0xb, /* signed < */
+	AARCH64_INSN_COND_GT = 0xc, /* signed > */
+	AARCH64_INSN_COND_LE = 0xd, /* signed <= */
+	AARCH64_INSN_COND_AL = 0xe, /* always */
+};
+
 enum aarch64_insn_branch_type {
 	AARCH64_INSN_BRANCH_NOLINK,
 	AARCH64_INSN_BRANCH_LINK,
+	AARCH64_INSN_BRANCH_RETURN,
+	AARCH64_INSN_BRANCH_COMP_ZERO,
+	AARCH64_INSN_BRANCH_COMP_NONZERO,
+};
+
+enum aarch64_insn_size_type {
+	AARCH64_INSN_SIZE_8,
+	AARCH64_INSN_SIZE_16,
+	AARCH64_INSN_SIZE_32,
+	AARCH64_INSN_SIZE_64,
+};
+
+enum aarch64_insn_ldst_type {
+	AARCH64_INSN_LDST_LOAD_REG_OFFSET,
+	AARCH64_INSN_LDST_STORE_REG_OFFSET,
+	AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX,
+	AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX,
+	AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
+	AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
+};
+
+enum aarch64_insn_adsb_type {
+	AARCH64_INSN_ADSB_ADD,
+	AARCH64_INSN_ADSB_SUB,
+	AARCH64_INSN_ADSB_ADD_SETFLAGS,
+	AARCH64_INSN_ADSB_SUB_SETFLAGS
+};
+
+enum aarch64_insn_movewide_type {
+	AARCH64_INSN_MOVEWIDE_ZERO,
+	AARCH64_INSN_MOVEWIDE_KEEP,
+	AARCH64_INSN_MOVEWIDE_INVERSE
+};
+
+enum aarch64_insn_bitfield_type {
+	AARCH64_INSN_BITFIELD_MOVE,
+	AARCH64_INSN_BITFIELD_MOVE_UNSIGNED,
+	AARCH64_INSN_BITFIELD_MOVE_SIGNED
+};
+
+enum aarch64_insn_data1_type {
+	AARCH64_INSN_DATA1_REVERSE_16,
+	AARCH64_INSN_DATA1_REVERSE_32,
+	AARCH64_INSN_DATA1_REVERSE_64,
+};
+
+enum aarch64_insn_data2_type {
+	AARCH64_INSN_DATA2_UDIV,
+	AARCH64_INSN_DATA2_SDIV,
+	AARCH64_INSN_DATA2_LSLV,
+	AARCH64_INSN_DATA2_LSRV,
+	AARCH64_INSN_DATA2_ASRV,
+	AARCH64_INSN_DATA2_RORV,
+};
+
+enum aarch64_insn_data3_type {
+	AARCH64_INSN_DATA3_MADD,
+	AARCH64_INSN_DATA3_MSUB,
+};
+
+enum aarch64_insn_logic_type {
+	AARCH64_INSN_LOGIC_AND,
+	AARCH64_INSN_LOGIC_BIC,
+	AARCH64_INSN_LOGIC_ORR,
+	AARCH64_INSN_LOGIC_ORN,
+	AARCH64_INSN_LOGIC_EOR,
+	AARCH64_INSN_LOGIC_EON,
+	AARCH64_INSN_LOGIC_AND_SETFLAGS,
+	AARCH64_INSN_LOGIC_BIC_SETFLAGS
 };
 
 #define __AARCH64_INSN_FUNCS(abbr, mask, val)	\
@@ -78,13 +223,58 @@ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
 static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
 { return (val); }
 
+__AARCH64_INSN_FUNCS(str_reg,	0x3FE0EC00, 0x38206800)
+__AARCH64_INSN_FUNCS(ldr_reg,	0x3FE0EC00, 0x38606800)
+__AARCH64_INSN_FUNCS(stp_post,	0x7FC00000, 0x28800000)
+__AARCH64_INSN_FUNCS(ldp_post,	0x7FC00000, 0x28C00000)
+__AARCH64_INSN_FUNCS(stp_pre,	0x7FC00000, 0x29800000)
+__AARCH64_INSN_FUNCS(ldp_pre,	0x7FC00000, 0x29C00000)
+__AARCH64_INSN_FUNCS(add_imm,	0x7F000000, 0x11000000)
+__AARCH64_INSN_FUNCS(adds_imm,	0x7F000000, 0x31000000)
+__AARCH64_INSN_FUNCS(sub_imm,	0x7F000000, 0x51000000)
+__AARCH64_INSN_FUNCS(subs_imm,	0x7F000000, 0x71000000)
+__AARCH64_INSN_FUNCS(movn,	0x7F800000, 0x12800000)
+__AARCH64_INSN_FUNCS(sbfm,	0x7F800000, 0x13000000)
+__AARCH64_INSN_FUNCS(bfm,	0x7F800000, 0x33000000)
+__AARCH64_INSN_FUNCS(movz,	0x7F800000, 0x52800000)
+__AARCH64_INSN_FUNCS(ubfm,	0x7F800000, 0x53000000)
+__AARCH64_INSN_FUNCS(movk,	0x7F800000, 0x72800000)
+__AARCH64_INSN_FUNCS(add,	0x7F200000, 0x0B000000)
+__AARCH64_INSN_FUNCS(adds,	0x7F200000, 0x2B000000)
+__AARCH64_INSN_FUNCS(sub,	0x7F200000, 0x4B000000)
+__AARCH64_INSN_FUNCS(subs,	0x7F200000, 0x6B000000)
+__AARCH64_INSN_FUNCS(madd,	0x7FE08000, 0x1B000000)
+__AARCH64_INSN_FUNCS(msub,	0x7FE08000, 0x1B008000)
+__AARCH64_INSN_FUNCS(udiv,	0x7FE0FC00, 0x1AC00800)
+__AARCH64_INSN_FUNCS(sdiv,	0x7FE0FC00, 0x1AC00C00)
+__AARCH64_INSN_FUNCS(lslv,	0x7FE0FC00, 0x1AC02000)
+__AARCH64_INSN_FUNCS(lsrv,	0x7FE0FC00, 0x1AC02400)
+__AARCH64_INSN_FUNCS(asrv,	0x7FE0FC00, 0x1AC02800)
+__AARCH64_INSN_FUNCS(rorv,	0x7FE0FC00, 0x1AC02C00)
+__AARCH64_INSN_FUNCS(rev16,	0x7FFFFC00, 0x5AC00400)
+__AARCH64_INSN_FUNCS(rev32,	0x7FFFFC00, 0x5AC00800)
+__AARCH64_INSN_FUNCS(rev64,	0x7FFFFC00, 0x5AC00C00)
+__AARCH64_INSN_FUNCS(and,	0x7F200000, 0x0A000000)
+__AARCH64_INSN_FUNCS(bic,	0x7F200000, 0x0A200000)
+__AARCH64_INSN_FUNCS(orr,	0x7F200000, 0x2A000000)
+__AARCH64_INSN_FUNCS(orn,	0x7F200000, 0x2A200000)
+__AARCH64_INSN_FUNCS(eor,	0x7F200000, 0x4A000000)
+__AARCH64_INSN_FUNCS(eon,	0x7F200000, 0x4A200000)
+__AARCH64_INSN_FUNCS(ands,	0x7F200000, 0x6A000000)
+__AARCH64_INSN_FUNCS(bics,	0x7F200000, 0x6A200000)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
+__AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
+__AARCH64_INSN_FUNCS(cbnz,	0xFE000000, 0x35000000)
+__AARCH64_INSN_FUNCS(bcond,	0xFF000010, 0x54000000)
 __AARCH64_INSN_FUNCS(svc,	0xFFE0001F, 0xD4000001)
 __AARCH64_INSN_FUNCS(hvc,	0xFFE0001F, 0xD4000002)
 __AARCH64_INSN_FUNCS(smc,	0xFFE0001F, 0xD4000003)
 __AARCH64_INSN_FUNCS(brk,	0xFFE0001F, 0xD4200000)
 __AARCH64_INSN_FUNCS(hint,	0xFFFFF01F, 0xD503201F)
+__AARCH64_INSN_FUNCS(br,	0xFFFFFC1F, 0xD61F0000)
+__AARCH64_INSN_FUNCS(blr,	0xFFFFFC1F, 0xD63F0000)
+__AARCH64_INSN_FUNCS(ret,	0xFFFFFC1F, 0xD65F0000)
 
 #undef	__AARCH64_INSN_FUNCS
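Each __AARCH64_INSN_FUNCS(abbr, mask, val) entry above generates a classifier of the form (insn & mask) == val. A stand-alone sketch using two of the mask/value pairs from the table (not the kernel's generated code):

/* Classify AArch64 opcodes with the same mask/value test. */
#include <stdio.h>
#include <stdint.h>

static int is_b(uint32_t insn)   { return (insn & 0xFC000000u) == 0x14000000u; }
static int is_ret(uint32_t insn) { return (insn & 0xFFFFFC1Fu) == 0xD65F0000u; }

int main(void)
{
	printf("%d\n", is_b(0x14000001));   /* 1: an immediate branch */
	printf("%d\n", is_ret(0xD65F03C0)); /* 1: "ret" via x30 */
	printf("%d\n", is_b(0xD65F03C0));   /* 0 */
	return 0;
}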
@@ -97,8 +287,67 @@ u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 				  u32 insn, u64 imm);
 u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
 				enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
+				     enum aarch64_insn_register reg,
+				     enum aarch64_insn_variant variant,
+				     enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
+				     enum aarch64_insn_condition cond);
 u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
 u32 aarch64_insn_gen_nop(void);
+u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
+				enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
+				    enum aarch64_insn_register base,
+				    enum aarch64_insn_register offset,
+				    enum aarch64_insn_size_type size,
+				    enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
+				     enum aarch64_insn_register reg2,
+				     enum aarch64_insn_register base,
+				     int offset,
+				     enum aarch64_insn_variant variant,
+				     enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
+				 enum aarch64_insn_register src,
+				 int imm, enum aarch64_insn_variant variant,
+				 enum aarch64_insn_adsb_type type);
+u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
+			      enum aarch64_insn_register src,
+			      int immr, int imms,
+			      enum aarch64_insn_variant variant,
+			      enum aarch64_insn_bitfield_type type);
+u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
+			      int imm, int shift,
+			      enum aarch64_insn_variant variant,
+			      enum aarch64_insn_movewide_type type);
+u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
+					 enum aarch64_insn_register src,
+					 enum aarch64_insn_register reg,
+					 int shift,
+					 enum aarch64_insn_variant variant,
+					 enum aarch64_insn_adsb_type type);
+u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
+			   enum aarch64_insn_register src,
+			   enum aarch64_insn_variant variant,
+			   enum aarch64_insn_data1_type type);
+u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
+			   enum aarch64_insn_register src,
+			   enum aarch64_insn_register reg,
+			   enum aarch64_insn_variant variant,
+			   enum aarch64_insn_data2_type type);
+u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
+			   enum aarch64_insn_register src,
+			   enum aarch64_insn_register reg1,
+			   enum aarch64_insn_register reg2,
+			   enum aarch64_insn_variant variant,
+			   enum aarch64_insn_data3_type type);
+u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
+					 enum aarch64_insn_register src,
+					 enum aarch64_insn_register reg,
+					 int shift,
+					 enum aarch64_insn_variant variant,
+					 enum aarch64_insn_logic_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index e0ecdcf6632d..79f1d519221f 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -121,7 +121,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 /*
  * I/O port access primitives.
  */
-#define IO_SPACE_LIMIT		0xffff
+#define arch_has_dev_port()	(1)
+#define IO_SPACE_LIMIT		(SZ_32M - 1)
 #define PCI_IOBASE		((void __iomem *)(MODULES_VADDR - SZ_32M))
 
 static inline u8 inb(unsigned long addr)
@@ -243,7 +244,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
  * (PHYS_OFFSET and PHYS_MASK taken into account).
  */
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
-extern int valid_phys_addr_range(unsigned long addr, size_t size);
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
 extern int devmem_is_allowed(unsigned long pfn);
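With IO_SPACE_LIMIT raised to SZ_32M - 1 above, port numbers index a 32MB window mapped just below MODULES_VADDR. A sketch of the address arithmetic (the MODULES_VADDR value here is illustrative only, not the kernel's constant):

/* Model of the PCI_IOBASE window arithmetic; values are made up. */
#include <stdio.h>

#define SZ_32M	(32UL * 1024 * 1024)

int main(void)
{
	unsigned long modules_vaddr = 0xffffffc000000000UL; /* illustrative */
	unsigned long pci_iobase = modules_vaddr - SZ_32M;
	unsigned long port = 0x1000;	/* e.g. PCIBIOS_MIN_IO */

	printf("inb(%#lx) reads virtual address %#lx\n", port, pci_iobase + port);
	printf("IO_SPACE_LIMIT = %#lx\n", SZ_32M - 1);
	return 0;
}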
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
new file mode 100644
index 000000000000..8e24ef3f7c82
--- /dev/null
+++ b/arch/arm64/include/asm/irq_work.h
@@ -0,0 +1,11 @@
+#ifndef __ASM_IRQ_WORK_H
+#define __ASM_IRQ_WORK_H
+
+#include <asm/smp.h>
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+	return !!__smp_cross_call;
+}
+
+#endif /* __ASM_IRQ_WORK_H */
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
index 3c8aafc1082f..f69f69c8120c 100644
--- a/arch/arm64/include/asm/kgdb.h
+++ b/arch/arm64/include/asm/kgdb.h
@@ -29,7 +29,7 @@
 
 static inline void arch_kgdb_breakpoint(void)
 {
-	asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
+	asm ("brk %0" : : "I" (KGDB_COMPILED_DBG_BRK_IMM));
 }
 
 extern void kgdb_handle_bus_error(void);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bcde41905746..2012c4ba8d67 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -180,7 +180,8 @@ int kvm_unmap_hva_range(struct kvm *kvm,
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 
 /* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
+			      unsigned long end)
 {
 	return 0;
 }
@@ -190,6 +191,11 @@ static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 	return 0;
 }
 
+static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
+							 unsigned long address)
+{
+}
+
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
new file mode 100644
index 000000000000..872ba939fcb2
--- /dev/null
+++ b/arch/arm64/include/asm/pci.h
@@ -0,0 +1,37 @@
+#ifndef __ASM_PCI_H
+#define __ASM_PCI_H
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm-generic/pci-bridge.h>
+#include <asm-generic/pci-dma-compat.h>
+
+#define PCIBIOS_MIN_IO		0x1000
+#define PCIBIOS_MIN_MEM		0
+
+/*
+ * Set to 1 if the kernel should re-assign all PCI bus numbers
+ */
+#define pcibios_assign_all_busses() \
+	(pci_has_flag(PCI_REASSIGN_ALL_BUS))
+
+/*
+ * PCI address space differs from physical memory address space
+ */
+#define PCI_DMA_BUS_IS_PHYS	(0)
+
+extern int isa_dma_bridge_buggy;
+
+#ifdef CONFIG_PCI
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+	return 1;
+}
+#endif  /* CONFIG_PCI */
+
+#endif  /* __KERNEL__ */
+#endif  /* __ASM_PCI_H */
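The new arch_irq_work_has_interrupt() above reports a self-IPI capability only once an SMP cross-call handler has been registered; otherwise irq_work falls back to running from the timer tick. A stand-alone model of that contract (demo names and a simplified handler signature, not kernel code):

/* Model of the "has a cross-call handler been registered?" test. */
#include <stdbool.h>
#include <stdio.h>

static void (*demo_cross_call)(unsigned int cpu, unsigned int ipi);

static bool demo_irq_work_has_interrupt(void)
{
	return !!demo_cross_call;	/* mirrors !!__smp_cross_call */
}

static void demo_ipi(unsigned int cpu, unsigned int ipi)
{
	(void)cpu; (void)ipi;
}

int main(void)
{
	printf("%d\n", demo_irq_work_has_interrupt()); /* 0: no IPI path yet */
	demo_cross_call = demo_ipi;
	printf("%d\n", demo_irq_work_has_interrupt()); /* 1 */
	return 0;
}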
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 453a179469a3..5279e5733386 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -26,13 +26,13 @@ static inline void set_my_cpu_offset(unsigned long off)
 static inline unsigned long __my_cpu_offset(void)
 {
 	unsigned long off;
-	register unsigned long *sp asm ("sp");
 
 	/*
 	 * We want to allow caching the value, so avoid using volatile and
 	 * instead use a fake stack read to hazard against barrier().
 	 */
-	asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp));
+	asm("mrs %0, tpidr_el1" : "=r" (off) :
+		"Q" (*(const unsigned long *)current_stack_pointer));
 
 	return off;
 }
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 51f6f5284ce5..41a43bf26492 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -149,46 +149,51 @@ extern struct page *empty_zero_page;
 #define pte_valid_not_user(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 
-static inline pte_t pte_wrprotect(pte_t pte)
+static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
 {
-	pte_val(pte) &= ~PTE_WRITE;
+	pte_val(pte) &= ~pgprot_val(prot);
 	return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
 {
-	pte_val(pte) |= PTE_WRITE;
+	pte_val(pte) |= pgprot_val(prot);
 	return pte;
 }
 
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	return clear_pte_bit(pte, __pgprot(PTE_WRITE));
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	return set_pte_bit(pte, __pgprot(PTE_WRITE));
+}
+
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	pte_val(pte) &= ~PTE_DIRTY;
-	return pte;
+	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	pte_val(pte) |= PTE_DIRTY;
-	return pte;
+	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-	pte_val(pte) &= ~PTE_AF;
-	return pte;
+	return clear_pte_bit(pte, __pgprot(PTE_AF));
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
-	pte_val(pte) |= PTE_AF;
-	return pte;
+	return set_pte_bit(pte, __pgprot(PTE_AF));
 }
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
-	pte_val(pte) |= PTE_SPECIAL;
-	return pte;
+	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
 }
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
@@ -239,6 +244,16 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #define __HAVE_ARCH_PTE_SPECIAL
 
+static inline pte_t pud_pte(pud_t pud)
+{
+	return __pte(pud_val(pud));
+}
+
+static inline pmd_t pud_pmd(pud_t pud)
+{
+	return __pmd(pud_val(pud));
+}
+
 static inline pte_t pmd_pte(pmd_t pmd)
 {
 	return __pte(pmd_val(pmd));
@@ -256,7 +271,13 @@ static inline pmd_t pte_pmd(pte_t pte)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
 #define pmd_trans_splitting(pmd)	pte_special(pmd_pte(pmd))
-#endif
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+struct vm_area_struct;
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			  pmd_t *pmdp);
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
 #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
@@ -277,6 +298,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
 #define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+#define pud_write(pud)		pte_write(pud_pte(pud))
 #define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
 
 #define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
@@ -296,6 +318,8 @@ static inline int has_transparent_hugepage(void)
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
 #define pgprot_writecombine(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
+#define pgprot_device(prot) \
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -376,6 +400,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
 }
 
+#define pud_page(pud)		pmd_page(pud_pmd(pud))
+
 #endif	/* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
 
 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
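The set_pte_bit()/clear_pte_bit() refactoring above reduces each pte modifier to a one-liner over a shared bit-twiddling core. A stand-alone illustration of the pattern (simplified stand-ins for pte_t/pgprot_t, with made-up bit positions):

/* Demo of the set/clear helper pattern; types and bits are illustrative. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t demo_pteval_t;

#define DEMO_PTE_WRITE	(1ULL << 1)	/* not the kernel's bit layout */
#define DEMO_PTE_DIRTY	(1ULL << 2)

static demo_pteval_t clear_bit_demo(demo_pteval_t pte, demo_pteval_t prot)
{
	return pte & ~prot;
}

static demo_pteval_t set_bit_demo(demo_pteval_t pte, demo_pteval_t prot)
{
	return pte | prot;
}

/* the modifiers become trivial wrappers, as in the patch */
static demo_pteval_t mkdirty(demo_pteval_t p)   { return set_bit_demo(p, DEMO_PTE_DIRTY); }
static demo_pteval_t wrprotect(demo_pteval_t p) { return clear_bit_demo(p, DEMO_PTE_WRITE); }

int main(void)
{
	demo_pteval_t pte = DEMO_PTE_WRITE;

	pte = wrprotect(mkdirty(pte));
	printf("dirty=%d write=%d\n",
	       !!(pte & DEMO_PTE_DIRTY), !!(pte & DEMO_PTE_WRITE)); /* 1 0 */
	return 0;
}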
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 0c657bb54597..9a8fd84f8fb2 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -32,6 +32,8 @@ extern void cpu_cache_off(void);
 extern void cpu_do_idle(void);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
 extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+void cpu_soft_restart(phys_addr_t cpu_reset,
+		unsigned long addr) __attribute__((noreturn));
 extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
 extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 3df21feeabdd..286b1bec547c 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -139,7 +139,7 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev,
 	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 
 #define KSTK_EIP(tsk)	((unsigned long)task_pt_regs(tsk)->pc)
-#define KSTK_ESP(tsk)	((unsigned long)task_pt_regs(tsk)->sp)
+#define KSTK_ESP(tsk)	user_stack_pointer(task_pt_regs(tsk))
 
 /*
  * Prefetching support
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 501000fadb6f..41ed9e13795e 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -137,7 +137,7 @@ struct pt_regs {
 	(!((regs)->pstate & PSR_F_BIT))
 
 #define user_stack_pointer(regs) \
-	(!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp)
+	(!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index a498f2cd2c2a..780f82c827b6 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -48,6 +48,8 @@ extern void smp_init_cpus(void);
  */
 extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
 
+extern void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+
 /*
  * Called from the secondary holding pen, this is the secondary CPU entry point.
 */
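The user_stack_pointer() change above is an operator-precedence fix: ?: binds more loosely than almost everything else, so without the enclosing parentheses the old expansion mis-parses inside a larger expression. A minimal demonstration of the failure mode (demo macros, not the kernel's):

/* Why the outer parentheses around the conditional matter. */
#include <stdio.h>

#define BROKEN(c, a, b)	(!(c)) ? (a) : (b)
#define FIXED(c, a, b)	(!(c) ? (a) : (b))

int main(void)
{
	/* 4 + BROKEN(...) parses as (4 + !(0)) ? 10 : 20 */
	printf("%d\n", 4 + BROKEN(0, 10, 20));	/* 10, not the intended 14 */
	printf("%d\n", 4 + FIXED(0, 10, 20));	/* 14 */
	return 0;
}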
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index 1be62bcb9d47..74a9d301819f 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -17,7 +17,7 @@
 #define __ASM_SPARSEMEM_H
 
 #ifdef CONFIG_SPARSEMEM
-#define MAX_PHYSMEM_BITS	40
+#define MAX_PHYSMEM_BITS	48
 #define SECTION_SIZE_BITS	30
 #endif
 
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index e9c149c042e0..456d67c1f0fa 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -21,6 +21,7 @@ struct sleep_save_sp {
 	phys_addr_t save_ptr_stash_phys;
 };
 
+extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
 extern void cpu_resume(void);
 extern int cpu_suspend(unsigned long);
 
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 45108d802f5e..459bf8e53208 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -69,14 +69,19 @@ struct thread_info {
 #define init_stack		(init_thread_union.stack)
 
 /*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+/*
  * how to get the thread information struct from C
  */
 static inline struct thread_info *current_thread_info(void) __attribute_const__;
 
 static inline struct thread_info *current_thread_info(void)
 {
-	register unsigned long sp asm ("sp");
-	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+	return (struct thread_info *)
+		(current_stack_pointer & ~(THREAD_SIZE - 1));
 }
 
 #define thread_saved_pc(tsk)	\
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 62731ef9749a..a82c0c5c8b52 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -23,6 +23,20 @@
 
 #include <asm-generic/tlb.h>
 
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
+static inline void __tlb_remove_table(void *_table)
+{
+	free_page_and_swap_cache((struct page *)_table);
+}
+#else
+#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
 /*
  * There's three ways the TLB shootdown code is used:
  *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
@@ -88,7 +102,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, pte);
+	tlb_remove_entry(tlb, pte);
 }
 
 #if CONFIG_ARM64_PGTABLE_LEVELS > 2
@@ -96,7 +110,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pmdp));
+	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
 
@@ -105,7 +119,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pudp));
+	tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
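The thread_info.h change above still finds thread_info by masking the stack pointer: with THREAD_SIZE-aligned stacks, clearing the low bits lands on the base of the stack where thread_info lives. A worked example of that arithmetic (addresses illustrative; THREAD_SIZE of 16KB matches arm64 at the time):

/* Stand-alone model of current_thread_info()'s masking. */
#include <stdio.h>

#define THREAD_SIZE	16384UL		/* 16KB, so the mask clears 14 bits */

int main(void)
{
	unsigned long sp = 0xffffffc000734e70UL;	/* example sp */
	unsigned long ti = sp & ~(THREAD_SIZE - 1);

	printf("thread_info at %#lx\n", ti);	/* 0xffffffc000734000 */
	return 0;
}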
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 4bc95d27e063..6d2bf419431d 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -41,7 +41,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls		383
+#define __NR_compat_syscalls		386
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index e242600c4046..da1f06b535e3 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -787,3 +787,8 @@ __SYSCALL(__NR_sched_setattr, sys_sched_setattr)
 __SYSCALL(__NR_sched_getattr, sys_sched_getattr)
 #define __NR_renameat2 382
 __SYSCALL(__NR_renameat2, sys_renameat2)
+			/* 383 for seccomp */
+#define __NR_getrandom 384
+__SYSCALL(__NR_getrandom, sys_getrandom)
+#define __NR_memfd_create 385
+__SYSCALL(__NR_memfd_create, sys_memfd_create)
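The compat table above grows to 386 entries while deliberately leaving number 383 unassigned for seccomp; designated initializers make such holes harmless because unlisted slots default to NULL. A stand-alone sketch of that shape (strings stand in for the handler pointers):

/* Model of a syscall table with a reserved hole; illustrative only. */
#include <stdio.h>

#define NR_getrandom		384
#define NR_memfd_create		385
#define NR_compat_syscalls	386

static const char *demo_call_table[NR_compat_syscalls] = {
	[NR_getrandom]		= "sys_getrandom",
	[NR_memfd_create]	= "sys_memfd_create",
	/* [383] left NULL: reserved for seccomp */
};

int main(void)
{
	int nr = NR_getrandom;
	printf("%s\n", demo_call_table[nr] ? demo_call_table[nr]
					   : "sys_ni_syscall");
	return 0;
}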