From 3b1313eb32c499d46dc4c3e896d19d9564c879c4 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Sun, 24 Nov 2019 16:07:31 +0200 Subject: mips: cacheinfo: report shared CPU map Report L1 caches as shared per core; L2 - per cluster. This fixes "perf" that went crazy if shared_cpu_map attribute not reported on sysfs, in form of /sys/devices/system/cpu/cpu*/cache/index*/shared_cpu_list /sys/devices/system/cpu/cpu*/cache/index*/shared_cpu_map Signed-off-by: Vladimir Kondratiev Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/kernel/cacheinfo.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c index f777e44653d5..47312c529410 100644 --- a/arch/mips/kernel/cacheinfo.c +++ b/arch/mips/kernel/cacheinfo.c @@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu) return 0; } +static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map) +{ + int cpu1; + + for_each_possible_cpu(cpu1) + if (cpus_are_siblings(cpu, cpu1)) + cpumask_set_cpu(cpu1, cpu_map); +} + +static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map) +{ + int cpu1; + int cluster = cpu_cluster(&cpu_data[cpu]); + + for_each_possible_cpu(cpu1) + if (cpu_cluster(&cpu_data[cpu1]) == cluster) + cpumask_set_cpu(cpu1, cpu_map); +} + static int __populate_cache_leaves(unsigned int cpu) { struct cpuinfo_mips *c = ¤t_cpu_data; @@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu) struct cacheinfo *this_leaf = this_cpu_ci->info_list; if (c->icache.waysize) { + /* L1 caches are per core */ + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA); + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST); } else { populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED); } - if (c->scache.waysize) + if (c->scache.waysize) { + /* L2 cache is per cluster */ + fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map); populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED); + } if (c->tcache.waysize) populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED); -- cgit v1.2.3 From 87f67cc4c7b81273fd1d9f775192eeb3124562f9 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 27 Nov 2019 07:26:12 -0800 Subject: MIPS: Fix boot on Fuloong2 systems MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 268a2d60013049 ("MIPS: Loongson64: Rename CPU TYPES") changed Kconfig symbols as follows: CPU_LOONGSON2 to CPU_LOONGSON2EF CPU_LOONGSON3 to CPU_LOONGSON64 SYS_HAS_CPU_LOONGSON3 to SYS_HAS_CPU_LOONGSON64 It did not touch SYS_HAS_CPU_LOONGSON2E or SYS_HAS_CPU_LOONGSON2F. However, the patch changed a conditional from #if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \ defined(CONFIG_SYS_HAS_CPU_LOONGSON2F) to #if defined(CONFIG_SYS_HAS_CPU_LOONGSON2EF) SYS_HAS_CPU_LOONGSON2EF does not exist, resulting in boot failures with the qemu fulong2e emulation. Revert to the original code. 
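The failure is silent at build time because the preprocessor simply treats the unknown symbol as undefined; a minimal sketch of the effect (same symbols as above, shown for illustration only):

    /* Kconfig never defines CONFIG_SYS_HAS_CPU_LOONGSON2EF, so defined()
     * is always false and the case label is compiled out on every
     * configuration, including Fuloong2: */
    #if defined(CONFIG_SYS_HAS_CPU_LOONGSON2EF)
    case CPU_LOONGSON2EF:
    #endif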
Fixes: 268a2d60013049 ("MIPS: Loongson64: Rename CPU TYPES") Cc: Jiaxun Yang Signed-off-by: Guenter Roeck Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/include/asm/cpu-type.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index c46c59b0f1b4..49f0061a6051 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -15,7 +15,8 @@ static inline int __pure __get_cpu_type(const int cpu_type) { switch (cpu_type) { -#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2EF) +#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \ + defined(CONFIG_SYS_HAS_CPU_LOONGSON2F) case CPU_LOONGSON2EF: #endif -- cgit v1.2.3 From 7d2aa4bb90f5f6f1b8de8848c26042403f2d7bf9 Mon Sep 17 00:00:00 2001 From: Vincenzo Frascino Date: Fri, 29 Nov 2019 14:36:58 +0000 Subject: mips: Fix gettimeofday() in the vdso library The libc provides a discovery mechanism for vDSO library and its symbols. When a symbol is not exposed by the vDSOs the libc falls back on the system calls. With the introduction of the unified vDSO library on mips this behavior is not honored anymore by the kernel in the case of gettimeofday(). The issue has been noticed and reported due to a dhclient failure on the CI20 board: root@letux:~# dhclient ../../../../lib/isc/unix/time.c:200: Operation not permitted root@letux:~# Restore the original behavior fixing gettimeofday() in the vDSO library. Reported-by: H. Nikolaus Schaller Tested-by: H. Nikolaus Schaller # CI20 with JZ4780 Signed-off-by: Vincenzo Frascino Signed-off-by: Paul Burton Cc: mips-creator-ci20-dev@googlegroups.com Cc: letux-kernel@openphoenux.org Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/include/asm/vdso/gettimeofday.h | 13 ------------- arch/mips/vdso/vgettimeofday.c | 20 ++++++++++++++++++++ 2 files changed, 20 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h index b08825531e9f..0ae9b4cbc153 100644 --- a/arch/mips/include/asm/vdso/gettimeofday.h +++ b/arch/mips/include/asm/vdso/gettimeofday.h @@ -26,8 +26,6 @@ #define __VDSO_USE_SYSCALL ULLONG_MAX -#ifdef CONFIG_MIPS_CLOCK_VSYSCALL - static __always_inline long gettimeofday_fallback( struct __kernel_old_timeval *_tv, struct timezone *_tz) @@ -48,17 +46,6 @@ static __always_inline long gettimeofday_fallback( return error ? -ret : ret; } -#else - -static __always_inline long gettimeofday_fallback( - struct __kernel_old_timeval *_tv, - struct timezone *_tz) -{ - return -1; -} - -#endif - static __always_inline long clock_gettime_fallback( clockid_t _clkid, struct __kernel_timespec *_ts) diff --git a/arch/mips/vdso/vgettimeofday.c b/arch/mips/vdso/vgettimeofday.c index 6ebdc37c89fc..6b83b6376a4b 100644 --- a/arch/mips/vdso/vgettimeofday.c +++ b/arch/mips/vdso/vgettimeofday.c @@ -17,12 +17,22 @@ int __vdso_clock_gettime(clockid_t clock, return __cvdso_clock_gettime32(clock, ts); } +#ifdef CONFIG_MIPS_CLOCK_VSYSCALL + +/* + * This is behind the ifdef so that we don't provide the symbol when there's no + * possibility of there being a usable clocksource, because there's nothing we + * can do without it. When libc fails the symbol lookup it should fall back on + * the standard syscall path. 
+ */ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } +#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ + int __vdso_clock_getres(clockid_t clock_id, struct old_timespec32 *res) { @@ -43,12 +53,22 @@ int __vdso_clock_gettime(clockid_t clock, return __cvdso_clock_gettime(clock, ts); } +#ifdef CONFIG_MIPS_CLOCK_VSYSCALL + +/* + * This is behind the ifdef so that we don't provide the symbol when there's no + * possibility of there being a usable clocksource, because there's nothing we + * can do without it. When libc fails the symbol lookup it should fall back on + * the standard syscall path. + */ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } +#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ + int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res) { -- cgit v1.2.3 From 1928b36cfa4df1aeedf5f2644d0c33f3a1fcfd7b Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 26 Nov 2019 17:54:09 -0800 Subject: arc: eznps: fix allmodconfig kconfig warning Fix kconfig warning for arch/arc/plat-eznps/Kconfig allmodconfig: WARNING: unmet direct dependencies detected for CLKSRC_NPS Depends on [n]: GENERIC_CLOCKEVENTS [=y] && !PHYS_ADDR_T_64BIT [=y] Selected by [y]: - ARC_PLAT_EZNPS [=y] Signed-off-by: Randy Dunlap Cc: Vineet Gupta Cc: Ofer Levi Cc: linux-snps-arc@lists.infradead.org Signed-off-by: Vineet Gupta --- arch/arc/plat-eznps/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig index a376a50d3fea..a931d0a256d0 100644 --- a/arch/arc/plat-eznps/Kconfig +++ b/arch/arc/plat-eznps/Kconfig @@ -7,7 +7,7 @@ menuconfig ARC_PLAT_EZNPS bool "\"EZchip\" ARC dev platform" select CPU_BIG_ENDIAN - select CLKSRC_NPS + select CLKSRC_NPS if !PHYS_ADDR_T_64BIT select EZNPS_GIC select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET help -- cgit v1.2.3 From cd4c2493fa588e43ad9e066594698c9bd55d28a4 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Mon, 9 Dec 2019 17:31:35 +0200 Subject: ARC: mm: drop stale define of __ARCH_USE_5LEVEL_HACK Commit 6aae3425aa9c ("ARC: mm: remove __ARCH_USE_5LEVEL_HACK") make ARC paging code 5-level compliant but left behind a stale define of __ARCH_USE_5LEVEL_HACK in arch/arc/include/asm/hugepage.h. Remove it. Signed-off-by: Mike Rapoport Signed-off-by: Vineet Gupta --- arch/arc/include/asm/hugepage.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h index 9a74ce71a767..30ac40fed2c5 100644 --- a/arch/arc/include/asm/hugepage.h +++ b/arch/arc/include/asm/hugepage.h @@ -8,7 +8,6 @@ #define _ASM_ARC_HUGEPAGE_H #include -#define __ARCH_USE_5LEVEL_HACK #include static inline pte_t pmd_pte(pmd_t pmd) -- cgit v1.2.3 From a4a3893114a41e365274d5fab5d9ff5acc235ff0 Mon Sep 17 00:00:00 2001 From: Jouni Hogander Date: Mon, 9 Dec 2019 14:37:07 +0200 Subject: MIPS: Prevent link failure with kcov instrumentation __sanitizer_cov_trace_pc() is not linked in and causing link failure if KCOV_INSTRUMENT is enabled. Fix this by disabling instrumentation for compressed image. 
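For context, KCOV builds instrumented objects with -fsanitize-coverage=trace-pc, which makes the compiler call __sanitizer_cov_trace_pc() from every basic block; that function is provided by kernel/kcov.c and is not available to the standalone decompressor. A rough sketch of the instrumented result (illustrative only, not actual compiler output):

    void __sanitizer_cov_trace_pc(void);    /* in kernel/kcov.c, absent from vmlinuz */

    int decompress_step(int n)
    {
            __sanitizer_cov_trace_pc();     /* inserted at every basic block */
            if (n < 0) {
                    __sanitizer_cov_trace_pc();
                    return -1;
            }
            return n;
    }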
Signed-off-by: Jouni Hogander Signed-off-by: Paul Burton Cc: Lukas Bulwahn Cc: linux-mips@vger.kernel.org --- arch/mips/boot/compressed/Makefile | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index 172801ed35b8..d859f079b771 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \ -DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS) +# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. +KCOV_INSTRUMENT := n + # decompressor objects (linked with vmlinuz) vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o -- cgit v1.2.3
From f8fffebdea752a25757b906f3dffecf1a59a6194 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Thu, 5 Dec 2019 10:23:18 -0800 Subject: MIPS: BPF: Disable MIPS32 eBPF JIT
Commit 716850ab104d ("MIPS: eBPF: Initial eBPF support for MIPS32 architecture.") enabled our eBPF JIT for MIPS32 kernels, whereas it had previously only been available for MIPS64. It was my understanding at the time that the BPF test suite was passing & JITing a comparable number of tests to our cBPF JIT [1], but it turns out that was not the case.
The eBPF JIT has a number of problems on MIPS32:
- Most notably various code paths still result in emission of MIPS64 instructions which will cause reserved instruction exceptions & kernel panics when run on MIPS32 CPUs.
- The eBPF JIT doesn't account for differences between the O32 ABI used by MIPS32 kernels versus the N64 ABI used by MIPS64 kernels. Notably arguments beyond the first 4 are passed on the stack in O32, and this is entirely unhandled when JITing a BPF_CALL instruction. Stack space must be reserved for arguments even if they all fit in registers, and the callee is free to assume that stack space has been reserved for its use - with the eBPF JIT this is not the case, so calling any function can result in clobbering values on the stack & unpredictable behaviour. Function arguments in eBPF are always 64-bit values which is also entirely unhandled - the JIT still uses a single (32-bit) register per argument. As a result all function arguments are always passed incorrectly when JITing a BPF_CALL instruction, leading to kernel crashes or strange behavior.
- The JIT attempts to bail out on use of ALU64 instructions or 64-bit memory access instructions. The code doing this at the start of build_one_insn() incorrectly checks whether BPF_OP() equals BPF_DW, when it should really be checking BPF_SIZE() & only doing so when BPF_CLASS() is one of BPF_{LD,LDX,ST,STX}. This results in false positives that cause more bailouts than intended, and that in turn hides some of the problems described above.
- The kernel's cBPF->eBPF translation makes heavy use of 64-bit eBPF instructions that the MIPS32 eBPF JIT bails out on, leading to most cBPF programs not being JITed at all.
Until these problems are resolved, revert the enabling of the eBPF JIT on MIPS32 done by commit 716850ab104d ("MIPS: eBPF: Initial eBPF support for MIPS32 architecture."). Note that this does not undo the changes made to the eBPF JIT by that commit, since they are a useful starting point for providing MIPS32 support - they're just not nearly complete.
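As a concrete illustration of the calling-convention problem (hypothetical helper, not code from the JIT):

    typedef unsigned long long u64;

    /* eBPF helper arguments are always 64-bit values */
    u64 helper(u64 a, u64 b, u64 c);

    u64 call_helper(void)
    {
            /*
             * N64 (MIPS64 kernels): a -> $a0, b -> $a1, c -> $a2.
             * O32 (MIPS32 kernels): a -> $a0/$a1, b -> $a2/$a3, c -> the
             * stack, and the caller must also reserve outgoing-argument
             * stack space. The MIPS32 JIT emits one 32-bit register per
             * argument and reserves no stack, so calls like this are
             * miscompiled.
             */
            return helper(1, 2, 3);
    }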
[1] https://lore.kernel.org/linux-mips/MWHPR2201MB13583388481F01A422CE7D66D4410@MWHPR2201MB1358.namprd22.prod.outlook.com/ Signed-off-by: Paul Burton Fixes: 716850ab104d ("MIPS: eBPF: Initial eBPF support for MIPS32 architecture.") Cc: Daniel Borkmann Cc: Hassan Naveed Cc: Tony Ambardar Cc: bpf@vger.kernel.org Cc: netdev@vger.kernel.org Cc: # v5.2+ Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index c86be02b6d89..90ee2ed463a8 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -47,7 +47,7 @@ config MIPS select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES select HAVE_ASM_MODVERSIONS - select HAVE_EBPF_JIT if (!CPU_MICROMIPS) + select HAVE_EBPF_JIT if (64BIT && !CPU_MICROMIPS) select HAVE_CONTEXT_TRACKING select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT -- cgit v1.2.3 From f596cf0d8062cb5d0a4513a8b3afca318c13be10 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Fri, 6 Dec 2019 11:07:41 +0300 Subject: MIPS: BPF: eBPF JIT: check for MIPS ISA compliance in Kconfig It is completely wrong to check for compile-time MIPS ISA revision in the body of bpf_int_jit_compile() as it may lead to get MIPS JIT fully omitted by the CC while the rest system will think that the JIT is actually present and works [1]. We can check if the selected CPU really supports MIPS eBPF JIT at configure time and avoid such situations when kernel can be built without both JIT and interpreter, but with CONFIG_BPF_SYSCALL=y. [1] https://lore.kernel.org/linux-mips/09d713a59665d745e21d021deeaebe0a@dlink.ru/ Fixes: 716850ab104d ("MIPS: eBPF: Initial eBPF support for MIPS32 architecture.") Cc: # v5.2+ Signed-off-by: Alexander Lobakin Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: Hassan Naveed Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Martin KaFai Lau Cc: Song Liu Cc: Yonghong Song Cc: Andrii Nakryiko Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: netdev@vger.kernel.org Cc: bpf@vger.kernel.org --- arch/mips/Kconfig | 2 +- arch/mips/net/ebpf_jit.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 90ee2ed463a8..827bbda105f3 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -47,7 +47,7 @@ config MIPS select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES select HAVE_ASM_MODVERSIONS - select HAVE_EBPF_JIT if (64BIT && !CPU_MICROMIPS) + select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2 select HAVE_CONTEXT_TRACKING select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 46b76751f3a5..a2405d5f7d1e 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -1803,7 +1803,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) unsigned int image_size; u8 *image_ptr; - if (!prog->jit_requested || MIPS_ISA_REV < 2) + if (!prog->jit_requested) return prog; tmp = bpf_jit_blind_constants(prog); -- cgit v1.2.3 From 833a20b8d4924b63ef7700e43fe4941963002822 Mon Sep 17 00:00:00 2001 From: Eugeniy Paltsev Date: Thu, 19 Dec 2019 16:30:40 +0300 Subject: ARC: asm-offsets: remove duplicate entry We define 'PT_user_r25' twice in asm-offsets.c It's not a big issue as we define it to the same value, however let's fix it. 
Signed-off-by: Eugeniy Paltsev Signed-off-by: Vineet Gupta --- arch/arc/kernel/asm-offsets.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c index 1f621e416521..631ebb5d3458 100644 --- a/arch/arc/kernel/asm-offsets.c +++ b/arch/arc/kernel/asm-offsets.c @@ -66,7 +66,6 @@ int main(void) DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs)); DEFINE(SZ_PT_REGS, sizeof(struct pt_regs)); - DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25)); return 0; } -- cgit v1.2.3 From 91a063c956084fb21cf2523bce6892514e3f1799 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Sat, 21 Dec 2019 14:16:54 +1100 Subject: powerpc/mm: Mark get_slice_psize() & slice_addr_is_low() as notrace These slice routines are called from the SLB miss handler, which can lead to warnings from the IRQ code, because we have not reconciled the IRQ state properly: WARNING: CPU: 72 PID: 30150 at arch/powerpc/kernel/irq.c:258 arch_local_irq_restore.part.0+0xcc/0x100 Modules linked in: CPU: 72 PID: 30150 Comm: ftracetest Not tainted 5.5.0-rc2-gcc9x-g7e0165b2f1a9 #1 NIP: c00000000001d83c LR: c00000000029ab90 CTR: c00000000026cf90 REGS: c0000007eee3b960 TRAP: 0700 Not tainted (5.5.0-rc2-gcc9x-g7e0165b2f1a9) MSR: 8000000000021033 CR: 22242844 XER: 20000000 CFAR: c00000000001d780 IRQMASK: 0 ... NIP arch_local_irq_restore.part.0+0xcc/0x100 LR trace_graph_entry+0x270/0x340 Call Trace: trace_graph_entry+0x254/0x340 (unreliable) function_graph_enter+0xe4/0x1a0 prepare_ftrace_return+0xa0/0x130 ftrace_graph_caller+0x44/0x94 # (get_slice_psize()) slb_allocate_user+0x7c/0x100 do_slb_fault+0xf8/0x300 instruction_access_slb_common+0x140/0x180 Fixes: 48e7b7695745 ("powerpc/64s/hash: Convert SLB miss handlers to C") Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20191221121337.4894-1-mpe@ellerman.id.au --- arch/powerpc/mm/slice.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 42bbcd47cc85..dffe1a45b6ed 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -50,7 +50,7 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) { #endif -static inline bool slice_addr_is_low(unsigned long addr) +static inline notrace bool slice_addr_is_low(unsigned long addr) { u64 tmp = (u64)addr; @@ -659,7 +659,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, mm_ctx_user_psize(¤t->mm->context), 1); } -unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) +unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr) { unsigned char *psizes; int index, mask_index; -- cgit v1.2.3 From 4d47ce158efb7b145c680e79cd33e3c6fd773de4 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 22 Dec 2019 10:26:04 +0100 Subject: riscv: fix compile failure with EXPORT_SYMBOL() & !MMU When support for !MMU was added, the declaration of __asm_copy_to_user() & __asm_copy_from_user() were #ifdefed out hence their EXPORT_SYMBOL() give an error message like: .../riscv_ksyms.c:13:15: error: '__asm_copy_to_user' undeclared here .../riscv_ksyms.c:14:15: error: '__asm_copy_from_user' undeclared here Since these symbols are not defined with !MMU it's wrong to export them. Same for __clear_user() (even though this one is also declared in include/asm-generic/uaccess.h and thus doesn't give an error message). 
Fix this by doing the EXPORT_SYMBOL() directly where these symbols are defined: inside lib/uaccess.S itself. Fixes: 6bd33e1ece52 ("riscv: fix compile failure with EXPORT_SYMBOL() & !MMU") Reported-by: kbuild test robot Cc: Christoph Hellwig Cc: Palmer Dabbelt Cc: Paul Walmsley Signed-off-by: Luc Van Oostenryck Signed-off-by: Paul Walmsley --- arch/riscv/kernel/riscv_ksyms.c | 3 --- arch/riscv/lib/uaccess.S | 4 ++++ 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c index 4800cf703186..2a02b7eebee0 100644 --- a/arch/riscv/kernel/riscv_ksyms.c +++ b/arch/riscv/kernel/riscv_ksyms.c @@ -9,8 +9,5 @@ /* * Assembly functions that may be used (directly or indirectly) by modules */ -EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(__asm_copy_to_user); -EXPORT_SYMBOL(__asm_copy_from_user); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S index fecd65657a6f..f29d2ba2c0a6 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S @@ -1,4 +1,5 @@ #include +#include #include #include @@ -66,6 +67,8 @@ ENTRY(__asm_copy_from_user) j 3b ENDPROC(__asm_copy_to_user) ENDPROC(__asm_copy_from_user) +EXPORT_SYMBOL(__asm_copy_to_user) +EXPORT_SYMBOL(__asm_copy_from_user) ENTRY(__clear_user) @@ -108,6 +111,7 @@ ENTRY(__clear_user) bltu a0, a3, 5b j 3b ENDPROC(__clear_user) +EXPORT_SYMBOL(__clear_user) .section .fixup,"ax" .balign 4 -- cgit v1.2.3 From 556f47ac6083d778843e89aa21b1242eee2693ed Mon Sep 17 00:00:00 2001 From: David Abdurachmanov Date: Wed, 18 Dec 2019 10:47:56 +0200 Subject: riscv: reject invalid syscalls below -1 Running "stress-ng --enosys 4 -t 20 -v" showed a large number of kernel oops with "Unable to handle kernel paging request at virtual address" message. This happens when enosys stressor starts testing random non-valid syscalls. I forgot to redirect any syscall below -1 to sys_ni_syscall. With the patch kernel oops messages are gone while running stress-ng enosys stressor. Signed-off-by: David Abdurachmanov Fixes: 5340627e3fe0 ("riscv: add support for SECCOMP and SECCOMP_FILTER") Signed-off-by: Paul Walmsley --- arch/riscv/kernel/entry.S | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index a1349ca64669..e163b7b64c86 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -246,6 +246,7 @@ check_syscall_nr: */ li t1, -1 beq a7, t1, ret_from_syscall_rejected + blt a7, t1, 1f /* Call syscall */ la s0, sys_call_table slli t0, a7, RISCV_LGPTR -- cgit v1.2.3 From 1833e327a5ea1d1f356fbf6ded0760c9ff4b0594 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Mon, 16 Dec 2019 20:07:04 -0800 Subject: riscv: export flush_icache_all to modules This is needed by LKDTM (crash dump test module), it calls flush_icache_range(), which on RISC-V turns into flush_icache_all(). On other architectures, the actual implementation is exported, so follow that precedence and export it here too. Fixes build of CONFIG_LKDTM that fails with: ERROR: "flush_icache_all" [drivers/misc/lkdtm/lkdtm.ko] undefined! 
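A minimal sketch of the kind of module-context caller that needs the export (an assumed example, not LKDTM itself); per the above, flush_icache_range() on RISC-V resolves to flush_icache_all(), so a module using it cannot link unless the symbol is exported:

    #include <linux/module.h>
    #include <asm/cacheflush.h>

    static int __init icache_demo_init(void)
    {
            static char buf[64];

            /* ends up in flush_icache_all() on RISC-V */
            flush_icache_range((unsigned long)buf,
                               (unsigned long)buf + sizeof(buf));
            return 0;
    }
    module_init(icache_demo_init);
    MODULE_LICENSE("GPL");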
Signed-off-by: Olof Johansson Signed-off-by: Paul Walmsley --- arch/riscv/mm/cacheflush.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c index 8f1900686640..8930ab7278e6 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -22,6 +22,7 @@ void flush_icache_all(void) else on_each_cpu(ipi_remote_fence_i, NULL, 1); } +EXPORT_SYMBOL(flush_icache_all); /* * Performs an icache flush for the given MM context. RISC-V has no direct -- cgit v1.2.3 From 7ecc6c1d5c8dbc713c647512a5267ca0eafe3e1c Mon Sep 17 00:00:00 2001 From: Eugeniy Paltsev Date: Fri, 27 Dec 2019 21:03:43 +0300 Subject: ARC: pt_regs: remove hardcoded registers offset Replace hardcoded registers offset numbers by calculated via offsetof. Signed-off-by: Eugeniy Paltsev Signed-off-by: Vineet Gupta --- arch/arc/include/asm/entry-arcv2.h | 8 ++++---- arch/arc/kernel/asm-offsets.c | 9 +++++++++ 2 files changed, 13 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h index 41b16f21beec..0b8b63d0bec1 100644 --- a/arch/arc/include/asm/entry-arcv2.h +++ b/arch/arc/include/asm/entry-arcv2.h @@ -162,7 +162,7 @@ #endif #ifdef CONFIG_ARC_HAS_ACCL_REGS - ST2 r58, r59, PT_sp + 12 + ST2 r58, r59, PT_r58 #endif .endm @@ -172,8 +172,8 @@ LD2 gp, fp, PT_r26 ; gp (r26), fp (r27) - ld r12, [sp, PT_sp + 4] - ld r30, [sp, PT_sp + 8] + ld r12, [sp, PT_r12] + ld r30, [sp, PT_r30] ; Restore SP (into AUX_USER_SP) only if returning to U mode ; - for K mode, it will be implicitly restored as stack is unwound @@ -190,7 +190,7 @@ #endif #ifdef CONFIG_ARC_HAS_ACCL_REGS - LD2 r58, r59, PT_sp + 12 + LD2 r58, r59, PT_r58 #endif .endm diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c index 631ebb5d3458..c783bcd35eb8 100644 --- a/arch/arc/kernel/asm-offsets.c +++ b/arch/arc/kernel/asm-offsets.c @@ -67,5 +67,14 @@ int main(void) DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs)); DEFINE(SZ_PT_REGS, sizeof(struct pt_regs)); +#ifdef CONFIG_ISA_ARCV2 + OFFSET(PT_r12, pt_regs, r12); + OFFSET(PT_r30, pt_regs, r30); +#endif +#ifdef CONFIG_ARC_HAS_ACCL_REGS + OFFSET(PT_r58, pt_regs, r58); + OFFSET(PT_r59, pt_regs, r59); +#endif + return 0; } -- cgit v1.2.3 From 6da3eced8c5f3b03340b0c395bacd552c4d52411 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Mon, 23 Dec 2019 14:31:47 +0100 Subject: powerpc/spinlocks: Include correct header for static key MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Recently, the spinlock implementation grew a static key optimization, but the jump_label.h header include was left out, leading to build errors: linux/arch/powerpc/include/asm/spinlock.h:44:7: error: implicit declaration of function ‘static_branch_unlikely’ 44 | if (!static_branch_unlikely(&shared_processor)) This commit adds the missing header. mpe: The build break is only seen with CONFIG_JUMP_LABEL=n. Fixes: 656c21d6af5d ("powerpc/shared: Use static key to detect shared processor") Signed-off-by: Jason A. 
Donenfeld Reviewed-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20191223133147.129983-1-Jason@zx2c4.com --- arch/powerpc/include/asm/spinlock.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 1b55fc08f853..860228e917dc 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -15,6 +15,7 @@ * * (the type definitions are in asm/spinlock_types.h) */ +#include #include #ifdef CONFIG_PPC64 #include -- cgit v1.2.3 From bbcc5672b0063b0e9d65dc8787a4f09c3b5bb5cc Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Wed, 1 Jan 2020 20:50:38 -0800 Subject: MIPS: Avoid VDSO ABI breakage due to global register variable Declaring __current_thread_info as a global register variable has the effect of preventing GCC from saving & restoring its value in cases where the ABI would typically do so. To quote GCC documentation: > If the register is a call-saved register, call ABI is affected: the > register will not be restored in function epilogue sequences after the > variable has been assigned. Therefore, functions cannot safely return > to callers that assume standard ABI. When our position independent VDSO is built for the n32 or n64 ABIs all functions it exposes should be preserving the value of $gp/$28 for their caller, but in the presence of the __current_thread_info global register variable GCC stops doing so & simply clobbers $gp/$28 when calculating the address of the GOT. In cases where the VDSO returns success this problem will typically be masked by the caller in libc returning & restoring $gp/$28 itself, but that is by no means guaranteed. In cases where the VDSO returns an error libc will typically contain a fallback path which will now fail (typically with a bad memory access) if it attempts anything which relies upon the value of $gp/$28 - eg. accessing anything via the GOT. One fix for this would be to move the declaration of __current_thread_info inside the current_thread_info() function, demoting it from global register variable to local register variable & avoiding inadvertently creating a non-standard calling ABI for the VDSO. Unfortunately this causes issues for clang, which doesn't support local register variables as pointed out by commit fe92da0f355e ("MIPS: Changed current_thread_info() to an equivalent supported by both clang and GCC") which introduced the global register variable before we had a VDSO to worry about. Instead, fix this by continuing to use the global register variable for the kernel proper but declare __current_thread_info as a simple extern variable when building the VDSO. It should never be referenced, and will cause a link error if it is. This resolves the calling convention issue for the VDSO without having any impact upon the build of the kernel itself for either clang or gcc. Signed-off-by: Paul Burton Fixes: ebb5e78cc634 ("MIPS: Initial implementation of a VDSO") Reported-by: Jason A. Donenfeld Reviewed-by: Jason A. Donenfeld Tested-by: Jason A. 
Donenfeld Cc: Arnd Bergmann Cc: Christian Brauner Cc: Vincenzo Frascino Cc: # v4.4+ Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/include/asm/thread_info.h | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 4993db40482c..ee26f9a4575d 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -49,8 +49,26 @@ struct thread_info { .addr_limit = KERNEL_DS, \ } -/* How to get the thread information struct from C. */ +/* + * A pointer to the struct thread_info for the currently executing thread is + * held in register $28/$gp. + * + * We declare __current_thread_info as a global register variable rather than a + * local register variable within current_thread_info() because clang doesn't + * support explicit local register variables. + * + * When building the VDSO we take care not to declare the global register + * variable because this causes GCC to not preserve the value of $28/$gp in + * functions that change its value (which is common in the PIC VDSO when + * accessing the GOT). Since the VDSO shouldn't be accessing + * __current_thread_info anyway we declare it extern in order to cause a link + * failure if it's referenced. + */ +#ifdef __VDSO__ +extern struct thread_info *__current_thread_info; +#else register struct thread_info *__current_thread_info __asm__("$28"); +#endif static inline struct thread_info *current_thread_info(void) { -- cgit v1.2.3 From ac51e005fe1456a288929a41d71adc6224e912d2 Mon Sep 17 00:00:00 2001 From: Zong Li Date: Thu, 2 Jan 2020 11:12:40 +0800 Subject: riscv: mm: use __pa_symbol for kernel symbols __pa_symbol is the marcro that should be used for kernel symbols. It is also a pre-requisite for DEBUG_VIRTUAL which will do bounds checking. 
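A short sketch of the distinction (illustrative, with assumed includes; not code from the patch): __pa() remains the right tool for linear-map addresses, while kernel-image symbols such as _start and _end go through __pa_symbol(), which is also the form DEBUG_VIRTUAL can bounds-check:

    #include <linux/types.h>
    #include <linux/mm.h>           /* __pa(), __pa_symbol() */
    #include <asm/sections.h>       /* _end */

    static phys_addr_t phys_of(void *linear_buf)
    {
            phys_addr_t from_linear = __pa(linear_buf);    /* linear-map pointer */
            phys_addr_t from_image  = __pa_symbol(&_end);  /* kernel-image symbol */

            return from_linear ^ from_image;
    }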
Signed-off-by: Zong Li Reviewed-by: Anup Patel Signed-off-by: Paul Walmsley --- arch/riscv/mm/init.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 69f6678db7f3..965a8cf4829c 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -99,13 +99,13 @@ static void __init setup_initrd(void) pr_info("initrd not found or empty"); goto disable; } - if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { + if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) { pr_err("initrd extends beyond end of memory"); goto disable; } size = initrd_end - initrd_start; - memblock_reserve(__pa(initrd_start), size); + memblock_reserve(__pa_symbol(initrd_start), size); initrd_below_start_ok = 1; pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", @@ -124,8 +124,8 @@ void __init setup_bootmem(void) { struct memblock_region *reg; phys_addr_t mem_size = 0; - phys_addr_t vmlinux_end = __pa(&_end); - phys_addr_t vmlinux_start = __pa(&_start); + phys_addr_t vmlinux_end = __pa_symbol(&_end); + phys_addr_t vmlinux_start = __pa_symbol(&_start); /* Find the memory region containing the kernel */ for_each_memblock(memory, reg) { @@ -445,7 +445,7 @@ static void __init setup_vm_final(void) /* Setup swapper PGD for fixmap */ create_pgd_mapping(swapper_pg_dir, FIXADDR_START, - __pa(fixmap_pgd_next), + __pa_symbol(fixmap_pgd_next), PGDIR_SIZE, PAGE_TABLE); /* Map all memory banks */ @@ -474,7 +474,7 @@ static void __init setup_vm_final(void) clear_fixmap(FIX_PMD); /* Move to swapper page table */ - csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE); + csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE); local_flush_tlb_all(); } #else -- cgit v1.2.3 From 0da310e82d3a9bff6ef6b0f2fbf45d1a05cc64fe Mon Sep 17 00:00:00 2001 From: Zong Li Date: Thu, 2 Jan 2020 11:09:54 +0800 Subject: riscv: gcov: enable gcov for RISC-V This patch enables GCOV code coverage measurement on RISC-V. Lightly tested on QEMU and Hifive Unleashed board, seems to work as expected. 
Signed-off-by: Zong Li Reviewed-by: Anup Patel Acked-by: Jonathan Corbet Signed-off-by: Paul Walmsley --- arch/riscv/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index d8efbaa78d67..a31169b02ec0 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -64,6 +64,7 @@ config RISCV select SPARSEMEM_STATIC if 32BIT select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select HAVE_ARCH_MMAP_RND_BITS if MMU + select ARCH_HAS_GCOV_PROFILE_ALL config ARCH_MMAP_RND_BITS_MIN default 18 if 64BIT -- cgit v1.2.3
From cfda8617e22a8bf217a613d0b3ba3a38778443ba Mon Sep 17 00:00:00 2001 From: Yash Shah Date: Fri, 3 Jan 2020 09:43:20 +0530 Subject: riscv: dts: Add DT support for SiFive L2 cache controller Add the L2 cache controller DT node in the SiFive FU540 SoC-specific DT file. Signed-off-by: Yash Shah Reviewed-by: Palmer Dabbelt Signed-off-by: Paul Walmsley --- arch/riscv/boot/dts/sifive/fu540-c000.dtsi | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch') diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi index 70a1891e7cd0..a2e3d54e830c 100644 --- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi +++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi @@ -54,6 +54,7 @@ reg = <1>; riscv,isa = "rv64imafdc"; tlb-split; + next-level-cache = <&l2cache>; cpu1_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; @@ -77,6 +78,7 @@ reg = <2>; riscv,isa = "rv64imafdc"; tlb-split; + next-level-cache = <&l2cache>; cpu2_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; @@ -100,6 +102,7 @@ reg = <3>; riscv,isa = "rv64imafdc"; tlb-split; + next-level-cache = <&l2cache>; cpu3_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; @@ -123,6 +126,7 @@ reg = <4>; riscv,isa = "rv64imafdc"; tlb-split; + next-level-cache = <&l2cache>; cpu4_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; @@ -253,6 +257,17 @@ #pwm-cells = <3>; status = "disabled"; }; + l2cache: cache-controller@2010000 { + compatible = "sifive,fu540-c000-ccache", "cache"; + cache-block-size = <64>; + cache-level = <2>; + cache-sets = <1024>; + cache-size = <2097152>; + cache-unified; + interrupt-parent = <&plic0>; + interrupts = <1 2 3>; + reg = <0x0 0x2010000 0x0 0x1000>; + }; }; }; -- cgit v1.2.3
From 1d8f65798240b6577d8c44d20c8ea8f1d429e495 Mon Sep 17 00:00:00 2001 From: Zong Li Date: Mon, 23 Dec 2019 16:46:13 +0800 Subject: riscv: ftrace: correct the condition logic in function graph tracer The condition needs a logical NOT when deciding whether to assign the hook address to the parent address, because function_graph_enter() returns 0 on success.
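Spelled out, the intended logic is simply the following (the same change the diff below makes):

    /* function_graph_enter() returns 0 on success */
    if (!function_graph_enter(old, self_addr, frame_pointer, parent))
            *parent = return_hooker;        /* install the hook only on success */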
Fixes: e949b6db51dc (riscv/function_graph: Simplify with function_graph_enter()) Signed-off-by: Zong Li Reviewed-by: Steven Rostedt (VMware) Cc: stable@vger.kernel.org Signed-off-by: Paul Walmsley --- arch/riscv/kernel/ftrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index b94d8db5ddcc..c40fdcdeb950 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -142,7 +142,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, */ old = *parent; - if (function_graph_enter(old, self_addr, frame_pointer, parent)) + if (!function_graph_enter(old, self_addr, frame_pointer, parent)) *parent = return_hooker; } -- cgit v1.2.3 From feee6b2989165631b17ac6d4ccdbf6759254e85a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Sat, 4 Jan 2020 12:59:33 -0800 Subject: mm/memory_hotplug: shrink zones when offlining memory We currently try to shrink a single zone when removing memory. We use the zone of the first page of the memory we are removing. If that memmap was never initialized (e.g., memory was never onlined), we will read garbage and can trigger kernel BUGs (due to a stale pointer): BUG: unable to handle page fault for address: 000000000000353d #PF: supervisor write access in kernel mode #PF: error_code(0x0002) - not-present page PGD 0 P4D 0 Oops: 0002 [#1] SMP PTI CPU: 1 PID: 7 Comm: kworker/u8:0 Not tainted 5.3.0-rc5-next-20190820+ #317 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.4 Workqueue: kacpi_hotplug acpi_hotplug_work_fn RIP: 0010:clear_zone_contiguous+0x5/0x10 Code: 48 89 c6 48 89 c3 e8 2a fe ff ff 48 85 c0 75 cf 5b 5d c3 c6 85 fd 05 00 00 01 5b 5d c3 0f 1f 840 RSP: 0018:ffffad2400043c98 EFLAGS: 00010246 RAX: 0000000000000000 RBX: 0000000200000000 RCX: 0000000000000000 RDX: 0000000000200000 RSI: 0000000000140000 RDI: 0000000000002f40 RBP: 0000000140000000 R08: 0000000000000000 R09: 0000000000000001 R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000140000 R13: 0000000000140000 R14: 0000000000002f40 R15: ffff9e3e7aff3680 FS: 0000000000000000(0000) GS:ffff9e3e7bb00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000000000000353d CR3: 0000000058610000 CR4: 00000000000006e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: __remove_pages+0x4b/0x640 arch_remove_memory+0x63/0x8d try_remove_memory+0xdb/0x130 __remove_memory+0xa/0x11 acpi_memory_device_remove+0x70/0x100 acpi_bus_trim+0x55/0x90 acpi_device_hotplug+0x227/0x3a0 acpi_hotplug_work_fn+0x1a/0x30 process_one_work+0x221/0x550 worker_thread+0x50/0x3b0 kthread+0x105/0x140 ret_from_fork+0x3a/0x50 Modules linked in: CR2: 000000000000353d Instead, shrink the zones when offlining memory or when onlining failed. Introduce and use remove_pfn_range_from_zone(() for that. We now properly shrink the zones, even if we have DIMMs whereby - Some memory blocks fall into no zone (never onlined) - Some memory blocks fall into multiple zones (offlined+re-onlined) - Multiple memory blocks that fall into different zones Drop the zone parameter (with a potential dubious value) from __remove_pages() and __remove_section(). 
Link: http://lkml.kernel.org/r/20191006085646.5768-6-david@redhat.com Fixes: f1dd2cd13c4b ("mm, memory_hotplug: do not associate hotadded memory to zones until online") [visible after d0dc12e86b319] Signed-off-by: David Hildenbrand Reviewed-by: Oscar Salvador Cc: Michal Hocko Cc: "Matthew Wilcox (Oracle)" Cc: "Aneesh Kumar K.V" Cc: Pavel Tatashin Cc: Greg Kroah-Hartman Cc: Dan Williams Cc: Logan Gunthorpe Cc: [5.0+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/mm/mmu.c | 4 +--- arch/ia64/mm/init.c | 4 +--- arch/powerpc/mm/mem.c | 3 +-- arch/s390/mm/init.c | 4 +--- arch/sh/mm/init.c | 4 +--- arch/x86/mm/init_32.c | 4 +--- arch/x86/mm/init_64.c | 4 +--- 7 files changed, 7 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 5a3b15a14a7f..40797cbfba2d 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1070,7 +1070,6 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; /* * FIXME: Cleanup page tables (also in arch_add_memory() in case @@ -1079,7 +1078,6 @@ void arch_remove_memory(int nid, u64 start, u64 size, * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be * unlocked yet. */ - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 58fd67068bac..b01d68a2d5d9 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 617c2777926f..f5535eae637f 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -151,10 +151,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap); int ret; - __remove_pages(page_zone(page), start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); /* Remove htab bolted mappings for this section of memory */ start = (unsigned long)__va(start); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index f0ce22220565..ac44bd76db4b 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -292,10 +292,8 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); vmem_remove_mapping(start, size); } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index dfdbaa50946e..d1b1ff2be17a 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = PFN_DOWN(start); unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + 
__remove_pages(start_pfn, nr_pages, altmap); } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 930edeb41ec3..0a74407ef92e 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index dcb9bc961b39..bcfede46fe02 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap); - struct zone *zone = page_zone(page); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); kernel_physical_mapping_remove(start, start + size); } #endif /* CONFIG_MEMORY_HOTPLUG */ -- cgit v1.2.3 From 780a0cfda9006a9a22d6473c2d4c527f5c68eb2e Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Sat, 4 Jan 2020 12:59:59 -0800 Subject: hexagon: parenthesize registers in asm predicates Hexagon requires that register predicates in assembly be parenthesized. Link: https://github.com/ClangBuiltLinux/linux/issues/754 Link: http://lkml.kernel.org/r/20191209222956.239798-3-ndesaulniers@google.com Signed-off-by: Nick Desaulniers Suggested-by: Sid Manning Acked-by: Brian Cain Cc: Lee Jones Cc: Andy Shevchenko Cc: Tuowen Zhao Cc: Mika Westerberg Cc: Luis Chamberlain Cc: Greg Kroah-Hartman Cc: Alexios Zavras Cc: Allison Randal Cc: Will Deacon Cc: Richard Fontana Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Boqun Feng Cc: Ingo Molnar Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/hexagon/include/asm/atomic.h | 8 ++++---- arch/hexagon/include/asm/bitops.h | 8 ++++---- arch/hexagon/include/asm/cmpxchg.h | 2 +- arch/hexagon/include/asm/futex.h | 6 +++--- arch/hexagon/include/asm/spinlock.h | 20 ++++++++++---------- arch/hexagon/kernel/vm_entry.S | 2 +- 6 files changed, 23 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 12cd9231c4b8..0231d69c8bf2 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ "1: %0 = memw_locked(%1);\n" \ " %0 = "#op "(%0,%2);\n" \ " memw_locked(%1,P3)=%0;\n" \ - " if !P3 jump 1b;\n" \ + " if (!P3) jump 1b;\n" \ : "=&r" (output) \ : "r" (&v->counter), "r" (i) \ : "memory", "p3" \ @@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ "1: %0 = memw_locked(%1);\n" \ " %0 = "#op "(%0,%2);\n" \ " memw_locked(%1,P3)=%0;\n" \ - " if !P3 jump 1b;\n" \ + " if (!P3) jump 1b;\n" \ : "=&r" (output) \ : "r" (&v->counter), "r" (i) \ : "memory", "p3" \ @@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ "1: %0 = memw_locked(%2);\n" \ " %1 = "#op "(%0,%3);\n" \ " memw_locked(%2,P3)=%1;\n" \ - " if !P3 jump 1b;\n" \ + " if (!P3) jump 1b;\n" \ : "=&r" (output), "=&r" (val) \ : "r" (&v->counter), "r" (i) \ : "memory", "p3" \ @@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, 
int a, int u) " }" " memw_locked(%2, p3) = %1;" " {" - " if !p3 jump 1b;" + " if (!p3) jump 1b;" " }" "2:" : "=&r" (__oldval), "=&r" (tmp) diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h index 47384b094b94..71429f756af0 100644 --- a/arch/hexagon/include/asm/bitops.h +++ b/arch/hexagon/include/asm/bitops.h @@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr) "1: R12 = memw_locked(R10);\n" " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n" " memw_locked(R10,P1) = R12;\n" - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" : "=&r" (oldval) : "r" (addr), "r" (nr) : "r10", "r11", "r12", "p0", "p1", "memory" @@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr) "1: R12 = memw_locked(R10);\n" " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n" " memw_locked(R10,P1) = R12;\n" - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" : "=&r" (oldval) : "r" (addr), "r" (nr) : "r10", "r11", "r12", "p0", "p1", "memory" @@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr) "1: R12 = memw_locked(R10);\n" " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n" " memw_locked(R10,P1) = R12;\n" - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" : "=&r" (oldval) : "r" (addr), "r" (nr) : "r10", "r11", "r12", "p0", "p1", "memory" @@ -223,7 +223,7 @@ static inline int ffs(int x) int r; asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n" - "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n" + "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n" : "=&r" (r) : "r" (x) : "p0"); diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h index 6091322c3af9..92b8a02e588a 100644 --- a/arch/hexagon/include/asm/cmpxchg.h +++ b/arch/hexagon/include/asm/cmpxchg.h @@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, __asm__ __volatile__ ( "1: %0 = memw_locked(%1);\n" /* load into retval */ " memw_locked(%1,P0) = %2;\n" /* store into memory */ - " if !P0 jump 1b;\n" + " if (!P0) jump 1b;\n" : "=&r" (retval) : "r" (ptr), "r" (x) : "memory", "p0" diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h index cb635216a732..0191f7c7193e 100644 --- a/arch/hexagon/include/asm/futex.h +++ b/arch/hexagon/include/asm/futex.h @@ -16,7 +16,7 @@ /* For example: %1 = %4 */ \ insn \ "2: memw_locked(%3,p2) = %1;\n" \ - " if !p2 jump 1b;\n" \ + " if (!p2) jump 1b;\n" \ " %1 = #0;\n" \ "3:\n" \ ".section .fixup,\"ax\"\n" \ @@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, "1: %1 = memw_locked(%3)\n" " {\n" " p2 = cmp.eq(%1,%4)\n" - " if !p2.new jump:NT 3f\n" + " if (!p2.new) jump:NT 3f\n" " }\n" "2: memw_locked(%3,p2) = %5\n" - " if !p2 jump 1b\n" + " if (!p2) jump 1b\n" "3:\n" ".section .fixup,\"ax\"\n" "4: %0 = #%6\n" diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h index bfe07d842ff3..ef103b73bec8 100644 --- a/arch/hexagon/include/asm/spinlock.h +++ b/arch/hexagon/include/asm/spinlock.h @@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock) __asm__ __volatile__( "1: R6 = memw_locked(%0);\n" " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" " memw_locked(%0,P3) = R6;\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -46,7 
+46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock) "1: R6 = memw_locked(%0);\n" " R6 = add(R6,#-1);\n" " memw_locked(%0,P3) = R6\n" - " if !P3 jump 1b;\n" + " if (!P3) jump 1b;\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock) __asm__ __volatile__( " R6 = memw_locked(%1);\n" " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" - " { if !P3 jump 1f; }\n" + " { if (!P3) jump 1f; }\n" " memw_locked(%1,P3) = R6;\n" " { %0 = P3 }\n" "1:\n" @@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock) __asm__ __volatile__( "1: R6 = memw_locked(%0)\n" " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" " memw_locked(%0,P3) = R6;\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock) __asm__ __volatile__( " R6 = memw_locked(%1)\n" " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n" - " { if !P3 jump 1f; }\n" + " { if (!P3) jump 1f; }\n" " memw_locked(%1,P3) = R6;\n" " %0 = P3;\n" "1:\n" @@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "1: R6 = memw_locked(%0);\n" " P3 = cmp.eq(R6,#0);\n" - " { if !P3 jump 1b; R6 = #1; }\n" + " { if (!P3) jump 1b; R6 = #1; }\n" " memw_locked(%0,P3) = R6;\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) __asm__ __volatile__( " R6 = memw_locked(%1);\n" " P3 = cmp.eq(R6,#0);\n" - " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n" + " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n" " memw_locked(%1,P3) = R6;\n" " %0 = P3;\n" "1:\n" diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S index 12242c27e2df..4023fdbea490 100644 --- a/arch/hexagon/kernel/vm_entry.S +++ b/arch/hexagon/kernel/vm_entry.S @@ -369,7 +369,7 @@ ret_from_fork: R26.L = #LO(do_work_pending); R0 = #VM_INT_DISABLE; } - if P0 jump check_work_pending + if (P0) jump check_work_pending { R0 = R25; callr R24 -- cgit v1.2.3 From 63e80314ab7cf4783526d2e44ee57a90514911c9 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Sat, 4 Jan 2020 13:00:02 -0800 Subject: hexagon: work around compiler crash Clang cannot translate the string "r30" into a valid register yet. 
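The replacement used in the diff below, __builtin_frame_address(0), is understood by both GCC and Clang; a stand-alone user-space sketch of the builtin (illustrative only, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            void *fp = __builtin_frame_address(0);  /* current frame pointer */

            printf("frame pointer: %p\n", fp);
            return 0;
    }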
Link: https://github.com/ClangBuiltLinux/linux/issues/755 Link: http://lkml.kernel.org/r/20191028155722.23419-1-ndesaulniers@google.com Signed-off-by: Nick Desaulniers Suggested-by: Sid Manning Reviewed-by: Brian Cain Cc: Allison Randal Cc: Greg Kroah-Hartman Cc: Richard Fontana Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/hexagon/kernel/stacktrace.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/hexagon/kernel/stacktrace.c b/arch/hexagon/kernel/stacktrace.c index 35f29423fda8..5ed02f699479 100644 --- a/arch/hexagon/kernel/stacktrace.c +++ b/arch/hexagon/kernel/stacktrace.c @@ -11,8 +11,6 @@ #include #include -register unsigned long current_frame_pointer asm("r30"); - struct stackframe { unsigned long fp; unsigned long rets; @@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace) low = (unsigned long)task_stack_page(current); high = low + THREAD_SIZE; - fp = current_frame_pointer; + fp = (unsigned long)__builtin_frame_address(0); while (fp >= low && fp <= (high - sizeof(*frame))) { frame = (struct stackframe *)fp; -- cgit v1.2.3 From 7312b70699252074d753c5005fc67266c547bbe3 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Sat, 4 Jan 2020 13:00:26 -0800 Subject: hexagon: define ioremap_uc Similar to commit 38e45d81d14e ("sparc64: implement ioremap_uc") define ioremap_uc for hexagon to avoid errors from -Wimplicit-function-definition. Link: http://lkml.kernel.org/r/20191209222956.239798-2-ndesaulniers@google.com Link: https://github.com/ClangBuiltLinux/linux/issues/797 Fixes: e537654b7039 ("lib: devres: add a helper function for ioremap_uc") Signed-off-by: Nick Desaulniers Suggested-by: Nathan Chancellor Acked-by: Brian Cain Cc: Lee Jones Cc: Andy Shevchenko Cc: Tuowen Zhao Cc: Mika Westerberg Cc: Luis Chamberlain Cc: Greg Kroah-Hartman Cc: Alexios Zavras Cc: Allison Randal Cc: Will Deacon Cc: Richard Fontana Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Boqun Feng Cc: Ingo Molnar Cc: Geert Uytterhoeven Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/hexagon/include/asm/io.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h index 539e3efcf39c..b0dbc3473172 100644 --- a/arch/hexagon/include/asm/io.h +++ b/arch/hexagon/include/asm/io.h @@ -173,6 +173,7 @@ static inline void writel(u32 data, volatile void __iomem *addr) void __iomem *ioremap(unsigned long phys_addr, unsigned long size); #define ioremap_nocache ioremap +#define ioremap_uc(X, Y) ioremap((X), (Y)) #define __raw_writel writel -- cgit v1.2.3 From 2f3035da4019780250658d1ffe486bc324e04805 Mon Sep 17 00:00:00 2001 From: Paul Walmsley Date: Fri, 20 Dec 2019 03:09:49 -0800 Subject: riscv: prefix IRQ_ macro names with an RV_ namespace "IRQ_TIMER", used in the arch/riscv CSR header file, is a sufficiently generic macro name that it's used by several source files across the Linux code base. Some of these other files ultimately include the arch/riscv CSR include file, causing collisions. Fix by prefixing the RISC-V csr.h IRQ_ macro names with an RV_ prefix. 
Fixes: a4c3733d32a72 ("riscv: abstract out CSR names for supervisor vs machine mode") Reported-by: Olof Johansson Acked-by: Olof Johansson Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/csr.h | 18 +++++++++--------- arch/riscv/kernel/irq.c | 6 +++--- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 0a62d2d68455..435b65532e29 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -116,9 +116,9 @@ # define SR_PIE SR_MPIE # define SR_PP SR_MPP -# define IRQ_SOFT IRQ_M_SOFT -# define IRQ_TIMER IRQ_M_TIMER -# define IRQ_EXT IRQ_M_EXT +# define RV_IRQ_SOFT IRQ_M_SOFT +# define RV_IRQ_TIMER IRQ_M_TIMER +# define RV_IRQ_EXT IRQ_M_EXT #else /* CONFIG_RISCV_M_MODE */ # define CSR_STATUS CSR_SSTATUS # define CSR_IE CSR_SIE @@ -133,15 +133,15 @@ # define SR_PIE SR_SPIE # define SR_PP SR_SPP -# define IRQ_SOFT IRQ_S_SOFT -# define IRQ_TIMER IRQ_S_TIMER -# define IRQ_EXT IRQ_S_EXT +# define RV_IRQ_SOFT IRQ_S_SOFT +# define RV_IRQ_TIMER IRQ_S_TIMER +# define RV_IRQ_EXT IRQ_S_EXT #endif /* CONFIG_RISCV_M_MODE */ /* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */ -#define IE_SIE (_AC(0x1, UL) << IRQ_SOFT) -#define IE_TIE (_AC(0x1, UL) << IRQ_TIMER) -#define IE_EIE (_AC(0x1, UL) << IRQ_EXT) +#define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT) +#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER) +#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT) #ifndef __ASSEMBLY__ diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c index 3f07a91d5afb..345c4f2eba13 100644 --- a/arch/riscv/kernel/irq.c +++ b/arch/riscv/kernel/irq.c @@ -23,11 +23,11 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs) irq_enter(); switch (regs->cause & ~CAUSE_IRQ_FLAG) { - case IRQ_TIMER: + case RV_IRQ_TIMER: riscv_timer_interrupt(); break; #ifdef CONFIG_SMP - case IRQ_SOFT: + case RV_IRQ_SOFT: /* * We only use software interrupts to pass IPIs, so if a non-SMP * system gets one, then we don't know what to do. @@ -35,7 +35,7 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs) riscv_software_interrupt(); break; #endif - case IRQ_EXT: + case RV_IRQ_EXT: handle_arch_irq(regs); break; default: -- cgit v1.2.3 From 24cecc37746393432d994c0dbc251fb9ac7c5d72 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 6 Jan 2020 14:35:39 +0000 Subject: arm64: Revert support for execute-only user mappings The ARMv8 64-bit architecture supports execute-only user permissions by clearing the PTE_USER and PTE_UXN bits, practically making it a mostly privileged mapping but from which user running at EL0 can still execute. The downside, however, is that the kernel at EL1 inadvertently reading such mapping would not trip over the PAN (privileged access never) protection. Revert the relevant bits from commit cab15ce604e5 ("arm64: Introduce execute-only page access permissions") so that PROT_EXEC implies PROT_READ (and therefore PTE_USER) until the architecture gains proper support for execute-only user mappings. 
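The user-visible effect is that a PROT_EXEC-only request now behaves as PROT_READ|PROT_EXEC; a small user-space sketch of an affected mapping (illustrative only):

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            /* Before the revert this could yield a true execute-only
             * mapping; after it, the mapping is also readable because
             * PROT_EXEC implies PROT_READ. */
            void *p = mmap(NULL, 4096, PROT_EXEC,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    perror("mmap");
            return 0;
    }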
Fixes: cab15ce604e5 ("arm64: Introduce execute-only page access permissions") Cc: # 4.9.x- Acked-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/pgtable-prot.h | 5 ++--- arch/arm64/include/asm/pgtable.h | 10 +++------- arch/arm64/mm/fault.c | 2 +- 3 files changed, 6 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 8dc6c5cdabe6..baf52baaa2a5 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -85,13 +85,12 @@ #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN) -#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN) #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_READONLY #define __P011 PAGE_READONLY -#define __P100 PAGE_EXECONLY +#define __P100 PAGE_READONLY_EXEC #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_READONLY_EXEC #define __P111 PAGE_READONLY_EXEC @@ -100,7 +99,7 @@ #define __S001 PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED -#define __S100 PAGE_EXECONLY +#define __S100 PAGE_READONLY_EXEC #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 5d15b4735a0e..cd5de0e40bfa 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) -/* - * Execute-only user mappings do not have the PTE_USER bit set. All valid - * kernel mappings have the PTE_UXN bit set. - */ #define pte_valid_not_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) #define pte_valid_young(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) #define pte_valid_user(pte) \ @@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; /* * p??_access_permitted() is true for valid user mappings (subject to the - * write permission check) other than user execute-only which do not have the - * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set. + * write permission check). PROT_NONE mappings do not have the PTE_VALID bit + * set. 
*/ #define pte_access_permitted(pte, write) \ (pte_valid_user(pte) && (!(write) || pte_write(pte))) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 077b02a2d4d3..85566d32958f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -445,7 +445,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, const struct fault_info *inf; struct mm_struct *mm = current->mm; vm_fault_t fault, major = 0; - unsigned long vm_flags = VM_READ | VM_WRITE; + unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; if (kprobe_page_fault(regs, esr)) -- cgit v1.2.3 From 3e3c8ca5a351350031f0f3d5ecedf7048b1b9008 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Thu, 2 Jan 2020 18:24:07 +0100 Subject: arm64: Move __ARCH_WANT_SYS_CLONE3 definition to uapi headers Previously this was only defined in the internal headers which resulted in __NR_clone3 not being defined in the user headers. Signed-off-by: Amanieu d'Antras Cc: linux-arm-kernel@lists.infradead.org Cc: # 5.3.x Reviewed-by: Arnd Bergmann Link: https://lore.kernel.org/r/20200102172413.654385-2-amanieu@gmail.com Signed-off-by: Christian Brauner --- arch/arm64/include/asm/unistd.h | 1 - arch/arm64/include/uapi/asm/unistd.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 2629a68b8724..5af82587909e 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -42,7 +42,6 @@ #endif #define __ARCH_WANT_SYS_CLONE -#define __ARCH_WANT_SYS_CLONE3 #ifndef __COMPAT_SYSCALL_NR #include diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h index 4703d218663a..f83a70e07df8 100644 --- a/arch/arm64/include/uapi/asm/unistd.h +++ b/arch/arm64/include/uapi/asm/unistd.h @@ -19,5 +19,6 @@ #define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_TIME32_SYSCALLS +#define __ARCH_WANT_SYS_CLONE3 #include -- cgit v1.2.3 From a4376f2fbcc8084832f2f114577c8d68234c7903 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Thu, 2 Jan 2020 18:24:08 +0100 Subject: arm64: Implement copy_thread_tls This is required for clone3 which passes the TLS value through a struct rather than a register. 
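For context, a hedged user-space sketch of why the TLS value now has to be handed to copy_thread_tls() explicitly: clone3() carries it in struct clone_args (field names per include/uapi/linux/sched.h), not in the syscall argument register that the old code read back out of childregs. The sketch assumes kernel headers new enough to provide struct clone_args and __NR_clone3.

    #include <linux/sched.h>      /* struct clone_args, CLONE_SETTLS */
    #include <sys/syscall.h>
    #include <string.h>
    #include <unistd.h>

    static long clone3_with_tls(void *tls_block)
    {
            struct clone_args args;

            memset(&args, 0, sizeof(args));
            args.flags = CLONE_SETTLS;
            /* The TLS pointer travels in the struct, no longer in a register. */
            args.tls = (__u64)(unsigned long)tls_block;

            return syscall(__NR_clone3, &args, sizeof(args));
    }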
Signed-off-by: Amanieu d'Antras Cc: linux-arm-kernel@lists.infradead.org Cc: # 5.3.x Acked-by: Will Deacon Link: https://lore.kernel.org/r/20200102172413.654385-3-amanieu@gmail.com Signed-off-by: Christian Brauner --- arch/arm64/Kconfig | 1 + arch/arm64/kernel/process.c | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b1b4476ddb83..e688dfad0b72 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -138,6 +138,7 @@ config ARM64 select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_CONTEXT_TRACKING + select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 71f788cd2b18..d54586d5b031 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -360,8 +360,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) asmlinkage void ret_from_fork(void) asm("ret_from_fork"); -int copy_thread(unsigned long clone_flags, unsigned long stack_start, - unsigned long stk_sz, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start, + unsigned long stk_sz, struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); @@ -394,11 +394,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, } /* - * If a TLS pointer was passed to clone (4th argument), use it - * for the new thread. + * If a TLS pointer was passed to clone, use it for the new + * thread. */ if (clone_flags & CLONE_SETTLS) - p->thread.uw.tp_value = childregs->regs[3]; + p->thread.uw.tp_value = tls; } else { memset(childregs, 0, sizeof(struct pt_regs)); childregs->pstate = PSR_MODE_EL1h; -- cgit v1.2.3 From 167ee0b82429cb5df272808c7a21370b7c961ab2 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Thu, 2 Jan 2020 18:24:09 +0100 Subject: arm: Implement copy_thread_tls This is required for clone3 which passes the TLS value through a struct rather than a register. 
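The arch hook only takes effect once the generic fork code uses the new path; selecting HAVE_COPY_THREAD_TLS switches kernel/fork.c from the legacy hook to the TLS-aware one, roughly as follows (a paraphrased sketch, not the verbatim upstream source):

    #ifdef CONFIG_HAVE_COPY_THREAD_TLS
            retval = copy_thread_tls(clone_flags, args->stack, args->stack_size,
                                     p, args->tls);
    #else
            /* Legacy path: the TLS value must be recovered from the child's
             * pt_regs, which only works for the register-based clone() ABI. */
            retval = copy_thread(clone_flags, args->stack, args->stack_size, p);
    #endif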
Signed-off-by: Amanieu d'Antras Cc: linux-arm-kernel@lists.infradead.org Cc: # 5.3.x Link: https://lore.kernel.org/r/20200102172413.654385-4-amanieu@gmail.com Signed-off-by: Christian Brauner --- arch/arm/Kconfig | 1 + arch/arm/kernel/process.c | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ba75e3661a41..96dab76da3b3 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -72,6 +72,7 @@ config ARM select HAVE_ARM_SMCCC if CPU_V7 select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32 select HAVE_CONTEXT_TRACKING + select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS if MMU diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index cea1c27c29cb..46e478fb5ea2 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -226,8 +226,8 @@ void release_thread(struct task_struct *dead_task) asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); int -copy_thread(unsigned long clone_flags, unsigned long stack_start, - unsigned long stk_sz, struct task_struct *p) +copy_thread_tls(unsigned long clone_flags, unsigned long stack_start, + unsigned long stk_sz, struct task_struct *p, unsigned long tls) { struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); @@ -261,7 +261,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, clear_ptrace_hw_breakpoint(p); if (clone_flags & CLONE_SETTLS) - thread->tp_value[0] = childregs->ARM_r3; + thread->tp_value[0] = tls; thread->tp_value[1] = get_tpuser(); thread_notify(THREAD_NOTIFY_COPY, thread); -- cgit v1.2.3 From d2f36c787b2181561d8b95814f8cdad64b348ad7 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Thu, 2 Jan 2020 18:24:10 +0100 Subject: parisc: Implement copy_thread_tls This is required for clone3 which passes the TLS value through a struct rather than a register. 
Signed-off-by: Amanieu d'Antras Cc: linux-parisc@vger.kernel.org Cc: # 5.3.x Link: https://lore.kernel.org/r/20200102172413.654385-5-amanieu@gmail.com Signed-off-by: Christian Brauner --- arch/parisc/Kconfig | 1 + arch/parisc/kernel/process.c | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index b16237c95ea3..0c29d6cb2c8d 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -62,6 +62,7 @@ config PARISC select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE select HAVE_KPROBES_ON_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_COPY_THREAD_TLS help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index ecc5c2771208..230a6422b99f 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -208,8 +208,8 @@ arch_initcall(parisc_idle_init); * Copy architecture-specific thread state */ int -copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long kthread_arg, struct task_struct *p) +copy_thread_tls(unsigned long clone_flags, unsigned long usp, + unsigned long kthread_arg, struct task_struct *p, unsigned long tls) { struct pt_regs *cregs = &(p->thread.regs); void *stack = task_stack_page(p); @@ -254,9 +254,9 @@ copy_thread(unsigned long clone_flags, unsigned long usp, cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE; cregs->kpc = (unsigned long) &child_return; - /* Setup thread TLS area from the 4th parameter in clone */ + /* Setup thread TLS area */ if (clone_flags & CLONE_SETTLS) - cregs->cr27 = cregs->gr[23]; + cregs->cr27 = tls; } return 0; -- cgit v1.2.3 From 20bda4ed62f507ed72e30e817b43c65fdba60be7 Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Thu, 2 Jan 2020 18:24:11 +0100 Subject: riscv: Implement copy_thread_tls This is required for clone3 which passes the TLS value through a struct rather than a register. 
Signed-off-by: Amanieu d'Antras Cc: linux-riscv@lists.infradead.org Cc: # 5.3.x Link: https://lore.kernel.org/r/20200102172413.654385-6-amanieu@gmail.com Signed-off-by: Christian Brauner --- arch/riscv/Kconfig | 1 + arch/riscv/kernel/process.c | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index a31169b02ec0..fa7dc03459e7 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -65,6 +65,7 @@ config RISCV select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select HAVE_ARCH_MMAP_RND_BITS if MMU select ARCH_HAS_GCOV_PROFILE_ALL + select HAVE_COPY_THREAD_TLS config ARCH_MMAP_RND_BITS_MIN default 18 if 64BIT diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 95a3031e5c7c..817cf7b0974c 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -99,8 +99,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return 0; } -int copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long usp, + unsigned long arg, struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); @@ -121,7 +121,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, if (usp) /* User fork */ childregs->sp = usp; if (clone_flags & CLONE_SETTLS) - childregs->tp = childregs->a5; + childregs->tp = tls; childregs->a0 = 0; /* Return value of fork() */ p->thread.ra = (unsigned long)ret_from_fork; } -- cgit v1.2.3 From c346b94f8c5d1b7d637522c908209de93305a8eb Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Thu, 2 Jan 2020 18:24:12 +0100 Subject: xtensa: Implement copy_thread_tls This is required for clone3 which passes the TLS value through a struct rather than a register. Signed-off-by: Amanieu d'Antras Cc: linux-xtensa@linux-xtensa.org Cc: # 5.3.x Link: https://lore.kernel.org/r/20200102172413.654385-7-amanieu@gmail.com Signed-off-by: Christian Brauner --- arch/xtensa/Kconfig | 1 + arch/xtensa/kernel/process.c | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 4a3fa295d8fe..296c5324dace 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -24,6 +24,7 @@ config XTENSA select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL select HAVE_ARCH_TRACEHOOK + select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS select HAVE_EXIT_THREAD diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 9e1c49134c07..3edecc41ef8c 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -202,8 +202,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) * involved. Much simpler to just not copy those live frames across. 
*/ -int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, - unsigned long thread_fn_arg, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long usp_thread_fn, + unsigned long thread_fn_arg, struct task_struct *p, + unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); @@ -266,9 +267,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, childregs->syscall = regs->syscall; - /* The thread pointer is passed in the '4th argument' (= a5) */ if (clone_flags & CLONE_SETTLS) - childregs->threadptr = childregs->areg[5]; + childregs->threadptr = tls; } else { p->thread.ra = MAKE_RA_FOR_CALL( (unsigned long)ret_from_kernel_thread, 1); -- cgit v1.2.3 From 457677c70c7672a4586b0b8abc396cc1ecdd376d Mon Sep 17 00:00:00 2001 From: Amanieu d'Antras Date: Sat, 4 Jan 2020 13:39:30 +0100 Subject: um: Implement copy_thread_tls This is required for clone3 which passes the TLS value through a struct rather than a register. Signed-off-by: Amanieu d'Antras Cc: linux-um@lists.infradead.org Cc: # 5.3.x Link: https://lore.kernel.org/r/20200104123928.1048822-1-amanieu@gmail.com Signed-off-by: Christian Brauner --- arch/um/Kconfig | 1 + arch/um/include/asm/ptrace-generic.h | 2 +- arch/um/kernel/process.c | 6 +++--- arch/x86/um/tls_32.c | 6 ++---- arch/x86/um/tls_64.c | 7 +++---- 5 files changed, 10 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 2a6d04fcb3e9..6f0edd0c0220 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -14,6 +14,7 @@ config UML select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_BUGVERBOSE + select HAVE_COPY_THREAD_TLS select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h index 81c647ef9c6c..adf91ef553ae 100644 --- a/arch/um/include/asm/ptrace-generic.h +++ b/arch/um/include/asm/ptrace-generic.h @@ -36,7 +36,7 @@ extern long subarch_ptrace(struct task_struct *child, long request, extern unsigned long getreg(struct task_struct *child, int regno); extern int putreg(struct task_struct *child, int regno, unsigned long value); -extern int arch_copy_tls(struct task_struct *new); +extern int arch_set_tls(struct task_struct *new, unsigned long tls); extern void clear_flushed_tls(struct task_struct *task); extern int syscall_trace_enter(struct pt_regs *regs); extern void syscall_trace_leave(struct pt_regs *regs); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 263a8f069133..17045e7211bf 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -153,8 +153,8 @@ void fork_handler(void) userspace(¤t->thread.regs.regs, current_thread_info()->aux_fp_regs); } -int copy_thread(unsigned long clone_flags, unsigned long sp, - unsigned long arg, struct task_struct * p) +int copy_thread_tls(unsigned long clone_flags, unsigned long sp, + unsigned long arg, struct task_struct * p, unsigned long tls) { void (*handler)(void); int kthread = current->flags & PF_KTHREAD; @@ -188,7 +188,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, * Set a new TLS for the child thread? 
*/ if (clone_flags & CLONE_SETTLS) - ret = arch_copy_tls(p); + ret = arch_set_tls(p, tls); } return ret;
diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c index 5bd949da7a4a..ac8eee093f9c 100644 --- a/arch/x86/um/tls_32.c +++ b/arch/x86/um/tls_32.c @@ -215,14 +215,12 @@ static int set_tls_entry(struct task_struct* task, struct user_desc *info, return 0; } -int arch_copy_tls(struct task_struct *new) +int arch_set_tls(struct task_struct *new, unsigned long tls) { struct user_desc info; int idx, ret = -EFAULT; - if (copy_from_user(&info, - (void __user *) UPT_SI(&new->thread.regs.regs), - sizeof(info))) + if (copy_from_user(&info, (void __user *) tls, sizeof(info))) goto out; ret = -EINVAL;
diff --git a/arch/x86/um/tls_64.c b/arch/x86/um/tls_64.c index 3a621e0d3925..ebd3855d9b13 100644 --- a/arch/x86/um/tls_64.c +++ b/arch/x86/um/tls_64.c @@ -6,14 +6,13 @@ void clear_flushed_tls(struct task_struct *task) { } -int arch_copy_tls(struct task_struct *t) +int arch_set_tls(struct task_struct *t, unsigned long tls) { /* * If CLONE_SETTLS is set, we need to save the thread id - * (which is argument 5, child_tid, of clone) so it can be set - * during context switches. + * so it can be set during context switches. */ - t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)]; + t->thread.arch.fs = tls; return 0; }
-- cgit v1.2.3
From 13cf4cf030183dd9a8731f3fe32456e83b6c7b68 Mon Sep 17 00:00:00 2001 From: Yash Shah Date: Tue, 7 Jan 2020 22:09:06 -0800 Subject: riscv: move sifive_l2_cache.h to include/soc Commit 9209fb51896f ("riscv: move sifive_l2_cache.c to drivers/soc") moved the SiFive L2 cache driver to drivers/soc, but it did not move the header file along with the driver. Therefore this patch moves the header file to include/soc as well. Signed-off-by: Yash Shah Reviewed-by: Anup Patel [paul.walmsley@sifive.com: updated to fix the include guard] Fixes: 9209fb51896f ("riscv: move sifive_l2_cache.c to drivers/soc") Signed-off-by: Paul Walmsley --- arch/riscv/include/asm/sifive_l2_cache.h | 16 ---------------- 1 file changed, 16 deletions(-) delete mode 100644 arch/riscv/include/asm/sifive_l2_cache.h (limited to 'arch')
diff --git a/arch/riscv/include/asm/sifive_l2_cache.h b/arch/riscv/include/asm/sifive_l2_cache.h deleted file mode 100644 index 04f6748fc50b..000000000000 --- a/arch/riscv/include/asm/sifive_l2_cache.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * SiFive L2 Cache Controller header file - * - */ -#ifndef _ASM_RISCV_SIFIVE_L2_CACHE_H -#define _ASM_RISCV_SIFIVE_L2_CACHE_H - -extern int register_sifive_l2_error_notifier(struct notifier_block *nb); -extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb); - -#define SIFIVE_L2_ERR_TYPE_CE 0 -#define SIFIVE_L2_ERR_TYPE_UE 1 - -#endif /* _ASM_RISCV_SIFIVE_L2_CACHE_H */
-- cgit v1.2.3
From dc6fcba72f0435b7884f2e92fd634bb9f78a2c60 Mon Sep 17 00:00:00 2001 From: Guo Ren Date: Sun, 5 Jan 2020 10:52:14 +0800 Subject: riscv: Fixup obvious bug for fp-regs reset CSR_MISA is defined in the RISC-V Privileged Architecture specification, section 3.1.1 "Machine ISA Register misa". Each bit set to 1 in misa indicates a supported feature, so reset_regs should beqz to .Lreset_regs_done when neither the F nor the D bit is set in the misa CSR.
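For reference, an illustrative C rendering of the corrected check (the authoritative code is the assembly hunk below; the helper function name is made up, while csr_read()/csr_set() and the COMPAT_HWCAP_ISA_* masks are the kernel's own):

    static void reset_fp_regs(void)          /* hypothetical helper */
    {
            /* misa advertises neither F nor D: no FPU state to reset. */
            if (!(csr_read(CSR_MISA) & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)))
                    return;

            csr_set(CSR_STATUS, SR_FS);      /* enable the FP unit first */
            /* ... then zero f0-f31 and fcsr, as reset_regs does ... */
    }

The old code used bnez, i.e. it skipped the FP reset exactly when an FPU was present and attempted it when there was none, which is the inverse of the intended behaviour.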
Signed-off-by: Guo Ren [paul.walmsley@sifive.com: fix typo in commit message] Fixes: 9e80635619b51 ("riscv: clear the instruction cache and all registers when booting") Signed-off-by: Paul Walmsley --- arch/riscv/kernel/head.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 797802c73dee..2227db63f895 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -251,7 +251,7 @@ ENTRY(reset_regs) #ifdef CONFIG_FPU csrr t0, CSR_MISA andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D) - bnez t0, .Lreset_regs_done + beqz t0, .Lreset_regs_done li t1, SR_FS csrs CSR_STATUS, t1 -- cgit v1.2.3