author     Linus Torvalds <torvalds@linux-foundation.org>	2019-07-09 12:08:15 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>	2019-07-09 12:08:15 -0700
commit     593c75463ab6d6985cdc9916f3d1c28b5f6340cd (patch)
tree       fead1a52de9609ccd841bc4caa3a2ea44ddff932
parent     565eb5f8c5d379b6a6a3134c76b2fcfecdd007d3 (diff)
parent     b3d5f311d3cfdea0b0e2373409e50423b130f847 (diff)
Merge branch 'parisc-5.3-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux
Pull parisc updates from Helge Deller:
 "Dynamic ftrace support by Sven Schnelle and a header guard fix by
  Denis Efremov"

* 'parisc-5.3-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: asm: psw.h: missing header guard
  parisc: add dynamic ftrace
  compiler.h: add CC_USING_PATCHABLE_FUNCTION_ENTRY
  parisc: use pr_debug() in kernel/module.c
  parisc: add WARN_ON() to clear_fixmap
  parisc: add spinlock to patch function
  parisc: add support for patching multiple words
-rw-r--r--  arch/parisc/Kconfig                    2
-rw-r--r--  arch/parisc/Makefile                  18
-rw-r--r--  arch/parisc/include/asm/ftrace.h      15
-rw-r--r--  arch/parisc/include/asm/patch.h        4
-rw-r--r--  arch/parisc/include/asm/psw.h          2
-rw-r--r--  arch/parisc/kernel/Makefile            9
-rw-r--r--  arch/parisc/kernel/entry.S            64
-rw-r--r--  arch/parisc/kernel/ftrace.c          129
-rw-r--r--  arch/parisc/kernel/module.c           64
-rw-r--r--  arch/parisc/kernel/module.lds          7
-rw-r--r--  arch/parisc/kernel/patch.c            88
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S       2
-rw-r--r--  arch/parisc/mm/fixmap.c                7
-rw-r--r--  include/asm-generic/vmlinux.lds.h      7
-rw-r--r--  include/linux/compiler_types.h         2
15 files changed, 359 insertions, 61 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 4860efa91d7b..42875ff15671 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -59,6 +59,8 @@ config PARISC
select HAVE_ARCH_KGDB
select HAVE_KPROBES
select HAVE_KRETPROBES
+ select HAVE_DYNAMIC_FTRACE if $(cc-option,-fpatchable-function-entry=1,1)
+ select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index c19af26febe6..58d46665cad9 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -47,6 +47,24 @@ ifneq ($(SUBARCH),$(UTS_MACHINE))
endif
endif
+ifdef CONFIG_DYNAMIC_FTRACE
+ifdef CONFIG_64BIT
+NOP_COUNT := 8
+else
+NOP_COUNT := 5
+endif
+
+export CC_USING_RECORD_MCOUNT:=1
+export CC_USING_PATCHABLE_FUNCTION_ENTRY:=1
+
+KBUILD_AFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY=1
+KBUILD_CFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY=1 \
+ -DFTRACE_PATCHABLE_FUNCTION_SIZE=$(NOP_COUNT)
+
+CC_FLAGS_FTRACE := -fpatchable-function-entry=$(NOP_COUNT),$(shell echo $$(($(NOP_COUNT)-1)))
+KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/parisc/kernel/module.lds
+endif
+
OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
cflags-y := -pipe
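
Concretely, the Makefile block above makes a 64-bit build pass -fpatchable-function-entry=8,7 (an 8-word NOP pad, 7 of the words before the function label) and a 32-bit build -fpatchable-function-entry=5,4, and it exports the same count to C code as FTRACE_PATCHABLE_FUNCTION_SIZE. A minimal compile-time cross-check sketch, assuming those values (EXPECTED_PAD_WORDS is an illustrative name, not part of the patch):

#ifdef CONFIG_64BIT
#define EXPECTED_PAD_WORDS 8	/* NOP_COUNT chosen for 64-bit above */
#else
#define EXPECTED_PAD_WORDS 5	/* NOP_COUNT chosen for 32-bit above */
#endif
/* FTRACE_PATCHABLE_FUNCTION_SIZE arrives via -D from KBUILD_CFLAGS */
_Static_assert(FTRACE_PATCHABLE_FUNCTION_SIZE == EXPECTED_PAD_WORDS,
	       "NOP pad size must match -fpatchable-function-entry");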
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 42b2c75a1645..958c0aa5dbb2 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -5,12 +5,23 @@
#ifndef __ASSEMBLY__
extern void mcount(void);
-#define MCOUNT_INSN_SIZE 4
-
+#define MCOUNT_ADDR ((unsigned long)mcount)
+#define MCOUNT_INSN_SIZE 4
+#define CC_USING_NOP_MCOUNT
extern unsigned long sys_call_table[];
extern unsigned long return_address(unsigned int);
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_caller(void);
+
+struct dyn_arch_ftrace {
+};
+
+unsigned long ftrace_call_adjust(unsigned long addr);
+
+#endif
+
#define ftrace_return_address(n) return_address(n)
#endif /* __ASSEMBLY__ */
diff --git a/arch/parisc/include/asm/patch.h b/arch/parisc/include/asm/patch.h
index 685b58a13968..400d84c6e504 100644
--- a/arch/parisc/include/asm/patch.h
+++ b/arch/parisc/include/asm/patch.h
@@ -4,8 +4,10 @@
/* stop machine and patch kernel text */
void patch_text(void *addr, unsigned int insn);
+void patch_text_multiple(void *addr, u32 *insn, unsigned int len);
/* patch kernel text with machine already stopped (e.g. in kgdb) */
-void __patch_text(void *addr, unsigned int insn);
+void __patch_text(void *addr, u32 insn);
+void __patch_text_multiple(void *addr, u32 *insn, unsigned int len);
#endif
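
A minimal usage sketch of the new multi-word interface (the call site and helper below are hypothetical; INSN_NOP is the PA-RISC no-op encoding already used by ftrace.c further down): patch_text_multiple() goes through stop_machine() so no CPU can execute a half-rewritten sequence, while __patch_text_multiple() is for contexts where the machine is already stopped, e.g. kgdb.

#include <asm/patch.h>

static void example_nop_out_three_words(void *site)
{
	/* hypothetical example: overwrite three instruction words with NOPs */
	u32 seq[3] = { INSN_NOP, INSN_NOP, INSN_NOP };

	patch_text_multiple(site, seq, sizeof(seq));
}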
diff --git a/arch/parisc/include/asm/psw.h b/arch/parisc/include/asm/psw.h
index 76c301146c31..46921ffcc407 100644
--- a/arch/parisc/include/asm/psw.h
+++ b/arch/parisc/include/asm/psw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_PSW_H
-
+#define _PARISC_PSW_H
#define PSW_I 0x00000001
#define PSW_D 0x00000002
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index fc0df5c44468..c232266b517c 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -14,10 +14,11 @@ obj-y := cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
-CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_cache.o = -pg
-CFLAGS_REMOVE_perf.o = -pg
-CFLAGS_REMOVE_unwind.o = -pg
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_cache.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_perf.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_unwind.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
endif
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 89c801c2b5d1..3e430590c1e1 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -2012,6 +2012,70 @@ ftrace_stub:
#endif
ENDPROC_CFI(mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#ifdef CONFIG_64BIT
+#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
+#else
+#define FTRACE_FRAME_SIZE FRAME_SIZE
+#endif
+ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
+ftrace_caller:
+ .global ftrace_caller
+
+ STREG %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
+ ldo -FTRACE_FRAME_SIZE(%sp), %r3
+ STREG %rp, -RP_OFFSET(%r3)
+
+ /* Offset 0 is already allocated for %r1 */
+ STREG %r23, 2*REG_SZ(%r3)
+ STREG %r24, 3*REG_SZ(%r3)
+ STREG %r25, 4*REG_SZ(%r3)
+ STREG %r26, 5*REG_SZ(%r3)
+ STREG %r28, 6*REG_SZ(%r3)
+ STREG %r29, 7*REG_SZ(%r3)
+#ifdef CONFIG_64BIT
+ STREG %r19, 8*REG_SZ(%r3)
+ STREG %r20, 9*REG_SZ(%r3)
+ STREG %r21, 10*REG_SZ(%r3)
+ STREG %r22, 11*REG_SZ(%r3)
+ STREG %r27, 12*REG_SZ(%r3)
+ STREG %r31, 13*REG_SZ(%r3)
+ loadgp
+ ldo -16(%sp),%r29
+#endif
+ LDREG 0(%r3), %r25
+ copy %rp, %r26
+ ldo -8(%r25), %r25
+ b,l ftrace_function_trampoline, %rp
+ copy %r3, %r24
+
+ LDREG -RP_OFFSET(%r3), %rp
+ LDREG 2*REG_SZ(%r3), %r23
+ LDREG 3*REG_SZ(%r3), %r24
+ LDREG 4*REG_SZ(%r3), %r25
+ LDREG 5*REG_SZ(%r3), %r26
+ LDREG 6*REG_SZ(%r3), %r28
+ LDREG 7*REG_SZ(%r3), %r29
+#ifdef CONFIG_64BIT
+ LDREG 8*REG_SZ(%r3), %r19
+ LDREG 9*REG_SZ(%r3), %r20
+ LDREG 10*REG_SZ(%r3), %r21
+ LDREG 11*REG_SZ(%r3), %r22
+ LDREG 12*REG_SZ(%r3), %r27
+ LDREG 13*REG_SZ(%r3), %r31
+#endif
+ LDREG 1*REG_SZ(%r3), %r3
+
+ LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
+ /* Adjust return point to jump back to beginning of traced function */
+ ldo -4(%r1), %r1
+ bv,n (%r1)
+
+ENDPROC_CFI(ftrace_caller)
+
+#endif
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
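
For reference, the C function the new ftrace_caller stub branches to is the existing ftrace_function_trampoline() in arch/parisc/kernel/ftrace.c. Per the PA-RISC calling convention (arguments in %r26, %r25, %r24), the register moves above map roughly as annotated below; this is a reading of the assembly, not new code from the patch:

/*
 *   %r26 <- %rp             parent: return address into the caller
 *   %r25 <- saved %r1 - 8   self_addr: points into the traced function's pad
 *   %r24 <- %r3             org_sp_gr3: old frame pointer, for stack dumps
 */
void notrace ftrace_function_trampoline(unsigned long parent,
					unsigned long self_addr,
					unsigned long org_sp_gr3);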
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index a28f915993b1..d784ccdd8fef 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -7,17 +7,17 @@
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
*
* future possible enhancements:
- * - add CONFIG_DYNAMIC_FTRACE
* - add CONFIG_STACK_TRACER
*/
#include <linux/init.h>
#include <linux/ftrace.h>
+#include <linux/uaccess.h>
#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
-
+#include <asm/patch.h>
#define __hot __attribute__ ((__section__ (".text.hot")))
@@ -50,13 +50,11 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent,
unsigned long self_addr,
unsigned long org_sp_gr3)
{
- extern ftrace_func_t ftrace_trace_function; /* depends on CONFIG_DYNAMIC_FTRACE */
-
- if (ftrace_trace_function != ftrace_stub) {
- /* struct ftrace_ops *op, struct pt_regs *regs); */
- ftrace_trace_function(parent, self_addr, NULL, NULL);
- return;
- }
+#ifndef CONFIG_DYNAMIC_FTRACE
+ extern ftrace_func_t ftrace_trace_function;
+#endif
+ if (ftrace_trace_function != ftrace_stub)
+ ftrace_trace_function(self_addr, parent, NULL, NULL);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub ||
@@ -75,3 +73,116 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent,
#endif
}
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return 0;
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+int __init ftrace_dyn_arch_init(void)
+{
+ return 0;
+}
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ return 0;
+}
+
+unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr+(FTRACE_PATCHABLE_FUNCTION_SIZE-1)*4;
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
+ u32 *tramp;
+ int size, ret, i;
+ void *ip;
+
+#ifdef CONFIG_64BIT
+ unsigned long addr2 =
+ (unsigned long)dereference_function_descriptor((void *)addr);
+
+ u32 ftrace_trampoline[] = {
+ 0x73c10208, /* std,ma r1,100(sp) */
+ 0x0c2110c1, /* ldd -10(r1),r1 */
+ 0xe820d002, /* bve,n (r1) */
+ addr2 >> 32,
+ addr2 & 0xffffffff,
+ 0xe83f1fd7, /* b,l,n .-14,r1 */
+ };
+
+ u32 ftrace_trampoline_unaligned[] = {
+ addr2 >> 32,
+ addr2 & 0xffffffff,
+ 0x37de0200, /* ldo 100(sp),sp */
+ 0x73c13e01, /* std r1,-100(sp) */
+ 0x34213ff9, /* ldo -4(r1),r1 */
+ 0x50213fc1, /* ldd -20(r1),r1 */
+ 0xe820d002, /* bve,n (r1) */
+ 0xe83f1fcf, /* b,l,n .-20,r1 */
+ };
+
+ BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) >
+ FTRACE_PATCHABLE_FUNCTION_SIZE);
+#else
+ u32 ftrace_trampoline[] = {
+ (u32)addr,
+ 0x6fc10080, /* stw,ma r1,40(sp) */
+ 0x48213fd1, /* ldw -18(r1),r1 */
+ 0xe820c002, /* bv,n r0(r1) */
+ 0xe83f1fdf, /* b,l,n .-c,r1 */
+ };
+#endif
+
+ BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) >
+ FTRACE_PATCHABLE_FUNCTION_SIZE);
+
+ size = sizeof(ftrace_trampoline);
+ tramp = ftrace_trampoline;
+
+#ifdef CONFIG_64BIT
+ if (rec->ip & 0x4) {
+ size = sizeof(ftrace_trampoline_unaligned);
+ tramp = ftrace_trampoline_unaligned;
+ }
+#endif
+
+ ip = (void *)(rec->ip + 4 - size);
+
+ ret = probe_kernel_read(insn, ip, size);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < size / 4; i++) {
+ if (insn[i] != INSN_NOP)
+ return -EINVAL;
+ }
+
+ __patch_text_multiple(ip, tramp, size);
+ return 0;
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ unsigned long addr)
+{
+ u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(insn); i++)
+ insn[i] = INSN_NOP;
+
+ __patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
+ insn, sizeof(insn));
+ return 0;
+}
+#endif
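
A worked example of ftrace_call_adjust() above, assuming the 64-bit pad (FTRACE_PATCHABLE_FUNCTION_SIZE == 8): the compiler records the address of the first pad word in __patchable_function_entries, while ftrace treats the last pad word as the call site, so the adjustment is addr + (8 - 1) * 4 = addr + 28. ftrace_make_call() then places the trampoline so that it ends exactly on that word (ip = rec->ip + 4 - size). The same arithmetic spelled out as a sketch (illustrative only):

static unsigned long example_adjust_64bit(unsigned long pad_start)
{
	/* matches ftrace_call_adjust() when the pad is 8 words of 4 bytes */
	return pad_start + (8 - 1) * 4;
}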
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 1f0f29a289d3..ac5f34993b53 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -33,9 +33,9 @@
* However, SEGREL32 is used only for PARISC unwind entries, and we want
* those entries to have an absolute address, and not just an offset.
*
- * The unwind table mechanism has the ability to specify an offset for
+ * The unwind table mechanism has the ability to specify an offset for
* the unwind table; however, because we split off the init functions into
- * a different piece of memory, it is not possible to do this using a
+ * a different piece of memory, it is not possible to do this using a
* single offset. Instead, we use the above hack for now.
*/
@@ -53,12 +53,6 @@
#include <asm/unwind.h>
#include <asm/sections.h>
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(fmt...)
-#endif
-
#define RELOC_REACHABLE(val, bits) \
(( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \
( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ? \
@@ -300,7 +294,7 @@ unsigned int arch_mod_section_prepend(struct module *mod,
* sizeof(struct stub_entry);
}
-#define CONST
+#define CONST
int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
CONST Elf_Shdr *sechdrs,
CONST char *secstrings,
@@ -386,7 +380,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
got[i].addr = value;
out:
- DEBUGP("GOT ENTRY %d[%x] val %lx\n", i, i*sizeof(struct got_entry),
+ pr_debug("GOT ENTRY %d[%lx] val %lx\n", i, i*sizeof(struct got_entry),
value);
return i * sizeof(struct got_entry);
}
@@ -539,7 +533,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
//unsigned long dp = (unsigned long)$global$;
register unsigned long dp asm ("r27");
- DEBUGP("Applying relocate section %u to %u\n", relsec,
+ pr_debug("Applying relocate section %u to %u\n", relsec,
targetsec);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
@@ -563,7 +557,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
#if 0
#define r(t) ELF32_R_TYPE(rel[i].r_info)==t ? #t :
- DEBUGP("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n",
+ pr_debug("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n",
strtab + sym->st_name,
(uint32_t)loc, val, addend,
r(R_PARISC_PLABEL32)
@@ -604,7 +598,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
/* See note about special handling of SEGREL32 at
* the beginning of this file.
*/
- *loc = fsel(val, addend);
+ *loc = fsel(val, addend);
break;
case R_PARISC_SECREL32:
/* 32-bit section relative address. */
@@ -683,7 +677,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
Elf_Addr loc0;
unsigned int targetsec = sechdrs[relsec].sh_info;
- DEBUGP("Applying relocate section %u to %u\n", relsec,
+ pr_debug("Applying relocate section %u to %u\n", relsec,
targetsec);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
@@ -725,7 +719,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
case R_PARISC_LTOFF21L:
/* LT-relative; left 21 bits */
val = get_got(me, val, addend);
- DEBUGP("LTOFF21L Symbol %s loc %p val %lx\n",
+ pr_debug("LTOFF21L Symbol %s loc %p val %llx\n",
strtab + sym->st_name,
loc, val);
val = lrsel(val, 0);
@@ -736,14 +730,14 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
/* LT-relative; right 14 bits */
val = get_got(me, val, addend);
val = rrsel(val, 0);
- DEBUGP("LTOFF14R Symbol %s loc %p val %lx\n",
+ pr_debug("LTOFF14R Symbol %s loc %p val %llx\n",
strtab + sym->st_name,
loc, val);
*loc = mask(*loc, 14) | reassemble_14(val);
break;
case R_PARISC_PCREL22F:
/* PC-relative; 22 bits */
- DEBUGP("PCREL22F Symbol %s loc %p val %lx\n",
+ pr_debug("PCREL22F Symbol %s loc %p val %llx\n",
strtab + sym->st_name,
loc, val);
val += addend;
@@ -775,7 +769,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
val = get_stub(me, val, addend, ELF_STUB_GOT,
loc0, targetsec);
}
- DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n",
+ pr_debug("STUB FOR %s loc %px, val %llx+%llx at %llx\n",
strtab + sym->st_name, loc, sym->st_value,
addend, val);
val = (val - dot - 8)/4;
@@ -799,7 +793,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
/* See note about special handling of SEGREL32 at
* the beginning of this file.
*/
- *loc = fsel(val, addend);
+ *loc = fsel(val, addend);
break;
case R_PARISC_SECREL32:
/* 32-bit section relative address. */
@@ -809,14 +803,14 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
/* 64-bit function address */
if(in_local(me, (void *)(val + addend))) {
*loc64 = get_fdesc(me, val+addend);
- DEBUGP("FDESC for %s at %p points to %lx\n",
+ pr_debug("FDESC for %s at %llx points to %llx\n",
strtab + sym->st_name, *loc64,
((Elf_Fdesc *)*loc64)->addr);
} else {
/* if the symbol is not local to this
* module then val+addend is a pointer
* to the function descriptor */
- DEBUGP("Non local FPTR64 Symbol %s loc %p val %lx\n",
+ pr_debug("Non local FPTR64 Symbol %s loc %p val %llx\n",
strtab + sym->st_name,
loc, val);
*loc64 = val + addend;
@@ -847,7 +841,7 @@ register_unwind_table(struct module *me,
end = table + sechdrs[me->arch.unwind_section].sh_size;
gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
- DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ pr_debug("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
me->arch.unwind_section, table, end, gp);
me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end);
}
@@ -868,6 +862,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const char *strtab = NULL;
const Elf_Shdr *s;
char *secstrings;
+ int err, symindex = -1;
Elf_Sym *newptr, *oldptr;
Elf_Shdr *symhdr = NULL;
#ifdef DEBUG
@@ -894,6 +889,7 @@ int module_finalize(const Elf_Ehdr *hdr,
if(sechdrs[i].sh_type == SHT_SYMTAB
&& (sechdrs[i].sh_flags & SHF_ALLOC)) {
int strindex = sechdrs[i].sh_link;
+ symindex = i;
/* FIXME: AWFUL HACK
* The cast is to drop the const from
* the sechdrs pointer */
@@ -903,7 +899,7 @@ int module_finalize(const Elf_Ehdr *hdr,
}
}
- DEBUGP("module %s: strtab %p, symhdr %p\n",
+ pr_debug("module %s: strtab %p, symhdr %p\n",
me->name, strtab, symhdr);
if(me->arch.got_count > MAX_GOTS) {
@@ -922,7 +918,7 @@ int module_finalize(const Elf_Ehdr *hdr,
oldptr = (void *)symhdr->sh_addr;
newptr = oldptr + 1; /* we start counting at 1 */
nsyms = symhdr->sh_size / sizeof(Elf_Sym);
- DEBUGP("OLD num_symtab %lu\n", nsyms);
+ pr_debug("OLD num_symtab %lu\n", nsyms);
for (i = 1; i < nsyms; i++) {
oldptr++; /* note, count starts at 1 so preincrement */
@@ -937,7 +933,7 @@ int module_finalize(const Elf_Ehdr *hdr,
}
nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
- DEBUGP("NEW num_symtab %lu\n", nsyms);
+ pr_debug("NEW num_symtab %lu\n", nsyms);
symhdr->sh_size = nsyms * sizeof(Elf_Sym);
/* find .altinstructions section */
@@ -949,8 +945,24 @@ int module_finalize(const Elf_Ehdr *hdr,
if (!strcmp(".altinstructions", secname))
/* patch .altinstructions */
apply_alternatives(aseg, aseg + s->sh_size, me->name);
- }
+ /* For 32 bit kernels we're compiling modules with
+ * -ffunction-sections so we must relocate the addresses in the
+ *__mcount_loc section.
+ */
+ if (symindex != -1 && !strcmp(secname, "__mcount_loc")) {
+ if (s->sh_type == SHT_REL)
+ err = apply_relocate((Elf_Shdr *)sechdrs,
+ strtab, symindex,
+ s - sechdrs, me);
+ else if (s->sh_type == SHT_RELA)
+ err = apply_relocate_add((Elf_Shdr *)sechdrs,
+ strtab, symindex,
+ s - sechdrs, me);
+ if (err)
+ return err;
+ }
+ }
return 0;
}
diff --git a/arch/parisc/kernel/module.lds b/arch/parisc/kernel/module.lds
new file mode 100644
index 000000000000..1a9a92aca5c8
--- /dev/null
+++ b/arch/parisc/kernel/module.lds
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+SECTIONS {
+ __mcount_loc : {
+ *(__patchable_function_entries)
+ }
+}
diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c
index cdcd981278b3..80a0ab372802 100644
--- a/arch/parisc/kernel/patch.c
+++ b/arch/parisc/kernel/patch.c
@@ -17,15 +17,20 @@
struct patch {
void *addr;
- unsigned int insn;
+ u32 *insn;
+ unsigned int len;
};
-static void __kprobes *patch_map(void *addr, int fixmap)
+static DEFINE_RAW_SPINLOCK(patch_lock);
+
+static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,
+ int *need_unmap)
{
unsigned long uintaddr = (uintptr_t) addr;
bool module = !core_kernel_text(uintaddr);
struct page *page;
+ *need_unmap = 0;
if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
page = vmalloc_to_page(addr);
else if (!module && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
@@ -33,36 +38,74 @@ static void __kprobes *patch_map(void *addr, int fixmap)
else
return addr;
+ *need_unmap = 1;
set_fixmap(fixmap, page_to_phys(page));
+ if (flags)
+ raw_spin_lock_irqsave(&patch_lock, *flags);
+ else
+ __acquire(&patch_lock);
return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}
-static void __kprobes patch_unmap(int fixmap)
+static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
{
clear_fixmap(fixmap);
+
+ if (flags)
+ raw_spin_unlock_irqrestore(&patch_lock, *flags);
+ else
+ __release(&patch_lock);
+}
+
+void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
+{
+ unsigned long start = (unsigned long)addr;
+ unsigned long end = (unsigned long)addr + len;
+ unsigned long flags;
+ u32 *p, *fixmap;
+ int mapped;
+
+ /* Make sure we don't have any aliases in cache */
+ flush_kernel_vmap_range(addr, len);
+ flush_icache_range(start, end);
+
+ p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);
+
+ while (len >= 4) {
+ *p++ = *insn++;
+ addr += sizeof(u32);
+ len -= sizeof(u32);
+ if (len && offset_in_page(addr) == 0) {
+ /*
+ * We're crossing a page boundary, so
+ * need to remap
+ */
+ flush_kernel_vmap_range((void *)fixmap,
+ (p-fixmap) * sizeof(*p));
+ if (mapped)
+ patch_unmap(FIX_TEXT_POKE0, &flags);
+ p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
+ &mapped);
+ }
+ }
+
+ flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p));
+ if (mapped)
+ patch_unmap(FIX_TEXT_POKE0, &flags);
+ flush_icache_range(start, end);
}
-void __kprobes __patch_text(void *addr, unsigned int insn)
+void __kprobes __patch_text(void *addr, u32 insn)
{
- void *waddr = addr;
- int size;
-
- waddr = patch_map(addr, FIX_TEXT_POKE0);
- *(u32 *)waddr = insn;
- size = sizeof(u32);
- flush_kernel_vmap_range(waddr, size);
- patch_unmap(FIX_TEXT_POKE0);
- flush_icache_range((uintptr_t)(addr),
- (uintptr_t)(addr) + size);
+ __patch_text_multiple(addr, &insn, sizeof(insn));
}
static int __kprobes patch_text_stop_machine(void *data)
{
struct patch *patch = data;
- __patch_text(patch->addr, patch->insn);
-
+ __patch_text_multiple(patch->addr, patch->insn, patch->len);
return 0;
}
@@ -70,7 +113,20 @@ void __kprobes patch_text(void *addr, unsigned int insn)
{
struct patch patch = {
.addr = addr,
+ .insn = &insn,
+ .len = sizeof(insn),
+ };
+
+ stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
+}
+
+void __kprobes patch_text_multiple(void *addr, u32 *insn, unsigned int len)
+{
+
+ struct patch patch = {
+ .addr = addr,
.insn = insn,
+ .len = len
};
stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
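
The remap inside the copy loop of __patch_text_multiple() exists because a multi-word pad may straddle a page boundary while the fixmap alias covers only one page at a time. A small illustrative predicate (not part of the patch) for when that case is hit:

static inline bool span_crosses_page(unsigned long addr, unsigned int len)
{
	/* true if [addr, addr + len) touches more than one page */
	return (addr & PAGE_MASK) != ((addr + len - 1) & PAGE_MASK);
}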
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index cd33b4feacb1..99cd24f2ea01 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -18,6 +18,8 @@
*(.data..vm0.pgd) \
*(.data..vm0.pte)
+#define CC_USING_PATCHABLE_FUNCTION_ENTRY
+
#include <asm-generic/vmlinux.lds.h>
/* needed for the processor specific cache alignment size */
diff --git a/arch/parisc/mm/fixmap.c b/arch/parisc/mm/fixmap.c
index c8d41b54fb19..474cd241c150 100644
--- a/arch/parisc/mm/fixmap.c
+++ b/arch/parisc/mm/fixmap.c
@@ -10,7 +10,7 @@
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
-void set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
+void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
{
unsigned long vaddr = __fix_to_virt(idx);
pgd_t *pgd = pgd_offset_k(vaddr);
@@ -28,13 +28,16 @@ void set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
}
-void clear_fixmap(enum fixed_addresses idx)
+void notrace clear_fixmap(enum fixed_addresses idx)
{
unsigned long vaddr = __fix_to_virt(idx);
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
pte_t *pte = pte_offset_kernel(pmd, vaddr);
+ if (WARN_ON(pte_none(*pte)))
+ return;
+
pte_clear(&init_mm, vaddr, pte);
flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 088987e9a3ea..ca42182992a5 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -110,10 +110,17 @@
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
+#define MCOUNT_REC() . = ALIGN(8); \
+ __start_mcount_loc = .; \
+ KEEP(*(__patchable_function_entries)) \
+ __stop_mcount_loc = .;
+#else
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
__stop_mcount_loc = .;
+#endif
#else
#define MCOUNT_REC()
#endif
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 19e58b9138a0..095d55c3834d 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -112,6 +112,8 @@ struct ftrace_likely_data {
#if defined(CC_USING_HOTPATCH)
#define notrace __attribute__((hotpatch(0, 0)))
+#elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY)
+#define notrace __attribute__((patchable_function_entry(0, 0)))
#else
#define notrace __attribute__((__no_instrument_function__))
#endif
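
Illustrative effect of the new branch (not code from the patch): when an architecture defines CC_USING_PATCHABLE_FUNCTION_ENTRY, notrace expands to the matching GCC attribute with a zero-sized pad, so a notrace function gets no patchable NOPs and can never be redirected to ftrace_caller:

static void __attribute__((patchable_function_entry(0, 0))) not_patched(void)
{
	/* no patchable entry words are emitted for this function */
}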