author     Heiko Carstens <heiko.carstens@de.ibm.com>      2014-08-15 13:01:46 +0200
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>     2014-09-09 08:53:28 +0200
commit     10dec7dbd50ab0be96dda085d625d54ce800e426 (patch)
tree       a293f2b0d4cc1e68cd09770dfcd284356d3bb932 /arch/s390/kernel
parent     2481a87b0250bbf429fc8cdc78331efbc44a0221 (diff)
s390/ftrace: add HAVE_DYNAMIC_FTRACE_WITH_REGS support
This code is based on a patch from Vojtech Pavlik:
http://marc.info/?l=linux-s390&m=140438885114413&w=2

The actual implementation now differs significantly: instead of adding a
second function "ftrace_regs_caller", which would be nearly identical to the
existing ftrace_caller function, the current ftrace_caller function is now an
alias to ftrace_regs_caller and always passes the needed pt_regs structure
and function_trace_op parameters unconditionally.

Besides that, also use asm offsets to correctly allocate and access the new
struct pt_regs on the stack. While at it, we can make use of new instructions
to get rid of some indirect loads if compiled for new machines.

The passed struct pt_regs can be changed by the called function, and its new
contents will replace the current contents.

Note: to change the return address, the embedded psw member of the pt_regs
structure must be changed. The psw member is right now incomplete, since the
mask part is missing. For all current use cases this should be sufficient.
Providing and restoring a sane mask would mean we need to add an epsw/lpswe
pair to the mcount code. These two instructions alone would cost us ~120
cycles, which currently seems unnecessary.

Cc: Vojtech Pavlik <vojtech@suse.cz>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
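[Editor's note, not part of the patch: a hedged sketch of what the new calling
convention enables. An ftrace_ops registered with FTRACE_OPS_FL_SAVE_REGS gets
the saved struct pt_regs and can change the return address through its psw
member, as described above. The handler and redirect target names below are
made up for illustration; the handler signature (taking struct pt_regs) is the
one used by kernels of this era.]

#include <linux/ftrace.h>
#include <linux/kernel.h>

/* Hypothetical redirect target; in practice it must be ABI-compatible
 * with the traced function it replaces. */
static void my_replacement(void)
{
}

static void notrace redirect_handler(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op, struct pt_regs *regs)
{
	if (!regs)	/* regs are only passed with FTRACE_OPS_FL_SAVE_REGS */
		return;
	/*
	 * Rewriting psw.addr changes where ftrace_regs_caller resumes
	 * execution; the psw mask is not saved/restored by the mcount code.
	 */
	regs->psw.addr = (unsigned long)my_replacement;
}

static struct ftrace_ops redirect_ops = {
	.func	= redirect_handler,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};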
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/ftrace.c    |  7
-rw-r--r--  arch/s390/kernel/mcount64.S  | 43
2 files changed, 38 insertions(+), 12 deletions(-)
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index de55efa5b64e..14b61954d5a8 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -107,6 +107,13 @@ asm(
#endif /* CONFIG_64BIT */
+#ifdef CONFIG_64BIT
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+		       unsigned long addr)
+{
+	return 0;
+}
+#endif
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index 5b33c83adde9..4a65dabae851 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -8,28 +8,47 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
+#include <asm/ptrace.h>
.section .kprobes.text, "ax"
ENTRY(ftrace_stub)
br %r14
+#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
+#define STACK_PARENT_IP (STACK_FRAME_SIZE + 8)
+#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
+#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
+#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
+
ENTRY(_mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
ENTRY(ftrace_caller)
+ .globl ftrace_regs_caller
+ .set ftrace_regs_caller,ftrace_caller
#endif
- stmg %r2,%r5,32(%r15)
- stg %r14,112(%r15)
lgr %r1,%r15
- aghi %r15,-160
+ aghi %r15,-STACK_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
+ stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
+ stmg %r0,%r13,STACK_PTREGS_GPRS(%r15)
+ stg %r14,(STACK_PTREGS_PSW+8)(%r15)
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ aghik %r2,%r14,-MCOUNT_INSN_SIZE
+ lgrl %r4,function_trace_op
+ lgrl %r14,ftrace_trace_function
+#else
lgr %r2,%r14
- lg %r3,168(%r15)
aghi %r2,-MCOUNT_INSN_SIZE
+ larl %r4,function_trace_op
+ lg %r4,0(%r4)
larl %r14,ftrace_trace_function
lg %r14,0(%r14)
+#endif
+ lg %r3,STACK_PARENT_IP(%r15)
+ la %r5,STACK_PTREGS(%r15)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# The j instruction gets runtime patched to a nop instruction.
@@ -37,16 +56,16 @@ ENTRY(ftrace_caller)
# j .+4
ENTRY(ftrace_graph_caller)
j ftrace_graph_caller_end
- lg %r2,168(%r15)
- lg %r3,272(%r15)
+ lg %r2,STACK_PARENT_IP(%r15)
+ lg %r3,(STACK_PTREGS_PSW+8)(%r15)
brasl %r14,prepare_ftrace_return
- stg %r2,168(%r15)
+ stg %r2,STACK_PARENT_IP(%r15)
ftrace_graph_caller_end:
.globl ftrace_graph_caller_end
#endif
- aghi %r15,160
- lmg %r2,%r5,32(%r15)
- lg %r14,112(%r15)
+ lmg %r0,%r13,STACK_PTREGS_GPRS(%r15)
+ lg %r14,(STACK_PTREGS_PSW+8)(%r15)
+ aghi %r15,STACK_FRAME_SIZE
br %r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -54,10 +73,10 @@ ftrace_graph_caller_end:
ENTRY(return_to_handler)
stmg %r2,%r5,32(%r15)
lgr %r1,%r15
- aghi %r15,-160
+ aghi %r15,-STACK_FRAME_OVERHEAD
stg %r1,__SF_BACKCHAIN(%r15)
brasl %r14,ftrace_return_to_handler
- aghi %r15,160
+ aghi %r15,STACK_FRAME_OVERHEAD
lgr %r14,%r2
lmg %r2,%r5,32(%r15)
br %r14
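
[Editor's note, not part of the patch: a hedged usage sketch building on
redirect_ops from the sketch above. It attaches the ops to a single,
hypothetical traceable function my_func and enables it via the standard ftrace
API (ftrace_set_filter_ip, register_ftrace_function); nothing here is specific
to this patch beyond the fact that pt_regs are now actually passed on s390.]

extern void my_func(void);	/* hypothetical traced function */

static int __init redirect_init(void)
{
	int ret;

	/* Restrict redirect_ops to the entry point of my_func only. */
	ret = ftrace_set_filter_ip(&redirect_ops, (unsigned long)my_func, 0, 0);
	if (ret)
		return ret;

	/* From here on, redirect_handler() runs with pt_regs on each call. */
	return register_ftrace_function(&redirect_ops);
}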