author    Steven Rostedt (VMware) <rostedt@goodmis.org>    2019-05-10 12:05:46 -0400
committer Steven Rostedt (VMware) <rostedt@goodmis.org>    2019-05-10 12:33:03 -0400
commit    518049d9d3e25dcd7be6e3d728e86f292ad0c922 (patch)
tree      fdc458e59447517e471a9113d8f32f11cb4d4a8d /arch/x86
parent    a15fd609ad53a631a927c6680e8fb606f42a712b (diff)
ftrace/x86_32: Remove support for non DYNAMIC_FTRACE
When DYNAMIC_FTRACE is enabled in the kernel, all the functions that can be traced by the function tracer have a "nop" placeholder at the start of the function. When function tracing is enabled, the nop is converted into a call to the tracing infrastructure where the functions get traced. This also allows for specifying specific functions to trace, and a lot of infrastructure is built on top of this.

When DYNAMIC_FTRACE is not enabled, all the functions have a call to the ftrace trampoline. A check is made to see if the function pointer is the ftrace_stub or not, and if it is not, it calls the function pointer to trace the code. This adds over 10% overhead to the kernel even when tracing is disabled.

When an architecture supports DYNAMIC_FTRACE there really is no reason to use the static tracing. I have kept non DYNAMIC_FTRACE available in x86 so that the generic code for non DYNAMIC_FTRACE can be tested, but there is no reason to support it for both x86_64 and x86_32. As non DYNAMIC_FTRACE for x86_32 does not even support fentry, and we want to remove mcount completely, there's no reason to keep non DYNAMIC_FTRACE around for x86_32.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
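For illustration, a minimal C sketch of the static (!DYNAMIC_FTRACE) dispatch described above, mirroring the cmpl/jnz against ftrace_stub in the assembly removed below. The flat function-pointer test and the standalone definitions here are simplifications for this sketch, not the kernel's actual plumbing:

    /* Sketch only: static ftrace dispatch with !DYNAMIC_FTRACE.
     * The compiler emits a call to the trampoline (mcount or
     * __fentry__) at the start of every traceable function.
     */
    typedef void (*ftrace_func_t)(unsigned long ip,
                                  unsigned long parent_ip);

    static void ftrace_stub(unsigned long ip, unsigned long parent_ip)
    {
    }

    /* Points at ftrace_stub while tracing is off, and at a real
     * tracer callback otherwise. */
    static ftrace_func_t ftrace_trace_function = ftrace_stub;

    static void function_hook(unsigned long ip, unsigned long parent_ip)
    {
        /* This compare-and-branch runs on every call of every
         * traceable function, even while tracing is disabled;
         * that is the constant overhead the message refers to. */
        if (ftrace_trace_function != ftrace_stub)
            ftrace_trace_function(ip, parent_ip);
    }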
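By contrast, with DYNAMIC_FTRACE each call site starts as a nop and is rewritten into a call only when tracing is turned on. A hypothetical sketch of the patching step on x86 follows; the helper name make_call_insn is invented for this sketch, and the real kernel does this in arch/x86/kernel/ftrace.c with cross-CPU text-patching safety that is omitted here:

    #include <string.h>

    #define MCOUNT_INSN_SIZE 5  /* size of the patched site on x86 */

    /* 5-byte nop left at the start of each traceable function
     * while tracing is off. */
    static const unsigned char ftrace_nop[MCOUNT_INSN_SIZE] = {
        0x0f, 0x1f, 0x44, 0x00, 0x00
    };

    /* Build a rel32 "call func" instruction for the site at ip
     * (assumes 32-bit int and a little-endian x86 target). */
    static void make_call_insn(unsigned char insn[MCOUNT_INSN_SIZE],
                               unsigned long ip, unsigned long func)
    {
        int rel = (int)(func - (ip + MCOUNT_INSN_SIZE));

        insn[0] = 0xe8;                /* call rel32 opcode */
        memcpy(insn + 1, &rel, 4);     /* displacement */
    }

Enabling tracing swaps the nop for the call at each site, so untraced functions pay only for the nop itself rather than a trampoline call plus the stub check.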
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig              11
-rw-r--r--  arch/x86/kernel/ftrace_32.S   39
2 files changed, 11 insertions(+), 39 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5ad92419be19..0544041ae3a2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -31,6 +31,17 @@ config X86_64
select X86_DEV_DMA_OPS
select ARCH_HAS_SYSCALL_WRAPPER

+config FORCE_DYNAMIC_FTRACE
+ def_bool y
+ depends on X86_32
+ depends on FUNCTION_TRACER
+ select DYNAMIC_FTRACE
+ help
+ We keep the static function tracing (!DYNAMIC_FTRACE) around
+ in order to test the non static function tracing in the
+ generic code, as other architectures still use it. But we
+ only need to keep it around for x86_64. No need to keep it
+ for x86_32. For x86_32, force DYNAMIC_FTRACE.
#
# Arch settings
#
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 4c8440de3355..459e6b4a19bc 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -18,8 +18,6 @@ EXPORT_SYMBOL(__fentry__)
EXPORT_SYMBOL(mcount)
#endif

-#ifdef CONFIG_DYNAMIC_FTRACE
-
/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
# define USING_FRAME_POINTER
@@ -170,43 +168,6 @@ GLOBAL(ftrace_regs_call)
lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */
jmp .Lftrace_ret

-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(function_hook)
- cmpl $__PAGE_OFFSET, %esp
- jb ftrace_stub /* Paging not enabled yet? */
-
- cmpl $ftrace_stub, ftrace_trace_function
- jnz .Ltrace
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- cmpl $ftrace_stub, ftrace_graph_return
- jnz ftrace_graph_caller
-
- cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
- jnz ftrace_graph_caller
-#endif
-.globl ftrace_stub
-ftrace_stub:
- ret
-
- /* taken from glibc */
-.Ltrace:
- pushl %eax
- pushl %ecx
- pushl %edx
- movl 0xc(%esp), %eax
- movl 0x4(%ebp), %edx
- subl $MCOUNT_INSN_SIZE, %eax
-
- movl ftrace_trace_function, %ecx
- CALL_NOSPEC %ecx
-
- popl %edx
- popl %ecx
- popl %eax
- jmp ftrace_stub
-END(function_hook)
-#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)