From fdbd518adfaf2c10903250dffe70c61ed90b7dd7 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Fri, 3 Feb 2017 01:58:03 -0700
Subject: x86/entry/32: Relax a pvops stub clobber specification

The code at .Lrestore_nocheck does not make any assumptions on register
values, so all registers can be clobbered on code paths leading there.

Signed-off-by: Jan Beulich
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/5894542B02000078001366C5@prv-mh.provo.novell.com
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/entry_32.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86/entry')

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 57f7ec35216e..5553475f6008 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -585,7 +585,7 @@ ENTRY(iret_exc )
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
-	DISABLE_INTERRUPTS(CLBR_EAX)
+	DISABLE_INTERRUPTS(CLBR_ANY)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	.Lrestore_nocheck
 #endif
--
cgit v1.2.3
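Annotation: the CLBR_* argument is a bitmask telling the paravirt patching
machinery which registers the pvops stub call site is allowed to clobber;
every register not in the mask has to be saved and restored around the
call. Widening the mask from CLBR_EAX to CLBR_ANY therefore removes
save/restore work at this site. The sketch below is a minimal userspace C
model of that bookkeeping; the mask values are illustrative assumptions,
not the literal definitions from arch/x86/include/asm/paravirt_types.h.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's clobber masks (assumed
 * values; the real ones live in asm/paravirt_types.h). */
enum {
	CLBR_NONE = 0,
	CLBR_EAX  = 1 << 0,
	CLBR_ECX  = 1 << 1,
	CLBR_EDX  = 1 << 2,
	CLBR_EDI  = 1 << 3,
	CLBR_ANY  = (1 << 4) - 1,	/* union of all of the above */
};

/* A call site must preserve every register NOT in its clobber mask. */
static int regs_to_preserve(int clbr)
{
	return 4 - __builtin_popcount(clbr & CLBR_ANY);
}

int main(void)
{
	printf("CLBR_EAX: preserve %d regs\n", regs_to_preserve(CLBR_EAX)); /* 3 */
	printf("CLBR_ANY: preserve %d regs\n", regs_to_preserve(CLBR_ANY)); /* 0 */
	return 0;
}

With CLBR_ANY nothing needs preserving, which is exactly why the commit
can relax the mask wherever the following code makes no assumption about
register contents.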
From 2140a9942b84dd4bf559dd1215b8f43c36ece5b5 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Fri, 3 Feb 2017 02:03:25 -0700
Subject: x86/entry/64: Relax pvops stub clobber specifications

Except for the error_exit case, none of the code paths following the
{DIS,EN}ABLE_INTERRUPTS() invocations being modified here make any
assumptions on register values, so all registers can be clobbered
there. In the error_exit case a minor adjustment to register usage
(at once eliminating an instruction) also allows for this to be true.

Signed-off-by: Jan Beulich
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/5894556D02000078001366D3@prv-mh.provo.novell.com
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/entry_64.S | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

(limited to 'arch/x86/entry')

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 044d18ebc43c..d2b2a2948ffe 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -212,7 +212,7 @@ entry_SYSCALL_64_fastpath:
	 * If we see that no exit work is required (which we are required
	 * to check with IRQs off), then we can go straight to SYSRET64.
	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
@@ -233,7 +233,7 @@ entry_SYSCALL_64_fastpath:
	 * raise(3) will trigger this, for example.  IRQs are off.
	 */
	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
+	ENABLE_INTERRUPTS(CLBR_ANY)
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath	/* returns with IRQs disabled */
@@ -343,7 +343,7 @@ ENTRY(stub_ptregs_64)
	 * Called from fast path -- disable IRQs again, pop return address
	 * and jump to slow path
	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	popq	%rax
	jmp	entry_SYSCALL64_slow_path
@@ -518,7 +518,7 @@ common_interrupt:
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
 ret_from_intr:
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	decl	PER_CPU_VAR(irq_count)

@@ -1051,7 +1051,7 @@ END(paranoid_entry)
	 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
	 */
 ENTRY(paranoid_exit)
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF_DEBUG
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	paranoid_exit_no_swapgs
@@ -1156,10 +1156,9 @@ END(error_entry)
	 * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
	 */
 ENTRY(error_exit)
-	movl	%ebx, %eax
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
-	testl	%eax, %eax
+	testl	%ebx, %ebx
	jnz	retint_kernel
	jmp	retint_user
 END(error_exit)
--
cgit v1.2.3
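Annotation: the error_exit hunk also drops the movl %ebx, %eax copy. The
hunk relies on %rbx being callee-saved (and in no pvops clobber set), so
the flag written by error_entry survives the DISABLE_INTERRUPTS stub call
even with CLBR_ANY and can be tested directly. A minimal C model of the
resulting control flow (label names mirror the asm; the bodies are
placeholders, not kernel code):

#include <stdio.h>

static void retint_kernel(void) { puts("resume kernel context"); }
static void retint_user(void)   { puts("SWAPGS and return to user"); }

/*
 * ebx is set by error_entry: 1 = kernel gsbase already loaded,
 * 0 = user gsbase loaded (SWAPGS still needed on the way out).
 */
static void error_exit(int ebx)
{
	/* IRQs are disabled here; ebx survived the pvops stub call. */
	if (ebx)
		retint_kernel();
	else
		retint_user();
}

int main(void)
{
	error_exit(1);	/* came from kernel mode */
	error_exit(0);	/* came from user mode */
	return 0;
}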
From 3d82c59c6e3cb168284d9b0a1143415d9c98ae40 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (VMware)"
Date: Thu, 23 Mar 2017 10:33:49 -0400
Subject: x86/ftrace: Move the ftrace specific code out of entry_32.S

The function tracing hook code for ftrace is not an entry point from
userspace and does not belong in the entry_*.S files. It has already
been moved out of entry_64.S.

Move it out of entry_32.S into its own ftrace_32.S file.

Signed-off-by: Steven Rostedt (VMware)
Reviewed-by: Ingo Molnar
Cc: Peter Zijlstra
Cc: Andy Lutomirski
Cc: Masami Hiramatsu
Cc: Josh Poimboeuf
Cc: Andrew Morton
Cc: Linus Torvalds
Link: http://lkml.kernel.org/r/20170323143445.645218946@goodmis.org
Signed-off-by: Thomas Gleixner
---
 arch/x86/entry/entry_32.S | 169 ----------------------------------------------
 1 file changed, 169 deletions(-)

(limited to 'arch/x86/entry')

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 5553475f6008..50bc26949e9e 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -35,16 +35,13 @@
 #include <asm/errno.h>
 #include <asm/segment.h>
 #include <asm/smp.h>
-#include <asm/page_types.h>
 #include <asm/percpu.h>
 #include <asm/processor-flags.h>
-#include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-#include <asm/export.h>
 #include <asm/frame.h>

 .section .entry.text, "ax"
@@ -886,172 +883,6 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,

 #endif /* CONFIG_HYPERV */

-#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-ENTRY(mcount)
-	ret
-END(mcount)
-
-ENTRY(ftrace_caller)
-	pushl	%eax
-	pushl	%ecx
-	pushl	%edx
-	pushl	$0				/* Pass NULL as regs pointer */
-	movl	4*4(%esp), %eax
-	movl	0x4(%ebp), %edx
-	movl	function_trace_op, %ecx
-	subl	$MCOUNT_INSN_SIZE, %eax
-
-.globl ftrace_call
-ftrace_call:
-	call	ftrace_stub
-
-	addl	$4, %esp			/* skip NULL pointer */
-	popl	%edx
-	popl	%ecx
-	popl	%eax
-.Lftrace_ret:
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
-	jmp	ftrace_stub
-#endif
-
-/* This is weak to keep gas from relaxing the jumps */
-WEAK(ftrace_stub)
-	ret
-END(ftrace_caller)
-
-ENTRY(ftrace_regs_caller)
-	pushf	/* push flags before compare (in cs location) */
-
-	/*
-	 * i386 does not save SS and ESP when coming from kernel.
-	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
-	 * Unfortunately, that means eflags must be at the same location
-	 * as the current return ip is. We move the return ip into the
-	 * ip location, and move flags into the return ip location.
-	 */
-	pushl	4(%esp)				/* save return ip into ip slot */
-
-	pushl	$0				/* Load 0 into orig_ax */
-	pushl	%gs
-	pushl	%fs
-	pushl	%es
-	pushl	%ds
-	pushl	%eax
-	pushl	%ebp
-	pushl	%edi
-	pushl	%esi
-	pushl	%edx
-	pushl	%ecx
-	pushl	%ebx
-
-	movl	13*4(%esp), %eax		/* Get the saved flags */
-	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
-						/* clobbering return ip */
-	movl	$__KERNEL_CS, 13*4(%esp)
-
-	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
-	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
-	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
-	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
-	pushl	%esp				/* Save pt_regs as 4th parameter */
-
-GLOBAL(ftrace_regs_call)
-	call	ftrace_stub
-
-	addl	$4, %esp			/* Skip pt_regs */
-	movl	14*4(%esp), %eax		/* Move flags back into cs */
-	movl	%eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
-	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
-	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */
-
-	popl	%ebx
-	popl	%ecx
-	popl	%edx
-	popl	%esi
-	popl	%edi
-	popl	%ebp
-	popl	%eax
-	popl	%ds
-	popl	%es
-	popl	%fs
-	popl	%gs
-	addl	$8, %esp			/* Skip orig_ax and ip */
-	popf					/* Pop flags at end (no addl to corrupt flags) */
-	jmp	.Lftrace_ret
-
-	popf
-	jmp	ftrace_stub
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(mcount)
-	cmpl	$__PAGE_OFFSET, %esp
-	jb	ftrace_stub			/* Paging not enabled yet? */
-
-	cmpl	$ftrace_stub, ftrace_trace_function
-	jnz	.Ltrace
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	cmpl	$ftrace_stub, ftrace_graph_return
-	jnz	ftrace_graph_caller
-
-	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
-	jnz	ftrace_graph_caller
-#endif
-.globl ftrace_stub
-ftrace_stub:
-	ret
-
-	/* taken from glibc */
-.Ltrace:
-	pushl	%eax
-	pushl	%ecx
-	pushl	%edx
-	movl	0xc(%esp), %eax
-	movl	0x4(%ebp), %edx
-	subl	$MCOUNT_INSN_SIZE, %eax
-
-	call	*ftrace_trace_function
-
-	popl	%edx
-	popl	%ecx
-	popl	%eax
-	jmp	ftrace_stub
-END(mcount)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-EXPORT_SYMBOL(mcount)
-#endif /* CONFIG_FUNCTION_TRACER */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
-	pushl	%eax
-	pushl	%ecx
-	pushl	%edx
-	movl	0xc(%esp), %eax
-	lea	0x4(%ebp), %edx
-	movl	(%ebp), %ecx
-	subl	$MCOUNT_INSN_SIZE, %eax
-	call	prepare_ftrace_return
-	popl	%edx
-	popl	%ecx
-	popl	%eax
-	ret
-END(ftrace_graph_caller)
-
-.globl return_to_handler
-return_to_handler:
-	pushl	%eax
-	pushl	%edx
-	movl	%ebp, %eax
-	call	ftrace_return_to_handler
-	movl	%eax, %ecx
-	popl	%edx
-	popl	%eax
-	jmp	*%ecx
-#endif
-
 #ifdef CONFIG_TRACING
 ENTRY(trace_page_fault)
	ASM_CLAC
--
cgit v1.2.3
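Annotation: the stubs removed above implement the kernel's mcount hook:
they capture the traced function's address and its caller's address (and,
for ftrace_regs_caller, a full pt_regs) before handing off to the tracer.
A rough userspace analogue, offered as an illustration and not as the
kernel mechanism, is GCC's -finstrument-functions, whose hooks receive
the same (ip, parent ip) pair that the removed code loads into %eax/%edx:

/* Build sketch (assumed toolchain): gcc -finstrument-functions demo.c */
#include <stdio.h>

/* no_instrument_function keeps the hooks from tracing themselves. */
__attribute__((no_instrument_function))
void __cyg_profile_func_enter(void *ip, void *parent_ip)
{
	fprintf(stderr, "enter: ip=%p parent=%p\n", ip, parent_ip);
}

__attribute__((no_instrument_function))
void __cyg_profile_func_exit(void *ip, void *parent_ip)
{
	fprintf(stderr, "exit:  ip=%p parent=%p\n", ip, parent_ip);
}

static int add(int a, int b)
{
	return a + b;	/* entry and exit of this call get reported */
}

int main(void)
{
	return add(2, 3) == 5 ? 0 : 1;
}

As with ftrace, the hooks are pure observers: they must preserve the
traced function's register state, which is exactly the bookkeeping the
pushl/popl sequences above perform in the kernel case.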