author     Borislav Petkov <bp@suse.de>              2013-06-09 12:07:32 +0200
committer  H. Peter Anvin <hpa@linux.intel.com>      2013-06-20 17:37:19 -0700
commit     5700f743b597951743da9c7d891d3989aac0486e (patch)
tree       e0482b8bb6ec72bc1db19bcd853922efb97b36bc /arch
parent     c3b83598c1eeb1507603b461f5843ec2a49e3033 (diff)
x86: Sanity-check static_cpu_has usage
static_cpu_has may be used only after alternatives have run. Before that it always returns false if constant folding with __builtin_constant_p() doesn't happen. And you don't want that.

This patch is the result of me debugging an issue where I overzealously put static_cpu_has in code which executed before alternatives have run, and had to spend some time scratching my head and cursing at the monitor.

So add a jump to a warning which screams loudly when we use this function too early. The alternatives mechanism then patches that check away, in conjunction with patching the rest of the kernel image.

[ hpa: factored this into its own configuration option. If we want to have an overarching option, it should be an option which selects other options, not a group option in the source code. ]

Signed-off-by: Borislav Petkov <bp@suse.de>
Link: http://lkml.kernel.org/r/1370772454-6106-4-git-send-email-bp@alien8.de
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
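As a hedged illustration of the rule above (not part of this patch; early_setup(), late_setup() and use_sse2_path() are hypothetical names), code that runs before alternatives have been applied has to use boot_cpu_has(), while static_cpu_has() only becomes meaningful afterwards:

/* Sketch only: the helpers below are made up for illustration. */
static void use_sse2_path(void);

static void __init early_setup(void)
{
	/* Safe at any time: reads the bit straight from boot_cpu_data. */
	if (boot_cpu_has(X86_FEATURE_XMM2))
		use_sse2_path();
}

static void late_setup(void)
{
	/* Valid only after alternatives have patched this test. */
	if (static_cpu_has(X86_FEATURE_XMM2))
		use_sse2_path();
}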
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/Kconfig.debug              | 10
-rw-r--r--  arch/x86/include/asm/cpufeature.h   | 30
-rw-r--r--  arch/x86/kernel/cpu/common.c        |  8
3 files changed, 46 insertions, 2 deletions
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index c198b7e13e7b..c6acdf752715 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -304,4 +304,14 @@ config DEBUG_NMI_SELFTEST
	  If unsure, say N.
+config X86_DEBUG_STATIC_CPU_HAS
+	bool "Debug alternatives"
+	depends on DEBUG_KERNEL
+	---help---
+	  This option causes additional code to be generated which
+	  fails if static_cpu_has() is used before alternatives have
+	  run.
+
+	  If unsure, say N.
+
endmenu
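A hedged usage note: the bool symbol above becomes CONFIG_X86_DEBUG_STATIC_CPU_HAS and, because of the DEBUG_KERNEL dependency, is only offered in debug configurations. A minimal .config fragment enabling the check might look like:

CONFIG_DEBUG_KERNEL=y
CONFIG_X86_DEBUG_STATIC_CPU_HAS=y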
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 043d61e66efc..252b28f1176a 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -356,15 +356,35 @@ extern const char * const x86_power_flags[32];
#endif /* CONFIG_X86_64 */
#if __GNUC__ >= 4
+extern void warn_pre_alternatives(void);
+
/*
* Static testing of CPU features. Used the same as boot_cpu_has().
* These are only valid after alternatives have run, but will statically
* patch the target code for additional performance.
- *
*/
static __always_inline __pure bool __static_cpu_has(u16 bit)
{
#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+		/*
+		 * Catch too early usage of this before alternatives
+		 * have run.
+		 */
+		asm goto("1: jmp %l[t_warn]\n"
+			 "2:\n"
+			 ".section .altinstructions,\"a\"\n"
+			 " .long 1b - .\n"
+			 " .long 0\n"		/* no replacement */
+			 " .word %P0\n"		/* 1: do replace */
+			 " .byte 2b - 1b\n"	/* source len */
+			 " .byte 0\n"		/* replacement len */
+			 ".previous\n"
+			 /* skipping size check since replacement size = 0 */
+			 : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
+#endif
+
		asm goto("1: jmp %l[t_no]\n"
			 "2:\n"
			 ".section .altinstructions,\"a\"\n"
@@ -379,7 +399,13 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
		return true;
	t_no:
		return false;
-#else
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+	t_warn:
+		warn_pre_alternatives();
+		return false;
+#endif
+#else /* GCC_VERSION >= 40500 */
		u8 flag;
		/* Open-coded due to __stringify() in ALTERNATIVE() */
		asm volatile("1: movb $0,%0\n"
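The debug hunk above lets the alternatives machinery remove its own check: the unconditional jmp to t_warn is recorded in .altinstructions against X86_FEATURE_ALWAYS with a zero-length replacement, so apply_alternatives() overwrites the jmp with NOPs. Reaching t_warn at runtime therefore means alternatives have not run yet. A rough C-level analogue of that control flow (a sketch only, not the real mechanism; the kernel patches the instruction stream rather than testing a flag, and alternatives_applied/model_static_cpu_has() are hypothetical):

/* Kernel-context sketch; assumes <asm/cpufeature.h> for boot_cpu_has(). */
static bool alternatives_applied;	/* hypothetical: set once patching is done */

static bool model_static_cpu_has(u16 bit)
{
	if (!alternatives_applied) {	/* the unpatched "jmp %l[t_warn]" */
		warn_pre_alternatives();
		return false;
	}
	/*
	 * After patching, the real version is a fixed jump or fall-through,
	 * i.e. it behaves like a feature test resolved at patch time.
	 */
	return boot_cpu_has(bit);
}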
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d388ce1f3e1b..59adaa17e220 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1364,3 +1364,11 @@ void __cpuinit cpu_init(void)
fpu_init();
}
#endif
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+void warn_pre_alternatives(void)
+{
+	WARN(1, "You're using static_cpu_has before alternatives have run!\n");
+}
+EXPORT_SYMBOL_GPL(warn_pre_alternatives);
+#endif
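Finally, a hedged example of the call pattern this is meant to catch (not from the patch; early_quirk() is a hypothetical hook running before alternative_instructions()). With CONFIG_X86_DEBUG_STATIC_CPU_HAS=y the still-unpatched jmp lands in warn_pre_alternatives() and the WARN above fires instead of the feature silently reading as absent:

/* Hypothetical early-boot code: runs before alternatives are applied. */
static void __init early_quirk(void)
{
	/* Too early for static_cpu_has(); boot_cpu_has() would be correct here. */
	if (static_cpu_has(X86_FEATURE_ERMS))
		pr_info("ERMS present\n");
}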