author     Linus Torvalds <torvalds@linux-foundation.org>  2021-06-28 13:10:25 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-06-28 13:10:25 -0700
commit     8e4d7a78f08a788a839bd88a2710ba7a71a86e24 (patch)
tree       e577d7c6847c365bb12045d7a723ada238cfb2a2 /arch/x86/kernel
parent     98e62da8b3ee9ac3faf388fd78ee982a765170a7 (diff)
parent     1d3156396cf6ea0873145092f4e040374ff1d862 (diff)
Merge tag 'x86-cleanups-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cleanups from Ingo Molnar:
"Misc cleanups & removal of obsolete code"
* tag 'x86-cleanups-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/sgx: Correct kernel-doc's arg name in sgx_encl_release()
doc: Remove references to IBM Calgary
x86/setup: Document that Windows reserves the first MiB
x86/crash: Remove crash_reserve_low_1M()
x86/setup: Remove CONFIG_X86_RESERVE_LOW and reservelow= options
x86/alternative: Align insn bytes vertically
x86: Fix leftover comment typos
x86/asm: Simplify __smp_mb() definition
x86/alternatives: Make the x86nops[] symbol static
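
An aside on the "x86/asm: Simplify __smp_mb() definition" entry above (that change lands outside arch/x86/kernel, so it does not show up in the diffstat below): the cleanup folds the 32-bit and 64-bit variants of the barrier into a single definition. The snippet below is only an illustrative sketch of that pattern, reconstructed from memory rather than copied from the merged patch; it assumes the _ASM_SP helper from <asm/asm.h>, which expands to "esp" or "rsp" depending on the build.

    /*
     * Sketch only (assumption, not the exact merged code): one __smp_mb()
     * definition covering both 32- and 64-bit builds instead of an
     * #ifdef CONFIG_X86_32 pair. The locked add to the stack acts as a
     * full memory barrier and is cheaper than mfence on x86.
     */
    #define __smp_mb() \
        asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")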
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/alternative.c   |  6
-rw-r--r--  arch/x86/kernel/cpu/sgx/encl.c  |  2
-rw-r--r--  arch/x86/kernel/crash.c         | 13
-rw-r--r--  arch/x86/kernel/kprobes/core.c  |  2
-rw-r--r--  arch/x86/kernel/setup.c         | 45
5 files changed, 16 insertions, 52 deletions
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 6fe5b44fcbc9..e9da3dc71254 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -75,7 +75,7 @@ do {								\
 	}							\
 } while (0)
 
-const unsigned char x86nops[] =
+static const unsigned char x86nops[] =
 {
 	BYTES_NOP1,
 	BYTES_NOP2,
@@ -301,8 +301,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 			instr, instr, a->instrlen,
 			replacement, a->replacementlen);
 
-	DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
-	DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
+	DUMP_BYTES(instr, a->instrlen, "%px:   old_insn: ", instr);
+	DUMP_BYTES(replacement, a->replacementlen, "%px:   rpl_insn: ", replacement);
 
 	memcpy(insn_buff, replacement, a->replacementlen);
 	insn_buff_sz = a->replacementlen;
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 3be203297988..001808e3901c 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -383,7 +383,7 @@ const struct vm_operations_struct sgx_vm_ops = {
 
 /**
  * sgx_encl_release - Destroy an enclave instance
- * @kref: address of a kref inside &sgx_encl
+ * @ref: address of a kref inside &sgx_encl
  *
  * Used together with kref_put(). Frees all the resources associated with the
  * enclave and the instance itself.
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 54ce999ed321..e8326a8d1c5d 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -70,19 +70,6 @@ static inline void cpu_crash_vmclear_loaded_vmcss(void)
 	rcu_read_unlock();
 }
 
-/*
- * When the crashkernel option is specified, only use the low
- * 1M for the real mode trampoline.
- */
-void __init crash_reserve_low_1M(void)
-{
-	if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0)
-		return;
-
-	memblock_reserve(0, 1<<20);
-	pr_info("Reserving the low 1M of memory for crashkernel\n");
-}
-
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
 static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 1b3fe0edd329..c492ad3001ca 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -674,7 +674,7 @@ static int prepare_emulation(struct kprobe *p, struct insn *insn)
 			break;
 
 		if (insn->addr_bytes != sizeof(unsigned long))
-			return -EOPNOTSUPP;	/* Don't support differnt size */
+			return -EOPNOTSUPP;	/* Don't support different size */
 		if (X86_MODRM_MOD(opcode) != 3)
 			return -EOPNOTSUPP;	/* TODO: support memory addressing */
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 1e720626069a..85acd22f8022 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -695,30 +695,6 @@ static void __init e820_add_kernel_range(void)
 	e820__range_add(start, size, E820_TYPE_RAM);
 }
 
-static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
-
-static int __init parse_reservelow(char *p)
-{
-	unsigned long long size;
-
-	if (!p)
-		return -EINVAL;
-
-	size = memparse(p, &p);
-
-	if (size < 4096)
-		size = 4096;
-
-	if (size > 640*1024)
-		size = 640*1024;
-
-	reserve_low = size;
-
-	return 0;
-}
-
-early_param("reservelow", parse_reservelow);
-
 static void __init early_reserve_memory(void)
 {
 	/*
@@ -1084,17 +1060,18 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 	/*
-	 * Find free memory for the real mode trampoline and place it
-	 * there.
-	 * If there is not enough free memory under 1M, on EFI-enabled
-	 * systems there will be additional attempt to reclaim the memory
-	 * for the real mode trampoline at efi_free_boot_services().
+	 * Find free memory for the real mode trampoline and place it there. If
+	 * there is not enough free memory under 1M, on EFI-enabled systems
+	 * there will be additional attempt to reclaim the memory for the real
+	 * mode trampoline at efi_free_boot_services().
+	 *
+	 * Unconditionally reserve the entire first 1M of RAM because BIOSes
+	 * are known to corrupt low memory and several hundred kilobytes are not
+	 * worth complex detection what memory gets clobbered. Windows does the
+	 * same thing for very similar reasons.
 	 *
-	 * Unconditionally reserve the entire first 1M of RAM because
-	 * BIOSes are know to corrupt low memory and several
-	 * hundred kilobytes are not worth complex detection what memory gets
-	 * clobbered. Moreover, on machines with SandyBridge graphics or in
-	 * setups that use crashkernel the entire 1M is reserved anyway.
+	 * Moreover, on machines with SandyBridge graphics or in setups that use
+	 * crashkernel the entire 1M is reserved anyway.
 	 */
 	reserve_real_mode();
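
For context on the setup.c hunks above: with the reservelow= parameter and crash_reserve_low_1M() removed, the first MiB is reserved unconditionally on every boot. The sketch below is illustrative only, not the kernel's exact code (the helper name reserve_low_1M_sketch() is hypothetical), though memblock_reserve() and SZ_1M are real kernel interfaces.

    #include <linux/init.h>      /* __init */
    #include <linux/memblock.h>  /* memblock_reserve() */
    #include <linux/sizes.h>     /* SZ_1M */

    /*
     * Illustrative sketch: reserve the first 1 MiB outright so the memblock
     * allocator never hands out low memory that BIOSes are known to clobber.
     * In the merged code this happens on the real-mode trampoline setup path
     * called from setup_arch().
     */
    static void __init reserve_low_1M_sketch(void)
    {
    	memblock_reserve(0, SZ_1M);
    }

Compared with the removed parse_reservelow() logic, there is no command-line parsing or size clamping left: the reservation is a single fixed-size call.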