| author | Atish Patra <atish.patra@wdc.com> | 2020-03-17 18:11:43 -0700 |
|---|---|---|
| committer | Palmer Dabbelt <palmerdabbelt@google.com> | 2020-03-31 11:27:50 -0700 |
| commit | cfafe260137418d0265d0df3bb18dc494af2b43e | |
| tree | 50bde642415f253c2f4bf848fcb5441af43f2c79 | |
| parent | db5a79460315bd12dedee5f964cd72f3a534ecb2 | |
RISC-V: Add support for ordered booting method using HSM
Currently, all harts have to jump to Linux on RISC-V. This complicates the
multi-stage boot process, as every transient stage also has to ensure that all
harts enter that stage and jump to Linux afterwards. It also obstructs
a clean kexec implementation.
The SBI HSM extension provides an alternative in which only a single hart
needs to boot and enter Linux. The booting hart can then bring up the
secondary harts one by one.
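
For context, starting a hart through HSM boils down to a single SBI call: the firmware takes a target hartid, a physical start address, and an opaque parameter. The sketch below only illustrates that calling convention as defined by the SBI v0.2 spec; the helper name `hsm_hart_start_raw` and the local `sbiret_example` struct are invented for the example, and the patch itself goes through the kernel's `sbi_ecall()` wrapper shown in the diff further down.

```c
/*
 * Illustrative sketch only (not part of this patch): the raw SBI v0.2
 * calling convention behind HSM hart start.  a7 carries the extension ID
 * ("HSM" == 0x48534D), a6 the function ID (HART_START == 0), a0-a2 the
 * arguments; the firmware returns an error code in a0 and a value in a1.
 */
struct sbiret_example {
	long error;
	long value;
};

static struct sbiret_example hsm_hart_start_raw(unsigned long hartid,
						unsigned long start_addr,
						unsigned long priv)
{
	register unsigned long a0 asm("a0") = hartid;
	register unsigned long a1 asm("a1") = start_addr;
	register unsigned long a2 asm("a2") = priv;
	register unsigned long a6 asm("a6") = 0;	/* HART_START fid */
	register unsigned long a7 asm("a7") = 0x48534D;	/* "HSM" extension */
	struct sbiret_example ret;

	asm volatile("ecall"
		     : "+r" (a0), "+r" (a1)
		     : "r" (a2), "r" (a6), "r" (a7)
		     : "memory");
	ret.error = a0;
	ret.value = a1;
	return ret;
}
```

Per the HSM spec, the started hart enters start_addr in S-mode with a0 = hartid and a1 = the opaque parameter, which is why secondary_start_sbi in head.S below can index the boot stack/task arrays directly with a0.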
Add SBI HSM based cpu_ops that implement an ordered booting method on
RISC-V. This change is also backward compatible with older firmware that
does not implement the HSM extension: if a newer kernel is used with such
firmware, it will continue to use the default spinwait booting method.
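
To show where these ops plug in: the generic bring-up path only ever dispatches through cpu_ops[], so the choice between HSM and spinwait is made once per CPU in cpu_set_ops() and stays invisible to the caller. The following is a hedged sketch of that caller's shape; the real __cpu_up() in smpboot.c is not part of this diff and differs in detail.

```c
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/cpu_ops.h>

/*
 * Hedged sketch, not the actual smpboot.c code: the arch __cpu_up()
 * simply calls whichever cpu_start() was selected in cpu_set_ops(),
 * so the HSM and spinwait methods are interchangeable from here.
 */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = -EOPNOTSUPP;

	if (cpu_ops[cpu]->cpu_start)
		ret = cpu_ops[cpu]->cpu_start(cpu, tidle);
	else
		pr_err("CPU %u: cpu_start method missing\n", cpu);

	return ret;
}
```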
Signed-off-by: Atish Patra <atish.patra@wdc.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/riscv/kernel/Makefile | 3 |
| -rw-r--r-- | arch/riscv/kernel/cpu_ops.c | 10 |
| -rw-r--r-- | arch/riscv/kernel/cpu_ops_sbi.c | 81 |
| -rw-r--r-- | arch/riscv/kernel/head.S | 26 |
| -rw-r--r-- | arch/riscv/kernel/smpboot.c | 2 |
| -rw-r--r-- | arch/riscv/kernel/traps.c | 2 |

6 files changed, 121 insertions, 3 deletions
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 43d49ea36a3f..674a23cc4223 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -46,5 +46,8 @@ obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_callchain.o
 obj-$(CONFIG_HAVE_PERF_REGS)	+= perf_regs.o
 obj-$(CONFIG_RISCV_SBI)		+= sbi.o
+ifeq ($(CONFIG_RISCV_SBI), y)
+obj-$(CONFIG_SMP)		+= cpu_ops_sbi.o
+endif
 
 clean:
diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c
index 62705908eee5..c4c33bf02369 100644
--- a/arch/riscv/kernel/cpu_ops.c
+++ b/arch/riscv/kernel/cpu_ops.c
@@ -18,6 +18,7 @@ const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
 void *__cpu_up_stack_pointer[NR_CPUS];
 void *__cpu_up_task_pointer[NR_CPUS];
 
+extern const struct cpu_operations cpu_ops_sbi;
 extern const struct cpu_operations cpu_ops_spinwait;
 
 void cpu_update_secondary_bootdata(unsigned int cpuid,
@@ -34,5 +35,12 @@ void cpu_update_secondary_bootdata(unsigned int cpuid,
 
 void __init cpu_set_ops(int cpuid)
 {
-	cpu_ops[cpuid] = &cpu_ops_spinwait;
+#if IS_ENABLED(CONFIG_RISCV_SBI)
+	if (sbi_probe_extension(SBI_EXT_HSM) > 0) {
+		if (!cpuid)
+			pr_info("SBI v0.2 HSM extension detected\n");
+		cpu_ops[cpuid] = &cpu_ops_sbi;
+	} else
+#endif
+		cpu_ops[cpuid] = &cpu_ops_spinwait;
 }
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
new file mode 100644
index 000000000000..66f3cded91f5
--- /dev/null
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HSM extension and cpu_ops implementation.
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <asm/cpu_ops.h>
+#include <asm/sbi.h>
+#include <asm/smp.h>
+
+extern char secondary_start_sbi[];
+const struct cpu_operations cpu_ops_sbi;
+
+static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
+			      unsigned long priv)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START,
+			hartid, saddr, priv, 0, 0, 0);
+	if (ret.error)
+		return sbi_err_map_linux_errno(ret.error);
+	else
+		return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int sbi_hsm_hart_stop(void)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STOP, 0, 0, 0, 0, 0, 0);
+
+	if (ret.error)
+		return sbi_err_map_linux_errno(ret.error);
+	else
+		return 0;
+}
+
+static int sbi_hsm_hart_get_status(unsigned long hartid)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STATUS,
+			hartid, 0, 0, 0, 0, 0);
+	if (ret.error)
+		return sbi_err_map_linux_errno(ret.error);
+	else
+		return ret.value;
+}
+#endif
+
+static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
+{
+	int rc;
+	unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
+	int hartid = cpuid_to_hartid_map(cpuid);
+
+	cpu_update_secondary_bootdata(cpuid, tidle);
+	rc = sbi_hsm_hart_start(hartid, boot_addr, 0);
+
+	return rc;
+}
+
+static int sbi_cpu_prepare(unsigned int cpuid)
+{
+	if (!cpu_ops_sbi.cpu_start) {
+		pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+const struct cpu_operations cpu_ops_sbi = {
+	.name		= "sbi",
+	.cpu_prepare	= sbi_cpu_prepare,
+	.cpu_start	= sbi_cpu_start,
+};
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 173507395a6b..e5115d5e0b3a 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -99,11 +99,37 @@ relocate:
 	ret
 #endif /* CONFIG_MMU */
 #ifdef CONFIG_SMP
+	.global secondary_start_sbi
+secondary_start_sbi:
+	/* Mask all interrupts */
+	csrw CSR_IE, zero
+	csrw CSR_IP, zero
+
+	/* Load the global pointer */
+	.option push
+	.option norelax
+		la gp, __global_pointer$
+	.option pop
+
+	/*
+	 * Disable FPU to detect illegal usage of
+	 * floating point in kernel space
+	 */
+	li t0, SR_FS
+	csrc CSR_STATUS, t0
+
 	/* Set trap vector to spin forever to help debug */
 	la a3, .Lsecondary_park
 	csrw CSR_TVEC, a3
 
 	slli a3, a0, LGREG
+	la a4, __cpu_up_stack_pointer
+	la a5, __cpu_up_task_pointer
+	add a4, a3, a4
+	add a5, a3, a5
+	REG_L sp, (a4)
+	REG_L tp, (a5)
+
+	.global secondary_start_common
 secondary_start_common:
 
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index e89396a2a1af..4e9922790f6e 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -143,7 +143,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 /*
  * C entry point for a secondary processor.
  */
-asmlinkage __visible void __init smp_callin(void)
+asmlinkage __visible void smp_callin(void)
 {
 	struct mm_struct *mm = &init_mm;
 
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index a4d136355f78..23a57b92cd1d 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -148,7 +148,7 @@ int is_valid_bugaddr(unsigned long pc)
 }
 #endif /* CONFIG_GENERIC_BUG */
 
-void __init trap_init(void)
+void trap_init(void)
 {
 	/*
 	 * Set sup0 scratch register to 0, indicating to exception vector