summaryrefslogtreecommitdiff
path: root/arch/s390/kernel/smp.c
diff options
context:
space:
mode:
authorAlexander Gordeev <agordeev@linux.ibm.com>2021-05-06 16:26:53 +0200
committerVasily Gorbik <gor@linux.ibm.com>2021-06-07 17:07:00 +0200
commit587704efb3dea5685a2f571b75bd3dc47f73fec1 (patch)
tree5201a0c2078733d0b8ec057833c3e3112e8d5f03 /arch/s390/kernel/smp.c
parent5789284710aa121416524acc54ac0d8c27c3c2ef (diff)
s390/smp: do not preserve boot CPU lowcore on hotplug
Once the kernel is running the boot CPU lowcore becomes freeable and does not differ from the secondary CPU ones in any way. Make use of it and do not preserve the boot CPU lowcore on unplugging. That allows returning unused memory when the boot CPU is offline and makes the code more clear.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r--arch/s390/kernel/smp.c47
1 file changed, 18 insertions(+), 29 deletions(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1e1d61a4d294..eaca08b1688f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -194,20 +194,12 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
unsigned long async_stack, nodat_stack, mcck_stack;
struct lowcore *lc;
- if (pcpu != &pcpu_devices[0]) {
- pcpu->lowcore = (struct lowcore *)
- __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
- nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
- if (!pcpu->lowcore || !nodat_stack)
- goto out;
- } else {
- nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
- }
+ lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
+ nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
async_stack = stack_alloc();
mcck_stack = stack_alloc();
- if (!async_stack || !mcck_stack)
- goto out_stack;
- lc = pcpu->lowcore;
+ if (!lc || !nodat_stack || !async_stack || !mcck_stack)
+ goto out;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc->async_stack = async_stack + STACK_INIT_OFFSET;
@@ -220,40 +212,37 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
if (nmi_alloc_per_cpu(lc))
- goto out_stack;
+ goto out;
lowcore_ptr[cpu] = lc;
+ pcpu->lowcore = lc;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
return 0;
-out_stack:
+out:
stack_free(mcck_stack);
stack_free(async_stack);
-out:
- if (pcpu != &pcpu_devices[0]) {
- free_pages(nodat_stack, THREAD_SIZE_ORDER);
- free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
- }
+ free_pages(nodat_stack, THREAD_SIZE_ORDER);
+ free_pages((unsigned long) lc, LC_ORDER);
return -ENOMEM;
}
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
- unsigned long async_stack, nodat_stack, mcck_stack, lowcore;
-
- nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
- async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
- mcck_stack = pcpu->lowcore->mcck_stack - STACK_INIT_OFFSET;
- lowcore = (unsigned long) pcpu->lowcore;
+ unsigned long async_stack, nodat_stack, mcck_stack;
+ struct lowcore *lc;
+ lc = pcpu->lowcore;
+ nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
+ async_stack = lc->async_stack - STACK_INIT_OFFSET;
+ mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
- nmi_free_per_cpu(pcpu->lowcore);
+ pcpu->lowcore = NULL;
+ nmi_free_per_cpu(lc);
stack_free(async_stack);
stack_free(mcck_stack);
- if (pcpu == &pcpu_devices[0])
- return;
free_pages(nodat_stack, THREAD_SIZE_ORDER);
- free_pages(lowcore, LC_ORDER);
+ free_pages((unsigned long) lc, LC_ORDER);
}
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)