author    | Christophe Leroy <christophe.leroy@csgroup.eu> | 2020-09-27 09:16:26 +0000
committer | Michael Ellerman <mpe@ellerman.id.au> | 2020-12-04 01:01:16 +1100
commit    | b2df3f60b452ab496adcef1b2f9c2560f6d8e8e0 (patch)
tree      | 14aba1cde1c4ec6251410f13de145502abe7a249 /arch/powerpc/kernel/vdso.c
parent    | 7461a4f79ba16dc7733c07c00883a10c7e46b602 (diff)
powerpc/vdso: Simplify arch_setup_additional_pages() exit
To simplify the exit path of arch_setup_additional_pages(), rename
it to __arch_setup_additional_pages() and create a caller,
arch_setup_additional_pages(), which does the locking.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/603c1d039d3f928ee95e547fcd2219fcf4c3b514.1601197618.git.christophe.leroy@csgroup.eu
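
The shape of the change is the familiar lock-in-the-caller pattern: the new outer arch_setup_additional_pages() owns the early-out check, the mmap write lock, and the failure cleanup, while the inner __arch_setup_additional_pages() runs entirely under the lock and can return directly from any error point, with no goto-based unlock bookkeeping. A minimal standalone sketch of that pattern, where a pthread mutex and a long `state` are hypothetical stand-ins for the mmap write lock and mm->context.vdso_base (not the kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long state;

/* Inner worker: runs with the lock held and may return at any error
 * point without unlock bookkeeping -- the caller owns the lock. */
static int __setup(long base)
{
        if (base < 0)
                return -1;      /* early exit, nothing to unwind */
        state = base;
        return 0;
}

/* Outer wrapper: does the locking, calls the worker, and resets the
 * state on failure, mirroring the new arch_setup_additional_pages(). */
static int setup(long base)
{
        int rc;

        state = 0;
        pthread_mutex_lock(&lock);
        rc = __setup(base);
        if (rc)
                state = 0;
        pthread_mutex_unlock(&lock);
        return rc;
}

int main(void)
{
        printf("ok:   rc=%d state=%ld\n", setup(42), state);
        printf("fail: rc=%d state=%ld\n", setup(-1), state);
        return 0;
}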
Diffstat (limited to 'arch/powerpc/kernel/vdso.c')
-rw-r--r-- | arch/powerpc/kernel/vdso.c | 40 |
1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 448ecaa27ac5..a976c5e4a7ac 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -122,7 +122,7 @@ struct lib64_elfinfo
  * This is called from binfmt_elf, we create the special vma for the
  * vDSO and insert it into the mm struct tree
  */
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
 	struct page **vdso_pagelist;
@@ -130,9 +130,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	unsigned long vdso_base;
 	int rc;
 
-	if (!vdso_ready)
-		return 0;
-
 	if (is_32bit_task()) {
 		vdso_pagelist = vdso32_pagelist;
 		vdso_size = &vdso32_end - &vdso32_start;
@@ -148,8 +145,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		vdso_base = 0;
 	}
 
-	current->mm->context.vdso_base = 0;
-
 	/* Add a page to the vdso size for the data page */
 	vdso_size += PAGE_SIZE;
 
@@ -159,15 +154,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * and end up putting it elsewhere.
 	 * Add enough to the size so that the result can be aligned.
 	 */
-	if (mmap_write_lock_killable(mm))
-		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, vdso_base,
 				      vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
 				      0, 0);
-	if (IS_ERR_VALUE(vdso_base)) {
-		rc = vdso_base;
-		goto fail_mmapsem;
-	}
+	if (IS_ERR_VALUE(vdso_base))
+		return vdso_base;
 
 	/* Add required alignment. */
 	vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
@@ -193,15 +184,26 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		     VM_READ|VM_EXEC|
 		     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 		     vdso_pagelist);
-	if (rc) {
-		current->mm->context.vdso_base = 0;
-		goto fail_mmapsem;
-	}
+	return rc;
+}
 
-	mmap_write_unlock(mm);
-	return 0;
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+	struct mm_struct *mm = current->mm;
+	int rc;
+
+	mm->context.vdso_base = 0;
+
+	if (!vdso_ready)
+		return 0;
+
+	if (mmap_write_lock_killable(mm))
+		return -EINTR;
+
+	rc = __arch_setup_additional_pages(bprm, uses_interp);
+	if (rc)
+		mm->context.vdso_base = 0;
 
- fail_mmapsem:
 	mmap_write_unlock(mm);
 	return rc;
 }
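
One detail worth noting in the unchanged code above: get_unmapped_area() is asked for vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK) bytes, so the returned address can always be rounded up to VDSO_ALIGNMENT while still leaving room for the whole vDSO inside the reservation. A sketch of that over-allocate-then-align idea in plain C, where malloc() is a hypothetical stand-in for get_unmapped_area() and ALIGN is reproduced kernel-style:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Kernel-style ALIGN: round x up to a power-of-two boundary a. */
#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
        size_t size  = 4096;    /* payload actually needed          */
        size_t align = 65536;   /* required alignment (power of 2)  */

        /* Over-allocate by align - 1 so an aligned start address
         * with room for `size` bytes is guaranteed to exist. */
        void *raw = malloc(size + align - 1);
        if (!raw)
                return 1;

        uintptr_t aligned = ALIGN((uintptr_t)raw, align);
        printf("raw=%p aligned=%#lx\n", raw, (unsigned long)aligned);

        free(raw);
        return 0;
}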