author    Christophe Leroy <christophe.leroy@csgroup.eu>  2020-09-27 09:16:27 +0000
committer Michael Ellerman <mpe@ellerman.id.au>           2020-12-04 01:01:16 +1100
commit    c1bab64360e6850ca54305d2f1902dac829c9752 (patch)
tree      7c591101b593c301f6cca9099ef8eedf7a44cff4 /arch/powerpc
parent    b2df3f60b452ab496adcef1b2f9c2560f6d8e8e0 (diff)
powerpc/vdso: Move to _install_special_mapping() and remove arch_vma_name()
Copied from commit 2fea7f6c98f5 ("arm64: vdso: move to _install_special_mapping and remove arch_vma_name").

Use the new _install_special_mapping() API added by commit a62c34bd2a8a ("x86, mm: Improve _install_special_mapping and fix x86 vdso naming"), which obsoletes install_special_mapping(), and remove arch_vma_name(), as the name is now handled by the new API.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: kernel test robot <lkp@intel.com>
[mpe: Squash fix to use PTR_ERR_OR_ZERO() from lkp]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/e7e5dfe0f93234e31051f2a610b4b07f50b0082f.1601197618.git.christophe.leroy@csgroup.eu
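As a rough illustration of the pattern the patch switches to (the identifiers example_vdso_pages, example_vdso_spec and example_map_vdso below are hypothetical and do not appear in the patch), the VMA name and backing pages live in a struct vm_special_mapping, and the mapping is created with _install_special_mapping(), whose return value is a VMA pointer rather than an error code:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mm_types.h>

/* Hypothetical page array; in the patch the equivalent arrays are filled
 * at init time into vdso32_spec.pages / vdso64_spec.pages. */
static struct page *example_vdso_pages[2];

static struct vm_special_mapping example_vdso_spec __ro_after_init = {
	.name  = "[vdso]",            /* reported in /proc/<pid>/maps */
	.pages = example_vdso_pages,  /* pages backing the special mapping */
};

/* Hypothetical helper mirroring __arch_setup_additional_pages() in the
 * diff below. */
static int example_map_vdso(struct mm_struct *mm, unsigned long vdso_base,
			    unsigned long vdso_size)
{
	struct vm_area_struct *vma;

	/*
	 * Unlike install_special_mapping(), which returns an int,
	 * _install_special_mapping() returns the new VMA or an ERR_PTR(),
	 * so the error is extracted with PTR_ERR_OR_ZERO().
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_size,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &example_vdso_spec);
	return PTR_ERR_OR_ZERO(vma);
}

With the name carried by the vm_special_mapping, the arch_vma_name() override that returned "[vdso]" for the vDSO range becomes redundant, which is why the patch deletes it.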
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/vdso.c  42
1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index a976c5e4a7ac..67fb4c7e504c 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -49,7 +49,6 @@
static unsigned int vdso32_pages;
static void *vdso32_kbase;
-static struct page **vdso32_pagelist;
unsigned long vdso32_sigtramp;
unsigned long vdso32_rt_sigtramp;
@@ -57,7 +56,6 @@ extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
-static struct page **vdso64_pagelist;
#ifdef CONFIG_PPC64
unsigned long vdso64_rt_sigtramp;
#endif /* CONFIG_PPC64 */
@@ -118,6 +116,14 @@ struct lib64_elfinfo
};
+static struct vm_special_mapping vdso32_spec __ro_after_init = {
+ .name = "[vdso]",
+};
+
+static struct vm_special_mapping vdso64_spec __ro_after_init = {
+ .name = "[vdso]",
+};
+
/*
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
@@ -125,17 +131,17 @@ struct lib64_elfinfo
static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
- struct page **vdso_pagelist;
+ struct vm_special_mapping *vdso_spec;
+ struct vm_area_struct *vma;
unsigned long vdso_size;
unsigned long vdso_base;
- int rc;
if (is_32bit_task()) {
- vdso_pagelist = vdso32_pagelist;
+ vdso_spec = &vdso32_spec;
vdso_size = &vdso32_end - &vdso32_start;
vdso_base = VDSO32_MBASE;
} else {
- vdso_pagelist = vdso64_pagelist;
+ vdso_spec = &vdso64_spec;
vdso_size = &vdso64_end - &vdso64_start;
/*
* On 64bit we don't have a preferred map address. This
@@ -166,7 +172,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
/*
* Put vDSO base into mm struct. We need to do this before calling
* install_special_mapping or the perf counter mmap tracking code
- * will fail to recognise it as a vDSO (since arch_vma_name fails).
+ * will fail to recognise it as a vDSO.
*/
current->mm->context.vdso_base = vdso_base;
@@ -180,11 +186,10 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
* It's fine to use that for setting breakpoints in the vDSO code
* pages though.
*/
- rc = install_special_mapping(mm, vdso_base, vdso_size,
- VM_READ|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- vdso_pagelist);
- return rc;
+ vma = _install_special_mapping(mm, vdso_base, vdso_size,
+ VM_READ | VM_EXEC | VM_MAYREAD |
+ VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
+ return PTR_ERR_OR_ZERO(vma);
}
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
@@ -208,15 +213,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
return rc;
}
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
- if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
- return "[vdso]";
- return NULL;
-}
-
-
-
#ifdef CONFIG_VDSO32
static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
unsigned long *size)
@@ -737,10 +733,10 @@ static int __init vdso_init(void)
}
if (IS_ENABLED(CONFIG_VDSO32))
- vdso32_pagelist = vdso_setup_pages(&vdso32_start, &vdso32_end);
+ vdso32_spec.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);
if (IS_ENABLED(CONFIG_PPC64))
- vdso64_pagelist = vdso_setup_pages(&vdso64_start, &vdso64_end);
+ vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
smp_wmb();
vdso_ready = 1;