author		Christophe Leroy <christophe.leroy@csgroup.eu>	2020-09-27 09:16:29 +0000
committer	Michael Ellerman <mpe@ellerman.id.au>		2020-12-04 01:01:16 +1100
commit		c102f07667486dc4a6ae1e3fe7aa67135cb40e3e (patch)
tree		e1a97fc85b33c477ce3a5519a5623b482ba4adc8 /arch/powerpc
parent		526a9c4a7234cccf6d900c6e82d79356f974cbfd (diff)
powerpc/vdso: Replace vdso_base by vdso
All architectures other than s390 use a void pointer named 'vdso'
to reference the VDSO mapping.

In a following patch, the VDSO data page will be put in front of the
text; vdso_base will then no longer point to the VDSO text.

To avoid confusion between vdso_base and the VDSO text, rename
vdso_base to vdso and make it a void __user *.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/8e6cefe474aa4ceba028abb729485cd46c140990.1601197618.git.christophe.leroy@csgroup.eu
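For orientation, here is a minimal standalone sketch of what the type change means for callers; the structs and helper below are simplified stand-ins (not the kernel's mm_context_t or any real kernel API), and the __user sparse annotation is stubbed out so the snippet compiles on its own:

/* Hypothetical, simplified illustration of the vdso_base -> vdso rename. */
#include <stdio.h>

#define __user			/* sparse annotation, a no-op outside the kernel */

struct mm_context_old {		/* before: the mapping address as an integer */
	unsigned long vdso_base;
};

struct mm_context_new {		/* after: the mapping address as a user pointer */
	void __user *vdso;
};

/* Address arithmetic on the new field needs an explicit cast back to an integer. */
static unsigned long sigtramp_addr(const struct mm_context_new *ctx, unsigned long offset)
{
	return (unsigned long)ctx->vdso + offset;
}

int main(void)
{
	struct mm_context_new ctx = { .vdso = (void __user *)0x7fff0000UL };

	printf("trampoline at 0x%lx\n", sigtramp_addr(&ctx, 0x40));
	return 0;
}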
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/book3s/32/mmu-hash.h	2
-rw-r--r--	arch/powerpc/include/asm/book3s/64/mmu.h	2
-rw-r--r--	arch/powerpc/include/asm/elf.h	2
-rw-r--r--	arch/powerpc/include/asm/mmu_context.h	6
-rw-r--r--	arch/powerpc/include/asm/nohash/32/mmu-40x.h	2
-rw-r--r--	arch/powerpc/include/asm/nohash/32/mmu-44x.h	2
-rw-r--r--	arch/powerpc/include/asm/nohash/32/mmu-8xx.h	2
-rw-r--r--	arch/powerpc/include/asm/nohash/mmu-book3e.h	2
-rw-r--r--	arch/powerpc/kernel/signal_32.c	8
-rw-r--r--	arch/powerpc/kernel/signal_64.c	4
-rw-r--r--	arch/powerpc/kernel/vdso.c	8
-rw-r--r--	arch/powerpc/perf/callchain_32.c	8
-rw-r--r--	arch/powerpc/perf/callchain_64.c	4
13 files changed, 27 insertions, 25 deletions
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index 2e277ca0170f..331187661236 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -90,7 +90,7 @@ struct hash_pte {
 
 typedef struct {
 	unsigned long id;
-	unsigned long vdso_base;
+	void __user *vdso;
 } mm_context_t;
 
 void update_bats(void);
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index e0b52940e43c..ad0837d8076d 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -111,7 +111,7 @@ typedef struct {
 	struct hash_mm_context *hash_context;
 
-	unsigned long vdso_base;
+	void __user *vdso;
 	/*
 	 * pagetable fragment support
 	 */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 53ed2ca40151..4ecc372c408e 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -169,7 +169,7 @@ do { \
 	NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
 	NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
 	NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
-	VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base); \
+	VDSO_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long)current->mm->context.vdso);\
 	ARCH_DLINFO_CACHE_GEOMETRY; \
 } while (0)
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index e02aa793420b..d54358cb5be1 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -262,8 +262,10 @@ extern void arch_exit_mmap(struct mm_struct *mm);
 static inline void arch_unmap(struct mm_struct *mm,
 			      unsigned long start, unsigned long end)
 {
-	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
-		mm->context.vdso_base = 0;
+	unsigned long vdso_base = (unsigned long)mm->context.vdso;
+
+	if (start <= vdso_base && vdso_base < end)
+		mm->context.vdso = NULL;
 }
 
 #ifdef CONFIG_PPC_MEM_KEYS
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-40x.h b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
index 74f4edb5916e..8a8f13a22cf4 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
@@ -57,7 +57,7 @@ typedef struct {
 	unsigned int id;
 	unsigned int active;
-	unsigned long vdso_base;
+	void __user *vdso;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
index 28aa3b339c5e..2d92a39d8f2e 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
@@ -108,7 +108,7 @@ extern unsigned int tlb_44x_index;
 typedef struct {
 	unsigned int id;
 	unsigned int active;
-	unsigned long vdso_base;
+	void __user *vdso;
 } mm_context_t;
 
 /* patch sites */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 0bd1b144eb76..478249959baa 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -181,7 +181,7 @@ void mmu_pin_tlb(unsigned long top, bool readonly);
 typedef struct {
 	unsigned int id;
 	unsigned int active;
-	unsigned long vdso_base;
+	void __user *vdso;
 	void *pte_frag;
 } mm_context_t;
diff --git a/arch/powerpc/include/asm/nohash/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-book3e.h
index b41004664312..e43a418d3ccd 100644
--- a/arch/powerpc/include/asm/nohash/mmu-book3e.h
+++ b/arch/powerpc/include/asm/nohash/mmu-book3e.h
@@ -238,7 +238,7 @@ extern unsigned int tlbcam_index;
 typedef struct {
 	unsigned int id;
 	unsigned int active;
-	unsigned long vdso_base;
+	void __user *vdso;
 } mm_context_t;
 
 /* Page size definitions, common between 32 and 64-bit
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 123682299d4f..e45aafef4c5b 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -801,8 +801,8 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
 	}
 
 	/* Save user registers on the stack */
-	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
-		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
+	if (vdso32_rt_sigtramp && tsk->mm->context.vdso) {
+		tramp = (unsigned long)tsk->mm->context.vdso + vdso32_rt_sigtramp;
 	} else {
 		tramp = (unsigned long)mctx->mc_pad;
 		/* Set up the sigreturn trampoline: li r0,sigret; sc */
@@ -901,8 +901,8 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
 	else
 		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
 
-	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
-		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
+	if (vdso32_sigtramp && tsk->mm->context.vdso) {
+		tramp = (unsigned long)tsk->mm->context.vdso + vdso32_sigtramp;
 	} else {
 		tramp = (unsigned long)mctx->mc_pad;
 		/* Set up the sigreturn trampoline: li r0,sigret; sc */
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 7df088b9ad0f..68e850bd5ef7 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -854,8 +854,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
 	tsk->thread.fp_state.fpscr = 0;
 
 	/* Set up to return from userspace. */
-	if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) {
-		regs->nip = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
+	if (vdso64_rt_sigtramp && tsk->mm->context.vdso) {
+		regs->nip = (unsigned long)tsk->mm->context.vdso + vdso64_rt_sigtramp;
 	} else {
 		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
 		if (err)
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 2b975759a04d..5214cd4909f8 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -123,7 +123,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
 	if (new_size != text_size + PAGE_SIZE)
 		return -EINVAL;
 
-	current->mm->context.vdso_base = new_vma->vm_start;
+	current->mm->context.vdso = (void __user *)new_vma->vm_start;
 
 	return 0;
 }
@@ -198,7 +198,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
 	 * install_special_mapping or the perf counter mmap tracking code
 	 * will fail to recognise it as a vDSO.
 	 */
-	current->mm->context.vdso_base = vdso_base;
+	mm->context.vdso = (void __user *)vdso_base;
 
 	/*
 	 * our vma flags don't have VM_WRITE so by default, the process isn't
@@ -221,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	struct mm_struct *mm = current->mm;
 	int rc;
 
-	mm->context.vdso_base = 0;
+	mm->context.vdso = NULL;
 
 	if (!vdso_ready)
 		return 0;
@@ -231,7 +231,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 
 	rc = __arch_setup_additional_pages(bprm, uses_interp);
 	if (rc)
-		mm->context.vdso_base = 0;
+		mm->context.vdso = NULL;
 
 	mmap_write_unlock(mm);
 	return rc;
diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c
index 64e4013d8060..b32e94047fb9 100644
--- a/arch/powerpc/perf/callchain_32.c
+++ b/arch/powerpc/perf/callchain_32.c
@@ -59,8 +59,8 @@ static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
 {
 	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
 		return 1;
-	if (vdso32_sigtramp && current->mm->context.vdso_base &&
-	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
+	if (vdso32_sigtramp && current->mm->context.vdso &&
+	    nip == (unsigned long)current->mm->context.vdso + vdso32_sigtramp)
 		return 1;
 	return 0;
 }
@@ -70,8 +70,8 @@ static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
 	if (nip == fp + offsetof(struct rt_signal_frame_32,
				 uc.uc_mcontext.mc_pad))
 		return 1;
-	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
-	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
+	if (vdso32_rt_sigtramp && current->mm->context.vdso &&
+	    nip == (unsigned long)current->mm->context.vdso + vdso32_rt_sigtramp)
 		return 1;
 	return 0;
 }
diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
index 0777b04a0c56..6b9c06058c33 100644
--- a/arch/powerpc/perf/callchain_64.c
+++ b/arch/powerpc/perf/callchain_64.c
@@ -68,8 +68,8 @@ static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
 {
 	if (nip == fp + offsetof(struct signal_frame_64, tramp))
 		return 1;
-	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
-	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
+	if (vdso64_rt_sigtramp && current->mm->context.vdso &&
+	    nip == (unsigned long)current->mm->context.vdso + vdso64_rt_sigtramp)
 		return 1;
 	return 0;
 }
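As a usage note, every converted call site above follows the same pattern: test the pointer against NULL instead of zero, cast to unsigned long before doing address arithmetic, and clear it by assigning NULL. A condensed, standalone sketch of the arch_unmap() range check above (simplified stand-in types, not the kernel's mm_struct):

#include <stdio.h>
#include <stddef.h>

struct mm_context {
	void *vdso;		/* void __user * in the kernel */
};

/* Clear the vDSO pointer if the unmapped range [start, end) covers it. */
static void vdso_unmap_check(struct mm_context *ctx, unsigned long start, unsigned long end)
{
	unsigned long vdso_base = (unsigned long)ctx->vdso;

	if (start <= vdso_base && vdso_base < end)
		ctx->vdso = NULL;
}

int main(void)
{
	struct mm_context ctx = { .vdso = (void *)0x7fff0000UL };

	vdso_unmap_check(&ctx, 0x7ffe0000UL, 0x80000000UL);
	printf("vdso after unmap: %p\n", ctx.vdso);	/* the range covered it, so NULL */
	return 0;
}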