author	Christophe Leroy <christophe.leroy@csgroup.eu>	2020-10-22 06:29:36 +0000
committer	Michael Ellerman <mpe@ellerman.id.au>	2020-12-09 16:46:55 +1100
commit	fd1b4b7f51d0d75b73eeda41ef459ea7791aaab2 (patch)
tree	5d4e2f634c8cefb7c6cfd18ac91ed3a150213634
parent	f265512582a047e09390b1b41384f365d7dc806f (diff)
powerpc/32s: Split and inline flush_tlb_mm() and flush_tlb_page()
flush_tlb_mm() and flush_tlb_page() handle both the MMU_FTR_HPTE_TABLE
case and the other case.

The non-MMU_FTR_HPTE_TABLE case is trivial as it is only a call to
_tlbie()/_tlbia(), which is not worth a dedicated function.

Make flush_tlb_mm() and flush_tlb_page() hash-specific and call
them from tlbflush.h based on mmu_has_feature(MMU_FTR_HPTE_TABLE).

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/11e932ded41ba6d9b251d89b7afa33cc060d3aa4.1603348103.git.christophe.leroy@csgroup.eu
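The pattern at work is plain feature-based dispatch: a cheap runtime test in an
inline header function selects either the hash-MMU slow path or a single TLB
invalidation. The standalone C model below sketches that shape only; the
struct, the has_hash_mmu flag and the stub bodies are illustrative stand-ins,
not kernel API (the real test is mmu_has_feature(MMU_FTR_HPTE_TABLE), and the
real paths are hash__flush_tlb_mm() and _tlbia(), as the diff shows).

/*
 * Standalone model of the dispatch this patch introduces.
 * NOTE: struct mm_struct, has_hash_mmu and the stub bodies are
 * stand-ins for illustration, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct mm_struct { int id; };

/* Stand-in for mmu_has_feature(MMU_FTR_HPTE_TABLE): a fixed
 * property of the CPU, decided once at boot. */
static bool has_hash_mmu = true;

/* Hash-MMU path: walk the mm and flush hash-table entries
 * (hash__flush_tlb_mm() in the patch). */
static void hash_flush_tlb_mm(struct mm_struct *mm)
{
	printf("hash flush, mm %d\n", mm->id);
}

/* Non-hash path: one "invalidate all" primitive (_tlbia() in the patch). */
static void tlbia(void)
{
	printf("tlbia\n");
}

/* The inline dispatcher the patch adds to tlbflush.h. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (has_hash_mmu)
		hash_flush_tlb_mm(mm);
	else
		tlbia();
}

int main(void)
{
	struct mm_struct mm = { .id = 1 };

	flush_tlb_mm(&mm);	/* takes the hash path */
	has_hash_mmu = false;
	flush_tlb_mm(&mm);	/* falls back to tlbia() */
	return 0;
}

Because the dispatcher is a static inline in tlbflush.h, non-hash CPUs get the
_tlbia()/_tlbie() call inlined at each call site instead of paying for a call
into an exported function, which is the point of the split.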
 arch/powerpc/include/asm/book3s/32/tlbflush.h | 20 ++++++++++++++++++--
 arch/powerpc/mm/book3s32/tlb.c                | 17 ++++-------------
 2 files changed, 22 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h
index f392a619138d..542765944531 100644
--- a/arch/powerpc/include/asm/book3s/32/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -6,8 +6,8 @@
 /*
  * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
  */
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+void hash__flush_tlb_mm(struct mm_struct *mm);
+void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			     unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
@@ -22,6 +22,22 @@ static inline void _tlbie(unsigned long address)
 #endif
 void _tlbia(void);
 
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+		hash__flush_tlb_mm(mm);
+	else
+		_tlbia();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+		hash__flush_tlb_page(vma, vmaddr);
+	else
+		_tlbie(vmaddr);
+}
+
 static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					 unsigned long vmaddr)
 {
diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c
index ae5dbba95805..65389bfe2eb8 100644
--- a/arch/powerpc/mm/book3s32/tlb.c
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -118,15 +118,10 @@ EXPORT_SYMBOL(flush_tlb_kernel_range);
 /*
  * Flush all the (user) entries for the address space described by mm.
  */
-void flush_tlb_mm(struct mm_struct *mm)
+void hash__flush_tlb_mm(struct mm_struct *mm)
 {
	struct vm_area_struct *mp;
 
-	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
-		_tlbia();
-		return;
-	}
-
	/*
	 * It is safe to go down the mm's list of vmas when called
	 * from dup_mmap, holding mmap_lock. It would also be safe from
@@ -136,23 +131,19 @@ void flush_tlb_mm(struct mm_struct *mm)
	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
 }
-EXPORT_SYMBOL(flush_tlb_mm);
+EXPORT_SYMBOL(hash__flush_tlb_mm);
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
	struct mm_struct *mm;
	pmd_t *pmd;
 
-	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
-		_tlbie(vmaddr);
-		return;
-	}
	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
	pmd = pmd_off(mm, vmaddr);
	if (!pmd_none(*pmd))
		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
 }
-EXPORT_SYMBOL(flush_tlb_page);
+EXPORT_SYMBOL(hash__flush_tlb_page);
 
 /*
  * For each address in the range, find the pte for the address