author		Peter Zijlstra <peterz@infradead.org>	2020-01-31 13:45:38 +0100
committer	Geert Uytterhoeven <geert@linux-m68k.org>	2020-02-10 10:57:48 +0100
commit		61c64a25ae8df45c2cd2f76343e20c3d266382ea (patch)
tree		5429aa4d674960c388cf23d3be026bda006d157a /arch/m68k/include/asm/motorola_pgalloc.h
parent		ef9285f69f0efbc75d01cbb09fe65882effd0a25 (diff)
m68k: mm: Use table allocator for pgtables
With the new page-table layout, using full (4k) pages for (256 byte) pte-tables is immensely wasteful. Move the pte-tables over to the same allocator already used for the (512 byte) higher level tables (pgd/pmd).

This reduces the pte-table waste from 15x to 2x. Due to no longer being bound to 16 consecutive tables, this might actually already be more efficient than the old code for sparse tables.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Tested-by: Michael Schmitz <schmitzmic@gmail.com>
Tested-by: Greg Ungerer <gerg@linux-m68k.org>
Link: https://lore.kernel.org/r/20200131125403.825295149@infradead.org
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
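For a rough feel of the 15x/2x figures, here is a minimal userspace sketch (not part of the patch); it only restates the sizes quoted above (4k pages, 256-byte pte-tables, 512-byte pointer-table slots) and one plausible reading of "waste" as allocated-but-unused bytes per table:

    #include <stdio.h>

    int main(void)
    {
            /* old scheme: one full 4k page backs each 256-byte pte-table */
            const unsigned long page_size = 4096;
            const unsigned long pte_table = 256;
            /* new scheme: pte-tables come from the 512-byte pointer-table allocator */
            const unsigned long ptr_slot = 512;

            /* 3840 unused bytes per table -> 15x the table size wasted */
            printf("old waste: %lux\n", (page_size - pte_table) / pte_table);
            /* 512 bytes allocated per 256-byte table -> 2x the table size */
            printf("new overhead: %lux\n", ptr_slot / pte_table);
            return 0;
    }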
Diffstat (limited to 'arch/m68k/include/asm/motorola_pgalloc.h')
-rw-r--r--  arch/m68k/include/asm/motorola_pgalloc.h | 44
1 file changed, 9 insertions, 35 deletions
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index 85af0c602e9f..c15e04277be7 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -13,54 +13,28 @@ extern int free_pointer_table(pmd_t *);
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
-	pte_t *pte;
-
-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-	if (pte)
-		mmu_page_ctor(pte);
-
-	return pte;
+	return (pte_t *)get_pointer_table();
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	mmu_page_dtor(pte);
-	free_page((unsigned long) pte);
+	free_pointer_table((void *)pte);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	struct page *page;
-
-	page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
-	if(!page)
-		return NULL;
-	if (!pgtable_pte_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
-
-	mmu_page_ctor(kmap(page));
-	kunmap(page);
-
-	return page;
+	return (pte_t *)get_pointer_table();
 }
 
-static inline void pte_free(struct mm_struct *mm, pgtable_t page)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
 {
-	pgtable_pte_page_dtor(page);
-	mmu_page_dtor(kmap(page));
-	kunmap(page);
-	__free_page(page);
+	free_pointer_table((void *)pgtable);
 }
 
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
 				  unsigned long address)
 {
-	pgtable_pte_page_dtor(page);
-	mmu_page_dtor(kmap(page));
-	kunmap(page);
-	__free_page(page);
+	free_pointer_table((void *)pgtable);
 }
@@ -99,9 +73,9 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
 {
-	pmd_set(pmd, page_address(page));
+	pmd_set(pmd, page);
 }
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {