author     Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>   2020-07-13 20:37:48 +0530
committer  Michael Ellerman <mpe@ellerman.id.au>           2020-07-29 21:09:37 +1000
commit     ef26b76d1af61b90eb0dd3da58ad4f97d8e028f8
tree       03244033b205138afd43756ddc8b727110bec62b
parent     81a413259a224f0d1783c41a74f18864d4f3d67e
powerpc/hugetlb/cma: Allocate gigantic hugetlb pages using CMA
commit cf11e85fc08c ("mm: hugetlb: optionally allocate gigantic hugepages using cma")
added support for allocating gigantic hugepages using CMA. This patch enables the
same for powerpc.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200713150749.25245-1-aneesh.kumar@linux.ibm.com
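
For background on the generic side: cf11e85fc08c routes everything through a single
entry point. The architecture calls hugetlb_cma_reserve() with the gigantic-page
order during early boot, and the size of the CMA area comes from the hugetlb_cma=
kernel command-line parameter. The contract looks roughly like the sketch below
(paraphrased, not a verbatim copy of include/linux/hugetlb.h from that commit):

#ifdef CONFIG_CMA
/* Carve out a CMA area, sized by hugetlb_cma=, for pages of this order. */
void __init hugetlb_cma_reserve(int order);
#else
static inline void __init hugetlb_cma_reserve(int order)
{
}
#endif

When a gigantic page is later needed, the generic allocator tries the CMA area
first before falling back to alloc_contig_range(), and while the reservation sits
unused it can still serve movable allocations, which is the usual CMA trade-off.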
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h |  7 +++++++
-rw-r--r--  arch/powerpc/kernel/setup-common.c |  3 +++
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c      | 18 ++++++++++++++++++
3 files changed, 28 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 551a9d4d3958..013165e62618 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -57,6 +57,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 			       unsigned long addr, pte_t *ptep,
 			       pte_t pte, int dirty);
 
+void gigantic_hugetlb_cma_reserve(void) __init;
 #include <asm-generic/hugetlb.h>
 
 #else /* ! CONFIG_HUGETLB_PAGE */
@@ -71,6 +72,12 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
 {
 	return NULL;
 }
+
+
+static inline void __init gigantic_hugetlb_cma_reserve(void)
+{
+}
+
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #endif /* _ASM_POWERPC_HUGETLB_H */
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9d3faac53295..b198b0ff25bc 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -928,6 +928,9 @@ void __init setup_arch(char **cmdline_p)
 	/* Reserve large chunks of memory for use by CMA for KVM. */
 	kvm_cma_reserve();
 
+	/* Reserve large chunks of memory for use by CMA for hugetlb */
+	gigantic_hugetlb_cma_reserve();
+
 	klp_init_thread_info(&init_task);
 
 	init_mm.start_code = (unsigned long)_stext;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index e9bfbccd975d..26292544630f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -684,3 +684,21 @@ void flush_dcache_icache_hugepage(struct page *page)
 		}
 	}
 }
+
+void __init gigantic_hugetlb_cma_reserve(void)
+{
+	unsigned long order = 0;
+
+	if (radix_enabled())
+		order = PUD_SHIFT - PAGE_SHIFT;
+	else if (!firmware_has_feature(FW_FEATURE_LPAR) && mmu_psize_defs[MMU_PAGE_16G].shift)
+		/*
+		 * For pseries we do use ibm,expected#pages for reserving 16G pages.
+		 */
+		order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
+
+	if (order) {
+		VM_WARN_ON(order < MAX_ORDER);
+		hugetlb_cma_reserve(order);
+	}
+}
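
The powerpc hook's real job is just picking the order. The standalone model below
mirrors that selection logic so the arithmetic can be sanity-checked in userspace;
the PAGE_SHIFT/PUD_SHIFT/SHIFT_16G constants assume a 64K-page kernel and are
illustrative values, not something taken from the patch:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	16	/* 64K base pages (assumed) */
#define PUD_SHIFT	30	/* radix: one PUD entry maps 1G (assumed) */
#define SHIFT_16G	34	/* hash MMU 16G gigantic page (assumed) */

/* Mirrors the if/else cascade in gigantic_hugetlb_cma_reserve(). */
static unsigned long pick_gigantic_order(bool radix, bool lpar, bool has_16g)
{
	if (radix)
		return PUD_SHIFT - PAGE_SHIFT;	/* 1G gigantic pages */
	if (!lpar && has_16g)
		return SHIFT_16G - PAGE_SHIFT;	/* 16G gigantic pages */
	return 0;				/* no CMA reservation */
}

int main(void)
{
	printf("radix:           order %lu\n", pick_gigantic_order(true, false, false));
	printf("bare-metal hash: order %lu\n", pick_gigantic_order(false, false, true));
	printf("pseries hash:    order %lu\n", pick_gigantic_order(false, true, true));
	return 0;
}

With these assumed values a radix kernel reserves order-14 (1G) blocks, a
bare-metal hash kernel with 16G support reserves order-18 (16G) blocks, and a
pseries LPAR gets no CMA area at all, since its 16G pages come from the
ibm,expected#pages firmware reservation instead. The VM_WARN_ON(order < MAX_ORDER)
in the patch guards the complementary invariant: the CMA carve-out only makes
sense for orders the buddy allocator (capped at MAX_ORDER - 1) cannot satisfy.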