author		Nitin Gupta <nitin.m.gupta@oracle.com>	2016-07-29 00:54:21 -0700
committer	David S. Miller <davem@davemloft.net>	2016-07-29 10:49:16 -0700
commit		7bc3777ca19cf9ecc5533980210f29c51df7fe5e (patch)
tree		baaa60bd0a6dfe46a902d7e151005b013f340e7f /arch/sparc/mm/init_64.c
parent		af1b1a9b36b8f9d583d4b4f90dd8946ed0cd4bd0 (diff)
sparc64: Trim page tables for 8M hugepages
For PMD-aligned (8M) hugepages, we currently allocate all four page
table levels, which is wasteful. We now allocate only down to the PMD
level, which reduces the memory consumed by page tables.

Also, when freeing the page tables for an 8M hugepage backed region,
make sure we don't try to access the non-existent PTE level.

Orabug: 22630259

Signed-off-by: Nitin Gupta <nitin.m.gupta@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
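As a rough illustration of the idea (not kernel code; the structure, flag name, and sizes below are hypothetical), a hugepage mapping can be recorded directly in the PMD entry so that no PTE page is ever allocated for it, and the free path must test for that case before descending:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Toy model of a single PMD slot.  PMD_HUGE_FLAG and struct pmd_entry are
 * made up; they only illustrate "stop at the PMD level for hugepages". */
#define PTES_PER_PAGE 1024
#define PMD_HUGE_FLAG 0x1UL

struct pmd_entry {
	uint64_t val;        /* mapping plus flag bits */
	uint64_t *pte_page;  /* allocated only for normal (non-huge) mappings */
};

/* Map: a hugepage is recorded directly in the PMD, so no PTE page is needed. */
static void map_region(struct pmd_entry *pmd, uint64_t pa, int is_huge)
{
	if (is_huge) {
		pmd->val = pa | PMD_HUGE_FLAG;
		pmd->pte_page = NULL;
		return;
	}
	pmd->pte_page = calloc(PTES_PER_PAGE, sizeof(uint64_t));
	pmd->val = pa;
}

/* Unmap: check the huge flag before touching the (possibly absent) PTE level. */
static void unmap_region(struct pmd_entry *pmd)
{
	if (!(pmd->val & PMD_HUGE_FLAG))
		free(pmd->pte_page);
	pmd->val = 0;
	pmd->pte_page = NULL;
}

int main(void)
{
	struct pmd_entry huge = { 0 }, normal = { 0 };

	map_region(&huge, 0x800000, 1);    /* 8M hugepage: PMD only */
	map_region(&normal, 0x1000, 0);    /* normal mapping: PTE page allocated */

	printf("huge mapping allocated a PTE page?   %s\n", huge.pte_page ? "yes" : "no");
	printf("normal mapping allocated a PTE page? %s\n", normal.pte_page ? "yes" : "no");

	unmap_region(&huge);
	unmap_region(&normal);
	return 0;
}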
Diffstat (limited to 'arch/sparc/mm/init_64.c')
-rw-r--r--	arch/sparc/mm/init_64.c	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index aef153f9fdac..65457c9f1365 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -347,10 +347,12 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
-	    is_hugetlb_pte(pte))
+	    is_hugetlb_pte(pte)) {
+		/* We are fabricating 8MB pages using 4MB real hw pages.  */
+		pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
 		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
 					address, pte_val(pte));
-	else
+	} else
 #endif
 	__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
 			address, pte_val(pte));
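
The hunk above folds bit REAL_HPAGE_SHIFT of the faulting address into the PTE value before the MM_TSB_HUGE insert, so each 4MB half of the fabricated 8MB page gets a TSB entry that points at its own half of the physical region. Below is a minimal, stand-alone user-space sketch of that arithmetic only (the addresses are made up, and REAL_HPAGE_SHIFT is assumed to be 22, i.e. 4MB real hardware pages as on sparc64); it is not kernel code.

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the sparc64 constants: a real hw hugepage is 4 MB
 * (shift 22), and two of them back one 8 MB "fabricated" hugepage. */
#define REAL_HPAGE_SHIFT 22
#define REAL_HPAGE_SIZE  (1ULL << REAL_HPAGE_SHIFT)

int main(void)
{
	uint64_t hpage_pa = 0x80000000ULL;             /* hypothetical 8 MB-aligned PA */
	uint64_t vaddr    = 0x100000000ULL;            /* hypothetical 8 MB-aligned VA */
	uint64_t addr_lo  = vaddr;                     /* falls in the lower 4 MB half */
	uint64_t addr_hi  = vaddr + REAL_HPAGE_SIZE;   /* falls in the upper 4 MB half */

	/* Same arithmetic as the patch: fold the half-selecting address bit
	 * into the value handed to the huge TSB, so each 4 MB half maps to
	 * the matching 4 MB of the physical hugepage. */
	uint64_t tsb_lo = hpage_pa | (addr_lo & (1ULL << REAL_HPAGE_SHIFT));
	uint64_t tsb_hi = hpage_pa | (addr_hi & (1ULL << REAL_HPAGE_SHIFT));

	printf("lower half -> PA 0x%llx\n", (unsigned long long)tsb_lo);
	printf("upper half -> PA 0x%llx\n", (unsigned long long)tsb_hi);
	return 0;
}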