author	Nitin Gupta <nitin.m.gupta@oracle.com>	2017-02-01 16:16:36 -0800
committer	David S. Miller <davem@davemloft.net>	2017-02-23 08:30:28 -0800
commit	c7d9f77d33a779ad582d8b2284ba007931ebd894 (patch)
tree	d6a8de21e9a42c25a883e98f96935c1d52456053 /arch/sparc/mm/init_64.c
parent	0d88b86694e0b176c1b9ca10cee95863065e2471 (diff)
sparc64: Multi-page size support
Add support for using multiple hugepage sizes simultaneously on
mainline. Currently, support for 256M pages has been added, which can
be used along with 8M pages.

Page tables are set like this (e.g. for a 256M page):
    VA + (8M * x) -> PA + (8M * x)  (sz bit = 256M), where x in [0, 31]

and the TSB is set similarly:
    VA + (4M * x) -> PA + (4M * x)  (sz bit = 256M), where x in [0, 63]

- Testing

Tested on Sonoma (which supports 256M pages) by running stream
benchmark instances in parallel: one instance uses 8M pages and
another uses 256M pages, consuming 48G each. Boot params used:

default_hugepagesz=256M hugepagesz=256M hugepages=300
hugepagesz=8M hugepages=10000

Signed-off-by: Nitin Gupta <nitin.m.gupta@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
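As a rough illustration of the mapping scheme above, here is a
standalone userspace sketch (not kernel code) that prints the entries
a single 256M hugepage decomposes into, using the spacings stated in
the commit message: 32 page-table entries stepped by 8M and 64 TSB
entries stepped by 4M, each carrying the 256M size bit. The VA/PA
values are hypothetical.

	#include <stdio.h>

	#define M(x)	((unsigned long)(x) << 20)	/* x megabytes */

	int main(void)
	{
		unsigned long va = 0x70000000UL;	/* hypothetical 256M-aligned VA */
		unsigned long pa = 0x10000000UL;	/* hypothetical 256M-aligned PA */
		int x;

		/* Page tables: VA + (8M * x) -> PA + (8M * x), x in [0, 31] */
		for (x = 0; x < 32; x++)
			printf("pte: va=%#lx -> pa=%#lx (sz=256M)\n",
			       va + M(8) * x, pa + M(8) * x);

		/* TSB: VA + (4M * x) -> PA + (4M * x), x in [0, 63] */
		for (x = 0; x < 64; x++)
			printf("tsb: va=%#lx -> pa=%#lx (sz=256M)\n",
			       va + M(4) * x, pa + M(4) * x);
		return 0;
	}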
Diffstat (limited to 'arch/sparc/mm/init_64.c')
-rw-r--r--	arch/sparc/mm/init_64.c	42
1 file changed, 41 insertions(+), 1 deletion(-)
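For context on the new __setup() hook below: the kernel invokes
setup_hugepagesz() once for every "hugepagesz=" token on the boot
command line, passing the text just past the '='. This userspace
sketch (not kernel code; parse_size() is a hypothetical stand-in for
the kernel's memparse()) mimics that flow using the boot params from
the testing section above.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Hypothetical stand-in for memparse(): number plus K/M/G suffix. */
	static unsigned long long parse_size(const char *s)
	{
		char *end;
		unsigned long long v = strtoull(s, &end, 0);

		switch (*end) {
		case 'G': case 'g': v <<= 10; /* fall through */
		case 'M': case 'm': v <<= 10; /* fall through */
		case 'K': case 'k': v <<= 10; break;
		}
		return v;
	}

	int main(void)
	{
		/* Boot params from the commit's testing section */
		const char *cmdline[] = { "default_hugepagesz=256M",
					  "hugepagesz=256M", "hugepages=300",
					  "hugepagesz=8M", "hugepages=10000" };
		size_t i;

		for (i = 0; i < sizeof(cmdline) / sizeof(cmdline[0]); i++)
			if (!strncmp(cmdline[i], "hugepagesz=", 11))
				printf("setup_hugepagesz(\"%s\") -> %llu bytes\n",
				       cmdline[i] + 11, parse_size(cmdline[i] + 11));
		return 0;
	}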
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 5d2f91511c60..7ed3975d04cc 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -324,6 +324,46 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	tsb_insert(tsb, tag, tte);
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+static int __init setup_hugepagesz(char *string)
+{
+	unsigned long long hugepage_size;
+	unsigned int hugepage_shift;
+	unsigned short hv_pgsz_idx;
+	unsigned int hv_pgsz_mask;
+	int rc = 0;
+
+	hugepage_size = memparse(string, &string);
+	hugepage_shift = ilog2(hugepage_size);
+
+	switch (hugepage_shift) {
+	case HPAGE_256MB_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
+		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
+		break;
+	case HPAGE_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_4MB;
+		hv_pgsz_idx = HV_PGSZ_IDX_4MB;
+		break;
+	default:
+		hv_pgsz_mask = 0;
+	}
+
+	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
+		pr_warn("hugepagesz=%llu not supported by MMU.\n",
+			hugepage_size);
+		goto out;
+	}
+
+	hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT);
+	rc = 1;
+
+out:
+	return rc;
+}
+__setup("hugepagesz=", setup_hugepagesz);
+#endif /* CONFIG_HUGETLB_PAGE */
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	struct mm_struct *mm;
@@ -347,7 +387,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
-	    is_hugetlb_pte(pte)) {
+	    is_hugetlb_pmd(__pmd(pte_val(pte)))) {
 		/* We are fabricating 8MB pages using 4MB real hw pages.  */
 		pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
 		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
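The hunk above keeps the existing "fabricating 8MB pages using 4MB
real hw pages" trick: OR-ing the faulting address's 4M-half-selector
bit into the TTE lets each 4M half of an 8M page carry its own
translation. A standalone sketch (not kernel code; it assumes the
sparc64 values HPAGE_SHIFT = 23 for 8M and REAL_HPAGE_SHIFT = 22 for
4M, and a hypothetical TTE value):

	#include <stdio.h>

	#define HPAGE_SHIFT		23	/* 8M hugepage */
	#define REAL_HPAGE_SHIFT	22	/* 4M real hw page */

	int main(void)
	{
		unsigned long tte = 0x10000000UL;	/* hypothetical TTE for the 8M page */
		unsigned long addr;

		/* One fault per 4M half: bit 22 of the address selects the
		 * half and is folded into the TTE, as in the hunk above. */
		for (addr = 0; addr < (1UL << HPAGE_SHIFT);
		     addr += (1UL << REAL_HPAGE_SHIFT))
			printf("fault at offset %#9lx -> tte %#lx\n",
			       addr, tte | (addr & (1UL << REAL_HPAGE_SHIFT)));
		return 0;
	}

Run against offset 0 the TTE is unchanged; against offset 0x400000 it
gains the bit at REAL_HPAGE_SHIFT, giving the second 4M half a
distinct translation.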