author | Thomas Gleixner <tglx@linutronix.de> | 2018-03-14 20:23:25 +0100
committer | Thomas Gleixner <tglx@linutronix.de> | 2018-03-14 20:23:25 +0100
commit | 745dd37f9d67c75d6cf1a1bebfcca71bdeb7a34c (patch)
tree | e556543ed86b7f49e4972d8ea048ff9e46592b4d /mm
parent | 02428742639bc3300c8c527b054d0ec0bdf5571d (diff)
parent | 18a955219bf7d9008ce480d4451b6b8bf4483a22 (diff)
Merge branch 'x86/urgent' into x86/mm to pick up dependencies
Diffstat (limited to 'mm')
-rw-r--r-- | mm/gup.c | 7
-rw-r--r-- | mm/hugetlb.c | 2
-rw-r--r-- | mm/memblock.c | 10
-rw-r--r-- | mm/page_alloc.c | 9
4 files changed, 18 insertions, 10 deletions
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -516,7 +516,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	}
 
 	if (ret & VM_FAULT_RETRY) {
-		if (nonblocking)
+		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
 			*nonblocking = 0;
 		return -EBUSY;
 	}
@@ -890,7 +890,10 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 				break;
 		}
 		if (*locked) {
-			/* VM_FAULT_RETRY didn't trigger */
+			/*
+			 * VM_FAULT_RETRY didn't trigger or it was a
+			 * FOLL_NOWAIT.
+			 */
 			if (!pages_done)
 				pages_done = ret;
 			break;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c204e3d132b..a963f2034dfc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1583,7 +1583,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
 		page = NULL;
 	} else {
 		h->surplus_huge_pages++;
-		h->nr_huge_pages_node[page_to_nid(page)]++;
+		h->surplus_huge_pages_node[page_to_nid(page)]++;
 	}
 
 out_unlock:
diff --git a/mm/memblock.c b/mm/memblock.c
index 5a9ca2a1751b..b6ba6b7adadc 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1107,7 +1107,7 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
 	struct memblock_type *type = &memblock.memory;
 	unsigned int right = type->cnt;
 	unsigned int mid, left = 0;
-	phys_addr_t addr = PFN_PHYS(pfn + 1);
+	phys_addr_t addr = PFN_PHYS(++pfn);
 
 	do {
 		mid = (right + left) / 2;
@@ -1118,15 +1118,15 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
 				  type->regions[mid].size))
 			left = mid + 1;
 		else {
-			/* addr is within the region, so pfn + 1 is valid */
-			return min(pfn + 1, max_pfn);
+			/* addr is within the region, so pfn is valid */
+			return pfn;
 		}
 	} while (left < right);
 
 	if (right == type->cnt)
-		return max_pfn;
+		return -1UL;
 	else
-		return min(PHYS_PFN(type->regions[right].base), max_pfn);
+		return PHYS_PFN(type->regions[right].base);
 }
 
 /**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cb416723538f..3d974cb2a1a1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5359,9 +5359,14 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			/*
 			 * Skip to the pfn preceding the next valid one (or
 			 * end_pfn), such that we hit a valid pfn (or end_pfn)
-			 * on our next iteration of the loop.
+			 * on our next iteration of the loop. Note that it needs
+			 * to be pageblock aligned even when the region itself
+			 * is not. move_freepages_block() can shift ahead of
+			 * the valid region but still depends on correct page
+			 * metadata.
 			 */
-			pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
+			pfn = (memblock_next_valid_pfn(pfn, end_pfn) &
+					~(pageblock_nr_pages-1)) - 1;
 #endif
 			continue;
 		}
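
The mm/memblock.c hunks above make memblock_next_valid_pfn() pre-increment the pfn and return -1UL, rather than clamping to max_pfn, when no memory region lies above the address. The user-space sketch below mirrors that binary search over a sorted, non-overlapping region table so the hole-skipping behaviour can be tried in isolation; the region layout, PAGE_SHIFT value and helper macros are made-up stand-ins, not the kernel's.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's memblock types and helpers. */
#define PAGE_SHIFT	12
#define PFN_PHYS(pfn)	((unsigned long long)(pfn) << PAGE_SHIFT)
#define PHYS_PFN(addr)	((unsigned long)((addr) >> PAGE_SHIFT))

struct region {
	unsigned long long base;	/* physical start address */
	unsigned long long size;	/* length in bytes */
};

/* Sorted, non-overlapping regions, as memblock.memory keeps them. */
static const struct region regions[] = {
	{ 0x00000000ULL, 0x0009f000ULL },
	{ 0x00100000ULL, 0x3fe00000ULL },
};
static const unsigned int cnt = sizeof(regions) / sizeof(regions[0]);

/*
 * Mirror of the patched logic: advance pfn by one, then binary-search for
 * a region that could contain it.  If the address falls inside a region,
 * the incremented pfn is already valid; if it falls in a hole, return the
 * first pfn of the next region; past the last region, return -1UL.
 */
static unsigned long next_valid_pfn(unsigned long pfn)
{
	unsigned int right = cnt, left = 0, mid;
	unsigned long long addr = PFN_PHYS(++pfn);

	do {
		mid = (right + left) / 2;

		if (addr < regions[mid].base)
			right = mid;
		else if (addr >= regions[mid].base + regions[mid].size)
			left = mid + 1;
		else
			return pfn;	/* addr is inside regions[mid] */
	} while (left < right);

	if (right == cnt)
		return -1UL;		/* no region above addr */
	return PHYS_PFN(regions[right].base);
}

int main(void)
{
	/* pfn 0x9e is the last page of the first sample region, so the
	 * next valid pfn jumps to the second region at pfn 0x100. */
	printf("next after 0x9e: 0x%lx\n", next_valid_pfn(0x9e));
	return 0;
}

With the sample table, this prints "next after 0x9e: 0x100": stepping past the last page of the first region lands directly on the start of the second.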
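
The mm/page_alloc.c hunk additionally rounds the returned pfn down to a pageblock boundary before stepping back one, so the loop's pfn++ resumes on an aligned pfn; per the added comment, move_freepages_block() can shift ahead of the valid region but still depends on correct page metadata. Below is a minimal sketch of that mask arithmetic, assuming 4 KiB pages and 2 MiB pageblocks (so pageblock_nr_pages is 512, a power of two); the pfn value is hypothetical.

#include <stdio.h>

int main(void)
{
	/* Assumed value: 2 MiB pageblocks of 4 KiB pages = 512 pages.
	 * The mask trick below requires a power of two. */
	const unsigned long pageblock_nr_pages = 512;

	/* Hypothetical pfn returned by the next-valid-pfn lookup. */
	unsigned long next = 0x9f3a7;

	/* Round down to the start of the containing pageblock, then step
	 * back one so that the caller's pfn++ makes the next iteration
	 * start exactly on the pageblock boundary. */
	unsigned long pfn = (next & ~(pageblock_nr_pages - 1)) - 1;

	printf("next valid pfn:    0x%lx\n", next);
	printf("pageblock-aligned: 0x%lx\n", pfn + 1);	/* 0x9f200 */
	return 0;
}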