author     Linus Torvalds <torvalds@linux-foundation.org>   2018-03-09 16:42:25 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-03-09 16:42:25 -0800
commit     cfc79ae844d835b6eda5de199dd08c84cc4e6087 (patch)
tree       eb99a85c6ce9b2b7ad205bd10bac6ab65bb5cd98
parent     c68a2cf07ad726c09a8724258e25ca9400fdf9ee (diff)
parent     ac68b1b3b9c73e652dc7ce0585672e23c5a2dca4 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
"8 fixes"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
lib/test_kmod.c: fix limit check on number of test devices created
selftests/vm/run_vmtests: adjust hugetlb size according to nr_cpus
mm/page_alloc: fix memmap_init_zone pageblock alignment
mm/memblock.c: hardcode the end_pfn being -1
mm/gup.c: teach get_user_pages_unlocked to handle FOLL_NOWAIT
lib/bug.c: exclude non-BUG/WARN exceptions from report_bug()
bug: use %pB in BUG and stack protector failure
hugetlb: fix surplus pages accounting
-rw-r--r--  kernel/panic.c                            2
-rw-r--r--  lib/bug.c                                 4
-rw-r--r--  lib/test_kmod.c                           2
-rw-r--r--  mm/gup.c                                  7
-rw-r--r--  mm/hugetlb.c                              2
-rw-r--r--  mm/memblock.c                            10
-rw-r--r--  mm/page_alloc.c                           9
-rwxr-xr-x  tools/testing/selftests/vm/run_vmtests   25
8 files changed, 40 insertions, 21 deletions
diff --git a/kernel/panic.c b/kernel/panic.c
index 2cfef408fec9..4b794f1d8561 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -640,7 +640,7 @@ device_initcall(register_warn_debugfs);
  */
 __visible void __stack_chk_fail(void)
 {
-	panic("stack-protector: Kernel stack is corrupted in: %p\n",
+	panic("stack-protector: Kernel stack is corrupted in: %pB\n",
 		__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
diff --git a/lib/bug.c b/lib/bug.c
index c1b0fad31b10..1077366f496b 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -150,6 +150,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 		return BUG_TRAP_TYPE_NONE;
 
 	bug = find_bug(bugaddr);
+	if (!bug)
+		return BUG_TRAP_TYPE_NONE;
 
 	file = NULL;
 	line = 0;
@@ -191,7 +193,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 	if (file)
 		pr_crit("kernel BUG at %s:%u!\n", file, line);
 	else
-		pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
+		pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
 			(void *)bugaddr);
 
 	return BUG_TRAP_TYPE_BUG;
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e372b97eee13..0e5b7a61460b 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1141,7 +1141,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
 	mutex_lock(&reg_dev_mutex);
 
 	/* int should suffice for number of devices, test for wrap */
-	if (unlikely(num_test_devs + 1) < 0) {
+	if (num_test_devs + 1 == INT_MAX) {
 		pr_err("reached limit of number of test devices\n");
 		goto out;
 	}
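A side note on the test_kmod check just above: unlikely() expands to __builtin_expect(!!(x), 0), which reduces its argument to 0 or 1, so the old "if (unlikely(num_test_devs + 1) < 0)" could never be true; the misplaced parenthesis is why the wrap check was dead. A minimal userspace sketch of the same pitfall (illustrative only, not kernel code; unlikely() is redefined locally):

/* Demonstrates why "unlikely(x) < 0" is always false: the macro
 * collapses x to 0 or 1 before the comparison runs. */
#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)	/* same shape as the kernel macro */

int main(void)
{
	int sum = -5;	/* stand-in for a wrapped counter + 1 */

	/* Broken form: compares unlikely()'s 0/1 result against 0. */
	printf("broken check fires:   %d\n", unlikely(sum) < 0);	/* prints 0 */

	/* Intended form: test the (possibly negative) sum itself. */
	printf("intended check fires: %d\n", unlikely(sum < 0));	/* prints 1 */
	return 0;
}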
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -516,7 +516,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	}
 
 	if (ret & VM_FAULT_RETRY) {
-		if (nonblocking)
+		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
 			*nonblocking = 0;
 		return -EBUSY;
 	}
@@ -890,7 +890,10 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 			break;
 		}
 		if (*locked) {
-			/* VM_FAULT_RETRY didn't trigger */
+			/*
+			 * VM_FAULT_RETRY didn't trigger or it was a
+			 * FOLL_NOWAIT.
+			 */
 			if (!pages_done)
 				pages_done = ret;
 			break;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c204e3d132b..a963f2034dfc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1583,7 +1583,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
 		page = NULL;
 	} else {
 		h->surplus_huge_pages++;
-		h->nr_huge_pages_node[page_to_nid(page)]++;
+		h->surplus_huge_pages_node[page_to_nid(page)]++;
 	}
 
 out_unlock:
diff --git a/mm/memblock.c b/mm/memblock.c
index 5a9ca2a1751b..b6ba6b7adadc 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1107,7 +1107,7 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
 	struct memblock_type *type = &memblock.memory;
 	unsigned int right = type->cnt;
 	unsigned int mid, left = 0;
-	phys_addr_t addr = PFN_PHYS(pfn + 1);
+	phys_addr_t addr = PFN_PHYS(++pfn);
 
 	do {
 		mid = (right + left) / 2;
@@ -1118,15 +1118,15 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
 				  type->regions[mid].size))
 			left = mid + 1;
 		else {
-			/* addr is within the region, so pfn + 1 is valid */
-			return min(pfn + 1, max_pfn);
+			/* addr is within the region, so pfn is valid */
+			return pfn;
 		}
 	} while (left < right);
 
 	if (right == type->cnt)
-		return max_pfn;
+		return -1UL;
 	else
-		return min(PHYS_PFN(type->regions[right].base), max_pfn);
+		return PHYS_PFN(type->regions[right].base);
 }
 
 /**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cb416723538f..3d974cb2a1a1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5359,9 +5359,14 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			/*
 			 * Skip to the pfn preceding the next valid one (or
 			 * end_pfn), such that we hit a valid pfn (or end_pfn)
-			 * on our next iteration of the loop.
+			 * on our next iteration of the loop. Note that it needs
+			 * to be pageblock aligned even when the region itself
+			 * is not. move_freepages_block() can shift ahead of
+			 * the valid region but still depends on correct page
+			 * metadata.
 			 */
-			pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
+			pfn = (memblock_next_valid_pfn(pfn, end_pfn) &
+					~(pageblock_nr_pages-1)) - 1;
 #endif
 			continue;
 		}
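To make the new rounding in memmap_init_zone() above concrete: the pfn returned by memblock_next_valid_pfn() is masked down to the start of its pageblock, so every struct page in that pageblock gets initialized even when the memblock region begins mid-pageblock; move_freepages_block() later walks whole pageblocks and relies on that metadata. A standalone sketch of the mask arithmetic, with pageblock_nr_pages assumed to be 512 (4KB pages, 2MB pageblocks; the real value is configuration dependent):

/* Round a pfn down to its pageblock boundary, as the page_alloc.c
 * hunk does before the loop's "- 1" / "pfn++" resumes iteration. */
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* assumed value for this example */

int main(void)
{
	unsigned long next_valid_pfn = 0x21430;	/* example value, not pageblock aligned */
	unsigned long aligned = next_valid_pfn & ~(PAGEBLOCK_NR_PAGES - 1);

	/* 0x21430 masks down to 0x21400; the kernel loop sets
	 * pfn = aligned - 1 and the for-loop's pfn++ then lands
	 * exactly on the pageblock start. */
	printf("next valid pfn %#lx -> pageblock start %#lx\n",
	       next_valid_pfn, aligned);
	return 0;
}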
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index d2561895a021..22d564673830 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -2,25 +2,33 @@
 # SPDX-License-Identifier: GPL-2.0
 #please run as root
 
-#we need 256M, below is the size in kB
-needmem=262144
 mnt=./huge
 exitcode=0
 
-#get pagesize and freepages from /proc/meminfo
+#get huge pagesize and freepages from /proc/meminfo
 while read name size unit; do
 	if [ "$name" = "HugePages_Free:" ]; then
 		freepgs=$size
 	fi
 	if [ "$name" = "Hugepagesize:" ]; then
-		pgsize=$size
+		hpgsize_KB=$size
 	fi
 done < /proc/meminfo
 
+# Simple hugetlbfs tests have a hardcoded minimum requirement of
+# huge pages totaling 256MB (262144KB) in size. The userfaultfd
+# hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take
+# both of these requirements into account and attempt to increase
+# number of huge pages available.
+nr_cpus=$(nproc)
+hpgsize_MB=$((hpgsize_KB / 1024))
+half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
+needmem_KB=$((half_ufd_size_MB * 2 * 1024))
+
 #set proper nr_hugepages
-if [ -n "$freepgs" ] && [ -n "$pgsize" ]; then
+if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
 	nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
-	needpgs=`expr $needmem / $pgsize`
+	needpgs=$((needmem_KB / hpgsize_KB))
 	tries=2
 	while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
 		lackpgs=$(( $needpgs - $freepgs ))
@@ -107,8 +115,9 @@ fi
 echo "---------------------------"
 echo "running userfaultfd_hugetlb"
 echo "---------------------------"
-# 256MB total huge pages == 128MB src and 128MB dst
-./userfaultfd hugetlb 128 32 $mnt/ufd_test_file
+# Test requires source and destination huge pages. Size of source
+# (half_ufd_size_MB) is passed as argument to test.
+./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
 if [ $? -ne 0 ]; then
 	echo "[FAIL]"
 	exitcode=1
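Worked example of the sizing arithmetic the script now performs, mirrored in C for clarity (the nr_cpus and hpgsize_KB values below are illustrative; the script reads them from nproc and /proc/meminfo): with 16 CPUs and 2MB huge pages, nr_cpus * hpgsize_MB is 32MB, which rounds up to a 128MB half size, so needmem_KB comes out to 262144, the same 256MB floor the script used to hardcode. On machines where 2 * nr_cpus huge pages exceed 256MB, the requirement scales up instead.

/* Mirrors the shell arithmetic added to run_vmtests. */
#include <stdio.h>

int main(void)
{
	long nr_cpus = 16;	/* illustrative; the script uses $(nproc) */
	long hpgsize_KB = 2048;	/* illustrative; from "Hugepagesize:" in /proc/meminfo */

	long hpgsize_MB = hpgsize_KB / 1024;
	/* Round nr_cpus worth of huge pages up to a multiple of 128MB,
	 * so the result never drops below the old 128MB src/dst halves. */
	long half_ufd_size_MB = ((nr_cpus * hpgsize_MB + 127) / 128) * 128;
	long needmem_KB = half_ufd_size_MB * 2 * 1024;

	printf("half_ufd_size_MB=%ld needmem_KB=%ld\n",
	       half_ufd_size_MB, needmem_KB);	/* prints 128 and 262144 */
	return 0;
}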