author     Linus Torvalds <torvalds@linux-foundation.org>   2017-07-10 16:58:42 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-07-10 16:58:42 -0700
commit     9967468c0a109644e4a1f5b39b39bf86fe7507a7 (patch)
tree       72b7764c6dd6d74ede25688545a2e0c29dde3a2b /fs
parent     548aa0e3c516d906dae5edb1fc9a1ad2e490120a (diff)
parent     dd83c161fbcc5d8be637ab159c0de015cbff5ba4 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
- most of the rest of MM
- KASAN updates
- lib/ updates
- checkpatch updates
- some binfmt_elf changes
- various misc bits
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (115 commits)
kernel/exit.c: avoid undefined behaviour when calling wait4()
kernel/signal.c: avoid undefined behaviour in kill_something_info
binfmt_elf: safely increment argv pointers
s390: reduce ELF_ET_DYN_BASE
powerpc: move ELF_ET_DYN_BASE to 4GB / 4MB
arm64: move ELF_ET_DYN_BASE to 4GB / 4MB
arm: move ELF_ET_DYN_BASE to 4MB
binfmt_elf: use ELF_ET_DYN_BASE only for PIE
fs, epoll: short circuit fetching events if thread has been killed
checkpatch: improve multi-line alignment test
checkpatch: improve macro reuse test
checkpatch: change format of --color argument to --color[=WHEN]
checkpatch: silence perl 5.26.0 unescaped left brace warnings
checkpatch: improve tests for multiple line function definitions
checkpatch: remove false warning for commit reference
checkpatch: fix stepping through statements with $stat and ctx_statement_block
checkpatch: [HLP]LIST_HEAD is also declaration
checkpatch: warn when a MAINTAINERS entry isn't [A-Z]:\t
checkpatch: improve the unnecessary OOM message test
lib/bsearch.c: micro-optimize pivot position calculation
...
Diffstat (limited to 'fs')
-rw-r--r--  fs/binfmt_elf.c      | 79
-rw-r--r--  fs/buffer.c          | 43
-rw-r--r--  fs/dcache.c          |  5
-rw-r--r--  fs/eventpoll.c       | 10
-rw-r--r--  fs/hugetlbfs/inode.c | 11
-rw-r--r--  fs/proc/generic.c    | 32
-rw-r--r--  fs/proc/task_mmu.c   |  1
7 files changed, 106 insertions, 75 deletions
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 5075fd5c62c8..879ff9c7ffd0 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -163,8 +163,6 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	unsigned long p = bprm->p;
 	int argc = bprm->argc;
 	int envc = bprm->envc;
-	elf_addr_t __user *argv;
-	elf_addr_t __user *envp;
 	elf_addr_t __user *sp;
 	elf_addr_t __user *u_platform;
 	elf_addr_t __user *u_base_platform;
@@ -304,38 +302,38 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
 	if (__put_user(argc, sp++))
 		return -EFAULT;
-	argv = sp;
-	envp = argv + argc + 1;
 
-	/* Populate argv and envp */
+	/* Populate list of argv pointers back to argv strings. */
 	p = current->mm->arg_end = current->mm->arg_start;
 	while (argc-- > 0) {
 		size_t len;
-		if (__put_user((elf_addr_t)p, argv++))
+		if (__put_user((elf_addr_t)p, sp++))
 			return -EFAULT;
 		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
 		if (!len || len > MAX_ARG_STRLEN)
 			return -EINVAL;
 		p += len;
 	}
-	if (__put_user(0, argv))
+	if (__put_user(0, sp++))
 		return -EFAULT;
-	current->mm->arg_end = current->mm->env_start = p;
+	current->mm->arg_end = p;
+
+	/* Populate list of envp pointers back to envp strings. */
+	current->mm->env_end = current->mm->env_start = p;
 	while (envc-- > 0) {
 		size_t len;
-		if (__put_user((elf_addr_t)p, envp++))
+		if (__put_user((elf_addr_t)p, sp++))
 			return -EFAULT;
 		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
 		if (!len || len > MAX_ARG_STRLEN)
 			return -EINVAL;
 		p += len;
 	}
-	if (__put_user(0, envp))
+	if (__put_user(0, sp++))
 		return -EFAULT;
 	current->mm->env_end = p;
 
 	/* Put the elf_info on the stack in the right place.  */
-	sp = (elf_addr_t __user *)envp + 1;
 	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
 		return -EFAULT;
 	return 0;
@@ -927,17 +925,60 @@ static int load_elf_binary(struct linux_binprm *bprm)
 		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
 
 		vaddr = elf_ppnt->p_vaddr;
+		/*
+		 * If we are loading ET_EXEC or we have already performed
+		 * the ET_DYN load_addr calculations, proceed normally.
+		 */
 		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
 			elf_flags |= MAP_FIXED;
 		} else if (loc->elf_ex.e_type == ET_DYN) {
-			/* Try and get dynamic programs out of the way of the
-			 * default mmap base, as well as whatever program they
-			 * might try to exec.  This is because the brk will
-			 * follow the loader, and is not movable.  */
-			load_bias = ELF_ET_DYN_BASE - vaddr;
-			if (current->flags & PF_RANDOMIZE)
-				load_bias += arch_mmap_rnd();
-			load_bias = ELF_PAGESTART(load_bias);
+			/*
+			 * This logic is run once for the first LOAD Program
+			 * Header for ET_DYN binaries to calculate the
+			 * randomization (load_bias) for all the LOAD
+			 * Program Headers, and to calculate the entire
+			 * size of the ELF mapping (total_size). (Note that
+			 * load_addr_set is set to true later once the
+			 * initial mapping is performed.)
+			 *
+			 * There are effectively two types of ET_DYN
+			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
+			 * and loaders (ET_DYN without INTERP, since they
+			 * _are_ the ELF interpreter). The loaders must
+			 * be loaded away from programs since the program
+			 * may otherwise collide with the loader (especially
+			 * for ET_EXEC which does not have a randomized
+			 * position). For example to handle invocations of
+			 * "./ld.so someprog" to test out a new version of
+			 * the loader, the subsequent program that the
+			 * loader loads must avoid the loader itself, so
+			 * they cannot share the same load range. Sufficient
+			 * room for the brk must be allocated with the
+			 * loader as well, since brk must be available with
+			 * the loader.
+			 *
+			 * Therefore, programs are loaded offset from
+			 * ELF_ET_DYN_BASE and loaders are loaded into the
+			 * independently randomized mmap region (0 load_bias
+			 * without MAP_FIXED).
+			 */
+			if (elf_interpreter) {
+				load_bias = ELF_ET_DYN_BASE;
+				if (current->flags & PF_RANDOMIZE)
+					load_bias += arch_mmap_rnd();
+				elf_flags |= MAP_FIXED;
+			} else
+				load_bias = 0;
+
+			/*
+			 * Since load_bias is used for all subsequent loading
+			 * calculations, we must lower it by the first vaddr
+			 * so that the remaining calculations based on the
+			 * ELF vaddrs will be correctly offset. The result
+			 * is then page aligned.
+			 */
+			load_bias = ELF_PAGESTART(load_bias - vaddr);
+
 			total_size = total_mapping_size(elf_phdata,
 							loc->elf_ex.e_phnum);
 			if (!total_size) {
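For reference, the argc/argv[]/NULL/envp[]/NULL/auxv layout that create_elf_tables() now writes with a single sp cursor is observable from userspace. Below is a minimal sketch, assuming a 64-bit Linux target and the common (non-standard) three-argument main() extension; walking past envp's terminating NULL to reach the auxiliary vector is a known layout trick, not an API:

/* Hypothetical demo: walk the initial stack layout built by
 * create_elf_tables(): argv[], NULL, envp[], NULL, then auxv.
 * Assumes 64-bit Linux; compile with: cc -o stackwalk stackwalk.c
 */
#include <stdio.h>
#include <elf.h>

int main(int argc, char **argv, char **envp)
{
	char **p = envp;

	printf("argc = %d\n", argc);
	printf("argv[0] = %s\n", argv[0]);

	/* Skip over the envp pointer array to its terminating NULL... */
	while (*p)
		p++;
	p++;	/* ...and step past it: the auxv entries start here. */

	/* The kernel copied the Elf auxv entries right after envp. */
	for (Elf64_auxv_t *aux = (Elf64_auxv_t *)p; aux->a_type != AT_NULL; aux++) {
		if (aux->a_type == AT_ENTRY)
			printf("AT_ENTRY (program entry) = %#lx\n",
			       (unsigned long)aux->a_un.a_val);
		if (aux->a_type == AT_BASE)
			printf("AT_BASE (interpreter base) = %#lx\n",
			       (unsigned long)aux->a_un.a_val);
	}
	return 0;
}

Running a PIE build of this under ASLR should show AT_ENTRY varying across runs, which is the load_bias behavior the hunk above arranges for ET_DYN programs.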
diff --git a/fs/buffer.c b/fs/buffer.c
index ea0e05ec2916..5715dac7821f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1281,44 +1281,31 @@ static inline void check_irqs_on(void)
 }
 
 /*
- * The LRU management algorithm is dopey-but-simple.  Sorry.
+ * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
+ * inserted at the front, and the buffer_head at the back if any is evicted.
+ * Or, if already in the LRU it is moved to the front.
  */
 static void bh_lru_install(struct buffer_head *bh)
 {
-	struct buffer_head *evictee = NULL;
+	struct buffer_head *evictee = bh;
+	struct bh_lru *b;
+	int i;
 
 	check_irqs_on();
 	bh_lru_lock();
-	if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
-		struct buffer_head *bhs[BH_LRU_SIZE];
-		int in;
-		int out = 0;
-
-		get_bh(bh);
-		bhs[out++] = bh;
-		for (in = 0; in < BH_LRU_SIZE; in++) {
-			struct buffer_head *bh2 =
-				__this_cpu_read(bh_lrus.bhs[in]);
-
-			if (bh2 == bh) {
-				__brelse(bh2);
-			} else {
-				if (out >= BH_LRU_SIZE) {
-					BUG_ON(evictee != NULL);
-					evictee = bh2;
-				} else {
-					bhs[out++] = bh2;
-				}
-			}
+
+	b = this_cpu_ptr(&bh_lrus);
+	for (i = 0; i < BH_LRU_SIZE; i++) {
+		swap(evictee, b->bhs[i]);
+		if (evictee == bh) {
+			bh_lru_unlock();
+			return;
 		}
-		while (out < BH_LRU_SIZE)
-			bhs[out++] = NULL;
-		memcpy(this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
 	}
-	bh_lru_unlock();
 
-	if (evictee)
-		__brelse(evictee);
+	get_bh(bh);
+	bh_lru_unlock();
+	brelse(evictee);
 }
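The rewritten bh_lru_install() above does insertion, move-to-front deduplication, and tail eviction in a single swap() pass: each swap pushes the previous occupant one slot toward the tail, and encountering the installed value mid-array means it was already resident, so the rotation stops there. A standalone sketch of the same idiom with plain integers (hypothetical lru_install() helper, not kernel code; 0 doubles as the empty/no-eviction sentinel):

/* Hypothetical demo of the swap-rotation idiom from bh_lru_install():
 * each swap pushes the previous occupant one slot back; if the value
 * being installed turns up mid-array the rotation is already complete
 * (move-to-front with dedup), otherwise the old tail falls off the end.
 */
#include <stdio.h>

#define LRU_SIZE 4

static int lru[LRU_SIZE];	/* 0 means an empty slot */

#define swap(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)

/* Returns the evicted value, or 0 if nothing was evicted. */
static int lru_install(int val)
{
	int evictee = val;

	for (int i = 0; i < LRU_SIZE; i++) {
		swap(evictee, lru[i]);
		if (evictee == val)	/* found a duplicate: done */
			return 0;
	}
	return evictee;		/* old tail entry, displaced off the end */
}

int main(void)
{
	int vals[] = { 1, 2, 3, 4, 5, 3 };

	for (unsigned i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		int out = lru_install(vals[i]);
		printf("install %d -> lru = [%d %d %d %d], evicted %d\n",
		       vals[i], lru[0], lru[1], lru[2], lru[3], out);
	}
	return 0;
}

Installing 5 into a full [4 3 2 1] evicts 1; re-installing 3 rotates it to the front without eviction, which is exactly the two cases the kernel loop handles with brelse() and the early return.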
diff --git a/fs/dcache.c b/fs/dcache.c
index 7ece68d0d4db..6c30be668487 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1160,11 +1160,12 @@ void shrink_dcache_sb(struct super_block *sb)
 		LIST_HEAD(dispose);
 
 		freed = list_lru_walk(&sb->s_dentry_lru,
-			dentry_lru_isolate_shrink, &dispose, UINT_MAX);
+			dentry_lru_isolate_shrink, &dispose, 1024);
 
 		this_cpu_sub(nr_dentry_unused, freed);
 		shrink_dentry_list(&dispose);
-	} while (freed > 0);
+		cond_resched();
+	} while (list_lru_count(&sb->s_dentry_lru) > 0);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index b1c8e23ddf65..a6d194831ed8 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1748,6 +1748,16 @@ fetch_events:
 		 * to TASK_INTERRUPTIBLE before doing the checks.
 		 */
 		set_current_state(TASK_INTERRUPTIBLE);
+		/*
+		 * Always short-circuit for fatal signals to allow
+		 * threads to make a timely exit without the chance of
+		 * finding more events available and fetching
+		 * repeatedly.
+		 */
+		if (fatal_signal_pending(current)) {
+			res = -EINTR;
+			break;
+		}
 		if (ep_events_available(ep) || timed_out)
 			break;
 		if (signal_pending(current)) {
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d44f5456eb9b..52388611635e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -851,6 +851,16 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
 	return MIGRATEPAGE_SUCCESS;
 }
 
+static int hugetlbfs_error_remove_page(struct address_space *mapping,
+				struct page *page)
+{
+	struct inode *inode = mapping->host;
+
+	remove_huge_page(page);
+	hugetlb_fix_reserve_counts(inode);
+	return 0;
+}
+
 static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
@@ -966,6 +976,7 @@ static const struct address_space_operations hugetlbfs_aops = {
 	.write_end		= hugetlbfs_write_end,
 	.set_page_dirty		= hugetlbfs_set_page_dirty,
 	.migratepage		= hugetlbfs_migrate_page,
+	.error_remove_page	= hugetlbfs_error_remove_page,
 };
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 9425c0d97262..e3cda0b5968f 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -180,7 +180,6 @@ static int xlate_proc_name(const char *name, struct proc_dir_entry **ret,
 }
 
 static DEFINE_IDA(proc_inum_ida);
-static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */
 
 #define PROC_DYNAMIC_FIRST 0xF0000000U
 
@@ -190,37 +189,20 @@ static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */
  */
 int proc_alloc_inum(unsigned int *inum)
 {
-	unsigned int i;
-	int error;
+	int i;
 
-retry:
-	if (!ida_pre_get(&proc_inum_ida, GFP_KERNEL))
-		return -ENOMEM;
+	i = ida_simple_get(&proc_inum_ida, 0, UINT_MAX - PROC_DYNAMIC_FIRST + 1,
+			   GFP_KERNEL);
+	if (i < 0)
+		return i;
 
-	spin_lock_irq(&proc_inum_lock);
-	error = ida_get_new(&proc_inum_ida, &i);
-	spin_unlock_irq(&proc_inum_lock);
-	if (error == -EAGAIN)
-		goto retry;
-	else if (error)
-		return error;
-
-	if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
-		spin_lock_irq(&proc_inum_lock);
-		ida_remove(&proc_inum_ida, i);
-		spin_unlock_irq(&proc_inum_lock);
-		return -ENOSPC;
-	}
-	*inum = PROC_DYNAMIC_FIRST + i;
+	*inum = PROC_DYNAMIC_FIRST + (unsigned int)i;
 	return 0;
 }
 
 void proc_free_inum(unsigned int inum)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&proc_inum_lock, flags);
-	ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
-	spin_unlock_irqrestore(&proc_inum_lock, flags);
+	ida_simple_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
 }
 
 /*
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 520802da059c..b836fd61ed87 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -298,7 +298,6 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 	}
 
-	/* We don't show the stack guard page in /proc/maps */
 	start = vma->vm_start;
 	end = vma->vm_end;
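The fs/proc/generic.c conversion above replaces the open-coded ida_pre_get()/ida_get_new()/ida_remove() sequence with ida_simple_get()/ida_simple_remove(), which take an explicit [start, end) range and hide the locking and retry loop. A userspace analog of that allocate-lowest-free-then-offset pattern (hypothetical id_alloc()/id_free() names and a toy bitmap, not the kernel IDA):

/* Hypothetical userspace analog of the ida_simple_get() pattern used by
 * proc_alloc_inum(): hand out the lowest free index in [0, ID_MAX),
 * offset it into a PROC_DYNAMIC_FIRST-style namespace, and free by
 * subtracting the offset back out.
 */
#include <stdio.h>

#define ID_MAX   64
#define ID_FIRST 0xF0000000U	/* analog of PROC_DYNAMIC_FIRST */

static unsigned char used[ID_MAX];

static int id_alloc(unsigned int *out)
{
	for (int i = 0; i < ID_MAX; i++) {
		if (!used[i]) {
			used[i] = 1;
			*out = ID_FIRST + (unsigned int)i;
			return 0;
		}
	}
	return -1;	/* analog of -ENOSPC */
}

static void id_free(unsigned int id)
{
	used[id - ID_FIRST] = 0;
}

int main(void)
{
	unsigned int a, b;

	id_alloc(&a);
	id_alloc(&b);
	printf("allocated %#x and %#x\n", a, b);
	id_free(a);
	id_alloc(&a);	/* the lowest freed index is reused, like the IDA */
	printf("reallocated %#x\n", a);
	return 0;
}

The range bound passed to ida_simple_get() (UINT_MAX - PROC_DYNAMIC_FIRST + 1) plays the role of ID_MAX here: it keeps the offset addition from overflowing, which the old code checked by hand before the conversion.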