author     Linus Torvalds <torvalds@linux-foundation.org>	2012-05-31 18:10:18 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>	2012-05-31 18:10:18 -0700
commit     08615d7d85e5aa02c05bf6c4dde87d940e7f85f6 (patch)
tree       18906149d313d25914160aca21cedf54b3a7e818 /arch
parent     9fdadb2cbaf4b482dfd6086e8bd3d2db071a1702 (diff)
parent     0a4dd35c67b144d8ef9432120105f1aab9293ee9 (diff)

Merge branch 'akpm' (Andrew's patch-bomb)
Merge misc patches from Andrew Morton:
- the "misc" tree - stuff from all over the map
- checkpatch updates
- fatfs
- kmod changes
- procfs
- cpumask
- UML
- kexec
- mqueue
- rapidio
- pidns
- some checkpoint-restore feature work. Reluctantly. Most of it
delayed a release. I'm still rather worried that we don't have a
clear roadmap to completion for this work.
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (78 patches)
kconfig: update compression algorithm info
c/r: prctl: add ability to set new mm_struct::exe_file
c/r: prctl: extend PR_SET_MM to set up more mm_struct entries
c/r: procfs: add arg_start/end, env_start/end and exit_code members to /proc/$pid/stat
syscalls, x86: add __NR_kcmp syscall
fs, proc: introduce /proc/<pid>/task/<tid>/children entry
sysctl: make kernel.ns_last_pid control dependent on CHECKPOINT_RESTORE
aio/vfs: cleanup of rw_copy_check_uvector() and compat_rw_copy_check_uvector()
eventfd: change int to __u64 in eventfd_signal()
fs/nls: add Apple NLS
pidns: make killed children autoreap
pidns: use task_active_pid_ns in do_notify_parent
rapidio/tsi721: add DMA engine support
rapidio: add DMA engine support for RIO data transfers
ipc/mqueue: add rbtree node caching support
tools/selftests: add mq_perf_tests
ipc/mqueue: strengthen checks on mqueue creation
ipc/mqueue: correct mq_attr_ok test
ipc/mqueue: improve performance of send/recv
selftests: add mq_open_tests
...
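
Of the patches listed above, "syscalls, x86: add __NR_kcmp syscall" is the one that produces the syscall_32.tbl and syscall_64.tbl hunks in the diff below. For orientation only, here is a minimal userspace sketch of how the new syscall would be invoked; it is not part of this commit. The SYS_kcmp fallback number is taken from the x86_64 table entry below, the KCMP_FILE value mirrors the uapi header added by the kcmp patch, and do_kcmp() is a local helper, since no libc wrapper existed at the time.

/*
 * Hypothetical userspace sketch, not part of this commit: calling the new
 * kcmp() syscall through syscall(2).  SYS_kcmp falls back to the x86_64
 * number added in syscall_64.tbl below; KCMP_FILE mirrors <linux/kcmp.h>.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>

#ifndef SYS_kcmp
# define SYS_kcmp 312		/* x86_64 entry from syscall_64.tbl below */
#endif
#define KCMP_FILE 0		/* enum kcmp_type value from <linux/kcmp.h> */

/* Local wrapper; glibc did not provide one when this merge went in. */
static long do_kcmp(pid_t pid1, pid_t pid2, int type,
		    unsigned long idx1, unsigned long idx2)
{
	return syscall(SYS_kcmp, pid1, pid2, type, idx1, idx2);
}

int main(void)
{
	pid_t self = getpid();

	/* 0 means both pids share the same file object behind fd 0. */
	long ret = do_kcmp(self, self, KCMP_FILE, 0, 0);

	printf("kcmp(%d, %d, KCMP_FILE, 0, 0) = %ld\n", (int)self, (int)self, ret);
	return 0;
}

On kernels of this era the syscall is only compiled in when CONFIG_CHECKPOINT_RESTORE is set; a return value of 0 means the two tasks refer to the same underlying kernel object.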
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/smp.c                 |  8
-rw-r--r--  arch/blackfin/kernel/trace.c          | 32
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  | 11
-rw-r--r--  arch/sh/kernel/smp.c                  |  7
-rw-r--r--  arch/um/kernel/reboot.c               | 13
-rw-r--r--  arch/um/kernel/trap.c                 | 24
-rw-r--r--  arch/x86/syscalls/syscall_32.tbl      |  1
-rw-r--r--  arch/x86/syscalls/syscall_64.tbl      |  2
8 files changed, 52 insertions, 46 deletions
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b735521a4a54..2c7217d971db 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -109,7 +109,6 @@ static void percpu_timer_stop(void);
 int __cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
-	struct task_struct *p;
 	int ret;
 
 	ret = platform_cpu_disable(cpu);
@@ -139,12 +138,7 @@ int __cpu_disable(void)
 	flush_cache_all();
 	local_flush_tlb_all();
 
-	read_lock(&tasklist_lock);
-	for_each_process(p) {
-		if (p->mm)
-			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-	}
-	read_unlock(&tasklist_lock);
+	clear_tasks_mm_cpumask(cpu);
 
 	return 0;
 }
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
index 44bbf2f564cb..f7f7a18abca9 100644
--- a/arch/blackfin/kernel/trace.c
+++ b/arch/blackfin/kernel/trace.c
@@ -10,6 +10,8 @@
 #include <linux/hardirq.h>
 #include <linux/thread_info.h>
 #include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
@@ -27,8 +29,7 @@ void decode_address(char *buf, unsigned long address)
 {
 	struct task_struct *p;
 	struct mm_struct *mm;
-	unsigned long flags, offset;
-	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
+	unsigned long offset;
 	struct rb_node *n;
 
 #ifdef CONFIG_KALLSYMS
@@ -112,17 +113,17 @@ void decode_address(char *buf, unsigned long address)
 	 * mappings of all our processes and see if we can't be a whee
 	 * bit more specific
 	 */
-	write_lock_irqsave(&tasklist_lock, flags);
+	read_lock(&tasklist_lock);
 	for_each_process(p) {
-		mm = (in_atomic ? p->mm : get_task_mm(p));
-		if (!mm)
-			continue;
+		struct task_struct *t;
 
-		if (!down_read_trylock(&mm->mmap_sem)) {
-			if (!in_atomic)
-				mmput(mm);
+		t = find_lock_task_mm(p);
+		if (!t)
 			continue;
-		}
+
+		mm = t->mm;
+		if (!down_read_trylock(&mm->mmap_sem))
+			goto __continue;
 
 		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
 			struct vm_area_struct *vma;
@@ -131,7 +132,7 @@ void decode_address(char *buf, unsigned long address)
 
 			if (address >= vma->vm_start && address < vma->vm_end) {
 				char _tmpbuf[256];
-				char *name = p->comm;
+				char *name = t->comm;
 				struct file *file = vma->vm_file;
 
 				if (file) {
@@ -164,8 +165,7 @@ void decode_address(char *buf, unsigned long address)
 					name, vma->vm_start, vma->vm_end);
 
 				up_read(&mm->mmap_sem);
-				if (!in_atomic)
-					mmput(mm);
+				task_unlock(t);
 
 				if (buf[0] == '\0')
 					sprintf(buf, "[ %s ] dynamic memory", name);
@@ -175,8 +175,8 @@ void decode_address(char *buf, unsigned long address)
 		}
 
 		up_read(&mm->mmap_sem);
-		if (!in_atomic)
-			mmput(mm);
+__continue:
+		task_unlock(t);
 	}
 
 	/*
@@ -186,7 +186,7 @@ void decode_address(char *buf, unsigned long address)
 		sprintf(buf, "/* kernel dynamic memory */");
 
 done:
-	write_unlock_irqrestore(&tasklist_lock, flags);
+	read_unlock(&tasklist_lock);
 }
 
 #define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 5b63bd3da4a9..e779642c25e5 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -333,9 +333,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
 					    unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
-#ifdef CONFIG_HOTPLUG_CPU
-	struct task_struct *p;
-#endif
+
 	/* We don't touch CPU 0 map, it's allocated at aboot and kept
 	 * around forever
 	 */
@@ -358,12 +356,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
 		stale_map[cpu] = NULL;
 
 		/* We also clear the cpu_vm_mask bits of CPUs going away */
-		read_lock(&tasklist_lock);
-		for_each_process(p) {
-			if (p->mm)
-				cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-		}
-		read_unlock(&tasklist_lock);
+		clear_tasks_mm_cpumask(cpu);
 		break;
 #endif /* CONFIG_HOTPLUG_CPU */
 	}
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index b86e9ca79455..2062aa88af41 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -123,7 +123,6 @@ void native_play_dead(void)
 int __cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
-	struct task_struct *p;
 	int ret;
 
 	ret = mp_ops->cpu_disable(cpu);
@@ -153,11 +152,7 @@ int __cpu_disable(void)
 	flush_cache_all();
 	local_flush_tlb_all();
 
-	read_lock(&tasklist_lock);
-	for_each_process(p)
-		if (p->mm)
-			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-	read_unlock(&tasklist_lock);
+	clear_tasks_mm_cpumask(cpu);
 
 	return 0;
 }
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 4d93dff6b371..3d15243ce692 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -4,7 +4,9 @@
  */
 
 #include "linux/sched.h"
+#include "linux/spinlock.h"
 #include "linux/slab.h"
+#include "linux/oom.h"
 #include "kern_util.h"
 #include "os.h"
 #include "skas.h"
@@ -22,13 +24,18 @@ static void kill_off_processes(void)
 		struct task_struct *p;
 		int pid;
 
+		read_lock(&tasklist_lock);
 		for_each_process(p) {
-			if (p->mm == NULL)
-				continue;
+			struct task_struct *t;
 
-			pid = p->mm->context.id.u.pid;
+			t = find_lock_task_mm(p);
+			if (!t)
+				continue;
+			pid = t->mm->context.id.u.pid;
+			task_unlock(t);
 			os_kill_ptraced_process(pid, 1);
 		}
+		read_unlock(&tasklist_lock);
 	}
 }
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index dafc94715950..3be60765c0e2 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -30,6 +30,8 @@ int handle_page_fault(unsigned long address, unsigned long ip,
 	pmd_t *pmd;
 	pte_t *pte;
 	int err = -EFAULT;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+				 (is_write ? FAULT_FLAG_WRITE : 0);
 
 	*code_out = SEGV_MAPERR;
 
@@ -40,6 +42,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
 	if (in_atomic())
 		goto out_nosemaphore;
 
+retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 	if (!vma)
@@ -65,7 +68,11 @@ good_area:
 	do {
 		int fault;
 
-		fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+		fault = handle_mm_fault(mm, vma, address, flags);
+
+		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+			goto out_nosemaphore;
+
 		if (unlikely(fault & VM_FAULT_ERROR)) {
 			if (fault & VM_FAULT_OOM) {
 				goto out_of_memory;
@@ -75,10 +82,17 @@ good_area:
 			}
 			BUG();
 		}
-		if (fault & VM_FAULT_MAJOR)
-			current->maj_flt++;
-		else
-			current->min_flt++;
+		if (flags & FAULT_FLAG_ALLOW_RETRY) {
+			if (fault & VM_FAULT_MAJOR)
+				current->maj_flt++;
+			else
+				current->min_flt++;
+			if (fault & VM_FAULT_RETRY) {
+				flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+				goto retry;
+			}
+		}
 
 		pgd = pgd_offset(mm, address);
 		pud = pud_offset(pgd, address);
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index 29f9f0554f7d..7a35a6e71d44 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -355,3 +355,4 @@
 346	i386	setns			sys_setns
 347	i386	process_vm_readv	sys_process_vm_readv		compat_sys_process_vm_readv
 348	i386	process_vm_writev	sys_process_vm_writev		compat_sys_process_vm_writev
+349	i386	kcmp			sys_kcmp
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index dd29a9ea27c5..51171aeff0dc 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -318,6 +318,8 @@
 309	common	getcpu			sys_getcpu
 310	64	process_vm_readv	sys_process_vm_readv
 311	64	process_vm_writev	sys_process_vm_writev
+312	64	kcmp			sys_kcmp
+
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
 # for native 64-bit operation.
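
The arm, powerpc and sh hunks above all collapse the same open-coded loop ("walk every process and clear the dying CPU from its mm_cpumask") into one clear_tasks_mm_cpumask() call. The helper itself is added outside arch/ (in kernel/cpu.c) by the same patch series, so it does not appear in this arch-limited diff. The sketch below is reconstructed from the loops being removed and from the find_lock_task_mm()/task_unlock() pattern the blackfin and UML hunks switch to; treat it as indicative rather than the exact code that was merged.

/*
 * Indicative sketch only: the real clear_tasks_mm_cpumask() lives in
 * kernel/cpu.c and is not part of the diff shown on this page.  It is
 * reconstructed here from the loops removed above plus the
 * find_lock_task_mm() pattern used in the blackfin/UML hunks.
 */
#include <linux/cpumask.h>
#include <linux/oom.h>		/* find_lock_task_mm() */
#include <linux/rcupdate.h>
#include <linux/sched.h>

void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * By the time this runs the CPU has already been taken offline, so
	 * no new task can pick up the bit; RCU is enough to walk the
	 * process list safely.
	 */
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * The group leader may have exited and dropped its mm; find
		 * any thread of p that still has one, with task_lock() held.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

Going through find_lock_task_mm() matters because a group leader can have already exited and lost its ->mm while another thread still owns a live one, and holding task_lock() keeps that mm from disappearing while the cpumask bit is cleared.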