Diffstat (limited to 'kernel')
-rw-r--r--   kernel/cgroup/cgroup.c   |  1
-rw-r--r--   kernel/crash_core.c      |  4
-rw-r--r--   kernel/events/core.c     |  2
-rw-r--r--   kernel/events/uprobes.c  |  4
-rw-r--r--   kernel/fork.c            |  1
-rw-r--r--   kernel/kthread.c         | 19
-rw-r--r--   kernel/sysctl.c          |  8
-rw-r--r--   kernel/watchdog.c        | 12
8 files changed, 25 insertions, 26 deletions
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 21ecc6ee6a6d..9cc8c3a686b1 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -577,6 +577,7 @@ out_unlock:
 	rcu_read_unlock();
 	return css;
 }
+EXPORT_SYMBOL_GPL(cgroup_get_e_css);
 
 static void cgroup_get_live(struct cgroup *cgrp)
 {
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 684a6061a13a..da449c1cdca7 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -455,7 +455,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_SYMBOL(_stext);
 	VMCOREINFO_SYMBOL(vmap_area_list);
 
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
 	VMCOREINFO_SYMBOL(mem_map);
 	VMCOREINFO_SYMBOL(contig_page_data);
 #endif
@@ -484,7 +484,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_OFFSET(page, compound_head);
 	VMCOREINFO_OFFSET(pglist_data, node_zones);
 	VMCOREINFO_OFFSET(pglist_data, nr_zones);
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
+#ifdef CONFIG_FLATMEM
 	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
 #endif
 	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4576413b6230..464917096e73 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8309,8 +8309,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 
 	if (vma->vm_flags & VM_DENYWRITE)
 		flags |= MAP_DENYWRITE;
-	if (vma->vm_flags & VM_MAYEXEC)
-		flags |= MAP_EXECUTABLE;
 	if (vma->vm_flags & VM_LOCKED)
 		flags |= MAP_LOCKED;
 	if (is_vm_hugetlb_page(vma))
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index a481ef696143..af24dc3febbe 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -2047,8 +2047,8 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
 	struct vm_area_struct *vma;
 
 	mmap_read_lock(mm);
-	vma = find_vma(mm, bp_vaddr);
-	if (vma && vma->vm_start <= bp_vaddr) {
+	vma = vma_lookup(mm, bp_vaddr);
+	if (vma) {
 		if (valid_vma(vma, false)) {
 			struct inode *inode = file_inode(vma->vm_file);
 			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
diff --git a/kernel/fork.c b/kernel/fork.c
index b4386ff6a641..bc94b2cc5995 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1035,7 +1035,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	mm_pgtables_bytes_init(mm);
 	mm->map_count = 0;
 	mm->locked_vm = 0;
-	atomic_set(&mm->has_pinned, 0);
 	atomic64_set(&mm->pinned_vm, 0);
 	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
 	spin_lock_init(&mm->page_table_lock);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 6b0a30a944b3..5b37a8567168 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1162,14 +1162,14 @@ static bool __kthread_cancel_work(struct kthread_work *work)
  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
  * @work is guaranteed to be queued immediately.
  *
- * Return: %true if @dwork was pending and its timer was modified,
- * %false otherwise.
+ * Return: %false if @dwork was idle and queued, %true otherwise.
  *
  * A special case is when the work is being canceled in parallel.
  * It might be caused either by the real kthread_cancel_delayed_work_sync()
  * or yet another kthread_mod_delayed_work() call. We let the other command
- * win and return %false here. The caller is supposed to synchronize these
- * operations a reasonable way.
+ * win and return %true here. The return value can be used for reference
+ * counting and the number of queued works stays the same. Anyway, the caller
+ * is supposed to synchronize these operations a reasonable way.
  *
  * This function is safe to call from any context including IRQ handler.
  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
@@ -1181,13 +1181,15 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 {
 	struct kthread_work *work = &dwork->work;
 	unsigned long flags;
-	int ret = false;
+	int ret;
 
 	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	/* Do not bother with canceling when never queued. */
-	if (!work->worker)
+	if (!work->worker) {
+		ret = false;
 		goto fast_queue;
+	}
 
 	/* Work must not be used with >1 worker, see kthread_queue_work() */
 	WARN_ON_ONCE(work->worker != worker);
@@ -1205,8 +1207,11 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	 * be used for reference counting.
 	 */
 	kthread_cancel_delayed_work_timer(work, &flags);
-	if (work->canceling)
+	if (work->canceling) {
+		/* The number of works in the queue does not change. */
+		ret = true;
 		goto out;
+	}
 	ret = __kthread_cancel_work(work);
 
 fast_queue:
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8c8c220637ce..bade84290e24 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2921,11 +2921,11 @@ static struct ctl_table vm_table[] = {
 		.extra2		= &one_thousand,
 	},
 	{
-		.procname	= "percpu_pagelist_fraction",
-		.data		= &percpu_pagelist_fraction,
-		.maxlen		= sizeof(percpu_pagelist_fraction),
+		.procname	= "percpu_pagelist_high_fraction",
+		.data		= &percpu_pagelist_high_fraction,
+		.maxlen		= sizeof(percpu_pagelist_high_fraction),
 		.mode		= 0644,
-		.proc_handler	= percpu_pagelist_fraction_sysctl_handler,
+		.proc_handler	= percpu_pagelist_high_fraction_sysctl_handler,
 		.extra1		= SYSCTL_ZERO,
 	},
 	{
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 92d3bcc5a5e0..ad912511a0c0 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -92,7 +92,7 @@ __setup("nmi_watchdog=", hardlockup_panic_setup);
  * own hardlockup detector.
  *
  * watchdog_nmi_enable/disable can be implemented to start and stop when
- * softlockup watchdog threads start and stop. The arch must select the
+ * softlockup watchdog start and stop. The arch must select the
  * SOFTLOCKUP_DETECTOR Kconfig.
  */
 int __weak watchdog_nmi_enable(unsigned int cpu)
@@ -335,7 +335,7 @@ static DEFINE_PER_CPU(struct completion, softlockup_completion);
 static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
 
 /*
- * The watchdog thread function - touches the timestamp.
+ * The watchdog feed function - touches the timestamp.
  *
  * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
@@ -558,11 +558,7 @@ static void lockup_detector_reconfigure(void)
 }
 
 /*
- * Create the watchdog thread infrastructure and configure the detector(s).
- *
- * The threads are not unparked as watchdog_allowed_mask is empty. When
- * the threads are successfully initialized, take the proper locks and
- * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
+ * Create the watchdog infrastructure and configure the detector(s).
  */
 static __init void lockup_detector_setup(void)
 {
@@ -628,7 +624,7 @@ void lockup_detector_soft_poweroff(void)
 
 #ifdef CONFIG_SYSCTL
 
-/* Propagate any changes to the watchdog threads */
+/* Propagate any changes to the watchdog infrastructure */
 static void proc_watchdog_update(void)
 {
 	/* Remove impossible cpus to keep sysctl output clean. */
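Note on the uprobes hunk: it replaces an open-coded find_vma() plus vm_start check with vma_lookup(), which only returns a VMA that actually contains the given address. A minimal sketch of that equivalence, assuming the classic find_vma() semantics; the helper name below is illustrative, not the kernel's own definition:

	#include <linux/mm.h>

	/* Sketch only: what vma_lookup(mm, addr) is expected to boil down to. */
	static inline struct vm_area_struct *vma_lookup_sketch(struct mm_struct *mm,
								unsigned long addr)
	{
		struct vm_area_struct *vma = find_vma(mm, addr);

		/* find_vma() may return the next VMA above addr; reject it. */
		if (vma && addr < vma->vm_start)
			vma = NULL;

		return vma;
	}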
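Note on the kthread hunks: the reworded kthread_mod_delayed_work() kernel-doc ties the return value to reference counting: %false means one more work became queued, %true means the number of queued works did not change. A hypothetical caller built on that rule (my_obj, my_obj_release and the embedded worker/dwork fields are made up for illustration):

	/* Keep exactly one reference per queued work. */
	static void my_obj_kick(struct my_obj *obj, unsigned long delay)
	{
		kref_get(&obj->ref);	/* reference for the work being (re)queued */

		/*
		 * %true: the number of queued works stayed the same (it was
		 * already pending or is being canceled), so drop the extra ref.
		 */
		if (kthread_mod_delayed_work(obj->worker, &obj->dwork, delay))
			kref_put(&obj->ref, my_obj_release);
	}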