author     Linus Torvalds <torvalds@linux-foundation.org>  2014-12-11 18:48:45 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-11 18:48:45 -0800
commit     0a27044c83fe8f52fd8fd1964d17fa7538ea0771 (patch)
tree       cd6df6ca2154c03249a6f64b62bca09fed1de686
parent     eedb3d3304b59c64c811522f4ebaaf83124deeac (diff)
parent     008847f66c38712f2819cd956969519006ebc11d (diff)
Merge branch 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue update from Tejun Heo:
 "Work items which may be involved in the memory reclaim path may be
  executed by the rescuer under memory pressure.  When a rescuer gets
  activated, it processes whatever is on the pending list and then goes
  back to sleep until the manager kicks it again, which involves a
  100ms delay.

  This is problematic for self-requeueing work items or ones running on
  ordered workqueues, as there is always exactly one work item on the
  pending list when the rescuer kicks in.  Executing that work item
  produces more work to execute, but the rescuer won't see it until the
  100ms has passed, so such workqueues end up executing only one work
  item every 100ms under prolonged memory pressure; the pressure itself
  may well be prolonged by that slow execution.

  Neil wrote a patch which fixes this issue by keeping the rescuer
  working as long as the target workqueue is busy but doesn't have
  enough workers"

* 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: allow rescuer thread to do more work.
  workqueue: invert the order between pool->lock and wq_mayday_lock
  workqueue: cosmetic update in rescuer_thread()
 kernel/workqueue.c | 30 ++++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)
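The failure mode the pull message describes is easy to model outside the kernel. The following is a hypothetical userspace C sketch, not kernel code (all names are illustrative): a self-requeueing work item leaves exactly one successor pending after each run, and a rescuer that handles one item per 100ms wakeup caps throughput at roughly ten items per second, however cheap each item is.

/*
 * Hypothetical userspace sketch (not kernel code) of the old rescuer
 * behaviour: one item is processed per wakeup, then the rescuer
 * sleeps for MAYDAY_INTERVAL (100ms here) until the manager kicks it.
 */
#include <stdio.h>
#include <time.h>

#define MAYDAY_INTERVAL_MS	100

static int pending = 1;		/* one self-requeueing work item */
static int executed;

static void run_one_work_item(void)
{
	pending--;
	executed++;
	pending++;		/* the work item requeues itself */
}

int main(void)
{
	struct timespec delay = { 0, MAYDAY_INTERVAL_MS * 1000000L };

	/* old behaviour: one wakeup, one work item, sleep again */
	for (int wakeup = 0; wakeup < 10; wakeup++) {
		if (pending)
			run_one_work_item();
		nanosleep(&delay, NULL);	/* wait for the next kick */
	}
	printf("executed %d items in ~1 second\n", executed);
	return 0;
}

With the patch below, the rescuer instead keeps the pwq on the mayday list while the queue stays busy, so the same workload drains at execution speed rather than at timer speed.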
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 09b685daee3d..6202b08f1933 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1804,8 +1804,8 @@ static void pool_mayday_timeout(unsigned long __pool)
 	struct worker_pool *pool = (void *)__pool;
 	struct work_struct *work;
 
-	spin_lock_irq(&wq_mayday_lock);		/* for wq->maydays */
-	spin_lock(&pool->lock);
+	spin_lock_irq(&pool->lock);
+	spin_lock(&wq_mayday_lock);		/* for wq->maydays */
 
 	if (need_to_create_worker(pool)) {
 		/*
@@ -1818,8 +1818,8 @@ static void pool_mayday_timeout(unsigned long __pool)
 			send_mayday(work);
 	}
 
-	spin_unlock(&pool->lock);
-	spin_unlock_irq(&wq_mayday_lock);
+	spin_unlock(&wq_mayday_lock);
+	spin_unlock_irq(&pool->lock);
 
 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
 }
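The two hunks above flip the nesting so that pool->lock is always the outer lock. That matters because the rescuer path added in the next hunk takes wq_mayday_lock while already holding pool->lock; had pool_mayday_timeout() kept the old wq_mayday_lock-first order, the two paths could deadlock ABBA-style. Here is a minimal userspace sketch of that ordering rule, using pthread mutexes as stand-ins (the names are analogies, not kernel APIs):

/*
 * Hypothetical sketch of the lock-ordering rule enforced above: both
 * paths take "pool" before "mayday" (mirroring pool->lock before
 * wq_mayday_lock), so two threads can never each hold one lock while
 * waiting for the other.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mayday_lock = PTHREAD_MUTEX_INITIALIZER;

/* analogous to pool_mayday_timeout() after the patch */
static void *timeout_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pool_lock);		/* outer lock first */
	pthread_mutex_lock(&mayday_lock);	/* inner lock second */
	puts("timeout path: scanned worklist, sent maydays");
	pthread_mutex_unlock(&mayday_lock);
	pthread_mutex_unlock(&pool_lock);
	return NULL;
}

/* analogous to the rescuer requeueing the pwq on wq->maydays */
static void *rescuer_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pool_lock);		/* same order: pool... */
	pthread_mutex_lock(&mayday_lock);	/* ...then mayday */
	puts("rescuer path: moved pwq back onto the mayday list");
	pthread_mutex_unlock(&mayday_lock);
	pthread_mutex_unlock(&pool_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, timeout_path, NULL);
	pthread_create(&b, NULL, rescuer_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Reversing the order in only one of the two paths would let one thread hold mayday_lock while the other holds pool_lock, each waiting on the other.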
@@ -2248,12 +2248,30 @@ repeat:
 		 * Slurp in all works issued via this workqueue and
 		 * process'em.
 		 */
-		WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
+		WARN_ON_ONCE(!list_empty(scheduled));
 		list_for_each_entry_safe(work, n, &pool->worklist, entry)
 			if (get_work_pwq(work) == pwq)
 				move_linked_works(work, scheduled, &n);
 
-		process_scheduled_works(rescuer);
+		if (!list_empty(scheduled)) {
+			process_scheduled_works(rescuer);
+
+			/*
+			 * The above execution of rescued work items could
+			 * have created more to rescue through
+			 * pwq_activate_first_delayed() or chained
+			 * queueing.  Let's put @pwq back on mayday list so
+			 * that such back-to-back work items, which may be
+			 * being used to relieve memory pressure, don't
+			 * incur MAYDAY_INTERVAL delay in between.
+			 */
+			if (need_to_create_worker(pool)) {
+				spin_lock(&wq_mayday_lock);
+				get_pwq(pwq);
+				list_move_tail(&pwq->mayday_node, &wq->maydays);
+				spin_unlock(&wq_mayday_lock);
+			}
+		}
 
 		/*
 		 * Put the reference grabbed by send_mayday().  @pool won't
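The comment in the hunk above states the design choice: once the slurped items have run, the rescuer checks need_to_create_worker() and, if the pool is still starved, puts @pwq straight back on wq->maydays rather than waiting for the next mayday timer tick. A hypothetical sketch of the resulting behaviour, in the same toy model as the first example (names are illustrative, not kernel APIs):

/*
 * Hypothetical sketch (not kernel code) of the new behaviour: the
 * rescuer keeps going while chained work remains, so back-to-back
 * items drain at execution speed with no 100ms waits in the loop.
 */
#include <stdio.h>

static int pending = 1;		/* one self-requeueing work item */
static int executed;
static int immediate_requeues;	/* models list_move_tail() onto wq->maydays */

static void run_one_work_item(void)
{
	pending--;
	executed++;
	pending++;		/* chained queueing: produces a successor */
}

int main(void)
{
	/* new behaviour: process, then requeue at once while busy */
	while (executed < 1000) {
		if (pending)
			run_one_work_item();
		immediate_requeues++;	/* back on the mayday list, no sleep */
	}
	printf("executed %d items after %d immediate requeues\n",
	       executed, immediate_requeues);
	return 0;
}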