Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	18
1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 770c1a8128bf..794724efb733 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1649,16 +1649,8 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 	struct worker *worker = container_of(work, struct worker, rebind_work);
 	struct global_cwq *gcwq = worker->pool->gcwq;
 
-	worker_maybe_bind_and_lock(worker);
-
-	/*
-	 * %WORKER_REBIND must be cleared even if the above binding failed;
-	 * otherwise, we may confuse the next CPU_UP cycle or oops / get
-	 * stuck by calling idle_worker_rebind() prematurely. If CPU went
-	 * down again inbetween, %WORKER_UNBOUND would be set, so clearing
-	 * %WORKER_REBIND is always safe.
-	 */
-	worker_clr_flags(worker, WORKER_REBIND);
+	if (worker_maybe_bind_and_lock(worker))
+		worker_clr_flags(worker, WORKER_UNBOUND);
 
 	spin_unlock_irq(&gcwq->lock);
 }
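
For readability, this is how busy_worker_rebind_fn() reads once the hunk above is applied. It is reassembled from the context and '+' lines, not copied from the tree, and the comments are editorial; in particular, the note about locking reflects worker_maybe_bind_and_lock()'s documented behavior of returning with gcwq->lock held whether or not the binding succeeds:

	static void busy_worker_rebind_fn(struct work_struct *work)
	{
		struct worker *worker = container_of(work, struct worker, rebind_work);
		struct global_cwq *gcwq = worker->pool->gcwq;

		/* if rebinding to the CPU succeeded, the worker is no longer
		 * unbound; no separate %WORKER_REBIND step is needed anymore */
		if (worker_maybe_bind_and_lock(worker))
			worker_clr_flags(worker, WORKER_UNBOUND);

		/* worker_maybe_bind_and_lock() returns with gcwq->lock held */
		spin_unlock_irq(&gcwq->lock);
	}
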
@@ -1721,15 +1713,9 @@ static void rebind_workers(struct global_cwq *gcwq)
 
 	/* rebind busy workers */
 	for_each_busy_worker(worker, i, pos, gcwq) {
-		unsigned long worker_flags = worker->flags;
		struct work_struct *rebind_work = &worker->rebind_work;
 		struct workqueue_struct *wq;
 
-		/* morph UNBOUND to REBIND atomically */
-		worker_flags &= ~WORKER_UNBOUND;
-		worker_flags |= WORKER_REBIND;
-		ACCESS_ONCE(worker->flags) = worker_flags;
-
 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
 				     work_data_bits(rebind_work)))
 			continue;
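
And the corresponding busy-worker loop in rebind_workers() after the patch: the two-step UNBOUND-to-REBIND morphing is gone, and the loop only queues the rebind work item. This is a reassembly of the hunk above; the tail of the loop body falls outside the hunk, so it is elided here as well, and the comments are editorial:

	/* rebind busy workers */
	for_each_busy_worker(worker, i, pos, gcwq) {
		struct work_struct *rebind_work = &worker->rebind_work;
		struct workqueue_struct *wq;

		/* skip workers whose rebind work item is already pending */
		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
				     work_data_bits(rebind_work)))
			continue;
		/* ... remainder of the loop body is outside this hunk ... */
	}
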