diff options
| author | Steven Rostedt <srostedt@redhat.com> | 2012-07-16 08:07:44 +0000 |
|---|---|---|
| committer | Steven Rostedt <rostedt@goodmis.org> | 2012-08-03 00:00:53 -0400 |
| commit | a18db7044bd053e6e1ec7987b77d9cc7ba30c3f1 (patch) | |
| tree | e2289798d650b4dbe6ab331d92ce71feba5b33b6 | |
| parent | a833ce479387c74d9f542618db53cb62e4b85ca6 (diff) | |
workqueue: Revert workqueue: Fix PF_THREAD_BOUND abuse
Revert commit
Author: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Mon Oct 3 12:43:25 2011 +0200
workqueue: Fix PF_THREAD_BOUND abuse
PF_THREAD_BOUND no longer affects CPU down, and this code introduced
a lot of races with taking down a CPU.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | kernel/workqueue.c | 29 |
1 file changed, 9 insertions(+), 20 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bc867e8c5ca9..1e7f9c7805a5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1283,14 +1283,8 @@ __acquires(&gcwq->lock)
 			return false;
 		if (task_cpu(task) == gcwq->cpu &&
 		    cpumask_equal(&current->cpus_allowed,
-				  get_cpu_mask(gcwq->cpu))) {
-			/*
-			 * Since we're binding to a particular cpu and need to
-			 * stay there for correctness, mark us PF_THREAD_BOUND.
-			 */
-			task->flags |= PF_THREAD_BOUND;
+				  get_cpu_mask(gcwq->cpu)))
 			return true;
-		}
 		spin_unlock_irq(&gcwq->lock);

 		/*
@@ -1304,18 +1298,6 @@ __acquires(&gcwq->lock)
 	}
 }

-static void worker_unbind_and_unlock(struct worker *worker)
-{
-	struct global_cwq *gcwq = worker->gcwq;
-	struct task_struct *task = worker->task;
-
-	/*
-	 * Its no longer required we're PF_THREAD_BOUND, the work is done.
-	 */
-	task->flags &= ~PF_THREAD_BOUND;
-	spin_unlock_irq(&gcwq->lock);
-}
-
 static struct worker *alloc_worker(void)
 {
 	struct worker *worker;
@@ -1378,9 +1360,15 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
 	if (IS_ERR(worker->task))
 		goto fail;

+	/*
+	 * A rogue worker will become a regular one if CPU comes
+	 * online later on.  Make sure every worker has
+	 * PF_THREAD_BOUND set.
+	 */
 	if (bind && !on_unbound_cpu)
 		kthread_bind(worker->task, gcwq->cpu);
 	else {
+		worker->task->flags |= PF_THREAD_BOUND;
 		if (on_unbound_cpu)
 			worker->flags |= WORKER_UNBOUND;
 	}
@@ -2057,7 +2045,7 @@ repeat:
 		if (keep_working(gcwq))
 			wake_up_worker(gcwq);

-		worker_unbind_and_unlock(rescuer);
+		spin_unlock_irq(&gcwq->lock);
 	}

 	schedule();
@@ -3007,6 +2995,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		if (IS_ERR(rescuer->task))
 			goto err;

+		rescuer->task->flags |= PF_THREAD_BOUND;
 		wake_up_process(rescuer->task);
 	}