Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c | 81 ++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 70 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ed9550c87f66..ed0f841d4d5c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -523,9 +523,15 @@ static bool set_nr_if_polling(struct task_struct *p)
#endif
#endif
-void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+void __wake_q_add(struct wake_q_head *head, struct task_struct *task,
+ bool sleeper)
{
- struct wake_q_node *node = &task->wake_q;
+ struct wake_q_node *node;
+
+ if (sleeper)
+ node = &task->wake_q_sleeper;
+ else
+ node = &task->wake_q;
/*
* Atomically grab the task, if ->wake_q is !nil already it means
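[Annotation: this hunk only renames the implementation and adds the sleeper
flag; callers presumably keep their old entry points via header-side
wrappers. A minimal sketch of what the matching header change would look
like -- the wrapper names and their location are assumptions, not shown in
this diff:

	static inline void wake_q_add(struct wake_q_head *head,
				      struct task_struct *task)
	{
		__wake_q_add(head, task, false);
	}

	static inline void wake_q_add_sleeper(struct wake_q_head *head,
					      struct task_struct *task)
	{
		__wake_q_add(head, task, true);
	}
]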
@@ -554,11 +560,17 @@ void __wake_up_q(struct wake_q_head *head, bool sleeper)
while (node != WAKE_Q_TAIL) {
struct task_struct *task;
- task = container_of(node, struct task_struct, wake_q);
+ if (sleeper)
+ task = container_of(node, struct task_struct, wake_q_sleeper);
+ else
+ task = container_of(node, struct task_struct, wake_q);
BUG_ON(!task);
/* task can safely be re-inserted now */
node = node->next;
- task->wake_q.next = NULL;
+ if (sleeper)
+ task->wake_q_sleeper.next = NULL;
+ else
+ task->wake_q.next = NULL;
/*
* wake_up_process() implies a wmb() to pair with the queueing
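[Annotation: the container_of() selection above is the crux of the hunk --
the same task_struct now embeds two independent queue nodes, and the
sleeper flag decides which one to walk back from. A self-contained
userspace analogue of that technique; all names here are illustrative,
not kernel code:

	#include <stddef.h>
	#include <stdio.h>

	struct node { struct node *next; };

	struct task {
		int pid;
		struct node wake_q;          /* regular wakeups */
		struct node wake_q_sleeper;  /* "sleeper" wakeups */
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Recover the enclosing task from whichever node was queued. */
	static struct task *node_to_task(struct node *n, int sleeper)
	{
		return sleeper ? container_of(n, struct task, wake_q_sleeper)
			       : container_of(n, struct task, wake_q);
	}

	int main(void)
	{
		struct task t = { .pid = 42 };

		printf("%d\n", node_to_task(&t.wake_q, 0)->pid);         /* 42 */
		printf("%d\n", node_to_task(&t.wake_q_sleeper, 1)->pid); /* 42 */
		return 0;
	}
]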
@@ -1212,18 +1224,14 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+static void __do_set_cpus_allowed_tail(struct task_struct *p,
+ const struct cpumask *new_mask)
{
struct rq *rq = task_rq(p);
bool queued, running;
lockdep_assert_held(&p->pi_lock);
- if (__migrate_disabled(p)) {
- cpumask_copy(&p->cpus_allowed, new_mask);
- return;
- }
-
queued = task_on_rq_queued(p);
running = task_current(rq, p);
@@ -1246,6 +1254,20 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
enqueue_task(rq, p, ENQUEUE_RESTORE);
}
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+ if (__migrate_disabled(p)) {
+ lockdep_assert_held(&p->pi_lock);
+
+ cpumask_copy(&p->cpus_allowed, new_mask);
+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
+ p->migrate_disable_update = 1;
+#endif
+ return;
+ }
+ __do_set_cpus_allowed_tail(p, new_mask);
+}
+
static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
static DEFINE_MUTEX(sched_down_mutex);
static cpumask_t sched_down_cpumask;
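[Annotation: the refactoring above is a defer-and-replay pattern: while
migration is disabled, do_set_cpus_allowed() only records the new mask and
raises migrate_disable_update; the dequeue/enqueue tail runs later, in
migrate_enable(). A stripped-down, runnable userspace analogue of the
pattern -- purely illustrative, none of these names are kernel APIs:

	#include <stdbool.h>
	#include <stdio.h>

	struct obj {
		int value;          /* stands in for p->cpus_allowed */
		bool update_locked; /* stands in for __migrate_disabled(p) */
		bool pending;       /* stands in for p->migrate_disable_update */
	};

	static void apply_tail(struct obj *o)
	{
		/* stands in for __do_set_cpus_allowed_tail() */
		printf("applied value %d\n", o->value);
	}

	static void set_value(struct obj *o, int v)
	{
		o->value = v;
		if (o->update_locked) {
			o->pending = true;  /* defer the tail work */
			return;
		}
		apply_tail(o);
	}

	static void update_enable(struct obj *o)
	{
		o->update_locked = false;
		if (o->pending) {
			o->pending = false;
			apply_tail(o);      /* replay the deferred update */
		}
	}

	int main(void)
	{
		struct obj o = { .update_locked = true };
		set_value(&o, 7);   /* deferred, nothing applied yet */
		update_enable(&o);  /* prints: applied value 7 */
		return 0;
	}
]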
@@ -2212,7 +2234,7 @@ EXPORT_SYMBOL(wake_up_process);
*/
int wake_up_lock_sleeper(struct task_struct *p)
{
- return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
+ return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER);
}
int wake_up_state(struct task_struct *p, unsigned int state)
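[Annotation: narrowing the mask from TASK_ALL to TASK_UNINTERRUPTIBLE
reflects that RT lock sleepers block uninterruptibly, so there is no reason
to match interruptible sleepers here. try_to_wake_up() only wakes a task
whose current state intersects the caller's mask; a toy demonstration of
that check, with simplified constants for illustration only:

	#include <stdio.h>

	#define TASK_INTERRUPTIBLE	0x01
	#define TASK_UNINTERRUPTIBLE	0x02
	#define TASK_ALL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

	/* Mirrors the state test at the top of try_to_wake_up(). */
	static int would_wake(unsigned int task_state, unsigned int mask)
	{
		return !!(task_state & mask);
	}

	int main(void)
	{
		/* A lock sleeper blocks uninterruptibly, so the
		 * narrower mask still wakes it ... */
		printf("%d\n", would_wake(TASK_UNINTERRUPTIBLE, TASK_UNINTERRUPTIBLE)); /* 1 */
		/* ... but no longer matches tasks sleeping
		 * interruptibly for unrelated reasons. */
		printf("%d\n", would_wake(TASK_INTERRUPTIBLE, TASK_UNINTERRUPTIBLE));   /* 0 */
		printf("%d\n", would_wake(TASK_INTERRUPTIBLE, TASK_ALL));               /* 1 */
		return 0;
	}
]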
@@ -3231,6 +3253,43 @@ void migrate_enable(void)
*/
p->migrate_disable = 0;
+ if (p->migrate_disable_update) {
+ unsigned long flags;
+ struct rq *rq;
+
+ rq = task_rq_lock(p, &flags);
+ update_rq_clock(rq);
+
+ __do_set_cpus_allowed_tail(p, &p->cpus_allowed);
+ task_rq_unlock(rq, p, &flags);
+
+ p->migrate_disable_update = 0;
+
+ WARN_ON(smp_processor_id() != task_cpu(p));
+ if (!cpumask_test_cpu(task_cpu(p), &p->cpus_allowed)) {
+ const struct cpumask *cpu_valid_mask = cpu_active_mask;
+ struct migration_arg arg;
+ unsigned int dest_cpu;
+
+ if (p->flags & PF_KTHREAD) {
+ /*
+ * Kernel threads are allowed on online && !active CPUs
+ */
+ cpu_valid_mask = cpu_online_mask;
+ }
+ dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_allowed);
+ arg.task = p;
+ arg.dest_cpu = dest_cpu;
+
+ unpin_current_cpu();
+ preempt_lazy_enable();
+ preempt_enable();
+ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+ tlb_migrate_finish(p->mm);
+ return;
+ }
+ }
+
unpin_current_cpu();
preempt_enable();
preempt_lazy_enable();
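[Annotation: the migration_arg handed to stop_one_cpu() above is the same
small struct used by the regular affinity-change path; a sketch of its
shape as defined earlier in kernel/sched/core.c -- reproduced from memory
of that era's source, so treat it as an assumption:

	struct migration_arg {
		struct task_struct *task;  /* task to migrate */
		int dest_cpu;              /* chosen target CPU */
	};

Note also the ordering in the new branch: the CPU is unpinned and
preemption re-enabled before stop_one_cpu() runs, since the stopper thread
must be able to preempt this task in order to migrate it off the CPU.]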