author    Peter Zijlstra <peterz@infradead.org>    2010-03-16 14:31:44 -0700
committer Thomas Gleixner <tglx@linutronix.de>    2010-03-16 14:36:28 -0700
commit    5d2740b70e7f6ad29104aec72956fb6e4d143809 (patch)
tree      3a3dfef708fbdf732eb896ca5d9f95b2633ff0b1
parent    cd4ac9b7a1c80cfcd9b8e76a1f6855a0329d982f (diff)
sched: Break out from load_balancing on rq_lock contention
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  kernel/sched.c       | 12
-rw-r--r--  kernel/sched_fair.c  | 14
2 files changed, 26 insertions, 0 deletions
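
The change itself is small: while the balancing loops hold both runqueue locks, they now poll raw_spin_is_contended() and bail out as soon as another CPU is spinning on either lock, instead of finishing the whole migration batch first. Below is a minimal user-space sketch of that pattern built on a toy ticket lock; every name in it (ticket_lock, batch_lock, process_batch, ...) is hypothetical and only illustrates the idea, the kernel code in the diff uses raw_spin_is_contended() on the rq locks.

/*
 * Toy user-space analogue of "bail out of a locked batch when someone
 * else is waiting for the lock".  Not kernel code; names are made up.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ticket_lock {
        atomic_uint next;       /* next ticket to hand out */
        atomic_uint owner;      /* ticket currently being served */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
        unsigned int me = atomic_fetch_add(&l->next, 1);

        while (atomic_load(&l->owner) != me)
                ;               /* spin until it is our turn */
}

static void ticket_lock_release(struct ticket_lock *l)
{
        atomic_fetch_add(&l->owner, 1);
}

/* More than one outstanding ticket means at least one waiter is spinning. */
static bool ticket_lock_is_contended(struct ticket_lock *l)
{
        return atomic_load(&l->next) - atomic_load(&l->owner) > 1;
}

static struct ticket_lock batch_lock = { 0, 0 };

/* Process up to 'limit' items, but stop early if a waiter shows up. */
static int process_batch(int limit)
{
        int done = 0;

        ticket_lock_acquire(&batch_lock);
        while (done < limit) {
                /* ... move one item ... */
                done++;

                if (ticket_lock_is_contended(&batch_lock))
                        break;  /* let the waiter in, retry later */
        }
        ticket_lock_release(&batch_lock);

        return done;
}

int main(void)
{
        printf("processed %d item(s)\n", process_batch(32));
        return 0;
}

A ticket lock is what makes the test cheap: comparing the two counters says whether anyone is queued behind the holder, which is roughly what raw_spin_is_contended() reports for the runqueue locks in the hunks below.
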
diff --git a/kernel/sched.c b/kernel/sched.c
index 1bd521b42533..8b29177fccf5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -851,7 +851,11 @@ late_initcall(sched_init_debug);
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
+#ifndef CONFIG_PREEMPT
const_debug unsigned int sysctl_sched_nr_migrate = 32;
+#else
+const_debug unsigned int sysctl_sched_nr_migrate = 8;
+#endif
/*
* ratelimit for updating the group shares.
@@ -3490,6 +3494,10 @@ next:
*/
if (idle == CPU_NEWLY_IDLE)
goto out;
+
+ if (raw_spin_is_contended(&this_rq->lock) ||
+ raw_spin_is_contended(&busiest->lock))
+ goto out;
#endif
/*
@@ -3546,6 +3554,10 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
*/
if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
break;
+
+ if (raw_spin_is_contended(&this_rq->lock) ||
+ raw_spin_is_contended(&busiest->lock))
+ break;
#endif
} while (class && max_load_move > total_load_moved);
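
The first hunk above also trims the batch itself: with CONFIG_PREEMPT enabled, sysctl_sched_nr_migrate drops from 32 to 8 tasks per balance run, since the whole batch is iterated with IRQs disabled. On kernels of this era built with CONFIG_SCHED_DEBUG the current value should be readable from /proc/sys/kernel/sched_nr_migrate; a minimal, hypothetical reader:

/*
 * Hypothetical user-space reader for the batch limit changed above.
 * Assumes a CONFIG_SCHED_DEBUG kernel that exports the tunable as
 * /proc/sys/kernel/sched_nr_migrate; with this patch the default is
 * 8 under CONFIG_PREEMPT and 32 otherwise.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/sched_nr_migrate", "r");
        unsigned int nr;

        if (!f) {
                perror("sched_nr_migrate");
                return 1;
        }
        if (fscanf(f, "%u", &nr) == 1)
                printf("sched_nr_migrate = %u\n", nr);
        fclose(f);
        return 0;
}
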
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index cff45e488188..5240469ad44a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1915,6 +1915,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
rem_load_move -= moved_load;
if (rem_load_move < 0)
break;
+
+#ifdef CONFIG_PREEMPT
+ /*
+ * NEWIDLE balancing is a source of latency, so preemptible
+ * kernels will stop after the first task is pulled to minimize
+ * the critical section.
+ */
+ if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
+ break;
+
+ if (raw_spin_is_contended(&this_rq->lock) ||
+ raw_spin_is_contended(&busiest->lock))
+ break;
+#endif
}
rcu_read_unlock();
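
The comment added to load_balance_fair() restates the NEWIDLE rule that move_tasks() already applied: a newly idle CPU only needs one runnable task, so a preemptible kernel stops pulling as soon as it has one rather than draining a full batch under the locks. A toy user-space model of that cut-off, with made-up rq/pull_tasks names, might look like this:

/*
 * Toy model of the PREEMPT-only NEWIDLE cut-off; not kernel code.
 */
#include <stdio.h>

#define CONFIG_PREEMPT 1        /* pretend we built a preemptible kernel */

struct rq { int nr_running; };

enum idle_type { CPU_IDLE, CPU_NEWLY_IDLE };

/* Pull up to 'limit' tasks from busiest onto this_rq. */
static int pull_tasks(struct rq *this_rq, struct rq *busiest,
                      enum idle_type idle, int limit)
{
        int pulled = 0;

        while (pulled < limit && busiest->nr_running > 1) {
                busiest->nr_running--;
                this_rq->nr_running++;
                pulled++;

#ifdef CONFIG_PREEMPT
                /*
                 * A newly idle CPU only needs one task to run; stop as
                 * soon as it has one to keep the locked section short.
                 */
                if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
                        break;
#endif
        }
        return pulled;
}

int main(void)
{
        struct rq self = { 0 }, busy = { 5 };

        printf("pulled %d task(s)\n",
               pull_tasks(&self, &busy, CPU_NEWLY_IDLE, 8));
        return 0;
}

Without CONFIG_PREEMPT the same loop would keep going until the batch limit or the contention check stops it, which is the latency this patch is trying to bound on preemptible kernels.
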