path: root/kernel/sched_rt.c
author     Dhaval Giani <dhaval@linux.vnet.ibm.com>  2008-04-19 19:44:59 +0200
committer  Ingo Molnar <mingo@elte.hu>               2008-04-19 19:44:59 +0200
commit     354d60c2ff72d86627dfe2089d186824abf4bb8e (patch)
tree       10cea61ce7036448ed7246820c5575df2a61bb3b /kernel/sched_rt.c
parent     ea736ed5d353d7a3aa1cf8ce4cf8d947bc353fb2 (diff)
sched: mix tasks and groups
This patch allows tasks and groups to coexist in the same cfs_rq. With this
change, CFS group scheduling moves from a 1/(1+N) fairness model to a
1/(M+N) model, where M tasks and N groups exist at the cfs_rq level.

[a.p.zijlstra@chello.nl: rt bits and assorted fixes]

Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
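As a worked illustration of the fairness change (the numbers and the program
below are illustrative only, not part of the patch): with M = 2 tasks and
N = 1 group on one cfs_rq, the tasks previously shared a single 1/(1+N) slot
next to the groups, so each task got 1/(1+N)/M = 1/4 of the runqueue; after
this patch each task is a first-class entity and gets 1/(M+N) = 1/3.

/*
 * Illustration only (not from this patch): per-task CPU share at one
 * cfs_rq level under the old 1/(1+N) and new 1/(M+N) models.
 */
#include <stdio.h>

int main(void)
{
	int m = 2;	/* tasks on this cfs_rq */
	int n = 1;	/* groups on this cfs_rq */

	/* old: the M tasks together occupied one slot next to N groups */
	double old_per_task = 1.0 / (1 + n) / m;

	/* new: every task is its own entity, so each gets 1/(M+N) */
	double new_per_task = 1.0 / (m + n);

	printf("old per-task share: %.3f\n", old_per_task);	/* 0.250 */
	printf("new per-task share: %.3f\n", new_per_task);	/* 0.333 */
	return 0;
}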
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  15
1 file changed, 9 insertions, 6 deletions
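The second hunk below trims a stale remark from the comment above
dequeue_rt_stack(), which notes an O(1/2 h^2) cost. A minimal standalone
sketch of why the cost is quadratic (the struct entity and dequeue_stack
names are hypothetical, not the kernel's types): entities only link upward,
so removing the stack top-down pays one full upward walk per level,
h + (h-1) + ... + 1 = h(h+1)/2 steps for a hierarchy of depth h.

/*
 * Hypothetical sketch (not the kernel's code) of the walk-up-only,
 * remove-top-down pattern behind the O(1/2 h^2) comment.
 */
struct entity {
	struct entity	*parent;	/* upward link only */
	int		queued;
};

static void dequeue_stack(struct entity *bottom)
{
	struct entity *top;

	do {
		struct entity *e;

		/* walk up from the bottom to find the topmost queued entity */
		top = NULL;
		for (e = bottom; e; e = e->parent)
			if (e->queued)
				top = e;

		if (top)
			top->queued = 0;	/* "dequeue" the current top */
	} while (top);	/* repeat until the whole stack is empty */
}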
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 8ff824565e0..201a69382a4 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -374,11 +374,15 @@ static void update_curr_rt(struct rq *rq)
curr->se.exec_start = rq->clock;
cpuacct_charge(curr, delta_exec);
- spin_lock(&rt_rq->rt_runtime_lock);
- rt_rq->rt_time += delta_exec;
- if (sched_rt_runtime_exceeded(rt_rq))
- resched_task(curr);
- spin_unlock(&rt_rq->rt_runtime_lock);
+ for_each_sched_rt_entity(rt_se) {
+ rt_rq = rt_rq_of_se(rt_se);
+
+ spin_lock(&rt_rq->rt_runtime_lock);
+ rt_rq->rt_time += delta_exec;
+ if (sched_rt_runtime_exceeded(rt_rq))
+ resched_task(curr);
+ spin_unlock(&rt_rq->rt_runtime_lock);
+ }
}
static inline
@@ -477,7 +481,6 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
* entries, we must remove entries top - down.
*
* XXX: O(1/2 h^2) because we can only walk up, not down the chain.
- * doesn't matter much for now, as h=2 for GROUP_SCHED.
*/
static void dequeue_rt_stack(struct task_struct *p)
{