author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2007-10-15 17:00:07 +0200
committer  Ingo Molnar <mingo@elte.hu>                2007-10-15 17:00:07 +0200
commit     02e0431a3db554019b816936b597d618256b705d (patch)
tree       2d3d9c5d95e9a2ccc248d78a3ffe950be0e77b3b /kernel
parent     35a6ff5417bf94c9e19b6b55a9eb6eea14cc7be7 (diff)
sched: better min_vruntime tracking
Better min_vruntime tracking: update it every time 'curr' is updated - not just when a task is enqueued into the tree.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched_fair.c   40
1 file changed, 33 insertions(+), 7 deletions(-)
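In short, the rule the patch implements is: every time 'curr' is updated, take the smaller of curr->vruntime and the leftmost vruntime in the tree, and let cfs_rq->min_vruntime only ever advance to that value. The following is a minimal user-space sketch of that rule, not kernel code: max_vruntime() is lifted from the patch below, while min_of(), tree_nonempty and all numeric values are hypothetical stand-ins for the real cfs_rq state.

#include <stdio.h>

typedef unsigned long long u64;

/* lifted from the patch: a maximum that tolerates u64 wraparound */
static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
        if ((vruntime > min_vruntime) ||
            (min_vruntime > (1ULL << 61) && vruntime < (1ULL << 50)))
                min_vruntime = vruntime;

        return min_vruntime;
}

/* "min_vruntime() := !max_vruntime()": whichever value max_vruntime()
 * did NOT pick is the wraparound-aware minimum */
static u64 min_of(u64 a, u64 b)
{
        return max_vruntime(a, b) == b ? a : b;
}

int main(void)
{
        u64 cfs_min_vruntime = 1000;    /* stands in for cfs_rq->min_vruntime */
        u64 curr_vruntime    = 1200;    /* stands in for curr->vruntime */
        int tree_nonempty    = 1;       /* stands in for first_fair(cfs_rq) */
        u64 next_vruntime    = 1100;    /* stands in for the leftmost vruntime */

        u64 candidate = tree_nonempty ? min_of(curr_vruntime, next_vruntime)
                                      : curr_vruntime;

        /* monotonic update: the tracked floor never moves backwards */
        cfs_min_vruntime = max_vruntime(cfs_min_vruntime, candidate);

        printf("min_vruntime = %llu\n", cfs_min_vruntime);     /* prints 1100 */
        return 0;
}

Compiled on its own, this prints 1100: the leftmost entity's vruntime, the smallest value still ahead of the old floor of 1000.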
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e3081fb65d6..ec445cadbb0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -116,22 +116,28 @@ static inline struct task_struct *task_of(struct sched_entity *se)
  * Scheduling class tree data structure manipulation methods:
  */
 
+static inline u64
+max_vruntime(u64 min_vruntime, u64 vruntime)
+{
+        if ((vruntime > min_vruntime) ||
+            (min_vruntime > (1ULL << 61) && vruntime < (1ULL << 50)))
+                min_vruntime = vruntime;
+
+        return min_vruntime;
+}
+
 static inline void
 set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost)
 {
         struct sched_entity *se;
 
         cfs_rq->rb_leftmost = leftmost;
-        if (leftmost) {
+        if (leftmost)
                 se = rb_entry(leftmost, struct sched_entity, run_node);
-                if ((se->vruntime > cfs_rq->min_vruntime) ||
-                    (cfs_rq->min_vruntime > (1ULL << 61) &&
-                     se->vruntime < (1ULL << 50)))
-                        cfs_rq->min_vruntime = se->vruntime;
-        }
 }
 
-s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static inline s64
+entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
         return se->fair_key - cfs_rq->min_vruntime;
 }
@@ -254,6 +260,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
               unsigned long delta_exec)
 {
         unsigned long delta_exec_weighted;
+        u64 next_vruntime, min_vruntime;
 
         schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
 
@@ -265,6 +272,25 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
                                 &curr->load);
         }
         curr->vruntime += delta_exec_weighted;
+
+        /*
+         * maintain cfs_rq->min_vruntime to be a monotonic increasing
+         * value tracking the leftmost vruntime in the tree.
+         */
+        if (first_fair(cfs_rq)) {
+                next_vruntime = __pick_next_entity(cfs_rq)->vruntime;
+
+                /* min_vruntime() := !max_vruntime() */
+                min_vruntime = max_vruntime(curr->vruntime, next_vruntime);
+                if (min_vruntime == next_vruntime)
+                        min_vruntime = curr->vruntime;
+                else
+                        min_vruntime = next_vruntime;
+        } else
+                min_vruntime = curr->vruntime;
+
+        cfs_rq->min_vruntime =
+                max_vruntime(cfs_rq->min_vruntime, min_vruntime);
 }
 
 static void update_curr(struct cfs_rq *cfs_rq)
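A note on the two magic constants in max_vruntime(): vruntime is an unsigned 64-bit quantity, so once the tracked minimum has grown very large (above 2^61) a freshly wrapped value (below 2^50) is logically ahead of it and has to win the comparison. A small stand-alone demo of that guard, not kernel code, using made-up inputs:

#include <stdio.h>

typedef unsigned long long u64;

/* copied from the patch: a maximum that tolerates u64 wraparound */
static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
        if ((vruntime > min_vruntime) ||
            (min_vruntime > (1ULL << 61) && vruntime < (1ULL << 50)))
                min_vruntime = vruntime;

        return min_vruntime;
}

int main(void)
{
        /* ordinary case: behaves like a plain maximum */
        printf("%llu\n", max_vruntime(100, 250));       /* 250 */
        printf("%llu\n", max_vruntime(250, 100));       /* 250 */

        /* hypothetical wraparound: the huge pre-wrap value loses to the
         * small post-wrap one, so the tracked floor keeps moving forward */
        printf("%llu\n", max_vruntime(~0ULL - 10, 5));  /* 5 */
        return 0;
}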