author		Juri Lelli <juri.lelli@arm.com>	2015-07-07 19:24:24 +0100
committer	Vincent Guittot <vincent.guittot@linaro.org>	2015-07-27 17:59:43 +0200
commit		ab6a4acc9b002ef292a977f0c52d3672d6451fe1 (patch)
tree		70f483f03c9575355cc417df402920276acd61bb
parent		1f450e1bfeff66d2084b0df3edb305b02a885120 (diff)
sched/fair: add triggers for OPP change requests
Each time a task is {en,de}queued we might need to adapt the current
frequency to the new usage. Add triggers on {en,de}queue_task_fair() for
this purpose.

Only trigger a freq request if we are effectively waking up or going to
sleep. Filter out load balancing related calls to reduce the number of
triggers.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>

Signed-off-by: Juri Lelli <juri.lelli@arm.com>
-rw-r--r--	kernel/sched/fair.c	42
1 file changed, 40 insertions(+), 2 deletions(-)
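
Before the diff itself, a note on the margin arithmetic the patch uses:
capacity_margin is 1280 on a capacity scale of 1024 (SCHED_CAPACITY_SHIFT
== 10 in mainline), so each request asks for usage * 1280 / 1024, that is
usage * 1.25. Put differently, after the request the current usage sits at
1024/1280 = 80% of the requested capacity, matching the ~20% tipping-point
margin. A minimal standalone sketch of that computation, using a
hypothetical usage value (not part of the patch):

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* capacity scale of 1024, as in mainline */

static unsigned int capacity_margin = 1280;	/* 1280/1024 == 1.25 */

int main(void)
{
	unsigned long usage = 600;	/* hypothetical get_cpu_usage() result */
	unsigned long req_cap;

	/*
	 * Same computation as the patch: 600 * 1280 >> 10 == 750, so the
	 * request keeps current usage at ~80% of the asked-for capacity.
	 */
	req_cap = usage * capacity_margin >> SCHED_CAPACITY_SHIFT;
	printf("usage=%lu -> req_cap=%lu\n", usage, req_cap);
	return 0;
}

Compiled standalone, this prints usage=600 -> req_cap=750.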
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5e1268438a43..a70d2d0ad65a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4261,7 +4261,10 @@ static inline void hrtick_update(struct rq *rq)
}
#endif
+static unsigned int capacity_margin = 1280; /* ~20% margin */
+
static bool cpu_overutilized(int cpu);
+static unsigned long get_cpu_usage(int cpu);
struct static_key __sched_energy_freq __read_mostly = STATIC_KEY_INIT_FALSE;
/*
@@ -4312,6 +4315,26 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (!task_new && !rq->rd->overutilized &&
cpu_overutilized(rq->cpu))
rq->rd->overutilized = true;
+		/*
+		 * We want to trigger a freq switch request only for tasks that
+		 * are waking up; this is because we get here also during
+		 * load balancing, but in these cases it seems wise to trigger
+		 * a single request after load balancing is done.
+		 *
+		 * XXX: how about fork()? Do we need a special flag/something
+		 * to tell if we are here after a fork() (wakeup_task_new)?
+		 *
+		 * Also, we add a margin (same ~20% used for the tipping point)
+		 * to our request to provide some headroom if p's utilization
+		 * further increases.
+		 */
+		if (sched_energy_freq() && !task_new) {
+			unsigned long req_cap = get_cpu_usage(cpu_of(rq));
+
+			req_cap = req_cap * capacity_margin
+					>> SCHED_CAPACITY_SHIFT;
+			cpufreq_sched_set_cap(cpu_of(rq), req_cap);
+		}
}
hrtick_update(rq);
}
@@ -4373,6 +4396,23 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (!se) {
sub_nr_running(rq, 1);
update_rq_runnable_avg(rq, 1);
+		/*
+		 * We want to trigger a freq switch request only for tasks that
+		 * are going to sleep; this is because we get here also during
+		 * load balancing, but in these cases it seems wise to trigger
+		 * a single request after load balancing is done.
+		 *
+		 * Also, we add a margin (same ~20% used for the tipping point)
+		 * to our request to provide some headroom if p's utilization
+		 * further increases.
+		 */
+		if (sched_energy_freq() && task_sleep) {
+			unsigned long req_cap = get_cpu_usage(cpu_of(rq));
+
+			req_cap = req_cap * capacity_margin
+					>> SCHED_CAPACITY_SHIFT;
+			cpufreq_sched_set_cap(cpu_of(rq), req_cap);
+		}
}
hrtick_update(rq);
}
@@ -4939,8 +4979,6 @@ static int find_new_capacity(struct energy_env *eenv,
return idx;
}
-static unsigned int capacity_margin = 1280; /* ~20% margin */
-
static bool cpu_overutilized(int cpu)
{
return (capacity_of(cpu) * 1024) <
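
The last hunk moves the capacity_margin definition up in the file so the
enqueue/dequeue paths can reference it; cpu_overutilized() keeps using the
same constant for the tipping point, though the hunk is cut off above. A
minimal sketch of what that test looks like, assuming the truncated
comparison completes against get_cpu_usage() as in the rest of the series:

#include <stdbool.h>

static unsigned int capacity_margin = 1280;	/* same ~20% margin */

/*
 * Assumed shape of the check: a CPU counts as overutilized once its usage
 * exceeds 1024/1280 == 80% of its capacity, both on the 1024 scale.
 */
static bool cpu_overutilized_sketch(unsigned long capacity, unsigned long usage)
{
	return (capacity * 1024) < (usage * capacity_margin);
}

With capacity 1024 and usage 820, 1024 * 1024 = 1048576 is less than
820 * 1280 = 1049600, so the CPU tips just past the 80% mark.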