author     Patrick Bellasi <patrick.bellasi@arm.com>        2015-06-26 09:55:06 +0100
committer  Vincent Guittot <vincent.guittot@linaro.org>     2015-08-10 17:57:01 +0200
commit     bcd96bea8bc74c32f815d1570a7be27ef25b84ec (patch)
tree       4c7a00cda86dcc943da418b71f61058b82476988
parent     6dbe5b7682f1b62ba81ce24328b3d5b63b3ea5c0 (diff)
WIP: sched/fair: add boosted CPU usage
The CPU usage signal is used by EAS as an estimation of the overall
bandwidth currently allocated on a CPU. When SchedDVFS is in use, this
signal affects the selection of the operating point (OPP) required to
accommodate all the workload allocated to a CPU.

A convenient way to boost the performance of the tasks running on a
CPU, which is also minimally intrusive, is to boost the CPU usage
signal each time it is used to select an OPP.

This patch introduces a new function:
   get_boosted_cpu_usage(cpu)
which returns a boosted value for the usage of the specified CPU.
The margin added to the original usage is:
1. computed by the boosting strategy introduced by a previous patch
2. proportional to the system-wide boost value defined via the sysctl
   interface, also introduced by a previous patch

The boosted signal is used by SchedDVFS transparently each time it
needs an estimation of the capacity required by a CPU.

Change-Id: I4a3612c7ceddb8b68a1896d05ff3407cb5bf8141
Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
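For readers following the series outside the kernel tree, a minimal
userspace sketch of the margin computation described above. It assumes
the SchedTune strategy from the earlier patches computes the margin as
the boost percentage of the idle headroom (SCHED_CAPACITY_SCALE - usage);
the sysctl stand-in and the example values are illustrative, not part of
this patch:

	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10
	#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)	/* 1024 */

	/*
	 * Illustrative stand-ins for state introduced by the previous
	 * patches in this series: the system-wide boost value (sysctl)
	 * and the per-CPU usage signal.
	 */
	static unsigned int sysctl_sched_cfs_boost = 10;	/* percent, assumed */

	static unsigned long get_cpu_usage(int cpu)
	{
		(void)cpu;
		return 512;	/* pretend the CPU is half utilized */
	}

	/*
	 * Margin proportional to the idle headroom and the boost value:
	 * a boost of B% reclaims B% of (SCHED_CAPACITY_SCALE - usage).
	 * Assumed formula, mirroring the schedtune_margin() strategy
	 * referenced in the commit message.
	 */
	static unsigned long schedtune_margin(unsigned long usage, unsigned int boost)
	{
		return ((SCHED_CAPACITY_SCALE - usage) * boost) / 100;
	}

	static unsigned long get_boosted_cpu_usage(int cpu)
	{
		unsigned long usage = get_cpu_usage(cpu);

		return usage + schedtune_margin(usage, sysctl_sched_cfs_boost);
	}

	int main(void)
	{
		/* usage=512, boost=10% -> margin=51, boosted usage=563 */
		printf("boosted usage: %lu\n", get_boosted_cpu_usage(0));
		return 0;
	}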
-rw-r--r--	kernel/sched/fair.c	56
1 file changed, 50 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ebb7ae487775..21c7bd31524f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4265,6 +4265,7 @@ static unsigned int capacity_margin = 1280; /* ~20% margin */
static bool cpu_overutilized(int cpu);
static unsigned long get_cpu_usage(int cpu);
+static inline unsigned long get_boosted_cpu_usage(int cpu);
struct static_key __sched_energy_freq __read_mostly = STATIC_KEY_INIT_FALSE;
/*
@@ -4327,7 +4328,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
* further increases.
*/
if (sched_energy_freq() && (task_new || task_wakeup)) {
- unsigned long req_cap = get_cpu_usage(cpu_of(rq));
+ unsigned long req_cap =
+ get_boosted_cpu_usage(cpu_of(rq));
req_cap = req_cap * capacity_margin
>> SCHED_CAPACITY_SHIFT;
@@ -4405,7 +4407,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
* further increases.
*/
if (sched_energy_freq() && task_sleep) {
- unsigned long req_cap = get_cpu_usage(cpu_of(rq));
+ unsigned long req_cap =
+ get_boosted_cpu_usage(cpu_of(rq));
if (rq->cfs.nr_running) {
req_cap = req_cap * capacity_margin
@@ -5343,6 +5346,45 @@ static inline bool task_fits_cpu(struct task_struct *p, int cpu)
return __task_fits(p, cpu, get_cpu_usage(cpu));
}
+#ifdef CONFIG_SCHED_TUNE
+
+static inline unsigned int
+schedtune_cpu_margin(unsigned long usage)
+{
+ unsigned int boost;
+ unsigned long margin;
+
+ boost = get_sysctl_sched_cfs_boost();
+ if (boost == 0)
+ return 0;
+ margin = schedtune_margin(usage, boost);
+
+ return margin;
+}
+
+#else /* CONFIG_SCHED_TUNE */
+
+static inline unsigned int
+schedtune_cpu_margin(unsigned long usage)
+{
+ return 0;
+}
+
+#endif /* CONFIG_SCHED_TUNE */
+
+static inline unsigned long
+get_boosted_cpu_usage(int cpu)
+{
+ unsigned long usage;
+ unsigned long margin;
+
+ usage = get_cpu_usage(cpu);
+ margin = schedtune_cpu_margin(usage);
+
+ usage += margin;
+ return usage;
+}
+
/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.
@@ -7941,7 +7983,8 @@ more_balance:
* tasks.
*/
if (sched_energy_freq() && cur_ld_moved) {
- unsigned long req_cap = get_cpu_usage(env.src_cpu);
+ unsigned long req_cap =
+ get_boosted_cpu_usage(env.src_cpu);
req_cap = req_cap * capacity_margin
>> SCHED_CAPACITY_SHIFT;
@@ -7970,7 +8013,7 @@ more_balance:
*/
if (sched_energy_freq()) {
unsigned long req_cap =
- get_cpu_usage(env.dst_cpu);
+ get_boosted_cpu_usage(env.dst_cpu);
req_cap = req_cap * capacity_margin
>> SCHED_CAPACITY_SHIFT;
@@ -8343,7 +8386,7 @@ static int active_load_balance_cpu_stop(void *data)
*/
if (sched_energy_freq()) {
unsigned long req_cap =
- get_cpu_usage(env.src_cpu);
+ get_boosted_cpu_usage(env.src_cpu);
req_cap = req_cap * capacity_margin
>> SCHED_CAPACITY_SHIFT;
@@ -8368,7 +8411,8 @@ out_unlock:
* further increases.
*/
if (sched_energy_freq()) {
- unsigned long req_cap = get_cpu_usage(target_cpu);
+ unsigned long req_cap =
+ get_boosted_cpu_usage(target_cpu);
req_cap = req_cap * capacity_margin
>> SCHED_CAPACITY_SHIFT;
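To make the scaling applied at each call site above concrete, a small
standalone example of the requested-capacity arithmetic; the input value
is assumed for illustration (the boosted usage from the sketch above):

	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10

	static unsigned int capacity_margin = 1280;	/* ~20% margin */

	int main(void)
	{
		/* Assumed example: boosted usage of 563 on a 1024-capacity CPU. */
		unsigned long req_cap = 563;

		/* Same scaling as every call site in the patch: x1280/1024 = x1.25 */
		req_cap = req_cap * capacity_margin >> SCHED_CAPACITY_SHIFT;

		/*
		 * 563 * 1280 >> 10 = 703: requesting ~25% more capacity than the
		 * boosted usage keeps that usage at ~80% of the selected OPP,
		 * i.e. the ~20% headroom the capacity_margin comment refers to.
		 */
		printf("requested capacity: %lu\n", req_cap);
		return 0;
	}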