aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPatrick Bellasi <patrick.bellasi@arm.com>2015-06-26 09:55:06 +0100
committerJon Medhurst <tixy@linaro.org>2016-04-13 11:44:19 +0100
commit2817b1dd303def0e5014fdaf071fc4d0e5ba237f (patch)
treef0643e71e5ad35f74a60709e69f418fec78d10ff
parent1558aceffaddfe313fc7016ea901741d053b7dfb (diff)
sched/fair: add boosted CPU usage
The CPU usage signal is used by the scheduler as an estimation of the overall bandwidth currently allocated on a CPU. When SchedDVFS is in use, this signal affects the selection of the operating point (OPP) required to accommodate all the workload allocated to a CPU. A convenient way to boost the performance of tasks running on a CPU, which is also minimally intrusive, is to boost the CPU usage signal each time it is used to select an OPP. This patch introduces a new function, boosted_cpu_util(cpu), to return a boosted value for the usage of a specified CPU. The margin added to the original usage is: 1. computed based on the "boosting strategy" in use 2. proportional to the system-wide boost value defined via the provided user-space interface The boosted signal is used by SchedDVFS (transparently) each time it needs an estimate of the capacity required for a CPU. Change-Id: I92db4404eef236b736be8a9345e6e0018fbf489a cc: Ingo Molnar <mingo@redhat.com> cc: Peter Zijlstra <peterz@infradead.org> Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
-rw-r--r--kernel/sched/fair.c33
1 files changed, 32 insertions, 1 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df06f3cb73f1..c149a53852dc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3974,6 +3974,8 @@ static inline void hrtick_update(struct rq *rq)
}
#endif
+static inline unsigned long boosted_cpu_util(int cpu);
+
static void update_capacity_of(int cpu)
{
unsigned long req_cap;
@@ -3982,7 +3984,8 @@ static void update_capacity_of(int cpu)
return;
/* Convert scale-invariant capacity to cpu. */
- req_cap = cpu_util(cpu) * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
+ req_cap = boosted_cpu_util(cpu);
+ req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
set_cfs_cpu_capacity(cpu, true, req_cap);
}
@@ -4935,8 +4938,36 @@ schedtune_margin(unsigned long signal, unsigned long boost)
return margin;
}
+/*
+ * schedtune_cpu_margin: compute the margin to add to @util according to
+ * the current system-wide boost value (read via
+ * get_sysctl_sched_cfs_boost()).
+ *
+ * Returns 0 when boosting is disabled (boost == 0); otherwise the margin
+ * computed by schedtune_margin() for this signal/boost pair.
+ */
+static inline unsigned int
+schedtune_cpu_margin(unsigned long util)
+{
+ unsigned int boost = get_sysctl_sched_cfs_boost();
+
+ if (boost == 0)
+ return 0;
+
+ return schedtune_margin(util, boost);
+}
+
+#else /* CONFIG_SCHED_TUNE */
+
+/*
+ * Stub used when SchedTune is not configured: boosting is unavailable,
+ * so the margin is always 0 and boosted_cpu_util() degenerates to the
+ * plain CPU utilization.
+ */
+static inline unsigned int
+schedtune_cpu_margin(unsigned long util)
+{
+ return 0;
+}
+
#endif /* CONFIG_SCHED_TUNE */
+/*
+ * boosted_cpu_util: return the utilization of @cpu inflated by the
+ * SchedTune margin, so that OPP selection (via update_capacity_of())
+ * accounts for the requested performance boost.
+ */
+static inline unsigned long
+boosted_cpu_util(int cpu)
+{
+ unsigned long util = cpu_util(cpu);
+ unsigned long margin = schedtune_cpu_margin(util);
+
+ return util + margin;
+}
+
/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.