author     Patrick Bellasi <patrick.bellasi@arm.com>  2016-01-14 18:31:53 +0000
committer  Punit Agrawal <punit.agrawal@arm.com>      2016-03-21 14:57:36 +0000
commit     aa0c021706f3049cca12f22bdaca4ce56157e056 (patch)
tree       16ec4c075663374bb04856a5dfb29dbd64626147
parent     fa75e9d655a9ac65390512d234e6885ca817b865 (diff)
sched/fair: add boosted task utilization
The task utilization signal, which is derived from PELT signals and properly
scaled to be architecture- and frequency-invariant, is used by EAS as an
estimation of the task's requirements in terms of CPU bandwidth.

When the energy-aware scheduler is in use, this signal affects the CPU
selection. Thus, a convenient and minimally intrusive way to bias that
decision is to boost the utilization signal of a task whenever extra
performance is required to support it.

This patch introduces the new function:

   boosted_task_util(task)

which returns a boosted value for the utilization of the specified task.
The margin added to the original utilization is:

1. computed based on the "boosting strategy" in use
2. proportional to the boost value, which is defined either by the sysctl
   interface, when global boosting is in use, or by the "taskgroup" value,
   when per-task boosting is enabled.

The boosted signal is used by EAS:

a. transparently, via its integration into the task_fits() function
b. explicitly, in the energy-aware wakeup path

Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
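As a rough illustration of the boosting math, here is a minimal userspace
sketch. It assumes the signal-proportional-compensation strategy used by
schedtune elsewhere in this series, where the margin is the boost percentage
of the headroom above the current utilization; the SCALE constant, the
formula, and the example values are illustrative assumptions, not taken
verbatim from this patch:

/* sketch.c - illustration only, not kernel code */
#include <stdio.h>

#define SCALE 1024UL	/* stands in for SCHED_CAPACITY_SCALE (assumption) */

/* Assumed margin formula: boost% of the headroom above util */
static unsigned long sketch_margin(unsigned long util, unsigned int boost)
{
	if (boost == 0)
		return 0;
	return (SCALE - util) * boost / 100;
}

int main(void)
{
	unsigned long util = 300;	/* example PELT-derived task utilization */
	unsigned int boost = 50;	/* example boost value, in percent */
	unsigned long margin = sketch_margin(util, boost);

	/* boosted_task_util(task) returns util + margin */
	printf("util=%lu margin=%lu boosted=%lu\n", util, margin, util + margin);
	return 0;	/* prints: util=300 margin=362 boosted=662 */
}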
-rw-r--r--  kernel/sched/fair.c  42
-rw-r--r--  kernel/sched/tune.c  14
-rw-r--r--  kernel/sched/tune.h   1
3 files changed, 55 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2dbe1ff0a90b..f582d58daea5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5048,11 +5048,13 @@ static inline unsigned long task_util(struct task_struct *p)
unsigned int capacity_margin = 1280; /* ~20% margin */
+static inline unsigned long boosted_task_util(struct task_struct *task);
+
static inline bool __task_fits(struct task_struct *p, int cpu, int util)
{
unsigned long capacity = capacity_of(cpu);
- util += task_util(p);
+ util += boosted_task_util(p);
return (capacity * 1024) > (util * capacity_margin);
}
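With capacity_margin = 1280, the test above is equivalent to requiring the
(now boosted) utilization to stay below roughly 80% of the CPU's capacity.
Below is a standalone sketch of the check, reusing the boosted value 662
from the example above; the CPU capacities are assumed big.LITTLE figures,
not taken from this patch:

/* fit_sketch.c - illustration only, not kernel code */
#include <stdbool.h>
#include <stdio.h>

static const unsigned int capacity_margin = 1280;	/* ~20% margin */

/* Same test as __task_fits() above, utilization already boosted */
static bool fits(unsigned long capacity, unsigned long boosted_util)
{
	return (capacity * 1024) > (boosted_util * capacity_margin);
}

int main(void)
{
	unsigned long boosted_util = 662;	/* from the previous sketch */

	printf("LITTLE (430):  %s\n", fits(430, boosted_util) ? "fits" : "does not fit");
	printf("big    (1024): %s\n", fits(1024, boosted_util) ? "fits" : "does not fit");
	return 0;	/* LITTLE does not fit, big fits */
}

Note that the raw utilization of 300 would still have fit the LITTLE CPU
(430 * 1024 > 300 * 1280); the 50% boost steers the task towards the big
CPU without modifying the underlying PELT signal.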
@@ -5133,6 +5135,27 @@ schedtune_cpu_margin(unsigned long util, int cpu)
return schedtune_margin(util, boost);
}
+static inline unsigned long
+schedtune_task_margin(struct task_struct *task)
+{
+ unsigned int boost;
+ unsigned long util;
+ unsigned long margin;
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+ boost = schedtune_task_boost(task);
+#else
+ boost = get_sysctl_sched_cfs_boost();
+#endif
+ if (boost == 0)
+ return 0;
+
+ util = task_util(task);
+ margin = schedtune_margin(util, boost);
+
+ return margin;
+}
+
#else /* CONFIG_SCHED_TUNE */
static inline unsigned int
@@ -5141,6 +5164,12 @@ schedtune_cpu_margin(unsigned long util, int cpu)
return 0;
}
+static inline unsigned int
+schedtune_task_margin(struct task_struct *task)
+{
+ return 0;
+}
+
#endif /* CONFIG_SCHED_TUNE */
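Since the margin is proportional to the boost value, sweeping boost from 0
to 100 with the assumed formula from the first sketch shows the boosted
utilization moving linearly from the raw utilization up to full scale
(again an illustration, not kernel code):

/* sweep_sketch.c - illustration only */
#include <stdio.h>

#define SCALE 1024UL	/* assumption, as in the first sketch */

static unsigned long sketch_margin(unsigned long util, unsigned int boost)
{
	return boost ? (SCALE - util) * boost / 100 : 0;
}

int main(void)
{
	static const unsigned int boosts[] = { 0, 10, 50, 100 };
	unsigned long util = 300;
	unsigned int i;

	for (i = 0; i < sizeof(boosts) / sizeof(boosts[0]); i++)
		printf("boost=%3u%% boosted_util=%lu\n",
		       boosts[i], util + sketch_margin(util, boosts[i]));
	/*
	 * boost=  0% boosted_util=300
	 * boost= 10% boosted_util=372
	 * boost= 50% boosted_util=662
	 * boost=100% boosted_util=1024 (saturates at full scale)
	 */
	return 0;
}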
static inline unsigned long
@@ -5152,6 +5181,15 @@ boosted_cpu_util(int cpu)
return util + margin;
}
+static inline unsigned long
+boosted_task_util(struct task_struct *task)
+{
+ unsigned long util = task_util(task);
+ unsigned long margin = schedtune_task_margin(task);
+
+ return util + margin;
+}
+
/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.
@@ -5386,7 +5424,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target)
* so prev_cpu will receive a negative bias due to the double
* accounting. However, the blocked utilization may be zero.
*/
- int new_util = cpu_util(i) + task_util(p);
+ int new_util = cpu_util(i) + boosted_task_util(p);
if (new_util > capacity_orig_of(i))
continue;
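Here the boosted value also acts as a hard filter: a candidate CPU is
skipped when the boosted task utilization, stacked on top of the CPU's
current utilization, would exceed the CPU's original capacity. A standalone
sketch with assumed per-CPU figures:

/* wake_sketch.c - illustration only, not kernel code */
#include <stdio.h>

int main(void)
{
	unsigned long boosted_util = 662;		/* from the earlier sketches */
	unsigned long cpu_util[] = { 500, 200 };	/* example current utilization */
	unsigned long capacity_orig[] = { 1024, 1024 };	/* example original capacity */
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long new_util = cpu_util[i] + boosted_util;

		/* same filter as in energy_aware_wake_cpu() above */
		if (new_util > capacity_orig[i]) {
			printf("cpu%d: skipped, new_util=%lu\n", i, new_util);
			continue;
		}
		printf("cpu%d: candidate, new_util=%lu\n", i, new_util);
	}
	return 0;	/* cpu0 skipped (1162 > 1024), cpu1 kept (862) */
}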
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index ccc3540dcaf2..3253a8732ba5 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -240,6 +240,20 @@ int schedtune_cpu_boost(int cpu)
return bg->boost_max;
}
+int schedtune_task_boost(struct task_struct *p)
+{
+ struct schedtune *st;
+ int task_boost;
+
+ /* Get task boost value */
+ rcu_read_lock();
+ st = task_schedtune(p);
+ task_boost = st->boost;
+ rcu_read_unlock();
+
+ return task_boost;
+}
+
static u64
boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
diff --git a/kernel/sched/tune.h b/kernel/sched/tune.h
index 561b5171a19b..d756ce7b06e0 100644
--- a/kernel/sched/tune.h
+++ b/kernel/sched/tune.h
@@ -4,6 +4,7 @@
#ifdef CONFIG_CGROUP_SCHEDTUNE
int schedtune_cpu_boost(int cpu);
+int schedtune_task_boost(struct task_struct *tsk);
void schedtune_enqueue_task(struct task_struct *p, int cpu);
void schedtune_dequeue_task(struct task_struct *p, int cpu);