author	Juri Lelli <juri.lelli@arm.com>	2015-06-26 12:14:23 +0100
committer	Jon Medhurst <tixy@linaro.org>	2016-04-13 11:44:18 +0100
commit	8072aba74af2dd872babad5dd0ae74d00e48694e (patch)
tree	fdb3c3bdaf9a5ea50e042ef9cfbd2872f07696eb
parent	c10a139243a63741c14919de2d74c451caadd52d (diff)
sched/{core,fair}: trigger OPP change request on fork()
Patch "sched/fair: add triggers for OPP change requests" introduced OPP change triggers for enqueue_task_fair(), but the trigger was operating only for wakeups. Fact is that it makes sense to consider wakeup_new also (i.e., fork()), as we don't know anything about a newly created task and thus we most certainly want to jump to max OPP to not harm performance too much. However, it is not currently possible (or at least it wasn't evident to me how to do so :/) to tell new wakeups from other (non wakeup) operations. This patch introduces an additional flag in sched.h that is only set at fork() time and it is then consumed in enqueue_task_fair() for our purpose. Change-Id: I0e485e7a2e6386f276eefa7920b2fc34f7877c22 cc: Ingo Molnar <mingo@redhat.com> cc: Peter Zijlstra <peterz@infradead.org> Signed-off-by: Juri Lelli <juri.lelli@arm.com> Signed-off-by: Steve Muckle <smuckle@linaro.org> (am from https://patchwork.kernel.org/patch/7805001/) Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
-rw-r--r--	kernel/sched/core.c	2
-rw-r--r--	kernel/sched/fair.c	9
-rw-r--r--	kernel/sched/sched.h	4
3 files changed, 7 insertions, 8 deletions
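
Taken together, the three hunks below route a new fork-specific enqueue flag from the core wakeup path into the fair class. A minimal sketch of the intended flow, heavily condensed from the patched code (update_capacity_of() is the OPP/capacity request hook introduced by earlier patches in this series):

/* Sketch only: condensed from the diff below, not the full kernel functions. */

/* kernel/sched/core.c -- a freshly forked task is enqueued with its own flag. */
void wake_up_new_task(struct task_struct *p)
{
	struct rq *rq = __task_rq_lock(p);

	/* Mark this enqueue as a fork(), not an ordinary wakeup. */
	activate_task(rq, p, ENQUEUE_WAKEUP_NEW);
	/* ... */
}

/* kernel/sched/fair.c -- both wakeups and new tasks now trigger an OPP request. */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	int task_new    = flags & ENQUEUE_WAKEUP_NEW;	/* set only on fork() */
	int task_wakeup = flags & ENQUEUE_WAKEUP;	/* set on a real wakeup */

	/* ... enqueue the scheduling entities ... */

	if (task_new || task_wakeup)
		update_capacity_of(cpu_of(rq));		/* request an OPP change */
}

Load-balance enqueues set neither flag, so they still skip the per-task OPP request, as the comment retained in the second fair.c hunk explains.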
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e2ae2a4e8d64..70aa6d3ec008 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2332,7 +2332,7 @@ void wake_up_new_task(struct task_struct *p)
 #endif
 
 	rq = __task_rq_lock(p);
-	activate_task(rq, p, 0);
+	activate_task(rq, p, ENQUEUE_WAKEUP_NEW);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p, true);
 	check_preempt_curr(rq, p, WF_FORK);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f7ab6bfd81a8..139546c22b6a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4001,7 +4001,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
-	int task_new = !(flags & ENQUEUE_WAKEUP);
+	int task_new = flags & ENQUEUE_WAKEUP_NEW;
+	int task_wakeup = flags & ENQUEUE_WAKEUP;
 
 	for_each_sched_entity(se) {
 		if (se->on_rq)
@@ -4045,12 +4046,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		 * because we get here also during load balancing, but
 		 * in these cases it seems wise to trigger as single
 		 * request after load balancing is done.
-		 *
-		 * XXX: how about fork()? Do we need a special
-		 * flag/something to tell if we are here after a
-		 * fork() (wakeup_task_new)?
 		 */
-		if (!task_new)
+		if (task_new || task_wakeup)
 			update_capacity_of(cpu_of(rq));
 	}
 	hrtick_update(rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 467cb04675ef..ce91ef821f71 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1128,7 +1128,9 @@ static const u32 prio_to_wmult[40] = {
 #else
 #define ENQUEUE_WAKING		0
 #endif
-#define ENQUEUE_REPLENISH	8
+#define ENQUEUE_REPLENISH	0x08
+#define ENQUEUE_RESTORE		0x10
+#define ENQUEUE_WAKEUP_NEW	0x20
 #define DEQUEUE_SLEEP		1
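
These ENQUEUE_* values are single bits that callers OR into the flags argument, which is why the hunk renumbers them in hex: each flag must own a distinct bit so enqueue_task_fair() can test them independently. A small standalone illustration follows; the 0x01 value for ENQUEUE_WAKEUP is an assumption taken from the definitions above this hunk, which are not shown here.

#include <assert.h>
#include <stdio.h>

/* Flag values as on this branch; ENQUEUE_WAKEUP's 0x01 is assumed here. */
#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_REPLENISH	0x08
#define ENQUEUE_RESTORE		0x10
#define ENQUEUE_WAKEUP_NEW	0x20

int main(void)
{
	int fork_flags   = ENQUEUE_WAKEUP_NEW;	/* what wake_up_new_task() now passes */
	int wakeup_flags = ENQUEUE_WAKEUP;	/* an ordinary wakeup */
	int lb_flags     = 0;			/* e.g. a load-balance move */

	/* Distinct bits keep the cases distinguishable within one int. */
	assert((ENQUEUE_WAKEUP_NEW &
		(ENQUEUE_WAKEUP | ENQUEUE_REPLENISH | ENQUEUE_RESTORE)) == 0);

	printf("fork:   trigger OPP request? %d\n",
	       !!(fork_flags & (ENQUEUE_WAKEUP_NEW | ENQUEUE_WAKEUP)));
	printf("wakeup: trigger OPP request? %d\n",
	       !!(wakeup_flags & (ENQUEUE_WAKEUP_NEW | ENQUEUE_WAKEUP)));
	printf("lb:     trigger OPP request? %d\n",
	       !!(lb_flags & (ENQUEUE_WAKEUP_NEW | ENQUEUE_WAKEUP)));
	return 0;
}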