author    Viresh Kumar <viresh.kumar@linaro.org>    2013-04-03 13:39:42 +0530
committer Viresh Kumar <viresh.kumar@linaro.org>    2013-04-03 13:39:42 +0530
commit    6a610f223c0e9d2f7c102dac7b936cccc6476e0d (patch)
tree      391acab7b6e292384454ea37616dcbabd43400a7
parent    9a379826b40b6b9197898471c93271d1b7b6b191 (diff)
parent    56f2c5357692853ea09b7172fa203b9ede0c99fa (diff)
Merge branch 'upstream-runnable-load-avg-in-load-balance-v2' into big-LITTLE-MP-upstream-v2
Conflicts: include/linux/sched.h
-rw-r--r--  include/linux/sched.h  |  3
-rw-r--r--  kernel/sched/core.c    | 10
-rw-r--r--  kernel/sched/fair.c    | 53
3 files changed, 49 insertions(+), 17 deletions(-)
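
Before the diff, a brief illustrative sketch (not part of the commit) of the quantity the merged series switches the balancer to: the ratio runnable_avg_sum / runnable_avg_period in struct sched_avg approximates how much of the recent past an entity was runnable, and the hunks below scale it by NICE_0_LOAD to get a fixed-point fraction. The helper name is hypothetical; it only mirrors the expression used in the wake_affine() hunk further down.

/*
 * Hedged sketch only. The +1 guards against a zero period on an entity
 * that has not accumulated any history yet.
 */
static inline unsigned long sketch_runnable_fraction(struct sched_entity *se)
{
	return se->avg.runnable_avg_sum * NICE_0_LOAD /
	       (se->avg.runnable_avg_period + 1);
}
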
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e2ed2d217f6..fbca8583f06 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1041,6 +1041,7 @@ struct sched_domain;
#else
#define ENQUEUE_WAKING 0
#endif
+#define ENQUEUE_NEWTASK 8
#define DEQUEUE_SLEEP 1
@@ -1166,8 +1167,8 @@ struct sched_entity {
/* rq "owned" by this entity/group: */
struct cfs_rq *my_q;
#endif
+
#ifdef CONFIG_SMP
- /* Per-entity load-tracking */
struct sched_avg avg;
#endif
};
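
As context for the new bit, a minimal illustrative sketch of how ENQUEUE_NEWTASK composes with the existing enqueue flags (the mainline values at the time were ENQUEUE_WAKEUP 1, ENQUEUE_HEAD 2, ENQUEUE_WAKING 4, which is why 8 is the next free bit). sketch_wake_up_new() is a hypothetical caller, not code from this tree; the real consumers are in the core.c and fair.c hunks below.

static void sketch_wake_up_new(struct rq *rq, struct task_struct *p)
{
	/*
	 * ENQUEUE_NEWTASK rides alongside the other bits in the same
	 * flags word; activate_task() passes it down to
	 * enqueue_entity_load_avg(), which seeds the new task's
	 * load_avg_contrib (see the fair.c hunk).
	 */
	activate_task(rq, p, ENQUEUE_NEWTASK);
}
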
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f166474e435..d16b9635441 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1564,6 +1564,8 @@ static void __sched_fork(struct task_struct *p)
#ifdef CONFIG_SMP
p->se.avg.runnable_avg_period = 0;
p->se.avg.runnable_avg_sum = 0;
+ p->se.avg.decay_count = 0;
+ p->se.avg.load_avg_contrib = 0;
#endif
#ifdef CONFIG_SCHEDSTATS
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -1709,7 +1711,7 @@ void wake_up_new_task(struct task_struct *p)
#endif
rq = __task_rq_lock(p);
- activate_task(rq, p, 0);
+ activate_task(rq, p, ENQUEUE_NEWTASK);
p->on_rq = 1;
trace_sched_wakeup_new(p, true);
check_preempt_curr(rq, p, WF_FORK);
@@ -2528,7 +2530,7 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
void update_idle_cpu_load(struct rq *this_rq)
{
unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
- unsigned long load = this_rq->load.weight;
+ unsigned long load = (unsigned long)this_rq->cfs.runnable_load_avg;
unsigned long pending_updates;
/*
@@ -2578,7 +2580,7 @@ static void update_cpu_load_active(struct rq *this_rq)
* See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
*/
this_rq->last_load_update_tick = jiffies;
- __update_cpu_load(this_rq, this_rq->load.weight, 1);
+ __update_cpu_load(this_rq, this_rq->cfs.runnable_load_avg, 1);
calc_load_account_active(this_rq);
}
@@ -2684,8 +2686,8 @@ void scheduler_tick(void)
raw_spin_lock(&rq->lock);
update_rq_clock(rq);
- update_cpu_load_active(rq);
curr->sched_class->task_tick(rq, curr, 0);
+ update_cpu_load_active(rq);
raw_spin_unlock(&rq->lock);
perf_event_task_tick();
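
The core.c hunks above feed cfs.runnable_load_avg instead of the raw rq->load.weight into the cpu_load machinery. A worked example under assumed values (nice-0 weight of 1024 at the default load resolution), purely illustrative:

/*
 * Two nice-0 tasks enqueued on a CPU, one always runnable and one that
 * has recently been runnable about a quarter of the time:
 *
 *   rq->load.weight        = 1024 + 1024 = 2048   (old input)
 *   cfs.runnable_load_avg  ~ 1024 +  256 = 1280   (new input)
 *
 * The mostly-idle task now counts roughly for what it actually runs,
 * which is what the decayed per-entity averages track.
 */
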
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 01b3806e385..d66fa87ebdb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1572,8 +1572,9 @@ static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
/* Add the load generated by se into cfs_rq's child load-average */
static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
struct sched_entity *se,
- int wakeup)
+ int flags)
{
+ int wakeup = flags & ENQUEUE_WAKEUP;
/*
* We track migrations using entity decay_count <= 0, on a wake-up
* migration we use a negative decay count to track the remote decays
@@ -1607,6 +1608,12 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
update_entity_load_avg(se, 0);
}
+ /*
+ * Set the initial load avg of a new task to its load weight, so that
+ * a burst of forks does not make a few CPUs too heavy.
+ */
+ if (flags & ENQUEUE_NEWTASK)
+ se->avg.load_avg_contrib = se->load.weight;
cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
/* we force update consideration on load-balancer moves */
update_cfs_rq_blocked_load(cfs_rq, !wakeup);
@@ -1778,7 +1785,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
- enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
+ enqueue_entity_load_avg(cfs_rq, se, flags);
account_entity_enqueue(cfs_rq, se);
update_cfs_shares(cfs_rq);
@@ -2976,7 +2983,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
{
- return cpu_rq(cpu)->load.weight;
+ return (unsigned long)cpu_rq(cpu)->cfs.runnable_load_avg;
}
/*
@@ -3023,7 +3030,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
if (nr_running)
- return rq->load.weight / nr_running;
+ return (unsigned long)rq->cfs.runnable_load_avg / nr_running;
return 0;
}
@@ -3052,7 +3059,8 @@ static void task_waking_fair(struct task_struct *p)
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
- * effective_load() calculates the load change as seen from the root_task_group
+ * effective_load() calculates the runnable load average change as seen from
+ * the root_task_group
*
* Adding load to a group doesn't make a group heavier, but can cause movement
* of group shares between cpus. Assuming the shares were perfectly aligned one
@@ -3100,6 +3108,9 @@ static void task_waking_fair(struct task_struct *p)
* Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
* times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
* 4/7) times the weight of the group.
+ *
+ * After the effective_load of the moving load is computed, it is scaled
+ * by the sched entity's runnable avg.
*/
static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
@@ -3174,6 +3185,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
struct task_group *tg;
unsigned long weight;
int balanced;
+ int runnable_avg;
idx = sd->wake_idx;
this_cpu = smp_processor_id();
@@ -3189,13 +3201,19 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
if (sync) {
tg = task_group(current);
weight = current->se.load.weight;
+ runnable_avg = current->se.avg.runnable_avg_sum * NICE_0_LOAD
+ / (current->se.avg.runnable_avg_period + 1);
- this_load += effective_load(tg, this_cpu, -weight, -weight);
- load += effective_load(tg, prev_cpu, 0, -weight);
+ this_load += effective_load(tg, this_cpu, -weight, -weight)
+ * runnable_avg >> NICE_0_SHIFT;
+ load += effective_load(tg, prev_cpu, 0, -weight)
+ * runnable_avg >> NICE_0_SHIFT;
}
tg = task_group(p);
weight = p->se.load.weight;
+ runnable_avg = p->se.avg.runnable_avg_sum * NICE_0_LOAD
+ / (p->se.avg.runnable_avg_period + 1);
/*
* In low-load situations, where prev_cpu is idle and this_cpu is idle
@@ -3207,16 +3225,18 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
* task to be woken on this_cpu.
*/
if (this_load > 0) {
- s64 this_eff_load, prev_eff_load;
+ s64 this_eff_load, prev_eff_load, tmp_eff_load;
this_eff_load = 100;
this_eff_load *= power_of(prev_cpu);
- this_eff_load *= this_load +
- effective_load(tg, this_cpu, weight, weight);
+ tmp_eff_load = effective_load(tg, this_cpu, weight, weight)
+ * runnable_avg >> NICE_0_SHIFT;
+ this_eff_load *= this_load + tmp_eff_load;
prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
prev_eff_load *= power_of(this_cpu);
- prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+ prev_eff_load *= load + (effective_load(tg, prev_cpu, 0, weight)
+ * runnable_avg >> NICE_0_SHIFT);
balanced = this_eff_load <= prev_eff_load;
} else
@@ -4090,6 +4110,15 @@ static unsigned long task_h_load(struct task_struct *p);
static const unsigned int sched_nr_migrate_break = 32;
+static unsigned long task_h_load_avg(struct task_struct *p)
+{
+ u32 period = p->se.avg.runnable_avg_period;
+ if (!period)
+ return 0;
+
+ return task_h_load(p) * p->se.avg.runnable_avg_sum / period;
+}
+
/*
* move_tasks tries to move up to imbalance weighted load from busiest to
* this_rq, as part of a balancing operation within domain "sd".
@@ -4125,7 +4154,7 @@ static int move_tasks(struct lb_env *env)
if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
goto next;
- load = task_h_load(p);
+ load = task_h_load_avg(p);
if (sched_feat(LB_MIN) && load < 204 && !env->sd->nr_balance_failed)
goto next;
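
To make the scaling in the wake_affine() and move_tasks() hunks concrete, a worked example under assumed defaults (NICE_0_LOAD of 1024, NICE_0_SHIFT of 10); the numbers are illustrative, not measurements from this tree:

/*
 * For a waking task that has recently been runnable half the time:
 *
 *   runnable_avg = runnable_avg_sum * NICE_0_LOAD / (runnable_avg_period + 1)
 *                ~ 512
 *
 *   effective_load(tg, cpu, wl, wg) * runnable_avg >> NICE_0_SHIFT
 *                ~ effective_load(tg, cpu, wl, wg) / 2
 *
 * so the group-weight delta seen by wake_affine() is halved for a task
 * that is only 50% runnable. Similarly, task_h_load_avg() above returns
 * task_h_load(p) scaled by the same runnable fraction, so move_tasks()
 * now weighs candidates by their decayed load rather than their raw
 * hierarchical weight.
 */
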