| author | Vincent Guittot <vincent.guittot@linaro.org> | 2013-05-31 11:40:53 +0200 |
|---|---|---|
| committer | Vincent Guittot <vincent.guittot@linaro.org> | 2013-08-02 15:44:06 +0200 |
| commit | aacc71b063113fad406c6815e9cb0a50ac01fa0c (patch) | |
| tree | 832ebb05d28ac452cfd6d5396236e62e55dccd16 | |
| parent | 5af5c825acf5ce3b290f7298973f55e32cb7bce7 (diff) | |
sched: get CPU's activity statistic
Monitor the activity level of each group of each sched_domain level. The
activity is the amount of cpu_power that is currently used on a CPU. We use
the runnable_avg_sum and _period to evaluate this activity level. In the
special case where the CPU is fully loaded by more than one task, the
activity level is set above the cpu_power in order to reflect the overload of
the CPU.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
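
To make the intent concrete, here is a minimal user-space sketch of the same arithmetic that the get_cpu_activity()/available_of() pair added below performs. The mock_rq structure, the mock_activity() helper and the sample numbers are invented for illustration and are not part of the patch; only the formula mirrors it.

```c
#include <stdio.h>

/* Hypothetical stand-in for the per-CPU runqueue fields the patch reads. */
struct mock_rq {
	unsigned int runnable_avg_sum;    /* decayed busy time of the CPU   */
	unsigned int runnable_avg_period; /* decayed total tracked time     */
	unsigned int nr_running;          /* tasks currently runnable       */
	unsigned long cpu_available;      /* cpu_power available to CFS     */
};

/* Same arithmetic as the patch's get_cpu_activity(), on the mock data. */
static unsigned long mock_activity(const struct mock_rq *rq)
{
	unsigned int sum = rq->runnable_avg_sum;
	unsigned int period = rq->runnable_avg_period;

	if (sum > period)
		sum = period;

	/*
	 * Fully loaded: report slightly more than the available power when
	 * the CPU also has more than one runnable task, to mark overload.
	 */
	if (sum == period)
		return rq->cpu_available + (rq->nr_running > 1 ? 1 : 0);

	/* Otherwise scale the available power by the busy ratio. */
	return (sum * rq->cpu_available) / period;
}

int main(void)
{
	struct mock_rq half = { .runnable_avg_sum = 512, .runnable_avg_period = 1024,
				.nr_running = 1, .cpu_available = 1024 };
	struct mock_rq full = { .runnable_avg_sum = 1024, .runnable_avg_period = 1024,
				.nr_running = 2, .cpu_available = 1024 };

	printf("half-busy CPU : %lu\n", mock_activity(&half)); /* 512  */
	printf("overloaded CPU: %lu\n", mock_activity(&full)); /* 1025 */
	return 0;
}
```

As the commit message states, reporting a value above cpu_power for a fully loaded CPU with more than one runnable task lets a consumer of the statistic tell "exactly full" apart from "full with tasks waiting" without a separate flag.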
-rw-r--r-- | kernel/sched/fair.c | 25 |
1 file changed, 25 insertions, 0 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9ff4e45a7ce9..68c1b3606b04 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -179,6 +179,11 @@ void sched_init_granularity(void)
 }
 
 #ifdef CONFIG_SMP
+static unsigned long available_of(int cpu)
+{
+	return cpu_rq(cpu)->cpu_available;
+}
+
 #ifdef CONFIG_SCHED_PACKING_TASKS
 /*
  * Save the id of the optimal CPU that should be used to pack small tasks
@@ -3501,6 +3506,22 @@ done:
 	return target;
 }
 
+static int get_cpu_activity(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	u32 sum = rq->avg.runnable_avg_sum;
+	u32 period = rq->avg.runnable_avg_period;
+
+	sum = min(sum, period);
+
+	if (sum == period) {
+		u32 overload = rq->nr_running > 1 ? 1 : 0;
+		return available_of(cpu) + overload;
+	}
+
+	return (sum * available_of(cpu)) / period;
+}
+
 /*
  * sched_balance_self: balance the current task (running on cpu) in domains
  * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
@@ -4384,6 +4405,7 @@ struct sd_lb_stats {
 	struct sched_group *busiest; /* Busiest group in this sd */
 	struct sched_group *this;  /* Local group in this sd */
 	unsigned long total_load;  /* Total load of all groups in sd */
+	unsigned long total_activity;  /* Total activity of all groups in sd */
 	unsigned long total_pwr;   /* Total power of all groups in sd */
 	unsigned long avg_load;	   /* Average load across all groups in sd */
 
@@ -4412,6 +4434,7 @@ struct sd_lb_stats {
 struct sg_lb_stats {
 	unsigned long avg_load; /*Avg load across the CPUs of the group */
 	unsigned long group_load; /* Total load over the CPUs of the group */
+	unsigned long group_activity; /* Total activity of the group */
 	unsigned long sum_nr_running; /* Nr tasks running in the group */
 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
 	unsigned long group_capacity;
@@ -4670,6 +4693,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		}
 
 		sgs->group_load += load;
+		sgs->group_activity += get_cpu_activity(i);
 		sgs->sum_nr_running += nr_running;
 		sgs->sum_weighted_load += weighted_cpuload(i);
 		if (idle_cpu(i))
@@ -4793,6 +4817,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
 			return;
 
 		sds->total_load += sgs.group_load;
+		sds->total_activity += sgs.group_activity;
 		sds->total_pwr += sg->sgp->power;
 
 		/*