author    Vincent Guittot <vincent.guittot@linaro.org>    2011-10-21 09:02:47 +0200
committer Andrey Konovalov <andrey.konovalov@linaro.org>  2012-02-14 23:16:27 +0400
commit    b64923ea8a8c898f1377a121314b8f6b00009dfc
tree      9a1a0ddb8b57e80ab7b2ee5eeb959ef44a2c3dca
parent    4779de4762d5843e47b51e0548348e32eef01682
sched: Ensure cpu_power periodic update
With a lot of small tasks, the softirq sched is nearly never called
when no_hz is enabled. In this case, load_balance() is mainly called
with the newly_idle mode, which doesn't update the cpu_power.

Add a next_update field which ensures a maximum update period when
there is short activity.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>

Conflicts:
	include/linux/sched.h
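The mechanism is a plain rate limit: each run of update_group_power()
stamps sgp->next_update = jiffies + interval (clamped to
max_load_balance_interval), and the newly-idle path only refreshes the
power once that stamp has expired. Below is a minimal userspace sketch
of the same throttle pattern, not kernel code: time() stands in for
jiffies, and all names are illustrative.

/* Hedged sketch of the "next_update" throttle pattern. */
#include <stdio.h>
#include <time.h>

struct sg_power {
	time_t next_update;	/* earliest time the next refresh may run */
};

/* Stand-in for update_group_power(): runs at most once per 'interval'
 * seconds, no matter how often it is requested.  The kernel compares
 * jiffies with time_after_eq() to cope with counter wraparound; a
 * plain comparison is enough for this illustration. */
static void maybe_update_power(struct sg_power *sgp, time_t interval)
{
	time_t now = time(NULL);

	if (now < sgp->next_update)
		return;			/* throttled: updated recently */
	sgp->next_update = now + interval;
	printf("cpu_power refreshed at %ld\n", (long)now);
}

int main(void)
{
	struct sg_power sgp = { .next_update = 0 };
	int i;

	for (i = 0; i < 3; i++)
		maybe_update_power(&sgp, 2);	/* only the first call fires */
	return 0;
}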
-rw-r--r--  include/linux/sched.h |  1 +
-rw-r--r--  kernel/sched/fair.c   | 24 ++++++++++++++++--------
2 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7d379a6bfd8..804dadb555c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -909,6 +909,7 @@ struct sched_group_power {
 	 * Number of busy cpus in this group.
 	 */
 	atomic_t nr_busy_cpus;
+	unsigned long next_update;
 };
 
 struct sched_group {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7c6414fc669..8e77a6bd597 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -215,6 +215,8 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 
 const struct sched_class fair_sched_class;
 
+static unsigned long __read_mostly max_load_balance_interval = HZ/10;
+
 /**************************************************************
  * CFS operations on generic schedulable entities:
  */
@@ -3776,6 +3778,11 @@ void update_group_power(struct sched_domain *sd, int cpu)
 	struct sched_domain *child = sd->child;
 	struct sched_group *group, *sdg = sd->groups;
 	unsigned long power;
+	unsigned long interval;
+
+	interval = msecs_to_jiffies(sd->balance_interval);
+	interval = clamp(interval, 1UL, max_load_balance_interval);
+	sdg->sgp->next_update = jiffies + interval;
 
 	if (!child) {
 		update_cpu_power(sd, cpu);
@@ -3883,12 +3890,15 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	 * domains. In the newly idle case, we will allow all the cpu's
 	 * to do the newly idle load balance.
 	 */
-	if (idle != CPU_NEWLY_IDLE && local_group) {
-		if (balance_cpu != this_cpu) {
-			*balance = 0;
-			return;
-		}
-		update_group_power(sd, this_cpu);
+	if (local_group) {
+		if (idle != CPU_NEWLY_IDLE) {
+			if (balance_cpu != this_cpu) {
+				*balance = 0;
+				return;
+			}
+			update_group_power(sd, this_cpu);
+		} else if (time_after_eq(jiffies, group->sgp->next_update))
+			update_group_power(sd, this_cpu);
 	}
 
 	/* Adjust by relative CPU power of the group */
@@ -4945,8 +4955,6 @@ static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
 
 static DEFINE_SPINLOCK(balancing);
 
-static unsigned long __read_mostly max_load_balance_interval = HZ/10;
-
 /*
  * Scale the max load_balance interval with the number of CPUs in the system.
  * This trades load-balance latency on larger machines for less cross talk.
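For context only: the comment ending this hunk introduces
update_max_interval() in the surrounding mainline source of that era.
The function is not part of this patch, so the following is an
assumption about the neighbouring code rather than something shown in
the diff, but it would look roughly like:

void update_max_interval(void)
{
	max_load_balance_interval = HZ*num_online_cpus()/10;
}

which makes the HZ/10 initializer moved up earlier in this patch simply
the single-CPU case of the scaled cap.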