about summary refs log tree commit diff
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2015-09-07 15:05:42 +0200
committerJon Medhurst <tixy@linaro.org>2016-04-13 11:39:58 +0100
commit0c028b4deddfa32b3ecde0f11619ad2bbd547b54 (patch)
treed862dfcaa3d55a15c36c655b91ddb3ea98af835b /kernel
parent1af353501d22929883db30f7ae938eae92bf59e9 (diff)
sched/fair: Rename scale() to cap_scale()
Rename scale() to cap_scale() to better reflect its purpose, it is after all not a general purpose scale function, it has SCHED_CAPACITY_SHIFT hardcoded in it. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Mike Galbraith <efault@gmx.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org> (cherry picked from commit 54a21385facbdcd89a78e8c3e5025f04c5f2b59c) Signed-off-by: Ricky Liang <jcliang@chromium.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d21a03a74ff0..9fece11a0fcf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2284,7 +2284,7 @@ static u32 __compute_runnable_contrib(u64 n)
return contrib + runnable_avg_yN_sum[n];
}
-#define scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
/*
* We can represent the historical contribution to runnable average as the
@@ -2357,7 +2357,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
* period and accrue it.
*/
delta_w = 1024 - delta_w;
- scaled_delta_w = scale(delta_w, scale_freq);
+ scaled_delta_w = cap_scale(delta_w, scale_freq);
if (weight) {
sa->load_sum += weight * scaled_delta_w;
if (cfs_rq) {
@@ -2366,7 +2366,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
}
}
if (running)
- sa->util_sum += scale(scaled_delta_w, scale_cpu);
+ sa->util_sum += cap_scale(scaled_delta_w, scale_cpu);
delta -= delta_w;
@@ -2383,25 +2383,25 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
/* Efficiently calculate \sum (1..n_period) 1024*y^i */
contrib = __compute_runnable_contrib(periods);
- contrib = scale(contrib, scale_freq);
+ contrib = cap_scale(contrib, scale_freq);
if (weight) {
sa->load_sum += weight * contrib;
if (cfs_rq)
cfs_rq->runnable_load_sum += weight * contrib;
}
if (running)
- sa->util_sum += scale(contrib, scale_cpu);
+ sa->util_sum += cap_scale(contrib, scale_cpu);
}
/* Remainder of delta accrued against u_0` */
- scaled_delta = scale(delta, scale_freq);
+ scaled_delta = cap_scale(delta, scale_freq);
if (weight) {
sa->load_sum += weight * scaled_delta;
if (cfs_rq)
cfs_rq->runnable_load_sum += weight * scaled_delta;
}
if (running)
- sa->util_sum += scale(scaled_delta, scale_cpu);
+ sa->util_sum += cap_scale(scaled_delta, scale_cpu);
sa->period_contrib += delta;