author    Michal Nazarewicz <mina86@mina86.com>    2013-11-10 20:42:01 +0100
committer Ingo Molnar <mingo@kernel.org>           2013-11-13 13:33:55 +0100
commit    85b088e934b9943322bfe37077289ae60f1b3414 (patch)
tree      2ad7a5376ecf31e10604794f98c8e8c63e42aeca /kernel/sched/fair.c
parent    911b2898b3c9fe0048e9485ad1629ed4fce330fd (diff)
sched/fair: Avoid integer overflow
sa->runnable_avg_sum is of type u32, so shifting it left by NICE_0_SHIFT bits is performed in 32-bit arithmetic; the result is only promoted to u64 afterwards, by which point any bits shifted past bit 31 have already been lost. Casting sa->runnable_avg_sum to u64 before it is shifted fixes this problem.

Reviewed-by: Ben Segall <bsegall@google.com>
Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1384112521-25177-1-git-send-email-mpn@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
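For illustration only (not part of the commit), here is a minimal standalone C sketch of the truncation. It assumes NICE_0_SHIFT is 20 (the increased load-resolution configuration; with a shift of 10 these values would not overflow a u32), uses made-up sample values near LOAD_AVG_MAX, and substitutes a plain 64-bit division for the kernel's div_u64():

    #include <stdio.h>
    #include <stdint.h>

    /* Assumption for this sketch: NICE_0_SHIFT is 20. */
    #define NICE_0_SHIFT 20

    int main(void)
    {
            /* Made-up sample values near LOAD_AVG_MAX. */
            uint32_t runnable_avg_sum    = 47742;
            uint32_t runnable_avg_period = 47742;

            /* Buggy: the shift happens in 32-bit arithmetic, so the high
             * bits of the 36-bit intermediate value are discarded before
             * the operand is widened for the division. */
            uint64_t buggy = (uint64_t)(runnable_avg_sum << NICE_0_SHIFT)
                             / (runnable_avg_period + 1);

            /* Fixed (what the patch does): widen to 64 bits, then shift. */
            uint64_t fixed = ((uint64_t)runnable_avg_sum << NICE_0_SHIFT)
                             / (runnable_avg_period + 1);

            printf("buggy contrib: %llu\n", (unsigned long long)buggy);
            printf("fixed contrib: %llu\n", (unsigned long long)fixed);
            return 0;
    }

With these inputs the buggy expression yields a contrib of roughly 59 thousand instead of the correct value of about 1.05 million (just under 1 << 20, as expected when sum and period are nearly equal), because 47742 << 20 needs 36 bits and the top bits are silently dropped in the u32 shift.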
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  |  2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 201be782b5b..e8b652ebe02 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2178,7 +2178,7 @@ static inline void __update_tg_runnable_avg(struct sched_avg *sa,
 	long contrib;
 
 	/* The fraction of a cpu used by this cfs_rq */
-	contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
+	contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
 			  sa->runnable_avg_period + 1);
 	contrib -= cfs_rq->tg_runnable_contrib;