author     Andi Kleen <ak@linux.intel.com>          2017-03-19 21:02:22 +1100
committer  Stephen Rothwell <sfr@canb.auug.org.au>  2017-03-28 15:50:45 +1100
commit     fab8501b09d13bd1f864b4a77b3cf455e2f17f2b (patch)
tree       2bac8037c16961c306e1d9bc8fb67cfd219d0146
parent     3b2ea36f97dc095c1d5a12ee8025e0c464266a16 (diff)
kernel/sched/fair.c: uninline __update_load_avg()
This is a very complex function, which is called in multiple places.
It is unlikely that inlining or not inlining it makes any difference
for its run time.

This saves around 13k text in my kernel:

   text    data      bss      dec     hex filename
9083992 5367600 11116544 25568136 1862388 vmlinux-before-load-avg
9070166 5367600 11116544 25554310 185ed86 vmlinux-load-avg

Link: http://lkml.kernel.org/r/20170315021431.13107-4-andi@firstfloor.org
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
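For illustration only (a toy sketch, not part of the patch; the names
big_helper(), caller_a/b/c and toy.c are made up for this example), the
text-size effect of forced inlining can be reproduced in a small C
translation unit and measured with the `size` tool:

	/* Toy stand-in for a large, multi-caller helper such as
	 * __update_load_avg(): build once as-is and once with the
	 * annotation removed, then compare the `size` output. */
	#include <stdio.h>

	#define __always_inline inline __attribute__((always_inline))

	/* Drop __always_inline here to get one shared out-of-line copy. */
	static __always_inline int big_helper(int x)
	{
		int acc = 0, i;

		for (i = 0; i < 1000; i++)	/* stands in for a lot of real code */
			acc += (x + i) * (x - i) + (acc >> 1);
		return acc;
	}

	int caller_a(int x) { return big_helper(x) + 1; }
	int caller_b(int x) { return big_helper(x) + 2; }
	int caller_c(int x) { return big_helper(x) + 3; }

	int main(void)
	{
		printf("%d\n", caller_a(1) + caller_b(2) + caller_c(3));
		return 0;
	}

Building both variants, e.g. `gcc -O2 -c toy.c && size toy.o`, shows the
annotated build duplicating the helper's body into every caller, which
is the per-call-site duplication the patch removes for
__update_load_avg(). Whether the plain `static` version of such a small
toy stays out of line is up to the compiler's heuristics; the real
function is large enough that it reliably does.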
-rw-r--r--  kernel/sched/fair.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 168c7f580ea7..5af8ab98ba3e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2848,7 +2848,7 @@ static u32 __compute_runnable_contrib(u64 n)
* load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
* = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
*/
-static __always_inline int
+static int
__update_load_avg(u64 now, int cpu, struct sched_avg *sa,
unsigned long weight, int running, struct cfs_rq *cfs_rq)
{
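As a restatement of the context comment above (not part of the change):
the tracked load average is the geometric series

	\text{load\_avg} = \sum_{i \ge 0} u_i\, y^i
	                 = u_0 + u_1 y + u_2 y^2 + \cdots

where u_i is the contribution of the i-th most recent period and y the
per-period decay factor. When a new period completes, every existing
term decays by one more factor of y and a fresh contribution (written
u_0` in the comment) is prepended:

	\text{load\_avg}' = u_0' + y \sum_{i \ge 0} u_i\, y^i
	                  = \sum_{i \ge 0} u_i'\, y^i,
	\qquad u_{i+1}' = u_i,

which is exactly the re-labeling u_i --> u_{i+1} noted in the comment.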