author	Vincent Guittot <vincent.guittot@linaro.org>	2015-04-22 19:04:48 +0200
committer	Vincent Guittot <vincent.guittot@linaro.org>	2015-04-22 19:04:48 +0200
commit	1fa8fefbfd4e269248a0d428143b7d16f5b6f4ef (patch)
tree	4e63b9b6b3d7864e3192094892c2b04f97c70add
parent	94339237a88f4122d01166a208fdd217afb5dc74 (diff)
Revert "sched: add debug trace"test-sched-tasks-packing
This reverts commit 94339237a88f4122d01166a208fdd217afb5dc74.
-rw-r--r--	include/trace/events/sched.h	364
-rw-r--r--	kernel/sched/fair.c	60
2 files changed, 4 insertions(+), 420 deletions(-)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index be6e12d2fc56..30fedaf3e56a 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -553,370 +553,6 @@ TRACE_EVENT(sched_wake_idle_without_ipi,
TP_printk("cpu=%d", __entry->cpu)
);
-
-/*
- * Tracepoint for buddy update
- */
-TRACE_EVENT(sched_buddy,
-
- TP_PROTO(int cpu, int packing, long capacity, long usage, long perf),
-
- TP_ARGS( cpu, packing, capacity, usage, perf),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( int, packing )
- __field( long, capacity )
- __field( long, usage )
- __field( long, perf )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->packing = packing;
- __entry->capacity = capacity;
- __entry->usage = usage;
- __entry->perf = perf;
- ),
-
- TP_printk("cpu:%d packing:%d capacity:%ld usage:%ld perf:%ld",
- __entry->cpu, __entry->packing,
- __entry->capacity, __entry->usage, __entry->perf)
-);
-
-/*
- * Tracepoint for load tracking of a se
- */
-TRACE_EVENT(sched_load_se,
-
- TP_PROTO(struct sched_avg *sa, int pid, int run),
-
- TP_ARGS(sa, pid, run),
-
- TP_STRUCT__entry(
- __field( unsigned long, usage )
- __field( unsigned long, contrib )
- __field( int, run )
- __field( int, pid )
- ),
-
- TP_fast_assign(
- __entry->usage = sa->utilization_avg_contrib;
- __entry->contrib = sa->load_avg_contrib;
- __entry->run = run;
- __entry->pid = pid;
- ),
-
- TP_printk("pid %d : usage=%lu contrib=%lu run=%d",
- __entry->pid, __entry->usage,
- __entry->contrib, __entry->run)
-);
-
-/*
- * Tracepoint for load tracking on a rq
- */
-TRACE_EVENT(sched_load_contrib,
-
- TP_PROTO(int cpu, unsigned long util, unsigned long butil, unsigned long runnable, unsigned long blocked),
-
- TP_ARGS( cpu, util, butil, runnable, blocked),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( unsigned long, util )
- __field( unsigned long, butil )
- __field( unsigned long, runnable )
- __field( unsigned long, blocked )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->util = util;
- __entry->butil = butil;
- __entry->runnable = runnable;
- __entry->blocked = blocked;
- ),
-
- TP_printk("cpu:%d util=%lu blocked=%lu runnable=%lu blocked=%lu",
- __entry->cpu, __entry->util, __entry->butil, __entry->runnable, __entry->blocked)
-);
-
-TRACE_EVENT(sched_cpu_usage,
-
- TP_PROTO(int cpu, unsigned long usage),
-
- TP_ARGS(cpu, usage),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( unsigned long, usage )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->usage = usage;
- ),
-
- TP_printk("cpu:%d usage=%lu", __entry->cpu, __entry->usage)
-);
-
-TRACE_EVENT(sched_lb,
-
- TP_PROTO(int cpu, int level, int idle),
-
- TP_ARGS(cpu, level, idle),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( int, level )
- __field( int, idle )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->level = level;
- __entry->idle = idle;
- ),
-
- TP_printk("cpu:%d level=%d idle:%d", __entry->cpu, __entry->level, __entry->idle)
-);
-
-
-
-TRACE_EVENT(sched_sg_lb_stats,
-
- TP_PROTO(int cpu, unsigned long load, unsigned long usage, unsigned long capacity, unsigned long capacity_orig, unsigned long nr_running),
-
- TP_ARGS(cpu, load, usage, capacity, capacity_orig, nr_running),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( unsigned long, load )
- __field( unsigned long, usage )
- __field( unsigned long, capacity )
- __field( unsigned long, capacity_orig )
- __field( unsigned long, nr_running )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->load = load;
- __entry->usage = usage;
- __entry->capacity = capacity;
- __entry->capacity_orig = capacity_orig;
- __entry->nr_running = nr_running;
- ),
-
- TP_printk("1st cpu:%d load=%lu usage=%lu capacity=%lu orig=%lu nr=%lu",
- __entry->cpu, __entry->load, __entry->usage,
- __entry->capacity, __entry->capacity_orig,
- __entry->nr_running)
-);
-
-TRACE_EVENT(sched_sd_lb_stats,
-
- TP_PROTO(unsigned long load, unsigned long usage, unsigned long capacity),
-
- TP_ARGS(load, usage, capacity),
-
- TP_STRUCT__entry(
- __field( unsigned long, load )
- __field( unsigned long, usage )
- __field( unsigned long, capacity )
- ),
-
- TP_fast_assign(
- __entry->load = load;
- __entry->usage = usage;
- __entry->capacity = capacity;
- ),
-
- TP_printk("load=%lu usage=%lu capacity=%lu",
- __entry->load, __entry->usage,
- __entry->capacity)
-);
-
-TRACE_EVENT(sched_fbg,
-
- TP_PROTO(int level, int lcpu, int bcpu, int idle),
-
- TP_ARGS(level, lcpu, bcpu, idle),
-
- TP_STRUCT__entry(
- __field( int, level )
- __field( int, lcpu )
- __field( int, bcpu )
- __field( int, idle )
- ),
-
- TP_fast_assign(
- __entry->level = level;
- __entry->lcpu = lcpu;
- __entry->bcpu = bcpu;
- __entry->idle = idle;
- ),
-
- TP_printk(" level=%d 1st local cpu=%d 1st busiest cpu=%d, idle=%d",
- __entry->level, __entry->lcpu,
- __entry->bcpu, __entry->idle)
-);
-
-TRACE_EVENT(sched_fbq,
-
- TP_PROTO(int cpu, int busy_cpu, struct sched_group *busiest, int imbalance),
-
- TP_ARGS(cpu, busy_cpu, busiest, imbalance),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( int, busy_cpu )
- __field( struct sched_group *, busiest )
- __field( int, imbalance )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->busy_cpu = busy_cpu;
- __entry->busiest = busiest;
- __entry->imbalance = imbalance;
- ),
-
- TP_printk("cpu: %d rq=%d busiest=%lx imbalance=%d",
- __entry->cpu, __entry->busy_cpu, (long)__entry->busiest, __entry->imbalance)
-);
-
-TRACE_EVENT(sched_nohz_kick,
-
- TP_PROTO(int cpu, int nr, int busy),
-
- TP_ARGS(cpu, nr, busy),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( int, nr )
- __field( int, busy )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->nr = nr;
- __entry->busy = busy;
- ),
-
- TP_printk("cpu: %d nr=%d busy=%d",
- __entry->cpu, __entry->nr, __entry->busy)
-);
-
-/*
- * Tracepoint for showing load tracking of a se
- */
-TRACE_EVENT(sched_nohz,
-
- TP_PROTO(int next, int nr),
-
- TP_ARGS(next, nr),
-
- TP_STRUCT__entry(
- __field( int, next )
- __field( int, nr )
- ),
-
- TP_fast_assign(
- __entry->next = next;
- __entry->nr = nr
- ),
-
- TP_printk("next %d nr%d",
- __entry->next, __entry->nr)
-);
-
-TRACE_EVENT(sched_affine,
-
- TP_PROTO(int task, int prev, int new),
-
- TP_ARGS(task, prev, new),
-
- TP_STRUCT__entry(
- __field( int, task )
- __field( int, prev )
- __field( int, new )
- ),
-
- TP_fast_assign(
- __entry->task = task;
- __entry->prev = prev;
- __entry->new = new;
- ),
-
- TP_printk("task %d prev %d new %d",
- __entry->task, __entry->prev, __entry->new)
-);
-
-TRACE_EVENT(sched_wake_affine,
-
- TP_PROTO(s64 this, s64 thisl, s64 prev, s64 prevl),
-
- TP_ARGS(this, thisl, prev, prevl),
-
- TP_STRUCT__entry(
- __field( s64, this )
- __field( s64, thisl )
- __field( s64, prev )
- __field( s64, prevl )
- ),
-
- TP_fast_assign(
- __entry->this = this;
- __entry->thisl = thisl;
- __entry->prev = prev;
- __entry->prevl = prevl;
- ),
-
- TP_printk("this %lld load %lld prev %lld load %lld",
- __entry->this, __entry->thisl, __entry->prev, __entry->prevl)
-);
-
-TRACE_EVENT(sched_update_buddy,
-
- TP_PROTO(int cpu, int buddy, int business),
-
- TP_ARGS(cpu, buddy, business),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( int, buddy )
- __field( int, business )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->buddy = buddy;
- __entry->business = business;
- ),
-
- TP_printk("cpu %d buddy %d business %d",
- __entry->cpu, __entry->buddy, __entry->business)
-);
-
-TRACE_EVENT(sched_check_perf,
-
- TP_PROTO(int cpu, struct sched_group *group),
-
- TP_ARGS(cpu, group),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( int, th )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->th = group->sgc->perf_thres;
- ),
-
- TP_printk("cpu:%d threshold %d",
- __entry->cpu, __entry->th)
-);
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 430d59078fa7..682b4fed06db 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -31,9 +31,10 @@
#include <linux/migrate.h>
#include <linux/task_work.h>
+#include <trace/events/sched.h>
+
#include "sched.h"
-#include <trace/events/sched.h>
/*
* Targeted preemption latency for CPU-bound tasks:
* (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -321,7 +322,6 @@ bool update_packing_buddy(int cpu, struct sched_domain *sd, long activity, long
/* Get the state of 1st CPU of the power group */
if (!is_leader_cpu(cpu, sd)) {
per_cpu(sd_pack_buddy, cpu).packing = is_packing_cpu(get_leader(cpu));
- trace_sched_buddy(cpu, per_cpu(sd_pack_buddy, cpu).packing, capacity, activity, perf);
return (per_cpu(sd_pack_buddy, cpu).packing != packing);
}
@@ -369,7 +369,6 @@ bool update_packing_buddy(int cpu, struct sched_domain *sd, long activity, long
vperf = 0;
}
- trace_sched_buddy(cpu, per_cpu(sd_pack_buddy, cpu).packing, capacity, activity, perf);
return (per_cpu(sd_pack_buddy, cpu).packing != packing);
}
@@ -3035,14 +3034,8 @@ static inline void update_entity_load_avg(struct sched_entity *se,
now = cfs_rq_clock_task(group_cfs_rq(se));
if (!__update_entity_runnable_avg(now, cpu, &se->avg, se->on_rq,
- cfs_rq->curr == se)) {
- trace_sched_load_se(&se->avg,
- entity_is_task(se) ? task_of(se)->pid : (int)(se),
- cfs_rq->curr == se);
+ cfs_rq->curr == se))
return;
- }
- trace_sched_load_se(&se->avg, entity_is_task(se) ? task_of(se)->pid : (int)(se),
- cfs_rq->curr == se);
contrib_delta = __update_entity_load_avg_contrib(se);
utilization_delta = __update_entity_utilization_avg_contrib(se);
@@ -3058,11 +3051,6 @@ static inline void update_entity_load_avg(struct sched_entity *se,
subtract_utilization_blocked_contrib(cfs_rq,
-utilization_delta);
}
- trace_sched_load_contrib(cpu,
- cfs_rq->utilization_load_avg,
- cfs_rq->utilization_blocked_avg,
- cfs_rq->runnable_load_avg,
- cfs_rq->blocked_load_avg);
}
/*
@@ -3148,12 +3136,6 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
cfs_rq->utilization_load_avg += se->avg.utilization_avg_contrib;
/* we force update consideration on load-balancer moves */
update_cfs_rq_blocked_load(cfs_rq, !wakeup);
- trace_sched_load_contrib(cpu_of(rq_of(cfs_rq)),
- cfs_rq->utilization_load_avg,
- cfs_rq->utilization_blocked_avg,
- cfs_rq->runnable_load_avg,
- cfs_rq->blocked_load_avg);
-
}
/*
@@ -3177,11 +3159,6 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
se->avg.utilization_avg_contrib;
se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
} /* migrations, e.g. sleep=0 leave decay_count == 0 */
- trace_sched_load_contrib(cpu_of(rq_of(cfs_rq)),
- cfs_rq->utilization_load_avg,
- cfs_rq->utilization_blocked_avg,
- cfs_rq->runnable_load_avg,
- cfs_rq->blocked_load_avg);
}
/*
@@ -4897,7 +4874,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
}
balanced = this_eff_load <= prev_eff_load;
- trace_sched_wake_affine(this_eff_load, this_load, prev_eff_load, load);
+
schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
if (!balanced)
@@ -5094,16 +5071,9 @@ static int get_cpu_usage(int cpu)
{
unsigned long usage = cpu_rq(cpu)->cfs.utilization_load_avg;
unsigned long capacity = capacity_orig_of(cpu);
- trace_sched_load_contrib(cpu,
- cpu_rq(cpu)->cfs.utilization_load_avg,
- cpu_rq(cpu)->cfs.utilization_blocked_avg,
- cpu_rq(cpu)->cfs.runnable_load_avg,
- cpu_rq(cpu)->cfs.blocked_load_avg);
usage += cpu_rq(cpu)->cfs.utilization_blocked_avg;
- trace_sched_cpu_usage(cpu, usage);
-
if (usage >= SCHED_LOAD_SCALE)
return capacity;
@@ -5162,9 +5132,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
if (sd_flag & SD_BALANCE_WAKE) {
new_cpu = select_idle_sibling(p, prev_cpu);
-
- trace_sched_affine(task_cpu(p), prev_cpu, new_cpu);
-
goto unlock;
}
@@ -6635,11 +6602,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->group_no_capacity = group_is_overloaded(env, sgs);
sgs->group_type = group_classify(env, group, sgs);
-
- trace_sched_sg_lb_stats(cpumask_first(sched_group_cpus(group)),
- sgs->group_load, sgs->group_usage,
- sgs->group_capacity, group->sgc->capacity_orig,
- sgs->sum_nr_running);
}
/**
@@ -6798,8 +6760,6 @@ next_group:
env->dst_rq->rd->overload = overload;
}
- trace_sched_sd_lb_stats(sds->total_load, sds->total_usage, sds->total_capacity);
-
}
/**
@@ -7371,15 +7331,8 @@ redo:
schedstat_inc(sd, lb_nobusyg[idle]);
goto out_balanced;
}
- trace_sched_fbg(env.sd->level, this_cpu,
- cpumask_first(sched_group_cpus(group)), idle);
-
busiest = find_busiest_queue(&env, group);
- trace_sched_fbq(this_cpu,
- busiest ? cpu_of(busiest) : -1,
- group, env.imbalance);
-
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[idle]);
goto out_balanced;
@@ -7677,7 +7630,6 @@ static int idle_balance(struct rq *this_rq)
}
if (sd->flags & SD_BALANCE_NEWIDLE) {
- trace_sched_lb(this_cpu, sd->level, CPU_NEWLY_IDLE);
t0 = sched_clock_cpu(this_cpu);
pulled_task = load_balance(this_cpu, this_rq,
@@ -8036,7 +7988,6 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
}
if (time_after_eq(jiffies, sd->last_balance + interval)) {
- trace_sched_lb(cpu, sd->level, idle);
if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
/*
* The LBF_DST_PINNED logic could have changed
@@ -8120,7 +8071,6 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
this_rq->next_balance = rq->next_balance;
}
nohz.next_balance = this_rq->next_balance;
- trace_sched_nohz(nohz.next_balance, atomic_read(&nohz.nr_cpus));
end:
clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
}
@@ -8154,8 +8104,6 @@ static inline bool nohz_kick_needed(struct rq *rq)
set_cpu_sd_state_busy();
nohz_balance_exit_idle(cpu);
- trace_sched_nohz_kick(cpu, rq->nr_running, atomic_read(&nohz.nr_cpus));
-
/*
* None are in tickless mode and hence no need for NOHZ idle load
* balancing.