diff options
Diffstat (limited to 'kernel/smp.c')
-rw-r--r-- | kernel/smp.c | 12 |
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 4dba0f7b72a..23ccc67dcbb 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -12,6 +12,8 @@
 #include <linux/gfp.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/smp.h>
 
 #include "smpboot.h"
 
@@ -159,8 +161,10 @@ void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 	 * locking and barrier primitives. Generic code isn't really
 	 * equipped to do the right thing...
 	 */
-	if (ipi)
+	if (ipi) {
+		trace_smp_call_func_send(csd->func, cpu);
 		arch_send_call_function_single_ipi(cpu);
+	}
 
 	if (wait)
 		csd_lock_wait(csd);
@@ -197,8 +201,9 @@ void generic_smp_call_function_single_interrupt(void)
 		 * so save them away before making the call:
 		 */
 		csd_flags = csd->flags;
-
+		trace_smp_call_func_entry(csd->func);
 		csd->func(csd->info);
+		trace_smp_call_func_exit(csd->func);
 
 		/*
 		 * Unlocked CSDs are valid through generic_exec_single():
@@ -228,6 +233,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 	int this_cpu;
 	int err = 0;
 
+	trace_smp_call_func_send(func, cpu);
 	/*
 	 * prevent preemption and reschedule on another processor,
 	 * as well as CPU removal
@@ -245,7 +251,9 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 
 	if (cpu == this_cpu) {
 		local_irq_save(flags);
+		trace_smp_call_func_entry(func);
 		func(info);
+		trace_smp_call_func_exit(func);
 		local_irq_restore(flags);
 	} else {
 		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {