Diffstat (limited to 'include/trace/events')
 include/trace/events/android_fs.h          |  31
 include/trace/events/android_fs_template.h |  79
 include/trace/events/cpufreq_interactive.h | 112
 include/trace/events/cpufreq_sched.h       |  87
 include/trace/events/gpu.h                 | 143
 include/trace/events/mmc.h                 |  91
 include/trace/events/power.h               |  51
 include/trace/events/sched.h               | 549
 8 files changed, 1142 insertions(+), 1 deletion(-)
diff --git a/include/trace/events/android_fs.h b/include/trace/events/android_fs.h
new file mode 100644
index 000000000000..531da433a7bc
--- /dev/null
+++ b/include/trace/events/android_fs.h
@@ -0,0 +1,31 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM android_fs
+
+#if !defined(_TRACE_ANDROID_FS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ANDROID_FS_H
+
+#include <linux/tracepoint.h>
+#include <trace/events/android_fs_template.h>
+
+DEFINE_EVENT(android_fs_data_start_template, android_fs_dataread_start,
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+ pid_t pid, char *command),
+ TP_ARGS(inode, offset, bytes, pid, command));
+
+DEFINE_EVENT(android_fs_data_end_template, android_fs_dataread_end,
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+ TP_ARGS(inode, offset, bytes));
+
+DEFINE_EVENT(android_fs_data_start_template, android_fs_datawrite_start,
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+ pid_t pid, char *command),
+ TP_ARGS(inode, offset, bytes, pid, command));
+
+DEFINE_EVENT(android_fs_data_end_template, android_fs_datawrite_end,
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+ TP_ARGS(inode, offset, bytes));
+
+#endif /* _TRACE_ANDROID_FS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
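
A minimal sketch of a call site for the read pair above; example_read() is hypothetical, only the trace_android_fs_* calls come from this header. Note that exactly one compilation unit must define CREATE_TRACE_POINTS before including the header so the tracepoint bodies get instantiated:

	#define CREATE_TRACE_POINTS
	#include <trace/events/android_fs.h>

	/* Hypothetical read path; pid/comm of the initiating task are logged. */
	static void example_read(struct inode *inode, loff_t pos, int bytes)
	{
		trace_android_fs_dataread_start(inode, pos, bytes,
						current->pid, current->comm);
		/* ... actually perform the read ... */
		trace_android_fs_dataread_end(inode, pos, bytes);
	}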
diff --git a/include/trace/events/android_fs_template.h b/include/trace/events/android_fs_template.h
new file mode 100644
index 000000000000..618988b047c1
--- /dev/null
+++ b/include/trace/events/android_fs_template.h
@@ -0,0 +1,79 @@
+#if !defined(_TRACE_ANDROID_FS_TEMPLATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ANDROID_FS_TEMPLATE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(android_fs_data_start_template,
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+ pid_t pid, char *command),
+ TP_ARGS(inode, offset, bytes, pid, command),
+ TP_STRUCT__entry(
+ __array(char, path, MAX_FILTER_STR_VAL);
+ __field(char *, pathname);
+ __field(loff_t, offset);
+ __field(int, bytes);
+ __field(loff_t, i_size);
+ __string(cmdline, command);
+ __field(pid_t, pid);
+ __field(ino_t, ino);
+ ),
+ TP_fast_assign(
+ {
+ struct dentry *d;
+
+ /*
+ * Grab a reference to the inode here because
+ * d_obtain_alias() will either drop the inode
+ * reference if it locates an existing dentry
+ * or transfer the reference to the new dentry
+ * created. In our case, the file is still open,
+ * so the dentry is guaranteed to exist (connected),
+ * so d_obtain_alias() drops the reference we
+ * grabbed here.
+ */
+ ihold(inode);
+ d = d_obtain_alias(inode);
+ if (!IS_ERR(d)) {
+ __entry->pathname = dentry_path(d,
+ __entry->path,
+ MAX_FILTER_STR_VAL);
+ dput(d);
+ } else
+ __entry->pathname = ERR_PTR(-EINVAL);
+ __entry->offset = offset;
+ __entry->bytes = bytes;
+ __entry->i_size = i_size_read(inode);
+ __assign_str(cmdline, command);
+ __entry->pid = pid;
+ __entry->ino = inode->i_ino;
+ }
+ ),
+ TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s,"
+ " pid %d, i_size %llu, ino %lu",
+ (IS_ERR(__entry->pathname) ? "ERROR" : __entry->pathname),
+ __entry->offset, __entry->bytes, __get_str(cmdline),
+ __entry->pid, __entry->i_size,
+ (unsigned long) __entry->ino)
+);
+
+DECLARE_EVENT_CLASS(android_fs_data_end_template,
+ TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+ TP_ARGS(inode, offset, bytes),
+ TP_STRUCT__entry(
+ __field(ino_t, ino);
+ __field(loff_t, offset);
+ __field(int, bytes);
+ ),
+ TP_fast_assign(
+ {
+ __entry->ino = inode->i_ino;
+ __entry->offset = offset;
+ __entry->bytes = bytes;
+ }
+ ),
+ TP_printk("ino %lu, offset %llu, bytes %d",
+ (unsigned long) __entry->ino,
+ __entry->offset, __entry->bytes)
+);
+
+#endif /* _TRACE_ANDROID_FS_TEMPLATE_H */
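
A side note on the pathname/path pair in the first template below: dentry_path() builds the name from the tail of the caller-supplied buffer and returns a pointer into that buffer, which is why the event must carry both the backing array and the returned pointer. A hedged illustration (d and buf are placeholders):

	char buf[MAX_FILTER_STR_VAL];
	char *name = dentry_path(d, buf, sizeof(buf));

	if (!IS_ERR(name))	/* name points into the tail of buf */
		pr_info("resolved path: %s\n", name);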
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
new file mode 100644
index 000000000000..951e6ca12da8
--- /dev/null
+++ b/include/trace/events/cpufreq_interactive.h
@@ -0,0 +1,112 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_interactive
+
+#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_INTERACTIVE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(set,
+ TP_PROTO(u32 cpu_id, unsigned long targfreq,
+ unsigned long actualfreq),
+ TP_ARGS(cpu_id, targfreq, actualfreq),
+
+ TP_STRUCT__entry(
+ __field( u32, cpu_id )
+ __field(unsigned long, targfreq )
+ __field(unsigned long, actualfreq )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = (u32) cpu_id;
+ __entry->targfreq = targfreq;
+ __entry->actualfreq = actualfreq;
+ ),
+
+ TP_printk("cpu=%u targ=%lu actual=%lu",
+ __entry->cpu_id, __entry->targfreq,
+ __entry->actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_setspeed,
+ TP_PROTO(u32 cpu_id, unsigned long targfreq,
+ unsigned long actualfreq),
+ TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DECLARE_EVENT_CLASS(loadeval,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, cpu_id )
+ __field(unsigned long, load )
+ __field(unsigned long, curtarg )
+ __field(unsigned long, curactual )
+ __field(unsigned long, newtarg )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->load = load;
+ __entry->curtarg = curtarg;
+ __entry->curactual = curactual;
+ __entry->newtarg = newtarg;
+ ),
+
+ TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+ __entry->cpu_id, __entry->load, __entry->curtarg,
+ __entry->curactual, __entry->newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_target,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_already,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curtarg, unsigned long curactual,
+ unsigned long newtarg),
+ TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+TRACE_EVENT(cpufreq_interactive_boost,
+ TP_PROTO(const char *s),
+ TP_ARGS(s),
+ TP_STRUCT__entry(
+ __string(s, s)
+ ),
+ TP_fast_assign(
+ __assign_str(s, s);
+ ),
+ TP_printk("%s", __get_str(s))
+);
+
+TRACE_EVENT(cpufreq_interactive_unboost,
+ TP_PROTO(const char *s),
+ TP_ARGS(s),
+ TP_STRUCT__entry(
+ __string(s, s)
+ ),
+ TP_fast_assign(
+ __assign_str(s, s);
+ ),
+ TP_printk("%s", __get_str(s))
+);
+
+#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
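
For illustration, a governor's speed-change path could emit the setspeed event as below; the policy plumbing is an assumption, only the trace_cpufreq_interactive_setspeed() call is defined by this header:

	#include <trace/events/cpufreq_interactive.h>

	static int example_set_speed(struct cpufreq_policy *policy,
				     unsigned int target_freq)
	{
		int ret;

		ret = __cpufreq_driver_target(policy, target_freq,
					      CPUFREQ_RELATION_H);
		/* log both the requested target and what was actually set */
		trace_cpufreq_interactive_setspeed(policy->cpu, target_freq,
						   policy->cur);
		return ret;
	}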
diff --git a/include/trace/events/cpufreq_sched.h b/include/trace/events/cpufreq_sched.h
new file mode 100644
index 000000000000..a46cd088e969
--- /dev/null
+++ b/include/trace/events/cpufreq_sched.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2015 Steve Muckle <smuckle@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_sched
+
+#if !defined(_TRACE_CPUFREQ_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_SCHED_H
+
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cpufreq_sched_throttled,
+ TP_PROTO(unsigned int rem),
+ TP_ARGS(rem),
+ TP_STRUCT__entry(
+ __field( unsigned int, rem)
+ ),
+ TP_fast_assign(
+ __entry->rem = rem;
+ ),
+	TP_printk("throttled - %u usec remaining", __entry->rem)
+);
+
+TRACE_EVENT(cpufreq_sched_request_opp,
+ TP_PROTO(int cpu,
+ unsigned long capacity,
+ unsigned int freq_new,
+ unsigned int requested_freq),
+ TP_ARGS(cpu, capacity, freq_new, requested_freq),
+ TP_STRUCT__entry(
+ __field( int, cpu)
+ __field( unsigned long, capacity)
+ __field( unsigned int, freq_new)
+ __field( unsigned int, requested_freq)
+ ),
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->capacity = capacity;
+ __entry->freq_new = freq_new;
+ __entry->requested_freq = requested_freq;
+ ),
+	TP_printk("cpu %d cap change, cluster cap request %lu => OPP %u "
+		  "(cur %u)",
+		  __entry->cpu, __entry->capacity, __entry->freq_new,
+		  __entry->requested_freq)
+);
+
+TRACE_EVENT(cpufreq_sched_update_capacity,
+ TP_PROTO(int cpu,
+ bool request,
+ struct sched_capacity_reqs *scr,
+ unsigned long new_capacity),
+ TP_ARGS(cpu, request, scr, new_capacity),
+ TP_STRUCT__entry(
+ __field( int, cpu)
+ __field( bool, request)
+ __field( unsigned long, cfs)
+ __field( unsigned long, rt)
+ __field( unsigned long, dl)
+ __field( unsigned long, total)
+ __field( unsigned long, new_total)
+ ),
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->request = request;
+ __entry->cfs = scr->cfs;
+ __entry->rt = scr->rt;
+ __entry->dl = scr->dl;
+ __entry->total = scr->total;
+ __entry->new_total = new_capacity;
+ ),
+	TP_printk("cpu=%d set_cap=%d cfs=%lu rt=%lu dl=%lu old_tot=%lu "
+		  "new_tot=%lu",
+		  __entry->cpu, __entry->request, __entry->cfs, __entry->rt,
+		  __entry->dl, __entry->total, __entry->new_total)
+);
+
+#endif /* _TRACE_CPUFREQ_SCHED_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
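
As a sketch, the governor's throttling path might report the remaining throttle window in microseconds, matching the "%d usec remaining" format above; the timestamp bookkeeping below is hypothetical, not part of this patch:

	#include <linux/ktime.h>
	#include <trace/events/cpufreq_sched.h>

	static bool example_throttled(u64 throttle_until_ns, u64 now_ns)
	{
		if (now_ns >= throttle_until_ns)
			return false;

		/* remaining time, converted from ns to usec for the event */
		trace_cpufreq_sched_throttled(
			(unsigned int)((throttle_until_ns - now_ns) / NSEC_PER_USEC));
		return true;
	}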
diff --git a/include/trace/events/gpu.h b/include/trace/events/gpu.h
new file mode 100644
index 000000000000..7e15cdfafe5a
--- /dev/null
+++ b/include/trace/events/gpu.h
@@ -0,0 +1,143 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu
+
+#if !defined(_TRACE_GPU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_GPU_H
+
+#include <linux/tracepoint.h>
+#include <linux/time.h>
+
+#define show_secs_from_ns(ns) \
+ ({ \
+ u64 t = ns + (NSEC_PER_USEC / 2); \
+ do_div(t, NSEC_PER_SEC); \
+ t; \
+ })
+
+#define show_usecs_from_ns(ns) \
+ ({ \
+		u64 t = ns + (NSEC_PER_USEC / 2); \
+ u32 rem; \
+ do_div(t, NSEC_PER_USEC); \
+ rem = do_div(t, USEC_PER_SEC); \
+ })
+
+/*
+ * The gpu_sched_switch event indicates that a switch from one GPU context to
+ * another occurred on one of the GPU hardware blocks.
+ *
+ * The gpu_name argument identifies the GPU hardware block. Each independently
+ * scheduled GPU hardware block should have a different name. This may be used
+ * in different ways for different GPUs. For example, if a GPU includes
+ * multiple processing cores it may use names "GPU 0", "GPU 1", etc. If a GPU
+ * includes a separately scheduled 2D and 3D hardware block, it might use the
+ * names "2D" and "3D".
+ *
+ * The timestamp argument is the timestamp at which the switch occurred on the
+ * GPU. These timestamps are in units of nanoseconds and must use
+ * approximately the same time as sched_clock, though they need not come from
+ * any CPU clock. The timestamps for a single hardware block must be
+ * monotonically nondecreasing. This means that if a variable compensation
+ * offset is used to translate from some other clock to the sched_clock, then
+ * care must be taken when increasing that offset, and doing so may result in
+ * multiple events with the same timestamp.
+ *
+ * The next_ctx_id argument identifies the next context that was running on
+ * the GPU hardware block. A value of 0 indicates that the hardware block
+ * will be idle.
+ *
+ * The next_prio argument indicates the priority of the next context at the
+ * time of the event. The exact numeric values may mean different things for
+ * different GPUs, but they should follow the rule that lower values indicate a
+ * higher priority.
+ *
+ * The next_job_id argument identifies the batch of work that the GPU will be
+ * working on. This should correspond to a job_id that was previously traced
+ * as a gpu_job_enqueue event when the batch of work was created.
+ */
+TRACE_EVENT(gpu_sched_switch,
+
+ TP_PROTO(const char *gpu_name, u64 timestamp,
+ u32 next_ctx_id, s32 next_prio, u32 next_job_id),
+
+ TP_ARGS(gpu_name, timestamp, next_ctx_id, next_prio, next_job_id),
+
+ TP_STRUCT__entry(
+ __string( gpu_name, gpu_name )
+ __field( u64, timestamp )
+ __field( u32, next_ctx_id )
+ __field( s32, next_prio )
+ __field( u32, next_job_id )
+ ),
+
+ TP_fast_assign(
+ __assign_str(gpu_name, gpu_name);
+ __entry->timestamp = timestamp;
+ __entry->next_ctx_id = next_ctx_id;
+ __entry->next_prio = next_prio;
+ __entry->next_job_id = next_job_id;
+ ),
+
+ TP_printk("gpu_name=%s ts=%llu.%06lu next_ctx_id=%lu next_prio=%ld "
+ "next_job_id=%lu",
+ __get_str(gpu_name),
+ (unsigned long long)show_secs_from_ns(__entry->timestamp),
+ (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ (unsigned long)__entry->next_ctx_id,
+ (long)__entry->next_prio,
+ (unsigned long)__entry->next_job_id)
+);
+
+/*
+ * The gpu_job_enqueue event indicates that a batch of work has been queued up
+ * to be processed by the GPU. This event is not intended to indicate that
+ * the batch of work has been submitted to the GPU hardware, but rather that
+ * it has been submitted to the GPU kernel driver.
+ *
+ * This event should be traced on the thread that initiated the work being
+ * queued. For example, if a batch of work is submitted to the kernel by a
+ * userland thread, the event should be traced on that thread.
+ *
+ * The ctx_id field identifies the GPU context in which the batch of work
+ * being queued is to be run.
+ *
+ * The job_id field identifies the batch of work being queued within the given
+ * GPU context. The first batch of work submitted for a given GPU context
+ * should have a job_id of 0, and each subsequent batch of work should
+ * increment the job_id by 1.
+ *
+ * The type field identifies the type of the job being enqueued. The job
+ * types may be different for different GPU hardware. For example, a GPU may
+ * differentiate between "2D", "3D", and "compute" jobs.
+ */
+TRACE_EVENT(gpu_job_enqueue,
+
+ TP_PROTO(u32 ctx_id, u32 job_id, const char *type),
+
+ TP_ARGS(ctx_id, job_id, type),
+
+ TP_STRUCT__entry(
+ __field( u32, ctx_id )
+ __field( u32, job_id )
+ __string( type, type )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx_id = ctx_id;
+ __entry->job_id = job_id;
+ __assign_str(type, type);
+ ),
+
+ TP_printk("ctx_id=%lu job_id=%lu type=%s",
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->job_id,
+ __get_str(type))
+);
+
+#undef show_secs_from_ns
+#undef show_usecs_from_ns
+
+#endif /* _TRACE_GPU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
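
Putting the two GPU events together: per the comments above, a driver traces a job once at submission (gpu_job_enqueue) and again when the hardware switches to it (gpu_sched_switch), reusing the same ctx_id/job_id pair so the two records can be correlated. A hedged sketch with a hypothetical per-context counter:

	#include <trace/events/gpu.h>

	struct example_gpu_ctx {
		u32 id;
		u32 next_job_id;	/* starts at 0, +1 per submission */
	};

	static u32 example_enqueue(struct example_gpu_ctx *ctx, const char *type)
	{
		u32 job_id = ctx->next_job_id++;

		trace_gpu_job_enqueue(ctx->id, job_id, type);
		return job_id;
	}

	static void example_switch_to(struct example_gpu_ctx *ctx, u32 job_id,
				      u64 gpu_timestamp_ns)
	{
		/* priority 2 is arbitrary; lower values mean higher priority */
		trace_gpu_sched_switch("3D", gpu_timestamp_ns, ctx->id, 2, job_id);
	}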
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
new file mode 100644
index 000000000000..82b368dbcefc
--- /dev/null
+++ b/include/trace/events/mmc.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mmc
+
+#if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MMC_H
+
+#include <linux/tracepoint.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/core.h>
+
+/*
+ * Unconditional logging of mmc block erase operations,
+ * including cmd, address, size
+ */
+DECLARE_EVENT_CLASS(mmc_blk_erase_class,
+ TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+ TP_ARGS(cmd, addr, size),
+ TP_STRUCT__entry(
+ __field(unsigned int, cmd)
+ __field(unsigned int, addr)
+ __field(unsigned int, size)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->addr = addr;
+ __entry->size = size;
+ ),
+ TP_printk("cmd=%u,addr=0x%08x,size=0x%08x",
+ __entry->cmd, __entry->addr, __entry->size)
+);
+
+DEFINE_EVENT(mmc_blk_erase_class, mmc_blk_erase_start,
+ TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+ TP_ARGS(cmd, addr, size));
+
+DEFINE_EVENT(mmc_blk_erase_class, mmc_blk_erase_end,
+ TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+ TP_ARGS(cmd, addr, size));
+
+/*
+ * Logging of start of read or write mmc block operation,
+ * including cmd, address, size
+ */
+DECLARE_EVENT_CLASS(mmc_blk_rw_class,
+ TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+ TP_ARGS(cmd, addr, data),
+ TP_STRUCT__entry(
+ __field(unsigned int, cmd)
+ __field(unsigned int, addr)
+ __field(unsigned int, size)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->addr = addr;
+ __entry->size = data->blocks;
+ ),
+ TP_printk("cmd=%u,addr=0x%08x,size=0x%08x",
+ __entry->cmd, __entry->addr, __entry->size)
+);
+
+DEFINE_EVENT_CONDITION(mmc_blk_rw_class, mmc_blk_rw_start,
+ TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+ TP_ARGS(cmd, addr, data),
+ TP_CONDITION(((cmd == MMC_READ_MULTIPLE_BLOCK) ||
+ (cmd == MMC_WRITE_MULTIPLE_BLOCK)) &&
+ data));
+
+DEFINE_EVENT_CONDITION(mmc_blk_rw_class, mmc_blk_rw_end,
+ TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+ TP_ARGS(cmd, addr, data),
+ TP_CONDITION(((cmd == MMC_READ_MULTIPLE_BLOCK) ||
+ (cmd == MMC_WRITE_MULTIPLE_BLOCK)) &&
+ data));
+#endif /* _TRACE_MMC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
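
Because of the TP_CONDITION above, a host driver can call the rw tracepoints unconditionally; the condition quietly drops everything except multi-block reads/writes that carry data. A sketch of such a call site (the request handling is illustrative):

	#include <trace/events/mmc.h>

	static void example_issue_rw(struct mmc_command *cmd, struct mmc_data *data)
	{
		/* only fires for MMC_{READ,WRITE}_MULTIPLE_BLOCK with data */
		trace_mmc_blk_rw_start(cmd->opcode, cmd->arg, data);
		/* ... issue the request and wait for completion ... */
		trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, data);
	}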
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 284244ebfe8d..8924cc2b4ca8 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -120,6 +120,38 @@ DEFINE_EVENT(cpu, cpu_frequency,
TP_ARGS(frequency, cpu_id)
);
+TRACE_EVENT(cpu_frequency_limits,
+
+ TP_PROTO(unsigned int max_freq, unsigned int min_freq,
+ unsigned int cpu_id),
+
+ TP_ARGS(max_freq, min_freq, cpu_id),
+
+ TP_STRUCT__entry(
+ __field( u32, min_freq )
+ __field( u32, max_freq )
+ __field( u32, cpu_id )
+ ),
+
+ TP_fast_assign(
+ __entry->min_freq = min_freq;
+ __entry->max_freq = max_freq;
+ __entry->cpu_id = cpu_id;
+ ),
+
+ TP_printk("min=%lu max=%lu cpu_id=%lu",
+ (unsigned long)__entry->min_freq,
+ (unsigned long)__entry->max_freq,
+ (unsigned long)__entry->cpu_id)
+);
+
+DEFINE_EVENT(cpu, cpu_capacity,
+
+ TP_PROTO(unsigned int capacity, unsigned int cpu_id),
+
+ TP_ARGS(capacity, cpu_id)
+);
+
TRACE_EVENT(device_pm_callback_start,
TP_PROTO(struct device *dev, const char *pm_ops, int event),
@@ -273,6 +305,25 @@ DEFINE_EVENT(clock, clock_set_rate,
TP_ARGS(name, state, cpu_id)
);
+TRACE_EVENT(clock_set_parent,
+
+ TP_PROTO(const char *name, const char *parent_name),
+
+ TP_ARGS(name, parent_name),
+
+ TP_STRUCT__entry(
+ __string( name, name )
+ __string( parent_name, parent_name )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __assign_str(parent_name, parent_name);
+ ),
+
+ TP_printk("%s parent=%s", __get_str(name), __get_str(parent_name))
+);
+
/*
* The power domain events are used for power domains transitions
*/
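
A plausible emitter for the new cpu_frequency_limits event is wherever a policy's min/max limits are recomputed; the wrapper below is an assumption, only the tracepoint comes from this patch. The clock_set_parent event would similarly be called from a clock implementation's re-parent path, e.g. trace_clock_set_parent("gpu_clk", "pll2").

	#include <trace/events/power.h>

	static void example_limits_changed(struct cpufreq_policy *policy)
	{
		trace_cpu_frequency_limits(policy->max, policy->min,
					   policy->cpu);
	}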
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9b90c57517a9..dffaffab4bc8 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -191,6 +191,31 @@ TRACE_EVENT(sched_migrate_task,
__entry->orig_cpu, __entry->dest_cpu)
);
+/*
+ * Tracepoint for a CPU going offline/online:
+ */
+TRACE_EVENT(sched_cpu_hotplug,
+
+ TP_PROTO(int affected_cpu, int error, int status),
+
+ TP_ARGS(affected_cpu, error, status),
+
+ TP_STRUCT__entry(
+ __field( int, affected_cpu )
+ __field( int, error )
+ __field( int, status )
+ ),
+
+ TP_fast_assign(
+ __entry->affected_cpu = affected_cpu;
+ __entry->error = error;
+ __entry->status = status;
+ ),
+
+ TP_printk("cpu %d %s error=%d", __entry->affected_cpu,
+ __entry->status ? "online" : "offline", __entry->error)
+);
+
DECLARE_EVENT_CLASS(sched_process_template,
TP_PROTO(struct task_struct *p),
@@ -219,7 +244,7 @@ DECLARE_EVENT_CLASS(sched_process_template,
DEFINE_EVENT(sched_process_template, sched_process_free,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
-
+
/*
* Tracepoint for a task exiting:
@@ -374,6 +399,30 @@ DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
TP_ARGS(tsk, delay));
/*
+ * Tracepoint for recording the cause of uninterruptible sleep.
+ */
+TRACE_EVENT(sched_blocked_reason,
+
+ TP_PROTO(struct task_struct *tsk),
+
+ TP_ARGS(tsk),
+
+ TP_STRUCT__entry(
+ __field( pid_t, pid )
+		__field( void *, caller )
+ __field( bool, io_wait )
+ ),
+
+ TP_fast_assign(
+ __entry->pid = tsk->pid;
+		__entry->caller = (void *)get_wchan(tsk);
+ __entry->io_wait = tsk->in_iowait;
+ ),
+
+ TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
+);
+
+/*
* Tracepoint for accounting runtime (time the task is executing
* on a CPU).
*/
@@ -562,6 +611,504 @@ TRACE_EVENT(sched_wake_idle_without_ipi,
TP_printk("cpu=%d", __entry->cpu)
);
+
+TRACE_EVENT(sched_contrib_scale_f,
+
+ TP_PROTO(int cpu, unsigned long freq_scale_factor,
+ unsigned long cpu_scale_factor),
+
+ TP_ARGS(cpu, freq_scale_factor, cpu_scale_factor),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(unsigned long, freq_scale_factor)
+ __field(unsigned long, cpu_scale_factor)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->freq_scale_factor = freq_scale_factor;
+ __entry->cpu_scale_factor = cpu_scale_factor;
+ ),
+
+ TP_printk("cpu=%d freq_scale_factor=%lu cpu_scale_factor=%lu",
+ __entry->cpu, __entry->freq_scale_factor,
+ __entry->cpu_scale_factor)
+);
+
+#ifdef CONFIG_SMP
+
+/*
+ * Tracepoint for accounting sched averages for tasks.
+ */
+TRACE_EVENT(sched_load_avg_task,
+
+ TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+
+ TP_ARGS(tsk, avg),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ __field( unsigned long, load_avg )
+ __field( unsigned long, util_avg )
+ __field( u64, load_sum )
+ __field( u32, util_sum )
+ __field( u32, period_contrib )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->cpu = task_cpu(tsk);
+ __entry->load_avg = avg->load_avg;
+ __entry->util_avg = avg->util_avg;
+ __entry->load_sum = avg->load_sum;
+ __entry->util_sum = avg->util_sum;
+ __entry->period_contrib = avg->period_contrib;
+ ),
+
+ TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu load_sum=%llu"
+ " util_sum=%u period_contrib=%u",
+ __entry->comm,
+ __entry->pid,
+ __entry->cpu,
+ __entry->load_avg,
+ __entry->util_avg,
+ (u64)__entry->load_sum,
+ (u32)__entry->util_sum,
+ (u32)__entry->period_contrib)
+);
+
+/*
+ * Tracepoint for accounting sched averages for cpus.
+ */
+TRACE_EVENT(sched_load_avg_cpu,
+
+ TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
+
+ TP_ARGS(cpu, cfs_rq),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __field( unsigned long, load_avg )
+ __field( unsigned long, util_avg )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->load_avg = cfs_rq->avg.load_avg;
+ __entry->util_avg = cfs_rq->avg.util_avg;
+ ),
+
+ TP_printk("cpu=%d load_avg=%lu util_avg=%lu",
+ __entry->cpu, __entry->load_avg, __entry->util_avg)
+);
+
+/*
+ * Tracepoint for sched_tune_config settings
+ */
+TRACE_EVENT(sched_tune_config,
+
+ TP_PROTO(int boost),
+
+ TP_ARGS(boost),
+
+ TP_STRUCT__entry(
+ __field( int, boost )
+ ),
+
+ TP_fast_assign(
+ __entry->boost = boost;
+ ),
+
+	TP_printk("boost=%d", __entry->boost)
+);
+
+/*
+ * Tracepoint for accounting CPU boosted utilization
+ */
+TRACE_EVENT(sched_boost_cpu,
+
+ TP_PROTO(int cpu, unsigned long util, long margin),
+
+ TP_ARGS(cpu, util, margin),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __field( unsigned long, util )
+ __field(long, margin )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->util = util;
+ __entry->margin = margin;
+ ),
+
+ TP_printk("cpu=%d util=%lu margin=%ld",
+ __entry->cpu,
+ __entry->util,
+ __entry->margin)
+);
+
+/*
+ * Tracepoint for schedtune_tasks_update
+ */
+TRACE_EVENT(sched_tune_tasks_update,
+
+ TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
+ int boost, int max_boost),
+
+ TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, cpu )
+ __field( int, tasks )
+ __field( int, idx )
+ __field( int, boost )
+ __field( int, max_boost )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->cpu = cpu;
+ __entry->tasks = tasks;
+ __entry->idx = idx;
+ __entry->boost = boost;
+ __entry->max_boost = max_boost;
+ ),
+
+ TP_printk("pid=%d comm=%s "
+ "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d",
+ __entry->pid, __entry->comm,
+ __entry->cpu, __entry->tasks, __entry->idx,
+ __entry->boost, __entry->max_boost)
+);
+
+/*
+ * Tracepoint for schedtune_boostgroup_update
+ */
+TRACE_EVENT(sched_tune_boostgroup_update,
+
+ TP_PROTO(int cpu, int variation, int max_boost),
+
+ TP_ARGS(cpu, variation, max_boost),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __field( int, variation )
+ __field( int, max_boost )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->variation = variation;
+ __entry->max_boost = max_boost;
+ ),
+
+ TP_printk("cpu=%d variation=%d max_boost=%d",
+ __entry->cpu, __entry->variation, __entry->max_boost)
+);
+
+/*
+ * Tracepoint for accounting task boosted utilization
+ */
+TRACE_EVENT(sched_boost_task,
+
+ TP_PROTO(struct task_struct *tsk, unsigned long util, long margin),
+
+ TP_ARGS(tsk, util, margin),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( unsigned long, util )
+ __field( long, margin )
+
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->util = util;
+ __entry->margin = margin;
+ ),
+
+ TP_printk("comm=%s pid=%d util=%lu margin=%ld",
+ __entry->comm, __entry->pid,
+ __entry->util,
+ __entry->margin)
+);
+
+/*
+ * Tracepoint for accounting sched group energy
+ */
+TRACE_EVENT(sched_energy_diff,
+
+ TP_PROTO(struct task_struct *tsk, int scpu, int dcpu, int udelta,
+ int nrgb, int nrga, int nrgd, int capb, int capa, int capd,
+ int nrgn, int nrgp),
+
+ TP_ARGS(tsk, scpu, dcpu, udelta,
+ nrgb, nrga, nrgd, capb, capa, capd,
+ nrgn, nrgp),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, scpu )
+ __field( int, dcpu )
+ __field( int, udelta )
+ __field( int, nrgb )
+ __field( int, nrga )
+ __field( int, nrgd )
+ __field( int, capb )
+ __field( int, capa )
+ __field( int, capd )
+ __field( int, nrgn )
+ __field( int, nrgp )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->scpu = scpu;
+ __entry->dcpu = dcpu;
+ __entry->udelta = udelta;
+ __entry->nrgb = nrgb;
+ __entry->nrga = nrga;
+ __entry->nrgd = nrgd;
+ __entry->capb = capb;
+ __entry->capa = capa;
+ __entry->capd = capd;
+ __entry->nrgn = nrgn;
+ __entry->nrgp = nrgp;
+ ),
+
+ TP_printk("pid=%d comm=%s "
+ "src_cpu=%d dst_cpu=%d usage_delta=%d "
+ "nrg_before=%d nrg_after=%d nrg_diff=%d "
+ "cap_before=%d cap_after=%d cap_delta=%d "
+ "nrg_delta=%d nrg_payoff=%d",
+ __entry->pid, __entry->comm,
+ __entry->scpu, __entry->dcpu, __entry->udelta,
+ __entry->nrgb, __entry->nrga, __entry->nrgd,
+ __entry->capb, __entry->capa, __entry->capd,
+ __entry->nrgn, __entry->nrgp)
+);
+
+/*
+ * Tracepoint for schedtune_tasks_update
+ */
+TRACE_EVENT(sched_tune_filter,
+
+ TP_PROTO(int nrg_delta, int cap_delta,
+ int nrg_gain, int cap_gain,
+ int payoff, int region),
+
+ TP_ARGS(nrg_delta, cap_delta, nrg_gain, cap_gain, payoff, region),
+
+ TP_STRUCT__entry(
+ __field( int, nrg_delta )
+ __field( int, cap_delta )
+ __field( int, nrg_gain )
+ __field( int, cap_gain )
+ __field( int, payoff )
+ __field( int, region )
+ ),
+
+ TP_fast_assign(
+ __entry->nrg_delta = nrg_delta;
+ __entry->cap_delta = cap_delta;
+ __entry->nrg_gain = nrg_gain;
+ __entry->cap_gain = cap_gain;
+ __entry->payoff = payoff;
+ __entry->region = region;
+ ),
+
+ TP_printk("nrg_delta=%d cap_delta=%d nrg_gain=%d cap_gain=%d payoff=%d region=%d",
+ __entry->nrg_delta, __entry->cap_delta,
+ __entry->nrg_gain, __entry->cap_gain,
+ __entry->payoff, __entry->region)
+);
+
+/*
+ * Tracepoint for system overutilized flag
+ */
+TRACE_EVENT(sched_overutilized,
+
+ TP_PROTO(bool overutilized),
+
+ TP_ARGS(overutilized),
+
+ TP_STRUCT__entry(
+ __field( bool, overutilized )
+ ),
+
+ TP_fast_assign(
+ __entry->overutilized = overutilized;
+ ),
+
+ TP_printk("overutilized=%d",
+ __entry->overutilized ? 1 : 0)
+);
+#ifdef CONFIG_SCHED_WALT
+struct rq;
+
+TRACE_EVENT(walt_update_task_ravg,
+
+ TP_PROTO(struct task_struct *p, struct rq *rq, int evt,
+ u64 wallclock, u64 irqtime),
+
+ TP_ARGS(p, rq, evt, wallclock, irqtime),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( pid_t, cur_pid )
+ __field(unsigned int, cur_freq )
+ __field( u64, wallclock )
+ __field( u64, mark_start )
+ __field( u64, delta_m )
+ __field( u64, win_start )
+ __field( u64, delta )
+ __field( u64, irqtime )
+ __field( int, evt )
+ __field(unsigned int, demand )
+ __field(unsigned int, sum )
+ __field( int, cpu )
+ __field( u64, cs )
+ __field( u64, ps )
+ __field( u32, curr_window )
+ __field( u32, prev_window )
+ __field( u64, nt_cs )
+ __field( u64, nt_ps )
+ __field( u32, active_windows )
+ ),
+
+ TP_fast_assign(
+ __entry->wallclock = wallclock;
+ __entry->win_start = rq->window_start;
+ __entry->delta = (wallclock - rq->window_start);
+ __entry->evt = evt;
+ __entry->cpu = rq->cpu;
+ __entry->cur_pid = rq->curr->pid;
+ __entry->cur_freq = rq->cur_freq;
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->mark_start = p->ravg.mark_start;
+ __entry->delta_m = (wallclock - p->ravg.mark_start);
+ __entry->demand = p->ravg.demand;
+ __entry->sum = p->ravg.sum;
+ __entry->irqtime = irqtime;
+ __entry->cs = rq->curr_runnable_sum;
+ __entry->ps = rq->prev_runnable_sum;
+ __entry->curr_window = p->ravg.curr_window;
+ __entry->prev_window = p->ravg.prev_window;
+ __entry->nt_cs = rq->nt_curr_runnable_sum;
+ __entry->nt_ps = rq->nt_prev_runnable_sum;
+ __entry->active_windows = p->ravg.active_windows;
+ ),
+
+ TP_printk("wc %llu ws %llu delta %llu event %d cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu"
+ " cs %llu ps %llu cur_window %u prev_window %u nt_cs %llu nt_ps %llu active_wins %u"
+ , __entry->wallclock, __entry->win_start, __entry->delta,
+ __entry->evt, __entry->cpu,
+ __entry->cur_freq, __entry->cur_pid,
+ __entry->pid, __entry->comm, __entry->mark_start,
+ __entry->delta_m, __entry->demand,
+ __entry->sum, __entry->irqtime,
+ __entry->cs, __entry->ps,
+ __entry->curr_window, __entry->prev_window,
+ __entry->nt_cs, __entry->nt_ps,
+ __entry->active_windows
+ )
+);
+
+TRACE_EVENT(walt_update_history,
+
+ TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
+ int evt),
+
+ TP_ARGS(rq, p, runtime, samples, evt),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field(unsigned int, runtime )
+ __field( int, samples )
+ __field( int, evt )
+ __field( u64, demand )
+		__field( u64, walt_avg )
+ __field(unsigned int, pelt_avg )
+ __array( u32, hist, RAVG_HIST_SIZE_MAX)
+ __field( int, cpu )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->runtime = runtime;
+ __entry->samples = samples;
+ __entry->evt = evt;
+ __entry->demand = p->ravg.demand;
+ __entry->walt_avg = (__entry->demand << 10);
+ do_div(__entry->walt_avg, walt_ravg_window);
+ __entry->pelt_avg = p->se.avg.util_avg;
+ memcpy(__entry->hist, p->ravg.sum_history,
+ RAVG_HIST_SIZE_MAX * sizeof(u32));
+ __entry->cpu = rq->cpu;
+ ),
+
+ TP_printk("%d (%s): runtime %u samples %d event %d demand %llu"
+		" walt %llu pelt %u (hist: %u %u %u %u %u) cpu %d",
+ __entry->pid, __entry->comm,
+ __entry->runtime, __entry->samples, __entry->evt,
+ __entry->demand,
+ __entry->walt_avg,
+ __entry->pelt_avg,
+ __entry->hist[0], __entry->hist[1],
+ __entry->hist[2], __entry->hist[3],
+ __entry->hist[4], __entry->cpu)
+);
+
+TRACE_EVENT(walt_migration_update_sum,
+
+ TP_PROTO(struct rq *rq, struct task_struct *p),
+
+ TP_ARGS(rq, p),
+
+ TP_STRUCT__entry(
+ __field(int, cpu )
+ __field(int, pid )
+ __field( u64, cs )
+ __field( u64, ps )
+ __field( s64, nt_cs )
+ __field( s64, nt_ps )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu_of(rq);
+ __entry->cs = rq->curr_runnable_sum;
+ __entry->ps = rq->prev_runnable_sum;
+ __entry->nt_cs = (s64)rq->nt_curr_runnable_sum;
+ __entry->nt_ps = (s64)rq->nt_prev_runnable_sum;
+ __entry->pid = p->pid;
+ ),
+
+ TP_printk("cpu %d: cs %llu ps %llu nt_cs %lld nt_ps %lld pid %d",
+ __entry->cpu, __entry->cs, __entry->ps,
+ __entry->nt_cs, __entry->nt_ps, __entry->pid)
+);
+#endif /* CONFIG_SCHED_WALT */
+
+#endif /* CONFIG_SMP */
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
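
Besides being consumed through tracefs, each TRACE_EVENT() above also generates a register_trace_<name>() hook. As a hedged sketch (assuming CONFIG_SMP, since sched_overutilized is defined under it), a module could attach a probe like this:

	#include <linux/module.h>
	#include <linux/tracepoint.h>
	#include <trace/events/sched.h>

	/* probe signature: void (*)(void *data, <TP_PROTO arguments>) */
	static void probe_overutilized(void *data, bool overutilized)
	{
		pr_info("system overutilized: %d\n", overutilized);
	}

	static int __init example_init(void)
	{
		return register_trace_sched_overutilized(probe_overutilized, NULL);
	}

	static void __exit example_exit(void)
	{
		unregister_trace_sched_overutilized(probe_overutilized, NULL);
		tracepoint_synchronize_unregister();
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");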