Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/define_trace.h     |  10
-rw-r--r--  include/trace/events/9p.h        | 176
-rw-r--r--  include/trace/events/asoc.h      |  25
-rw-r--r--  include/trace/events/block.h     |  20
-rw-r--r--  include/trace/events/ext4.h      | 480
-rw-r--r--  include/trace/events/module.h    |   2
-rw-r--r--  include/trace/events/rcu.h       | 459
-rw-r--r--  include/trace/events/regmap.h    | 136
-rw-r--r--  include/trace/events/rpm.h       |  99
-rw-r--r--  include/trace/events/sched.h     |   9
-rw-r--r--  include/trace/events/vmscan.h    |   8
-rw-r--r--  include/trace/events/writeback.h | 171
-rw-r--r--  include/trace/ftrace.h           |   3
13 files changed, 1528 insertions, 70 deletions
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index da39b22636f..b0b4eb24d59 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -21,16 +21,6 @@
#undef CREATE_TRACE_POINTS
#include <linux/stringify.h>
-/*
- * module.h includes tracepoints, and because ftrace.h
- * pulls in module.h:
- * trace/ftrace.h -> linux/ftrace_event.h -> linux/perf_event.h ->
- * linux/ftrace.h -> linux/module.h
- * we must include module.h here before we play with any of
- * the TRACE_EVENT() macros, otherwise the tracepoints included
- * by module.h may break the build.
- */
-#include <linux/module.h>
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
new file mode 100644
index 00000000000..beeaed8398e
--- /dev/null
+++ b/include/trace/events/9p.h
@@ -0,0 +1,176 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM 9p
+
+#if !defined(_TRACE_9P_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_9P_H
+
+#include <linux/tracepoint.h>
+
+#define show_9p_op(type) \
+ __print_symbolic(type, \
+ { P9_TLERROR, "P9_TLERROR" }, \
+ { P9_RLERROR, "P9_RLERROR" }, \
+ { P9_TSTATFS, "P9_TSTATFS" }, \
+ { P9_RSTATFS, "P9_RSTATFS" }, \
+ { P9_TLOPEN, "P9_TLOPEN" }, \
+ { P9_RLOPEN, "P9_RLOPEN" }, \
+ { P9_TLCREATE, "P9_TLCREATE" }, \
+ { P9_RLCREATE, "P9_RLCREATE" }, \
+ { P9_TSYMLINK, "P9_TSYMLINK" }, \
+ { P9_RSYMLINK, "P9_RSYMLINK" }, \
+ { P9_TMKNOD, "P9_TMKNOD" }, \
+ { P9_RMKNOD, "P9_RMKNOD" }, \
+ { P9_TRENAME, "P9_TRENAME" }, \
+ { P9_RRENAME, "P9_RRENAME" }, \
+ { P9_TREADLINK, "P9_TREADLINK" }, \
+ { P9_RREADLINK, "P9_RREADLINK" }, \
+ { P9_TGETATTR, "P9_TGETATTR" }, \
+ { P9_RGETATTR, "P9_RGETATTR" }, \
+ { P9_TSETATTR, "P9_TSETATTR" }, \
+ { P9_RSETATTR, "P9_RSETATTR" }, \
+ { P9_TXATTRWALK, "P9_TXATTRWALK" }, \
+ { P9_RXATTRWALK, "P9_RXATTRWALK" }, \
+ { P9_TXATTRCREATE, "P9_TXATTRCREATE" }, \
+ { P9_RXATTRCREATE, "P9_RXATTRCREATE" }, \
+ { P9_TREADDIR, "P9_TREADDIR" }, \
+ { P9_RREADDIR, "P9_RREADDIR" }, \
+ { P9_TFSYNC, "P9_TFSYNC" }, \
+ { P9_RFSYNC, "P9_RFSYNC" }, \
+ { P9_TLOCK, "P9_TLOCK" }, \
+ { P9_RLOCK, "P9_RLOCK" }, \
+ { P9_TGETLOCK, "P9_TGETLOCK" }, \
+ { P9_RGETLOCK, "P9_RGETLOCK" }, \
+ { P9_TLINK, "P9_TLINK" }, \
+ { P9_RLINK, "P9_RLINK" }, \
+ { P9_TMKDIR, "P9_TMKDIR" }, \
+ { P9_RMKDIR, "P9_RMKDIR" }, \
+ { P9_TRENAMEAT, "P9_TRENAMEAT" }, \
+ { P9_RRENAMEAT, "P9_RRENAMEAT" }, \
+ { P9_TUNLINKAT, "P9_TUNLINKAT" }, \
+ { P9_RUNLINKAT, "P9_RUNLINKAT" }, \
+ { P9_TVERSION, "P9_TVERSION" }, \
+ { P9_RVERSION, "P9_RVERSION" }, \
+ { P9_TAUTH, "P9_TAUTH" }, \
+ { P9_RAUTH, "P9_RAUTH" }, \
+ { P9_TATTACH, "P9_TATTACH" }, \
+ { P9_RATTACH, "P9_RATTACH" }, \
+ { P9_TERROR, "P9_TERROR" }, \
+ { P9_RERROR, "P9_RERROR" }, \
+ { P9_TFLUSH, "P9_TFLUSH" }, \
+ { P9_RFLUSH, "P9_RFLUSH" }, \
+ { P9_TWALK, "P9_TWALK" }, \
+ { P9_RWALK, "P9_RWALK" }, \
+ { P9_TOPEN, "P9_TOPEN" }, \
+ { P9_ROPEN, "P9_ROPEN" }, \
+ { P9_TCREATE, "P9_TCREATE" }, \
+ { P9_RCREATE, "P9_RCREATE" }, \
+ { P9_TREAD, "P9_TREAD" }, \
+ { P9_RREAD, "P9_RREAD" }, \
+ { P9_TWRITE, "P9_TWRITE" }, \
+ { P9_RWRITE, "P9_RWRITE" }, \
+ { P9_TCLUNK, "P9_TCLUNK" }, \
+ { P9_RCLUNK, "P9_RCLUNK" }, \
+ { P9_TREMOVE, "P9_TREMOVE" }, \
+ { P9_RREMOVE, "P9_RREMOVE" }, \
+ { P9_TSTAT, "P9_TSTAT" }, \
+ { P9_RSTAT, "P9_RSTAT" }, \
+ { P9_TWSTAT, "P9_TWSTAT" }, \
+ { P9_RWSTAT, "P9_RWSTAT" })
+
+TRACE_EVENT(9p_client_req,
+ TP_PROTO(struct p9_client *clnt, int8_t type, int tag),
+
+ TP_ARGS(clnt, type, tag),
+
+ TP_STRUCT__entry(
+ __field( void *, clnt )
+ __field( __u8, type )
+ __field( __u32, tag )
+ ),
+
+ TP_fast_assign(
+ __entry->clnt = clnt;
+ __entry->type = type;
+ __entry->tag = tag;
+ ),
+
+ TP_printk("client %lu request %s tag %d",
+ (long)__entry->clnt, show_9p_op(__entry->type),
+ __entry->tag)
+ );
+
+TRACE_EVENT(9p_client_res,
+ TP_PROTO(struct p9_client *clnt, int8_t type, int tag, int err),
+
+ TP_ARGS(clnt, type, tag, err),
+
+ TP_STRUCT__entry(
+ __field( void *, clnt )
+ __field( __u8, type )
+ __field( __u32, tag )
+ __field( __u32, err )
+ ),
+
+ TP_fast_assign(
+ __entry->clnt = clnt;
+ __entry->type = type;
+ __entry->tag = tag;
+ __entry->err = err;
+ ),
+
+ TP_printk("client %lu response %s tag %d err %d",
+ (long)__entry->clnt, show_9p_op(__entry->type),
+ __entry->tag, __entry->err)
+);
+
+/* dump 32 bytes of protocol data */
+#define P9_PROTO_DUMP_SZ 32
+TRACE_EVENT(9p_protocol_dump,
+ TP_PROTO(struct p9_client *clnt, struct p9_fcall *pdu),
+
+ TP_ARGS(clnt, pdu),
+
+ TP_STRUCT__entry(
+ __field( void *, clnt )
+ __field( __u8, type )
+ __field( __u16, tag )
+ __array( unsigned char, line, P9_PROTO_DUMP_SZ )
+ ),
+
+ TP_fast_assign(
+ __entry->clnt = clnt;
+ __entry->type = pdu->id;
+ __entry->tag = pdu->tag;
+ memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ);
+ ),
+ TP_printk("clnt %lu %s(tag = %d)\n%.3x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n"
+ "%.3x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n",
+ (long)__entry->clnt, show_9p_op(__entry->type),
+ __entry->tag, 0,
+ __entry->line[0], __entry->line[1],
+ __entry->line[2], __entry->line[3],
+ __entry->line[4], __entry->line[5],
+ __entry->line[6], __entry->line[7],
+ __entry->line[8], __entry->line[9],
+ __entry->line[10], __entry->line[11],
+ __entry->line[12], __entry->line[13],
+ __entry->line[14], __entry->line[15],
+ 16,
+ __entry->line[16], __entry->line[17],
+ __entry->line[18], __entry->line[19],
+ __entry->line[20], __entry->line[21],
+ __entry->line[22], __entry->line[23],
+ __entry->line[24], __entry->line[25],
+ __entry->line[26], __entry->line[27],
+ __entry->line[28], __entry->line[29],
+ __entry->line[30], __entry->line[31])
+ );
+
+#endif /* _TRACE_9P_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
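
The trace_9p_client_req()/trace_9p_client_res() calls generated by the header above are meant to bracket each 9P request/response round trip in the client. A minimal sketch of that usage follows; p9_client_rpc_sketch() and its placement are illustrative assumptions, only the tracepoint names and signatures come from this patch:

/* Exactly one .c file per trace header defines the tracepoints. */
#define CREATE_TRACE_POINTS
#include <trace/events/9p.h>

/*
 * Hypothetical helper: emit the request event before transport I/O and
 * the response event afterwards.  In 9P the reply to a T-message of id
 * 'type' conventionally carries id 'type + 1'.
 */
static void p9_client_rpc_sketch(struct p9_client *clnt, int8_t type, int tag)
{
	trace_9p_client_req(clnt, type, tag);
	/* ... marshal the request, perform transport I/O, wait for reply ... */
	trace_9p_client_res(clnt, type + 1, tag, 0);
}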
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 603f5a0f036..ab26f8aa3c7 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -216,6 +216,31 @@ DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_done,
);
+TRACE_EVENT(snd_soc_dapm_walk_done,
+
+ TP_PROTO(struct snd_soc_card *card),
+
+ TP_ARGS(card),
+
+ TP_STRUCT__entry(
+ __string( name, card->name )
+ __field( int, power_checks )
+ __field( int, path_checks )
+ __field( int, neighbour_checks )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, card->name);
+ __entry->power_checks = card->dapm_stats.power_checks;
+ __entry->path_checks = card->dapm_stats.path_checks;
+ __entry->neighbour_checks = card->dapm_stats.neighbour_checks;
+ ),
+
+ TP_printk("%s: checks %d power, %d path, %d neighbour",
+ __get_str(name), (int)__entry->power_checks,
+ (int)__entry->path_checks, (int)__entry->neighbour_checks)
+);
+
TRACE_EVENT(snd_soc_jack_irq,
TP_PROTO(const char *name),
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index bf366547da2..05c5e61f0a7 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -8,6 +8,8 @@
#include <linux/blkdev.h>
#include <linux/tracepoint.h>
+#define RWBS_LEN 8
+
DECLARE_EVENT_CLASS(block_rq_with_error,
TP_PROTO(struct request_queue *q, struct request *rq),
@@ -19,7 +21,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( int, errors )
- __array( char, rwbs, 6 )
+ __array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
),
@@ -104,7 +106,7 @@ DECLARE_EVENT_CLASS(block_rq,
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( unsigned int, bytes )
- __array( char, rwbs, 6 )
+ __array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
__dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
),
@@ -183,7 +185,7 @@ TRACE_EVENT(block_bio_bounce,
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
- __array( char, rwbs, 6 )
+ __array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
),
@@ -222,7 +224,7 @@ TRACE_EVENT(block_bio_complete,
__field( sector_t, sector )
__field( unsigned, nr_sector )
__field( int, error )
- __array( char, rwbs, 6 )
+ __array( char, rwbs, RWBS_LEN)
),
TP_fast_assign(
@@ -249,7 +251,7 @@ DECLARE_EVENT_CLASS(block_bio,
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
- __array( char, rwbs, 6 )
+ __array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
),
@@ -321,7 +323,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
- __array( char, rwbs, 6 )
+ __array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
),
@@ -456,7 +458,7 @@ TRACE_EVENT(block_split,
__field( dev_t, dev )
__field( sector_t, sector )
__field( sector_t, new_sector )
- __array( char, rwbs, 6 )
+ __array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
),
@@ -498,7 +500,7 @@ TRACE_EVENT(block_bio_remap,
__field( unsigned int, nr_sector )
__field( dev_t, old_dev )
__field( sector_t, old_sector )
- __array( char, rwbs, 6 )
+ __array( char, rwbs, RWBS_LEN)
),
TP_fast_assign(
@@ -542,7 +544,7 @@ TRACE_EVENT(block_rq_remap,
__field( unsigned int, nr_sector )
__field( dev_t, old_dev )
__field( sector_t, old_sector )
- __array( char, rwbs, 6 )
+ __array( char, rwbs, RWBS_LEN)
),
TP_fast_assign(
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index b50a5473624..748ff7cbe55 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -9,9 +9,12 @@
struct ext4_allocation_context;
struct ext4_allocation_request;
+struct ext4_extent;
struct ext4_prealloc_space;
struct ext4_inode_info;
struct mpage_da_data;
+struct ext4_map_blocks;
+struct ext4_extent;
#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
@@ -1032,9 +1035,9 @@ TRACE_EVENT(ext4_forget,
);
TRACE_EVENT(ext4_da_update_reserve_space,
- TP_PROTO(struct inode *inode, int used_blocks),
+ TP_PROTO(struct inode *inode, int used_blocks, int quota_claim),
- TP_ARGS(inode, used_blocks),
+ TP_ARGS(inode, used_blocks, quota_claim),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -1045,6 +1048,7 @@ TRACE_EVENT(ext4_da_update_reserve_space,
__field( int, reserved_data_blocks )
__field( int, reserved_meta_blocks )
__field( int, allocated_meta_blocks )
+ __field( int, quota_claim )
),
TP_fast_assign(
@@ -1053,19 +1057,24 @@ TRACE_EVENT(ext4_da_update_reserve_space,
__entry->mode = inode->i_mode;
__entry->i_blocks = inode->i_blocks;
__entry->used_blocks = used_blocks;
- __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
- __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
- __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
+ __entry->reserved_data_blocks =
+ EXT4_I(inode)->i_reserved_data_blocks;
+ __entry->reserved_meta_blocks =
+ EXT4_I(inode)->i_reserved_meta_blocks;
+ __entry->allocated_meta_blocks =
+ EXT4_I(inode)->i_allocated_meta_blocks;
+ __entry->quota_claim = quota_claim;
),
TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
"reserved_data_blocks %d reserved_meta_blocks %d "
- "allocated_meta_blocks %d",
+ "allocated_meta_blocks %d quota_claim %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->mode, __entry->i_blocks,
__entry->used_blocks, __entry->reserved_data_blocks,
- __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
+ __entry->reserved_meta_blocks, __entry->allocated_meta_blocks,
+ __entry->quota_claim)
);
TRACE_EVENT(ext4_da_reserve_space,
@@ -1386,6 +1395,87 @@ DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
TP_ARGS(inode)
);
+/* 'ux' is the uninitialized extent. */
+TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
+ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+ struct ext4_extent *ux),
+
+ TP_ARGS(inode, map, ux),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, m_lblk )
+ __field( unsigned, m_len )
+ __field( ext4_lblk_t, u_lblk )
+ __field( unsigned, u_len )
+ __field( ext4_fsblk_t, u_pblk )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->m_lblk = map->m_lblk;
+ __entry->m_len = map->m_len;
+ __entry->u_lblk = le32_to_cpu(ux->ee_block);
+ __entry->u_len = ext4_ext_get_actual_len(ux);
+ __entry->u_pblk = ext4_ext_pblock(ux);
+ ),
+
+ TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u u_lblk %u u_len %u "
+ "u_pblk %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->m_lblk, __entry->m_len,
+ __entry->u_lblk, __entry->u_len, __entry->u_pblk)
+);
+
+/*
+ * 'ux' is the uninitialized extent.
+ * 'ix' is the initialized extent to which blocks are transferred.
+ */
+TRACE_EVENT(ext4_ext_convert_to_initialized_fastpath,
+ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+ struct ext4_extent *ux, struct ext4_extent *ix),
+
+ TP_ARGS(inode, map, ux, ix),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, m_lblk )
+ __field( unsigned, m_len )
+ __field( ext4_lblk_t, u_lblk )
+ __field( unsigned, u_len )
+ __field( ext4_fsblk_t, u_pblk )
+ __field( ext4_lblk_t, i_lblk )
+ __field( unsigned, i_len )
+ __field( ext4_fsblk_t, i_pblk )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->m_lblk = map->m_lblk;
+ __entry->m_len = map->m_len;
+ __entry->u_lblk = le32_to_cpu(ux->ee_block);
+ __entry->u_len = ext4_ext_get_actual_len(ux);
+ __entry->u_pblk = ext4_ext_pblock(ux);
+ __entry->i_lblk = le32_to_cpu(ix->ee_block);
+ __entry->i_len = ext4_ext_get_actual_len(ix);
+ __entry->i_pblk = ext4_ext_pblock(ix);
+ ),
+
+ TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u "
+ "u_lblk %u u_len %u u_pblk %llu "
+ "i_lblk %u i_len %u i_pblk %llu ",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->m_lblk, __entry->m_len,
+ __entry->u_lblk, __entry->u_len, __entry->u_pblk,
+ __entry->i_lblk, __entry->i_len, __entry->i_pblk)
+);
+
DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
unsigned int len, unsigned int flags),
@@ -1589,6 +1679,382 @@ DEFINE_EVENT(ext4__trim, ext4_trim_all_free,
TP_ARGS(sb, group, start, len)
);
+TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
+ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+ unsigned int allocated, ext4_fsblk_t newblock),
+
+ TP_ARGS(inode, map, allocated, newblock),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, lblk )
+ __field( ext4_fsblk_t, pblk )
+ __field( unsigned int, len )
+ __field( int, flags )
+ __field( unsigned int, allocated )
+ __field( ext4_fsblk_t, newblk )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->lblk = map->m_lblk;
+ __entry->pblk = map->m_pblk;
+ __entry->len = map->m_len;
+ __entry->flags = map->m_flags;
+ __entry->allocated = allocated;
+ __entry->newblk = newblock;
+ ),
+
+ TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %d"
+ "allocated %d newblock %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
+ __entry->len, __entry->flags,
+ (unsigned int) __entry->allocated,
+ (unsigned long long) __entry->newblk)
+);
+
+TRACE_EVENT(ext4_get_implied_cluster_alloc_exit,
+ TP_PROTO(struct super_block *sb, struct ext4_map_blocks *map, int ret),
+
+ TP_ARGS(sb, map, ret),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, lblk )
+ __field( ext4_fsblk_t, pblk )
+ __field( unsigned int, len )
+ __field( unsigned int, flags )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->lblk = map->m_lblk;
+ __entry->pblk = map->m_pblk;
+ __entry->len = map->m_len;
+ __entry->flags = map->m_flags;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %u ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->lblk, (unsigned long long) __entry->pblk,
+ __entry->len, __entry->flags, __entry->ret)
+);
+
+TRACE_EVENT(ext4_ext_put_in_cache,
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len,
+ ext4_fsblk_t start),
+
+ TP_ARGS(inode, lblk, len, start),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, lblk )
+ __field( unsigned int, len )
+ __field( ext4_fsblk_t, start )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->lblk = lblk;
+ __entry->len = len;
+ __entry->start = start;
+ ),
+
+ TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned) __entry->lblk,
+ __entry->len,
+ (unsigned long long) __entry->start)
+);
+
+TRACE_EVENT(ext4_ext_in_cache,
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, int ret),
+
+ TP_ARGS(inode, lblk, ret),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, lblk )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->lblk = lblk;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d ino %lu lblk %u ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned) __entry->lblk,
+ __entry->ret)
+
+);
+
+TRACE_EVENT(ext4_find_delalloc_range,
+ TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to,
+ int reverse, int found, ext4_lblk_t found_blk),
+
+ TP_ARGS(inode, from, to, reverse, found, found_blk),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, from )
+ __field( ext4_lblk_t, to )
+ __field( int, reverse )
+ __field( int, found )
+ __field( ext4_lblk_t, found_blk )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->from = from;
+ __entry->to = to;
+ __entry->reverse = reverse;
+ __entry->found = found;
+ __entry->found_blk = found_blk;
+ ),
+
+ TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d "
+ "(blk = %u)",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned) __entry->from, (unsigned) __entry->to,
+ __entry->reverse, __entry->found,
+ (unsigned) __entry->found_blk)
+);
+
+TRACE_EVENT(ext4_get_reserved_cluster_alloc,
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len),
+
+ TP_ARGS(inode, lblk, len),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, lblk )
+ __field( unsigned int, len )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->lblk = lblk;
+ __entry->len = len;
+ ),
+
+ TP_printk("dev %d,%d ino %lu lblk %u len %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned) __entry->lblk,
+ __entry->len)
+);
+
+TRACE_EVENT(ext4_ext_show_extent,
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
+ unsigned short len),
+
+ TP_ARGS(inode, lblk, pblk, len),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, lblk )
+ __field( ext4_fsblk_t, pblk )
+ __field( unsigned short, len )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->lblk = lblk;
+ __entry->pblk = pblk;
+ __entry->len = len;
+ ),
+
+ TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned) __entry->lblk,
+ (unsigned long long) __entry->pblk,
+ (unsigned short) __entry->len)
+);
+
+TRACE_EVENT(ext4_remove_blocks,
+ TP_PROTO(struct inode *inode, struct ext4_extent *ex,
+ ext4_lblk_t from, ext4_fsblk_t to,
+ ext4_fsblk_t partial_cluster),
+
+ TP_ARGS(inode, ex, from, to, partial_cluster),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, ee_lblk )
+ __field( ext4_fsblk_t, ee_pblk )
+ __field( unsigned short, ee_len )
+ __field( ext4_lblk_t, from )
+ __field( ext4_lblk_t, to )
+ __field( ext4_fsblk_t, partial )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ee_lblk = le32_to_cpu(ex->ee_block);
+ __entry->ee_pblk = ext4_ext_pblock(ex);
+ __entry->ee_len = ext4_ext_get_actual_len(ex);
+ __entry->from = from;
+ __entry->to = to;
+ __entry->partial = partial_cluster;
+ ),
+
+ TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]"
+ "from %u to %u partial_cluster %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned) __entry->ee_lblk,
+ (unsigned long long) __entry->ee_pblk,
+ (unsigned short) __entry->ee_len,
+ (unsigned) __entry->from,
+ (unsigned) __entry->to,
+ (unsigned) __entry->partial)
+);
+
+TRACE_EVENT(ext4_ext_rm_leaf,
+ TP_PROTO(struct inode *inode, ext4_lblk_t start,
+ struct ext4_extent *ex, ext4_fsblk_t partial_cluster),
+
+ TP_ARGS(inode, start, ex, partial_cluster),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, start )
+ __field( ext4_lblk_t, ee_lblk )
+ __field( ext4_fsblk_t, ee_pblk )
+ __field( short, ee_len )
+ __field( ext4_fsblk_t, partial )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->start = start;
+ __entry->ee_lblk = le32_to_cpu(ex->ee_block);
+ __entry->ee_pblk = ext4_ext_pblock(ex);
+ __entry->ee_len = ext4_ext_get_actual_len(ex);
+ __entry->partial = partial_cluster;
+ ),
+
+ TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]"
+ "partial_cluster %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned) __entry->start,
+ (unsigned) __entry->ee_lblk,
+ (unsigned long long) __entry->ee_pblk,
+ (unsigned short) __entry->ee_len,
+ (unsigned) __entry->partial)
+);
+
+TRACE_EVENT(ext4_ext_rm_idx,
+ TP_PROTO(struct inode *inode, ext4_fsblk_t pblk),
+
+ TP_ARGS(inode, pblk),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_fsblk_t, pblk )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->pblk = pblk;
+ ),
+
+ TP_printk("dev %d,%d ino %lu index_pblk %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned long long) __entry->pblk)
+);
+
+TRACE_EVENT(ext4_ext_remove_space,
+ TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth),
+
+ TP_ARGS(inode, start, depth),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, start )
+ __field( int, depth )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->start = start;
+ __entry->depth = depth;
+ ),
+
+ TP_printk("dev %d,%d ino %lu since %u depth %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned) __entry->start,
+ __entry->depth)
+);
+
+TRACE_EVENT(ext4_ext_remove_space_done,
+ TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth,
+ ext4_lblk_t partial, unsigned short eh_entries),
+
+ TP_ARGS(inode, start, depth, partial, eh_entries),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, start )
+ __field( int, depth )
+ __field( ext4_lblk_t, partial )
+ __field( unsigned short, eh_entries )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->start = start;
+ __entry->depth = depth;
+ __entry->partial = partial;
+ __entry->eh_entries = eh_entries;
+ ),
+
+ TP_printk("dev %d,%d ino %lu since %u depth %d partial %u "
+ "remaining_entries %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned) __entry->start,
+ __entry->depth,
+ (unsigned) __entry->partial,
+ (unsigned short) __entry->eh_entries)
+);
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
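
The extent tracepoints added above are declaration-only; the intended call sites are in the ext4 extent code. The sketch below shows how the remove-space pair would bracket a truncation; ext4_ext_remove_space_sketch() is hypothetical and ext_depth() is assumed to be the usual ext4 helper, only the trace_*() signatures come from this patch:

/*
 * Illustrative only: fire the new removal events around an extent-tree
 * truncation.  The leaf-level events (trace_ext4_ext_rm_leaf(),
 * trace_ext4_remove_blocks()) would be emitted from the tree walk.
 */
static int ext4_ext_remove_space_sketch(struct inode *inode, ext4_lblk_t start)
{
	int depth = ext_depth(inode);	/* assumed ext4 helper */

	trace_ext4_ext_remove_space(inode, start, depth);
	/* ... walk and shrink the extent tree here ... */
	trace_ext4_ext_remove_space_done(inode, start, depth,
					 /* partial_cluster */ 0,
					 /* eh_entries */ 0);
	return 0;
}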
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 21a546d27c0..16193273741 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -1,6 +1,6 @@
/*
* Because linux/module.h has tracepoints in the header, and ftrace.h
- * eventually includes this file, define_trace.h includes linux/module.h
+ * used to include this file, define_trace.h includes linux/module.h
* But we do not want the module.h to override the TRACE_SYSTEM macro
* variable that define_trace.h is processing, so we only set it
* when module events are being processed, which would happen when
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
new file mode 100644
index 00000000000..669fbd62ec2
--- /dev/null
+++ b/include/trace/events/rcu.h
@@ -0,0 +1,459 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rcu
+
+#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RCU_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Tracepoint for start/end markers used for utilization calculations.
+ * By convention, the string is of the following forms:
+ *
+ * "Start <activity>" -- Mark the start of the specified activity,
+ * such as "context switch". Nesting is permitted.
+ * "End <activity>" -- Mark the end of the specified activity.
+ *
+ * An "@" character within "<activity>" is a comment character: Data
+ * reduction scripts will ignore the "@" and the remainder of the line.
+ */
+TRACE_EVENT(rcu_utilization,
+
+ TP_PROTO(char *s),
+
+ TP_ARGS(s),
+
+ TP_STRUCT__entry(
+ __field(char *, s)
+ ),
+
+ TP_fast_assign(
+ __entry->s = s;
+ ),
+
+ TP_printk("%s", __entry->s)
+);
+
+#ifdef CONFIG_RCU_TRACE
+
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
+
+/*
+ * Tracepoint for grace-period events: starting and ending a grace
+ * period ("start" and "end", respectively), a CPU noting the start
+ * of a new grace period or the end of an old grace period ("cpustart"
+ * and "cpuend", respectively), a CPU passing through a quiescent
+ * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
+ * and "cpuofl", respectively), and a CPU being kicked for being too
+ * long in dyntick-idle mode ("kick").
+ */
+TRACE_EVENT(rcu_grace_period,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),
+
+ TP_ARGS(rcuname, gpnum, gpevent),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(char *, gpevent)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->gpevent = gpevent;
+ ),
+
+ TP_printk("%s %lu %s",
+ __entry->rcuname, __entry->gpnum, __entry->gpevent)
+);
+
+/*
+ * Tracepoint for grace-period-initialization events. These are
+ * distinguished by the type of RCU, the new grace-period number, the
+ * rcu_node structure level, the starting and ending CPU covered by the
+ * rcu_node structure, and the mask of CPUs that will be waited for.
+ * All but the type of RCU are extracted from the rcu_node structure.
+ */
+TRACE_EVENT(rcu_grace_period_init,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
+ int grplo, int grphi, unsigned long qsmask),
+
+ TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(u8, level)
+ __field(int, grplo)
+ __field(int, grphi)
+ __field(unsigned long, qsmask)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->level = level;
+ __entry->grplo = grplo;
+ __entry->grphi = grphi;
+ __entry->qsmask = qsmask;
+ ),
+
+ TP_printk("%s %lu %u %d %d %lx",
+ __entry->rcuname, __entry->gpnum, __entry->level,
+ __entry->grplo, __entry->grphi, __entry->qsmask)
+);
+
+/*
+ * Tracepoint for tasks blocking within preemptible-RCU read-side
+ * critical sections. Track the type of RCU (which one day might
+ * include SRCU), the grace-period number that the task is blocking
+ * (the current or the next), and the task's PID.
+ */
+TRACE_EVENT(rcu_preempt_task,
+
+ TP_PROTO(char *rcuname, int pid, unsigned long gpnum),
+
+ TP_ARGS(rcuname, pid, gpnum),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(int, pid)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->pid = pid;
+ ),
+
+ TP_printk("%s %lu %d",
+ __entry->rcuname, __entry->gpnum, __entry->pid)
+);
+
+/*
+ * Tracepoint for tasks that blocked within a given preemptible-RCU
+ * read-side critical section exiting that critical section. Track the
+ * type of RCU (which one day might include SRCU) and the task's PID.
+ */
+TRACE_EVENT(rcu_unlock_preempted_task,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum, int pid),
+
+ TP_ARGS(rcuname, gpnum, pid),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(int, pid)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->pid = pid;
+ ),
+
+ TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid)
+);
+
+/*
+ * Tracepoint for quiescent-state-reporting events. These are
+ * distinguished by the type of RCU, the grace-period number, the
+ * mask of quiescent lower-level entities, the rcu_node structure level,
+ * the starting and ending CPU covered by the rcu_node structure, and
+ * whether there are any blocked tasks blocking the current grace period.
+ * All but the type of RCU are extracted from the rcu_node structure.
+ */
+TRACE_EVENT(rcu_quiescent_state_report,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum,
+ unsigned long mask, unsigned long qsmask,
+ u8 level, int grplo, int grphi, int gp_tasks),
+
+ TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(unsigned long, mask)
+ __field(unsigned long, qsmask)
+ __field(u8, level)
+ __field(int, grplo)
+ __field(int, grphi)
+ __field(u8, gp_tasks)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->mask = mask;
+ __entry->qsmask = qsmask;
+ __entry->level = level;
+ __entry->grplo = grplo;
+ __entry->grphi = grphi;
+ __entry->gp_tasks = gp_tasks;
+ ),
+
+ TP_printk("%s %lu %lx>%lx %u %d %d %u",
+ __entry->rcuname, __entry->gpnum,
+ __entry->mask, __entry->qsmask, __entry->level,
+ __entry->grplo, __entry->grphi, __entry->gp_tasks)
+);
+
+/*
+ * Tracepoint for quiescent states detected by force_quiescent_state().
+ * These trace events include the type of RCU, the grace-period number
+ * that was blocked by the CPU, the CPU itself, and the type of quiescent
+ * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
+ * or "kick" when kicking a CPU that has been in dyntick-idle mode for
+ * too long.
+ */
+TRACE_EVENT(rcu_fqs,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),
+
+ TP_ARGS(rcuname, gpnum, cpu, qsevent),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(int, cpu)
+ __field(char *, qsevent)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->cpu = cpu;
+ __entry->qsevent = qsevent;
+ ),
+
+ TP_printk("%s %lu %d %s",
+ __entry->rcuname, __entry->gpnum,
+ __entry->cpu, __entry->qsevent)
+);
+
+#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) */
+
+/*
+ * Tracepoint for dyntick-idle entry/exit events. These take a string
+ * as argument: "Start" for entering dyntick-idle mode and "End" for
+ * leaving it.
+ */
+TRACE_EVENT(rcu_dyntick,
+
+ TP_PROTO(char *polarity),
+
+ TP_ARGS(polarity),
+
+ TP_STRUCT__entry(
+ __field(char *, polarity)
+ ),
+
+ TP_fast_assign(
+ __entry->polarity = polarity;
+ ),
+
+ TP_printk("%s", __entry->polarity)
+);
+
+/*
+ * Tracepoint for the registration of a single RCU callback function.
+ * The first argument is the type of RCU, the second argument is
+ * a pointer to the RCU callback itself, and the third element is the
+ * new RCU callback queue length for the current CPU.
+ */
+TRACE_EVENT(rcu_callback,
+
+ TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),
+
+ TP_ARGS(rcuname, rhp, qlen),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(void *, rhp)
+ __field(void *, func)
+ __field(long, qlen)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->func = rhp->func;
+ __entry->qlen = qlen;
+ ),
+
+ TP_printk("%s rhp=%p func=%pf %ld",
+ __entry->rcuname, __entry->rhp, __entry->func, __entry->qlen)
+);
+
+/*
+ * Tracepoint for the registration of a single RCU callback of the special
+ * kfree() form. The first argument is the RCU type, the second argument
+ * is a pointer to the RCU callback, the third argument is the offset
+ * of the callback within the enclosing RCU-protected data structure,
+ * and the fourth argument is the new RCU callback queue length for the
+ * current CPU.
+ */
+TRACE_EVENT(rcu_kfree_callback,
+
+ TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
+ long qlen),
+
+ TP_ARGS(rcuname, rhp, offset, qlen),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(void *, rhp)
+ __field(unsigned long, offset)
+ __field(long, qlen)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->offset = offset;
+ __entry->qlen = qlen;
+ ),
+
+ TP_printk("%s rhp=%p func=%ld %ld",
+ __entry->rcuname, __entry->rhp, __entry->offset,
+ __entry->qlen)
+);
+
+/*
+ * Tracepoint for marking the beginning of rcu_do_batch, which starts
+ * RCU callback invocation. The first argument is the RCU flavor,
+ * the second is the total number of callbacks (including those that
+ * are not yet ready to be invoked), and the third argument is the
+ * current RCU-callback batch limit.
+ */
+TRACE_EVENT(rcu_batch_start,
+
+ TP_PROTO(char *rcuname, long qlen, int blimit),
+
+ TP_ARGS(rcuname, qlen, blimit),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(long, qlen)
+ __field(int, blimit)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->qlen = qlen;
+ __entry->blimit = blimit;
+ ),
+
+ TP_printk("%s CBs=%ld bl=%d",
+ __entry->rcuname, __entry->qlen, __entry->blimit)
+);
+
+/*
+ * Tracepoint for the invocation of a single RCU callback function.
+ * The first argument is the type of RCU, and the second argument is
+ * a pointer to the RCU callback itself.
+ */
+TRACE_EVENT(rcu_invoke_callback,
+
+ TP_PROTO(char *rcuname, struct rcu_head *rhp),
+
+ TP_ARGS(rcuname, rhp),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(void *, rhp)
+ __field(void *, func)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->func = rhp->func;
+ ),
+
+ TP_printk("%s rhp=%p func=%pf",
+ __entry->rcuname, __entry->rhp, __entry->func)
+);
+
+/*
+ * Tracepoint for the invocation of a single RCU callback of the special
+ * kfree() form. The first argument is the RCU flavor, the second
+ * argument is a pointer to the RCU callback, and the third argument
+ * is the offset of the callback within the enclosing RCU-protected
+ * data structure.
+ */
+TRACE_EVENT(rcu_invoke_kfree_callback,
+
+ TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
+
+ TP_ARGS(rcuname, rhp, offset),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(void *, rhp)
+ __field(unsigned long, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->offset = offset;
+ ),
+
+ TP_printk("%s rhp=%p func=%ld",
+ __entry->rcuname, __entry->rhp, __entry->offset)
+);
+
+/*
+ * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
+ * invoked. The first argument is the name of the RCU flavor and
+ * the second argument is the number of callbacks actually invoked.
+ */
+TRACE_EVENT(rcu_batch_end,
+
+ TP_PROTO(char *rcuname, int callbacks_invoked),
+
+ TP_ARGS(rcuname, callbacks_invoked),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(int, callbacks_invoked)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->callbacks_invoked = callbacks_invoked;
+ ),
+
+ TP_printk("%s CBs-invoked=%d",
+ __entry->rcuname, __entry->callbacks_invoked)
+);
+
+#else /* #ifdef CONFIG_RCU_TRACE */
+
+#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, qsmask) do { } while (0)
+#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
+#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
+#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
+#define trace_rcu_dyntick(polarity) do { } while (0)
+#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
+#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
+#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
+#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
+#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
+#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
+
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
+
+#endif /* _TRACE_RCU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
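
Because the !CONFIG_RCU_TRACE stubs above expand to empty do/while blocks, the RCU core can call the trace_rcu_*() hooks unconditionally. A minimal sketch of the intended batch-invocation pattern follows; rcu_do_batch_sketch() is illustrative and is not the kernel's real rcu_do_batch(), only the tracepoint signatures come from this patch:

/*
 * Illustrative only: bracket callback invocation with the batch events
 * and emit one event per callback.  With CONFIG_RCU_TRACE=n all three
 * trace calls compile away.
 */
static void rcu_do_batch_sketch(char *rcuname, struct rcu_head *list,
				long qlen, int blimit)
{
	struct rcu_head *rhp;
	int count = 0;

	trace_rcu_batch_start(rcuname, qlen, blimit);
	for (rhp = list; rhp && count < blimit; rhp = rhp->next, count++) {
		trace_rcu_invoke_callback(rcuname, rhp);
		rhp->func(rhp);
	}
	trace_rcu_batch_end(rcuname, count);
}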
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
new file mode 100644
index 00000000000..1e3193b8fcc
--- /dev/null
+++ b/include/trace/events/regmap.h
@@ -0,0 +1,136 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM regmap
+
+#if !defined(_TRACE_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_REGMAP_H
+
+#include <linux/device.h>
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+struct regmap;
+
+/*
+ * Log register events
+ */
+DECLARE_EVENT_CLASS(regmap_reg,
+
+ TP_PROTO(struct device *dev, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(dev, reg, val),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev) )
+ __field( unsigned int, reg )
+ __field( unsigned int, val )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->reg = reg;
+ __entry->val = val;
+ ),
+
+ TP_printk("%s reg=%x val=%x", __get_str(name),
+ (unsigned int)__entry->reg,
+ (unsigned int)__entry->val)
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_write,
+
+ TP_PROTO(struct device *dev, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(dev, reg, val)
+
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_read,
+
+ TP_PROTO(struct device *dev, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(dev, reg, val)
+
+);
+
+DECLARE_EVENT_CLASS(regmap_block,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev) )
+ __field( unsigned int, reg )
+ __field( int, count )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->reg = reg;
+ __entry->count = count;
+ ),
+
+ TP_printk("%s reg=%x count=%d", __get_str(name),
+ (unsigned int)__entry->reg,
+ (int)__entry->count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_read_start,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_read_done,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_start,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_done,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count)
+);
+
+TRACE_EVENT(regcache_sync,
+
+ TP_PROTO(struct device *dev, const char *type,
+ const char *status),
+
+ TP_ARGS(dev, type, status),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev) )
+ __string( status, status )
+ __string( type, type )
+ __field( int, type )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __assign_str(status, status);
+ __assign_str(type, type);
+ ),
+
+ TP_printk("%s type=%s status=%s", __get_str(name),
+ __get_str(type), __get_str(status))
+);
+
+#endif /* _TRACE_REGMAP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
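
The regmap_reg and regmap_block classes above exist so the regmap core can log both the logical register access and the underlying hardware transfer. A sketch of that pairing follows; regmap_write_sketch() is hypothetical, only the trace_*() calls and their arguments come from this header:

/*
 * Illustrative only: log the register write, then bracket the bus
 * transfer with the hw_write start/done events (count == 1 register).
 */
static int regmap_write_sketch(struct device *dev, unsigned int reg,
			       unsigned int val)
{
	trace_regmap_reg_write(dev, reg, val);

	trace_regmap_hw_write_start(dev, reg, 1);
	/* ... push the value out over I2C/SPI/MMIO here ... */
	trace_regmap_hw_write_done(dev, reg, 1);

	return 0;
}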
diff --git a/include/trace/events/rpm.h b/include/trace/events/rpm.h
new file mode 100644
index 00000000000..d62c558bf64
--- /dev/null
+++ b/include/trace/events/rpm.h
@@ -0,0 +1,99 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpm
+
+#if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RUNTIME_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/device.h>
+
+/*
+ * The rpm_internal events are used for tracing some important
+ * runtime pm internal functions.
+ */
+DECLARE_EVENT_CLASS(rpm_internal,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev) )
+ __field( int, flags )
+ __field( int , usage_count )
+ __field( int , disable_depth )
+ __field( int , runtime_auto )
+ __field( int , request_pending )
+ __field( int , irq_safe )
+ __field( int , child_count )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->flags = flags;
+ __entry->usage_count = atomic_read(
+ &dev->power.usage_count);
+ __entry->disable_depth = dev->power.disable_depth;
+ __entry->runtime_auto = dev->power.runtime_auto;
+ __entry->request_pending = dev->power.request_pending;
+ __entry->irq_safe = dev->power.irq_safe;
+ __entry->child_count = atomic_read(
+ &dev->power.child_count);
+ ),
+
+ TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d"
+ " irq-%-1d child-%d",
+ __get_str(name), __entry->flags,
+ __entry->usage_count,
+ __entry->disable_depth,
+ __entry->runtime_auto,
+ __entry->request_pending,
+ __entry->irq_safe,
+ __entry->child_count
+ )
+);
+DEFINE_EVENT(rpm_internal, rpm_suspend,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags)
+);
+DEFINE_EVENT(rpm_internal, rpm_resume,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags)
+);
+DEFINE_EVENT(rpm_internal, rpm_idle,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags)
+);
+
+TRACE_EVENT(rpm_return_int,
+ TP_PROTO(struct device *dev, unsigned long ip, int ret),
+ TP_ARGS(dev, ip, ret),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev))
+ __field( unsigned long, ip )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->ip = ip;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name),
+ __entry->ret)
+);
+
+#endif /* _TRACE_RUNTIME_POWER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
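
The rpm_internal class snapshots the runtime-PM bookkeeping (usage_count, disable_depth, and so on) on entry to a core helper, and rpm_return_int pairs that with the result and the caller's instruction pointer. A sketch of the pairing follows; rpm_idle_sketch() is illustrative, _THIS_IP_ is the kernel's current-IP helper, and only the tracepoint signatures come from this header:

/*
 * Illustrative only: trace entry with the rpm flags, do the real work,
 * then trace the return value for the same device.
 */
static int rpm_idle_sketch(struct device *dev, int rpmflags)
{
	int retval = 0;

	trace_rpm_idle(dev, rpmflags);
	/* ... check usage_count/disable_depth, run the idle callback ... */
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}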
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index f6334782a59..959ff18b63b 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -100,7 +100,7 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
* For all intents and purposes a preempted task is a running task.
*/
if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
- state = TASK_RUNNING;
+ state = TASK_RUNNING | TASK_STATE_MAX;
#endif
return state;
@@ -137,13 +137,14 @@ TRACE_EVENT(sched_switch,
__entry->next_prio = next->prio;
),
- TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
+ TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
- __entry->prev_state ?
- __print_flags(__entry->prev_state, "|",
+ __entry->prev_state & (TASK_STATE_MAX-1) ?
+ __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
{ 16, "Z" }, { 32, "X" }, { 64, "x" },
{ 128, "W" }) : "R",
+ __entry->prev_state & TASK_STATE_MAX ? "+" : "",
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
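
The net effect of the sched_switch change above: a task switched out while preempted keeps TASK_RUNNING in the low bits and gains the TASK_STATE_MAX bit, so the low bits still decode to "R" and the extra bit is rendered as a "+" suffix (prev_state=R+). A small decode sketch, assuming TASK_STATE_MAX as defined in <linux/sched.h> for kernels of this vintage:

/* Illustrative only: mirror the TP_printk() decode of prev_state. */
static void decode_prev_state_sketch(long prev_state)
{
	long state_bits = prev_state & (TASK_STATE_MAX - 1);
	long preempted  = prev_state & TASK_STATE_MAX;

	/* state_bits == 0 prints as "R"; the preempted bit appends "+". */
	pr_info("prev_state=%s%s\n",
		state_bits ? "<sleeping/stopped/zombie>" : "R",
		preempted ? "+" : "");
}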
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 36851f7f13d..edc4b3d25a2 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -266,7 +266,7 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
unsigned long nr_lumpy_taken,
unsigned long nr_lumpy_dirty,
unsigned long nr_lumpy_failed,
- int isolate_mode),
+ isolate_mode_t isolate_mode),
TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode),
@@ -278,7 +278,7 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
__field(unsigned long, nr_lumpy_taken)
__field(unsigned long, nr_lumpy_dirty)
__field(unsigned long, nr_lumpy_failed)
- __field(int, isolate_mode)
+ __field(isolate_mode_t, isolate_mode)
),
TP_fast_assign(
@@ -312,7 +312,7 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
unsigned long nr_lumpy_taken,
unsigned long nr_lumpy_dirty,
unsigned long nr_lumpy_failed,
- int isolate_mode),
+ isolate_mode_t isolate_mode),
TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
@@ -327,7 +327,7 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
unsigned long nr_lumpy_taken,
unsigned long nr_lumpy_dirty,
unsigned long nr_lumpy_failed,
- int isolate_mode),
+ isolate_mode_t isolate_mode),
TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 6bca4cc0063..b99caa8b780 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -34,6 +34,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(int, for_kupdate)
__field(int, range_cyclic)
__field(int, for_background)
+ __field(int, reason)
),
TP_fast_assign(
strncpy(__entry->name, dev_name(bdi->dev), 32);
@@ -43,16 +44,18 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->for_kupdate = work->for_kupdate;
__entry->range_cyclic = work->range_cyclic;
__entry->for_background = work->for_background;
+ __entry->reason = work->reason;
),
TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
- "kupdate=%d range_cyclic=%d background=%d",
+ "kupdate=%d range_cyclic=%d background=%d reason=%s",
__entry->name,
MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
__entry->nr_pages,
__entry->sync_mode,
__entry->for_kupdate,
__entry->range_cyclic,
- __entry->for_background
+ __entry->for_background,
+ wb_reason_name[__entry->reason]
)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -104,30 +107,6 @@ DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
DEFINE_WRITEBACK_EVENT(writeback_thread_start);
DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
-DEFINE_WRITEBACK_EVENT(balance_dirty_start);
-DEFINE_WRITEBACK_EVENT(balance_dirty_wait);
-
-TRACE_EVENT(balance_dirty_written,
-
- TP_PROTO(struct backing_dev_info *bdi, int written),
-
- TP_ARGS(bdi, written),
-
- TP_STRUCT__entry(
- __array(char, name, 32)
- __field(int, written)
- ),
-
- TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
- __entry->written = written;
- ),
-
- TP_printk("bdi %s written %d",
- __entry->name,
- __entry->written
- )
-);
DECLARE_EVENT_CLASS(wbc_class,
TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
@@ -181,27 +160,31 @@ DEFINE_WBC_EVENT(wbc_writepage);
TRACE_EVENT(writeback_queue_io,
TP_PROTO(struct bdi_writeback *wb,
- unsigned long *older_than_this,
+ struct wb_writeback_work *work,
int moved),
- TP_ARGS(wb, older_than_this, moved),
+ TP_ARGS(wb, work, moved),
TP_STRUCT__entry(
__array(char, name, 32)
__field(unsigned long, older)
__field(long, age)
__field(int, moved)
+ __field(int, reason)
),
TP_fast_assign(
+ unsigned long *older_than_this = work->older_than_this;
strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
__entry->older = older_than_this ? *older_than_this : 0;
__entry->age = older_than_this ?
(jiffies - *older_than_this) * 1000 / HZ : -1;
__entry->moved = moved;
+ __entry->reason = work->reason;
),
- TP_printk("bdi %s: older=%lu age=%ld enqueue=%d",
+ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
__entry->name,
__entry->older, /* older_than_this in jiffies */
__entry->age, /* older_than_this in relative milliseconds */
- __entry->moved)
+ __entry->moved,
+ wb_reason_name[__entry->reason])
);
TRACE_EVENT(global_dirty_state,
@@ -250,6 +233,124 @@ TRACE_EVENT(global_dirty_state,
)
);
+#define KBps(x) ((x) << (PAGE_SHIFT - 10))
+
+TRACE_EVENT(bdi_dirty_ratelimit,
+
+ TP_PROTO(struct backing_dev_info *bdi,
+ unsigned long dirty_rate,
+ unsigned long task_ratelimit),
+
+ TP_ARGS(bdi, dirty_rate, task_ratelimit),
+
+ TP_STRUCT__entry(
+ __array(char, bdi, 32)
+ __field(unsigned long, write_bw)
+ __field(unsigned long, avg_write_bw)
+ __field(unsigned long, dirty_rate)
+ __field(unsigned long, dirty_ratelimit)
+ __field(unsigned long, task_ratelimit)
+ __field(unsigned long, balanced_dirty_ratelimit)
+ ),
+
+ TP_fast_assign(
+ strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
+ __entry->write_bw = KBps(bdi->write_bandwidth);
+ __entry->avg_write_bw = KBps(bdi->avg_write_bandwidth);
+ __entry->dirty_rate = KBps(dirty_rate);
+ __entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
+ __entry->task_ratelimit = KBps(task_ratelimit);
+ __entry->balanced_dirty_ratelimit =
+ KBps(bdi->balanced_dirty_ratelimit);
+ ),
+
+ TP_printk("bdi %s: "
+ "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
+ "dirty_ratelimit=%lu task_ratelimit=%lu "
+ "balanced_dirty_ratelimit=%lu",
+ __entry->bdi,
+ __entry->write_bw, /* write bandwidth */
+ __entry->avg_write_bw, /* avg write bandwidth */
+ __entry->dirty_rate, /* bdi dirty rate */
+ __entry->dirty_ratelimit, /* base ratelimit */
+ __entry->task_ratelimit, /* ratelimit with position control */
+ __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
+ )
+);
+
+TRACE_EVENT(balance_dirty_pages,
+
+ TP_PROTO(struct backing_dev_info *bdi,
+ unsigned long thresh,
+ unsigned long bg_thresh,
+ unsigned long dirty,
+ unsigned long bdi_thresh,
+ unsigned long bdi_dirty,
+ unsigned long dirty_ratelimit,
+ unsigned long task_ratelimit,
+ unsigned long dirtied,
+ long pause,
+ unsigned long start_time),
+
+ TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
+ dirty_ratelimit, task_ratelimit,
+ dirtied, pause, start_time),
+
+ TP_STRUCT__entry(
+ __array( char, bdi, 32)
+ __field(unsigned long, limit)
+ __field(unsigned long, setpoint)
+ __field(unsigned long, dirty)
+ __field(unsigned long, bdi_setpoint)
+ __field(unsigned long, bdi_dirty)
+ __field(unsigned long, dirty_ratelimit)
+ __field(unsigned long, task_ratelimit)
+ __field(unsigned int, dirtied)
+ __field(unsigned int, dirtied_pause)
+ __field(unsigned long, paused)
+ __field( long, pause)
+ ),
+
+ TP_fast_assign(
+ unsigned long freerun = (thresh + bg_thresh) / 2;
+ strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
+
+ __entry->limit = global_dirty_limit;
+ __entry->setpoint = (global_dirty_limit + freerun) / 2;
+ __entry->dirty = dirty;
+ __entry->bdi_setpoint = __entry->setpoint *
+ bdi_thresh / (thresh + 1);
+ __entry->bdi_dirty = bdi_dirty;
+ __entry->dirty_ratelimit = KBps(dirty_ratelimit);
+ __entry->task_ratelimit = KBps(task_ratelimit);
+ __entry->dirtied = dirtied;
+ __entry->dirtied_pause = current->nr_dirtied_pause;
+ __entry->pause = pause * 1000 / HZ;
+ __entry->paused = (jiffies - start_time) * 1000 / HZ;
+ ),
+
+
+ TP_printk("bdi %s: "
+ "limit=%lu setpoint=%lu dirty=%lu "
+ "bdi_setpoint=%lu bdi_dirty=%lu "
+ "dirty_ratelimit=%lu task_ratelimit=%lu "
+ "dirtied=%u dirtied_pause=%u "
+ "paused=%lu pause=%ld",
+ __entry->bdi,
+ __entry->limit,
+ __entry->setpoint,
+ __entry->dirty,
+ __entry->bdi_setpoint,
+ __entry->bdi_dirty,
+ __entry->dirty_ratelimit,
+ __entry->task_ratelimit,
+ __entry->dirtied,
+ __entry->dirtied_pause,
+ __entry->paused, /* ms */
+ __entry->pause /* ms */
+ )
+);
+
DECLARE_EVENT_CLASS(writeback_congest_waited_template,
TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
@@ -298,7 +399,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
__array(char, name, 32)
__field(unsigned long, ino)
__field(unsigned long, state)
- __field(unsigned long, age)
+ __field(unsigned long, dirtied_when)
__field(unsigned long, writeback_index)
__field(long, nr_to_write)
__field(unsigned long, wrote)
@@ -309,19 +410,19 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
dev_name(inode->i_mapping->backing_dev_info->dev), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
- __entry->age = (jiffies - inode->dirtied_when) *
- 1000 / HZ;
+ __entry->dirtied_when = inode->dirtied_when;
__entry->writeback_index = inode->i_mapping->writeback_index;
__entry->nr_to_write = nr_to_write;
__entry->wrote = nr_to_write - wbc->nr_to_write;
),
- TP_printk("bdi %s: ino=%lu state=%s age=%lu "
+ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
"index=%lu to_write=%ld wrote=%lu",
__entry->name,
__entry->ino,
show_inode_state(__entry->state),
- __entry->age,
+ __entry->dirtied_when,
+ (jiffies - __entry->dirtied_when) / HZ,
__entry->writeback_index,
__entry->nr_to_write,
__entry->wrote
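
Two bits of arithmetic in the writeback events above are easy to miss: KBps() shifts by PAGE_SHIFT - 10, i.e. converts a page count or pages/second rate to KB (a factor of 4 with 4 KiB pages), and balance_dirty_pages derives its setpoints as midpoints of the dirty thresholds. The sketch below restates both; the function and parameter names are illustrative, only the formulas come from the tracepoints above:

/* Illustrative only: the conversions used by the tracepoints above. */
#define SKETCH_KBps(x)	((x) << (PAGE_SHIFT - 10))	/* pages -> KB */

static void writeback_setpoints_sketch(unsigned long thresh,
				       unsigned long bg_thresh,
				       unsigned long bdi_thresh,
				       unsigned long dirty_ratelimit,
				       unsigned long global_limit)
{
	/* Below freerun, dirtying tasks are not throttled at all. */
	unsigned long freerun = (thresh + bg_thresh) / 2;
	/* Global setpoint: midway between freerun and the hard limit. */
	unsigned long setpoint = (global_limit + freerun) / 2;
	/* Per-bdi setpoint: the global one scaled by this bdi's share. */
	unsigned long bdi_setpoint = setpoint * bdi_thresh / (thresh + 1);

	/* Setpoints are traced in pages; only the rate fields are KBps. */
	pr_info("setpoint=%lu bdi_setpoint=%lu dirty_ratelimit=%lu KB/s\n",
		setpoint, bdi_setpoint, SKETCH_KBps(dirty_ratelimit));
}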
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 533c49f4804..769724944fc 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -711,6 +711,9 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#undef __perf_count
#define __perf_count(c) __count = (c)
+#undef TP_perf_assign
+#define TP_perf_assign(args...) args
+
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \