Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/trace_clock.h  |  16
-rw-r--r--  include/linux/ftrace_event.h       |  20
-rw-r--r--  include/linux/kernel.h             |   7
-rw-r--r--  include/linux/ring_buffer.h        |   3
-rw-r--r--  include/linux/trace_clock.h        |   2
-rw-r--r--  include/linux/uprobes.h            |  10
-rw-r--r--  include/trace/ftrace.h             |  76
-rw-r--r--  include/trace/syscall.h            |  23
8 files changed, 44 insertions(+), 113 deletions(-)
diff --git a/include/asm-generic/trace_clock.h b/include/asm-generic/trace_clock.h
new file mode 100644
index 00000000000..6726f1bafb5
--- /dev/null
+++ b/include/asm-generic/trace_clock.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_GENERIC_TRACE_CLOCK_H
+#define _ASM_GENERIC_TRACE_CLOCK_H
+/*
+ * Arch-specific trace clocks.
+ */
+
+/*
+ * Additional trace clocks added to the trace_clocks
+ * array in kernel/trace/trace.c
+ * None if the architecture has not defined it.
+ */
+#ifndef ARCH_TRACE_CLOCKS
+# define ARCH_TRACE_CLOCKS
+#endif
+
+#endif /* _ASM_GENERIC_TRACE_CLOCK_H */
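
For illustration, a minimal sketch of how an architecture could hook into this new header: its own asm/trace_clock.h defines ARCH_TRACE_CLOCKS as one or more initializer entries that get appended to the trace_clocks[] array in kernel/trace/trace.c. The architecture name, clock function, and clock string below are placeholders for the sketch, not part of this diff; the entry layout must match the trace_clocks[] element type in trace.c.

/* Hypothetical arch/<arch>/include/asm/trace_clock.h -- names are placeholders */
#ifndef _ASM_MYARCH_TRACE_CLOCK_H
#define _ASM_MYARCH_TRACE_CLOCK_H

#include <linux/types.h>

/* assumed arch-provided cycle-counter clock returning raw (non-ns) counts */
extern u64 notrace trace_clock_myarch_tsc(void);

/*
 * Pasted verbatim into the trace_clocks[] initializer in
 * kernel/trace/trace.c, so the entry must match that array's element type.
 */
#define ARCH_TRACE_CLOCKS \
	{ trace_clock_myarch_tsc, "myarch-tsc" },

#endif /* _ASM_MYARCH_TRACE_CLOCK_H */
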
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 642928cf57b..a3d489531d8 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -86,6 +86,12 @@ struct trace_iterator {
cpumask_var_t started;
};
+enum trace_iter_flags {
+ TRACE_FILE_LAT_FMT = 1,
+ TRACE_FILE_ANNOTATE = 2,
+ TRACE_FILE_TIME_IN_NS = 4,
+};
+
struct trace_event;
@@ -127,13 +133,13 @@ trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc,
- struct pt_regs *regs);
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc);
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc,
+ struct pt_regs *regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event);
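
A rough sketch of the reserve/fill/commit pattern these declarations support, using the renamed trace_buffer_unlock_commit(). MY_EVENT_TYPE and struct my_entry are placeholders invented for the sketch, not from this diff.

/* Illustrative only: MY_EVENT_TYPE and struct my_entry are placeholders. */
static void my_trace_example(void)
{
	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	struct my_entry *entry;
	unsigned long irq_flags;
	int pc = preempt_count();

	local_save_flags(irq_flags);
	event = trace_current_buffer_lock_reserve(&buffer, MY_EVENT_TYPE,
						  sizeof(*entry), irq_flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	/* ... fill in *entry ... */
	trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
}
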
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index dd9900cabf8..d97ed589744 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -527,9 +527,6 @@ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
-static inline __printf(1, 2)
-int trace_printk(const char *fmt, ...);
-
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
@@ -539,8 +536,8 @@ static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
-static inline int
-trace_printk(const char *fmt, ...)
+static inline __printf(1, 2)
+int trace_printk(const char *fmt, ...)
{
return 0;
}
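
The practical effect, sketched on an assumed call site: with __printf(1, 2) now attached to the !CONFIG_TRACING stub definition itself (instead of a stray declaration with no body), the compiler's format checking applies whether or not tracing is built in.

	trace_printk("count=%d\n", count);	/* format-checked as before */
	trace_printk("count=%d\n", "oops");	/* -Wformat warning even when CONFIG_TRACING=n */
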
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 6c8835f74f7..519777e3fa0 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -159,13 +159,14 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
+u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct ring_buffer *buffer);
unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
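
A minimal sketch of how a reader might use the new per-cpu accessor alongside the retyped oldest-event timestamp; the buffer variable and surrounding loop are assumed context, not part of this diff.

	unsigned long dropped = 0;
	u64 oldest_ts;
	int cpu;

	for_each_online_cpu(cpu) {
		dropped += ring_buffer_dropped_events_cpu(buffer, cpu);
		oldest_ts = ring_buffer_oldest_event_ts(buffer, cpu);	/* now returns u64 */
	}
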
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
index 4eb490237d4..d563f37e1a1 100644
--- a/include/linux/trace_clock.h
+++ b/include/linux/trace_clock.h
@@ -12,6 +12,8 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <asm/trace_clock.h>
+
extern u64 notrace trace_clock_local(void);
extern u64 notrace trace_clock(void);
extern u64 notrace trace_clock_global(void);
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 24594571c5a..4f628a6fc5b 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -97,12 +97,12 @@ extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_con
extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_mmap(struct vm_area_struct *vma);
extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void uprobe_start_dup_mmap(void);
+extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
extern void uprobe_copy_process(struct task_struct *t);
extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
-extern void __weak arch_uprobe_enable_step(struct arch_uprobe *arch);
-extern void __weak arch_uprobe_disable_step(struct arch_uprobe *arch);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
@@ -129,6 +129,12 @@ static inline void
uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
}
+static inline void uprobe_start_dup_mmap(void)
+{
+}
+static inline void uprobe_end_dup_mmap(void)
+{
+}
static inline void
uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
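
A rough sketch of the intended call pattern for the new pair: fork's address-space copy is bracketed by uprobe_start_dup_mmap()/uprobe_end_dup_mmap() so uprobe registration can serialize against it, while the empty stubs keep !CONFIG_UPROBES builds unchanged. The dup_mmap()-style caller and its locking below are assumed for illustration, not shown in this diff.

	/* simplified dup_mmap()-style caller (assumed) */
	uprobe_start_dup_mmap();
	down_write(&oldmm->mmap_sem);
	/* ... copy VMAs from oldmm into mm ... */
	uprobe_dup_mmap(oldmm, mm);
	up_write(&oldmm->mmap_sem);
	uprobe_end_dup_mmap();
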
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index a763888a36f..40dc5e8fe34 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -545,8 +545,7 @@ ftrace_raw_event_##call(void *__data, proto) \
{ assign; } \
\
if (!filter_current_check_discard(buffer, event_call, entry, event)) \
- trace_nowake_buffer_unlock_commit(buffer, \
- event, irq_flags, pc); \
+ trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
}
/*
* The ftrace_test_probe is compiled out, it is only here as a build time check
@@ -620,79 +619,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-/*
- * Define the insertion callback to perf events
- *
- * The job is very similar to ftrace_raw_event_<call> except that we don't
- * insert in the ring buffer but in a perf counter.
- *
- * static void ftrace_perf_<call>(proto)
- * {
- * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- * struct ftrace_event_call *event_call = &event_<call>;
- * extern void perf_tp_event(int, u64, u64, void *, int);
- * struct ftrace_raw_##call *entry;
- * struct perf_trace_buf *trace_buf;
- * u64 __addr = 0, __count = 1;
- * unsigned long irq_flags;
- * struct trace_entry *ent;
- * int __entry_size;
- * int __data_size;
- * int __cpu
- * int pc;
- *
- * pc = preempt_count();
- *
- * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- * // Below we want to get the aligned size by taking into account
- * // the u32 field that will later store the buffer size
- * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
- * sizeof(u64));
- * __entry_size -= sizeof(u32);
- *
- * // Protect the non nmi buffer
- * // This also protects the rcu read side
- * local_irq_save(irq_flags);
- * __cpu = smp_processor_id();
- *
- * if (in_nmi())
- * trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
- * else
- * trace_buf = rcu_dereference_sched(perf_trace_buf);
- *
- * if (!trace_buf)
- * goto end;
- *
- * trace_buf = per_cpu_ptr(trace_buf, __cpu);
- *
- * // Avoid recursion from perf that could mess up the buffer
- * if (trace_buf->recursion++)
- * goto end_recursion;
- *
- * raw_data = trace_buf->buf;
- *
- * // Make recursion update visible before entering perf_tp_event
- * // so that we protect from perf recursions.
- *
- * barrier();
- *
- * //zero dead bytes from alignment to avoid stack leak to userspace:
- * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- * entry = (struct ftrace_raw_<call> *)raw_data;
- * ent = &entry->ent;
- * tracing_generic_entry_update(ent, irq_flags, pc);
- * ent->type = event_call->id;
- *
- * <tstruct> <- do some jobs with dynamic arrays
- *
- * <assign> <- affect our values
- *
- * perf_tp_event(event_call->id, __addr, __count, entry,
- * __entry_size); <- submit them to perf counter
- *
- * }
- */
#ifdef CONFIG_PERF_EVENTS
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 31966a4fb8c..84bc4197e73 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -31,27 +31,4 @@ struct syscall_metadata {
struct ftrace_event_call *exit_event;
};
-#ifdef CONFIG_FTRACE_SYSCALLS
-extern unsigned long arch_syscall_addr(int nr);
-extern int init_syscall_trace(struct ftrace_event_call *call);
-
-extern int reg_event_syscall_enter(struct ftrace_event_call *call);
-extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
-extern int reg_event_syscall_exit(struct ftrace_event_call *call);
-extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
-extern int
-ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
-enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
- struct trace_event *event);
-enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
- struct trace_event *event);
-#endif
-
-#ifdef CONFIG_PERF_EVENTS
-int perf_sysenter_enable(struct ftrace_event_call *call);
-void perf_sysenter_disable(struct ftrace_event_call *call);
-int perf_sysexit_enable(struct ftrace_event_call *call);
-void perf_sysexit_disable(struct ftrace_event_call *call);
-#endif
-
#endif /* _TRACE_SYSCALL_H */