author     Thomas Gleixner <tglx@linutronix.de>        2012-10-26 18:50:54 +0100
committer  Anders Roxell <anders.roxell@linaro.org>    2015-06-24 21:13:08 +0200
commit     e1854cf6719f007ed942d49d4c85e735258fd571 (patch)
tree       f86223165c21f872f53bd94e93ae607d81fcf8bc
parent     3d60147779800c1d5d7a1f56ad78d92998607fd3 (diff)
sched: Add support for lazy preemption
It has become an obsession to mitigate the determinism vs. throughput loss of RT. Looking at the mainline semantics of preemption points gives a hint why RT sucks throughput-wise for ordinary SCHED_OTHER tasks.

One major issue is the wakeup of tasks which right away preempt the waking task while the waking task holds a lock on which the woken task will block right after having preempted the waker. In mainline this is prevented by the implicit preemption disable of spin/rw_lock held regions. On RT this is not possible due to the fully preemptible nature of sleeping spinlocks.

Though for a SCHED_OTHER task preempting another SCHED_OTHER task this is really not a correctness issue. RT folks are concerned about SCHED_FIFO/RR task preemption and not about the purely fairness-driven SCHED_OTHER preemption latencies.

So I introduced a lazy preemption mechanism which only applies to SCHED_OTHER tasks preempting another SCHED_OTHER task. Aside from the existing preempt_count, each task now sports a preempt_lazy_count which is manipulated on lock acquisition and release. This is slightly incorrect, as for laziness reasons I coupled this to migrate_disable/enable, so some other mechanisms get the same treatment (e.g. get_cpu_light).

Now on the scheduler side, instead of setting NEED_RESCHED this sets NEED_RESCHED_LAZY in case of a SCHED_OTHER/SCHED_OTHER preemption and therefore allows the waking task to exit the lock-held region before the woken task preempts. That also works better for cross-CPU wakeups, as the other side can stay in the adaptive spinning loop.

For RT class preemption there is no change. This simply sets NEED_RESCHED and forgoes the lazy preemption counter.

Initial tests do not expose any observable latency increase, but history shows that I've been proven wrong before :)

The lazy preemption mode is on by default, but with CONFIG_SCHED_DEBUG enabled it can be disabled via:

    # echo NO_PREEMPT_LAZY >/sys/kernel/debug/sched_features

and re-enabled via:

    # echo PREEMPT_LAZY >/sys/kernel/debug/sched_features

The test results so far are very machine- and workload-dependent, but there is a clear trend that it enhances non-RT workload performance.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
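As an illustration of the intended behaviour (not part of the change itself), a minimal sketch of a lock-held region under this scheme is shown below. The helper function is hypothetical; migrate_disable()/migrate_enable(), preempt_lazy_count, resched_curr_lazy() and the TIF flags are the names this patch adds or wires up.

/*
 * Illustrative sketch only -- not part of this patch.  The helper below
 * is hypothetical; the functions it calls are the ones the patch touches.
 */
#include <linux/preempt.h>
#include <linux/sched.h>

static void lazy_preempt_example(void)
{
	migrate_disable();	/* with this patch: also preempt_lazy_disable() */

	/*
	 * A SCHED_OTHER wakeup that would normally preempt us goes through
	 * resched_curr_lazy() and only sets TIF_NEED_RESCHED_LAZY.  Since
	 * preempt_lazy_count is non-zero we keep running and can leave the
	 * critical section first.  An RT (SCHED_FIFO/RR) wakeup still sets
	 * TIF_NEED_RESCHED and preempts immediately, as before.
	 */

	migrate_enable();	/* preempt_lazy_enable() -> preempt_check_resched() */
	/* A pending lazy reschedule, if any, is honoured here. */
}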
-rw-r--r--  arch/x86/include/asm/preempt.h   18
-rw-r--r--  include/linux/ftrace_event.h      1
-rw-r--r--  include/linux/preempt.h          29
-rw-r--r--  include/linux/sched.h            37
-rw-r--r--  include/linux/thread_info.h      12
-rw-r--r--  kernel/Kconfig.preempt            6
-rw-r--r--  kernel/sched/core.c              50
-rw-r--r--  kernel/sched/fair.c              16
-rw-r--r--  kernel/sched/features.h           3
-rw-r--r--  kernel/sched/sched.h              9
-rw-r--r--  kernel/trace/trace.c             41
-rw-r--r--  kernel/trace/trace.h              2
-rw-r--r--  kernel/trace/trace_output.c      13
13 files changed, 206 insertions, 31 deletions
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 400873450e33..060d1b4e475d 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -85,17 +85,33 @@ static __always_inline void __preempt_count_sub(int val)
* a decrement which hits zero means we have no preempt_count and should
* reschedule.
*/
-static __always_inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool ____preempt_count_dec_and_test(void)
{
GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
}
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+ if (____preempt_count_dec_and_test())
+ return true;
+#ifdef CONFIG_PREEMPT_LAZY
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
+ return false;
+#endif
+}
+
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
static __always_inline bool should_resched(void)
{
+#ifdef CONFIG_PREEMPT_LAZY
+ return unlikely(!raw_cpu_read_4(__preempt_count) || \
+ test_thread_flag(TIF_NEED_RESCHED_LAZY));
+#else
return unlikely(!raw_cpu_read_4(__preempt_count));
+#endif
}
#ifdef CONFIG_PREEMPT
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index d4db37ee5f2d..004fac22def7 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -63,6 +63,7 @@ struct trace_entry {
int pid;
unsigned short migrate_disable;
unsigned short padding;
+ unsigned char preempt_lazy_count;
};
#define FTRACE_MAX_EVENT \
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 933912ec8268..66587bf4563a 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -33,6 +33,20 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
+#ifdef CONFIG_PREEMPT_LAZY
+#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
+#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
+#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
+#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
+#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
+#else
+#define add_preempt_lazy_count(val) do { } while (0)
+#define sub_preempt_lazy_count(val) do { } while (0)
+#define inc_preempt_lazy_count() do { } while (0)
+#define dec_preempt_lazy_count() do { } while (0)
+#define preempt_lazy_count() (0)
+#endif
+
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
@@ -41,6 +55,12 @@ do { \
barrier(); \
} while (0)
+#define preempt_lazy_disable() \
+do { \
+ inc_preempt_lazy_count(); \
+ barrier(); \
+} while (0)
+
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
@@ -69,6 +89,13 @@ do { \
__preempt_schedule(); \
} while (0)
+#define preempt_lazy_enable() \
+do { \
+ dec_preempt_lazy_count(); \
+ barrier(); \
+ preempt_check_resched(); \
+} while (0)
+
#else
#define preempt_enable() \
do { \
@@ -147,7 +174,7 @@ do { \
} while (0)
#define preempt_fold_need_resched() \
do { \
- if (tif_need_resched()) \
+ if (tif_need_resched_now()) \
set_preempt_need_resched(); \
} while (0)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 46ae98a12142..05353a40a462 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2793,6 +2793,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
+#ifdef CONFIG_PREEMPT_LAZY
+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
+}
+
+static inline int need_resched_lazy(void)
+{
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int need_resched_now(void)
+{
+ return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#else
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
+static inline int need_resched_lazy(void) { return 0; }
+
+static inline int need_resched_now(void)
+{
+ return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#endif
+
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index ff307b548ed3..be9f9dc6a4e1 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -102,7 +102,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+#ifdef CONFIG_PREEMPT_LAZY
+#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \
+ test_thread_flag(TIF_NEED_RESCHED_LAZY))
+#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED))
+#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)
+
+#else
+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED)
+#define tif_need_resched_lazy() 0
+#endif
#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
/*
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index f8a2982bdbde..11dbe26a8279 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -6,6 +6,12 @@ config PREEMPT_RT_BASE
bool
select PREEMPT
+config HAVE_PREEMPT_LAZY
+ bool
+
+config PREEMPT_LAZY
+ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
+
choice
prompt "Preemption Model"
default PREEMPT_NONE
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index be8af39900f7..e7e22c1d3fbb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -632,6 +632,38 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
+#ifdef CONFIG_PREEMPT_LAZY
+void resched_curr_lazy(struct rq *rq)
+{
+ struct task_struct *curr = rq->curr;
+ int cpu;
+
+ if (!sched_feat(PREEMPT_LAZY)) {
+ resched_curr(rq);
+ return;
+ }
+
+ lockdep_assert_held(&rq->lock);
+
+ if (test_tsk_need_resched(curr))
+ return;
+
+ if (test_tsk_need_resched_lazy(curr))
+ return;
+
+ set_tsk_need_resched_lazy(curr);
+
+ cpu = cpu_of(rq);
+ if (cpu == smp_processor_id())
+ return;
+
+ /* NEED_RESCHED_LAZY must be visible before we test polling */
+ smp_mb();
+ if (!tsk_is_polling(curr))
+ smp_send_reschedule(cpu);
+}
+#endif
+
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -2021,6 +2053,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
+#ifdef CONFIG_HAVE_PREEMPT_LAZY
+ task_thread_info(p)->preempt_lazy_count = 0;
+#endif
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
@@ -2792,6 +2827,7 @@ void migrate_disable(void)
}
preempt_disable();
+ preempt_lazy_disable();
pin_current_cpu();
p->migrate_disable = 1;
preempt_enable();
@@ -2846,6 +2882,7 @@ void migrate_enable(void)
unpin_current_cpu();
preempt_enable();
+ preempt_lazy_enable();
}
EXPORT_SYMBOL(migrate_enable);
#else
@@ -2974,6 +3011,7 @@ need_resched:
next = pick_next_task(rq, prev);
clear_tsk_need_resched(prev);
+ clear_tsk_need_resched_lazy(prev);
clear_preempt_need_resched();
rq->skip_clock_update = 0;
@@ -3083,6 +3121,14 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
if (likely(!preemptible()))
return;
+#ifdef CONFIG_PREEMPT_LAZY
+ /*
+ * Check for lazy preemption
+ */
+ if (current_thread_info()->preempt_lazy_count &&
+ !test_thread_flag(TIF_NEED_RESCHED))
+ return;
+#endif
do {
__preempt_count_add(PREEMPT_ACTIVE);
/*
@@ -4834,7 +4880,9 @@ void init_idle(struct task_struct *idle, int cpu)
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
-
+#ifdef CONFIG_HAVE_PREEMPT_LAZY
+ task_thread_info(idle)->preempt_lazy_count = 0;
+#endif
/*
* The idle tasks have their own, simple scheduling class:
*/
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ef2b104b254c..b51aad9b0464 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2951,7 +2951,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
- resched_curr(rq_of(cfs_rq));
+ resched_curr_lazy(rq_of(cfs_rq));
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
@@ -2975,7 +2975,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
return;
if (delta > ideal_runtime)
- resched_curr(rq_of(cfs_rq));
+ resched_curr_lazy(rq_of(cfs_rq));
}
static void
@@ -3115,7 +3115,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
* validating it and just reschedule.
*/
if (queued) {
- resched_curr(rq_of(cfs_rq));
+ resched_curr_lazy(rq_of(cfs_rq));
return;
}
/*
@@ -3306,7 +3306,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
- resched_curr(rq_of(cfs_rq));
+ resched_curr_lazy(rq_of(cfs_rq));
}
static __always_inline
@@ -3925,7 +3925,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
if (delta < 0) {
if (rq->curr == p)
- resched_curr(rq);
+ resched_curr_lazy(rq);
return;
}
hrtick_start(rq, delta);
@@ -4792,7 +4792,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;
preempt:
- resched_curr(rq);
+ resched_curr_lazy(rq);
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
@@ -7576,7 +7576,7 @@ static void task_fork_fair(struct task_struct *p)
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
- resched_curr(rq);
+ resched_curr_lazy(rq);
}
se->vruntime -= cfs_rq->min_vruntime;
@@ -7601,7 +7601,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
*/
if (rq->curr == p) {
if (p->prio > oldprio)
- resched_curr(rq);
+ resched_curr_lazy(rq);
} else
check_preempt_curr(rq, p, 0);
}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 10552a509306..fd6d61f1ee17 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -52,6 +52,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
#ifdef CONFIG_PREEMPT_RT_FULL
SCHED_FEAT(TTWU_QUEUE, false)
+# ifdef CONFIG_PREEMPT_LAZY
+SCHED_FEAT(PREEMPT_LAZY, true)
+# endif
#else
/*
* Queue remote wakeups on the target CPU and process them
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index bb191c584970..9e0deba1566e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1211,6 +1211,15 @@ extern void init_sched_dl_class(void);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
+#ifdef CONFIG_PREEMPT_LAZY
+extern void resched_curr_lazy(struct rq *rq);
+#else
+static inline void resched_curr_lazy(struct rq *rq)
+{
+ resched_curr(rq);
+}
+#endif
+
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9d1a3f265049..7e8e95698307 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1579,6 +1579,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
+ entry->preempt_lazy_count = preempt_lazy_count();
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
@@ -1588,7 +1589,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
#endif
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
+ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
@@ -2511,15 +2513,17 @@ get_total_entries(struct trace_buffer *buf,
static void print_lat_help_header(struct seq_file *m)
{
- seq_puts(m, "# _------=> CPU# \n");
- seq_puts(m, "# / _-----=> irqs-off \n");
- seq_puts(m, "# | / _----=> need-resched \n");
- seq_puts(m, "# || / _---=> hardirq/softirq \n");
- seq_puts(m, "# ||| / _--=> preempt-depth \n");
- seq_puts(m, "# |||| / _--=> migrate-disable\n");
- seq_puts(m, "# ||||| / delay \n");
- seq_puts(m, "# cmd pid |||||| time | caller \n");
- seq_puts(m, "# \\ / ||||| \\ | / \n");
+ seq_puts(m, "# _--------=> CPU# \n");
+ seq_puts(m, "# / _-------=> irqs-off \n");
+ seq_puts(m, "# | / _------=> need-resched \n");
+ seq_puts(m, "# || / _-----=> need-resched_lazy \n");
+ seq_puts(m, "# ||| / _----=> hardirq/softirq \n");
+ seq_puts(m, "# |||| / _---=> preempt-depth \n");
+ seq_puts(m, "# ||||| / _--=> preempt-lazy-depth\n");
+ seq_puts(m, "# |||||| / _-=> migrate-disable \n");
+ seq_puts(m, "# ||||||| / delay \n");
+ seq_puts(m, "# cmd pid |||||||| time | caller \n");
+ seq_puts(m, "# \\ / |||||||| \\ | / \n");
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
@@ -2543,13 +2547,16 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
print_event_info(buf, m);
- seq_puts(m, "# _-----=> irqs-off\n");
- seq_puts(m, "# / _----=> need-resched\n");
- seq_puts(m, "# | / _---=> hardirq/softirq\n");
- seq_puts(m, "# || / _--=> preempt-depth\n");
- seq_puts(m, "# ||| / delay\n");
- seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
- seq_puts(m, "# | | | |||| | |\n");
+ seq_puts(m, "# _-------=> irqs-off \n");
+ seq_puts(m, "# / _------=> need-resched \n");
+ seq_puts(m, "# |/ _-----=> need-resched_lazy \n");
+ seq_puts(m, "# ||/ _----=> hardirq/softirq \n");
+ seq_puts(m, "# |||/ _---=> preempt-depth \n");
+ seq_puts(m, "# ||||/ _--=> preempt-lazy-depth\n");
+ seq_puts(m, "# ||||| / _-=> migrate-disable \n");
+ seq_puts(m, "# |||||| / delay\n");
+ seq_puts(m, "# TASK-PID CPU# |||||| TIMESTAMP FUNCTION\n");
+ seq_puts(m, "# | | | |||||| | |\n");
}
void
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 385391fb1d3b..33ec7b68c3a6 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -119,6 +119,7 @@ struct kretprobe_trace_entry_head {
* NEED_RESCHED - reschedule is requested
* HARDIRQ - inside an interrupt handler
* SOFTIRQ - inside a softirq handler
+ * NEED_RESCHED_LAZY - lazy reschedule is requested
*/
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
@@ -127,6 +128,7 @@ enum trace_flag_type {
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x40,
};
#define TRACE_BUF_SIZE 1024
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 8a0b28f796b8..d72684443868 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -410,6 +410,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
char hardsoft_irq;
char need_resched;
+ char need_resched_lazy;
char irqs_off;
int hardirq;
int softirq;
@@ -438,6 +439,8 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
need_resched = '.';
break;
}
+ need_resched_lazy =
+ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
hardsoft_irq =
(hardirq && softirq) ? 'H' :
@@ -445,8 +448,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
softirq ? 's' :
'.';
- if (!trace_seq_printf(s, "%c%c%c",
- irqs_off, need_resched, hardsoft_irq))
+ if (!trace_seq_printf(s, "%c%c%c%c",
+ irqs_off, need_resched, need_resched_lazy,
+ hardsoft_irq))
return 0;
if (entry->preempt_count)
@@ -454,6 +458,11 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
else
ret = trace_seq_putc(s, '.');
+ if (entry->preempt_lazy_count)
+ ret = trace_seq_printf(s, "%x", entry->preempt_lazy_count);
+ else
+ ret = trace_seq_putc(s, '.');
+
if (entry->migrate_disable)
ret = trace_seq_printf(s, "%x", entry->migrate_disable);
else