Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/cpu.c               2
 -rw-r--r--  kernel/irq_work.c          5
 -rw-r--r--  kernel/sched/core.c       29
 -rw-r--r--  kernel/time/tick-sched.c  10
 -rw-r--r--  kernel/timer.c             2
 -rw-r--r--  kernel/trace/trace.c       2
 6 files changed, 37 insertions, 13 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 285e18b2c420..a3890754e407 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -242,7 +242,7 @@ static int sync_unplug_thread(void *data)
* we don't want any more work on this CPU.
*/
current->flags &= ~PF_NO_SETAFFINITY;
- do_set_cpus_allowed(current, cpu_present_mask);
+ set_cpus_allowed_ptr(current, cpu_present_mask);
migrate_me();
return 0;
}
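
The cpu.c hunk swaps the bare do_set_cpus_allowed() for the full
set_cpus_allowed_ptr() API, which does not merely widen the task's affinity
mask but also migrates the task if it currently sits on a CPU the new mask
excludes. A rough userspace analogue of that "update mask and migrate at
once" behaviour, assuming Linux with glibc (a sketch, not the kernel path):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t mask;

            CPU_ZERO(&mask);
            CPU_SET(0, &mask);      /* allow CPU 0 only */

            /* updates the allowed mask AND moves the caller if it is
             * currently on an excluded CPU, much as
             * set_cpus_allowed_ptr() does in-kernel */
            if (sched_setaffinity(0, sizeof(mask), &mask))
                    perror("sched_setaffinity");

            printf("now on CPU %d\n", sched_getcpu());
            return 0;
    }
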
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 35d21f93bbe8..5f7d93d89c7f 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -16,6 +16,7 @@
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
+#include <linux/interrupt.h>
#include <asm/processor.h>
@@ -51,11 +52,7 @@ static bool irq_work_claim(struct irq_work *work)
return true;
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-void arch_irq_work_raise(void)
-#else
void __weak arch_irq_work_raise(void)
-#endif
{
/*
* Lame architectures will get the timer tick callback
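
With linux/interrupt.h now included, the RT-specific strong definition of
arch_irq_work_raise() is dropped in favour of the single __weak stub, so
architectures override it exactly as on mainline. A minimal, self-contained
demo of the weak-symbol override mechanism behind the kernel's __weak hooks
(GCC/Clang attribute syntax; the names here are hypothetical):

    #include <stdio.h>

    /* weak default: linked in only when no strong definition exists */
    void __attribute__((weak)) arch_hook(void)
    {
            puts("generic fallback");
    }

    /* an architecture wins by supplying a strong definition:
     *   void arch_hook(void) { puts("arch-specific raise"); }
     */

    int main(void)
    {
            arch_hook();
            return 0;
    }
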
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 883669d3e293..71c0bb1cdeb0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -696,17 +696,34 @@ static inline bool got_nohz_idle_kick(void)
#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
+
+static int ksoftirqd_running(void)
+{
+ struct task_struct *softirqd;
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
+ return 0;
+ softirqd = this_cpu_ksoftirqd();
+ if (softirqd && softirqd->on_rq)
+ return 1;
+ return 0;
+}
+
bool sched_can_stop_tick(void)
{
- struct rq *rq;
+ struct rq *rq;
- rq = this_rq();
+ rq = this_rq();
- /* Make sure rq->nr_running update is visible after the IPI */
- smp_rmb();
+ /* Make sure rq->nr_running update is visible after the IPI */
+ smp_rmb();
- /* More than one running task need preemption */
- if (rq->nr_running > 1)
+ /*
+ * More than one running task need preemption
+ *
+ * NOTE, RT: if ksoftirqd is awake, subtract it.
+ */
+ if (rq->nr_running - ksoftirqd_running() > 1)
return false;
return true;
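
The new ksoftirqd_running() helper accounts for PREEMPT_RT_FULL running
softirqs in the ksoftirqd thread: a woken ksoftirqd inflates rq->nr_running
without being a task that needs preemption ticks, so it is subtracted before
the "more than one runnable task" test. A toy model of the adjusted check
(plain userspace C; all names hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    static bool can_stop_tick(int nr_running, bool rt_full, bool ksoftirqd_on_rq)
    {
            /* on RT, a runnable ksoftirqd is bookkeeping, not a second task */
            int adjust = (rt_full && ksoftirqd_on_rq) ? 1 : 0;

            return (nr_running - adjust) <= 1;
    }

    int main(void)
    {
            /* one user task plus a woken ksoftirqd: mainline keeps the
             * tick running, RT with the adjustment may stop it */
            printf("mainline: %d\n", can_stop_tick(2, false, true)); /* 0 */
            printf("rt-full:  %d\n", can_stop_tick(2, true, true));  /* 1 */
            return 0;
    }
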
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index ab32130964b6..57d5bb1b3919 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -180,6 +180,11 @@ static bool can_stop_full_tick(void)
return false;
}
+ if (!arch_irq_work_has_interrupt()) {
+ trace_tick_stop(0, "missing irq work interrupt\n");
+ return false;
+ }
+
/* sched_clock_tick() needs us? */
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
@@ -221,7 +226,12 @@ void __tick_nohz_full_check(void)
static void nohz_full_kick_work_func(struct irq_work *work)
{
+ unsigned long flags;
+
+ /* ksoftirqd processes sirqs with interrupts enabled */
+ local_irq_save(flags);
__tick_nohz_full_check();
+ local_irq_restore(flags);
}
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
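
Two guards land in tick-sched.c: the tick is kept when the architecture has
no dedicated irq_work interrupt (the tick is then the only reliable way to
run queued irq_work), and nohz_full_kick_work_func() now disables interrupts
around __tick_nohz_full_check(), since on RT it can be invoked from
ksoftirqd with interrupts enabled. A loose userspace analogue of the
save/restore idiom, using POSIX signal masks in place of local_irq_save()
and local_irq_restore():

    #include <signal.h>
    #include <stdio.h>

    static void check_section(void)
    {
            puts("running with 'interrupts' masked");
    }

    int main(void)
    {
            sigset_t all, saved;

            sigfillset(&all);
            sigprocmask(SIG_BLOCK, &all, &saved);   /* ~ local_irq_save() */
            check_section();
            sigprocmask(SIG_SETMASK, &saved, NULL); /* ~ local_irq_restore() */
            return 0;
    }
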
diff --git a/kernel/timer.c b/kernel/timer.c
index 34fd2dbba3e3..36b9f10bb3c7 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1450,7 +1450,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(cpu, user_tick);
-#if defined(CONFIG_IRQ_WORK)
+#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
if (in_irq())
irq_work_run();
#endif
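
On RT-full builds, update_process_times() no longer runs irq_work from the
hard-interrupt tick path; the patchset defers that work to non-hard-irq
context instead. The compile-time gating is the usual config-macro pattern,
sketched here with hypothetical stand-in macros:

    #include <stdio.h>

    #define CONFIG_IRQ_WORK 1
    /* #define CONFIG_PREEMPT_RT_FULL 1 */  /* flip to model an RT build */

    int main(void)
    {
    #if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
            puts("irq_work runs from the hard-irq tick");
    #else
            puts("irq_work deferred out of hard-irq context");
    #endif
            return 0;
    }
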
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b262e442d6c3..a106d356de90 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -468,7 +468,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
local_save_flags(irq_flags);
buffer = global_trace.trace_buffer.buffer;
- event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+ event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
irq_flags, pc);
if (!event)
return 0;