-rw-r--r--  include/linux/tick.h      |  4 ++++
-rw-r--r--  kernel/time/Kconfig       |  1 +
-rw-r--r--  kernel/time/tick-sched.c  | 51 +++++++++++++++++++++++++++++++++++
3 files changed, 56 insertions(+), 0 deletions(-)
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b4e3b0c9639..c2dcfb18f65 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -159,8 +159,12 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
 #ifdef CONFIG_NO_HZ_FULL
 extern int tick_nohz_full_cpu(int cpu);
+extern void tick_nohz_full_kick(void);
+extern void tick_nohz_full_kick_all(void);
 #else
 static inline int tick_nohz_full_cpu(int cpu) { return 0; }
+static inline void tick_nohz_full_kick(void) { }
+static inline void tick_nohz_full_kick_all(void) { }
 #endif
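
The two declarations above are the public surface of this patch: tick_nohz_full_kick() re-evaluates the tick on the current CPU only, while tick_nohz_full_kick_all() targets every full dynticks CPU. As a rough caller-side sketch (hypothetical functions, not part of this patch), a subsystem that changes a condition affecting the tick dependency might use them like this:

/* Hypothetical caller sketch -- not part of this patch. */
#include <linux/tick.h>

static void example_set_global_tick_dependency(void)
{
	/*
	 * A condition that may force the periodic tick back on has
	 * changed for all CPUs: ask every full dynticks CPU to
	 * re-evaluate whether it can keep its tick stopped.
	 */
	tick_nohz_full_kick_all();
}

static void example_set_local_tick_dependency(void)
{
	/* Only the current CPU's tick state is affected. */
	tick_nohz_full_kick();
}

Because the !CONFIG_NO_HZ_FULL stubs are empty inlines, such call sites need no #ifdef guards.
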
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 358d601a4fe..fbb4c7eb92a 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -111,6 +111,7 @@ config NO_HZ_FULL
 	select RCU_USER_QS
 	select RCU_NOCB_CPU
 	select CONTEXT_TRACKING_FORCE
+	select IRQ_WORK
 	help
 	  Adaptively try to shutdown the tick whenever possible, even when
 	  the CPU is running tasks. Typically this requires running a single
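
NO_HZ_FULL now selects IRQ_WORK because the self-kick path added in tick-sched.c below queues an irq_work rather than calling the re-evaluation code directly: irq_work_queue() raises a self-interrupt, so the check runs from interrupt context shortly afterwards, no matter where the kick was issued. A minimal sketch of that per-CPU pattern (illustrative names only, same kernel API as the hunk below):

/* Illustrative sketch of the per-CPU irq_work self-kick pattern. */
#include <linux/irq_work.h>
#include <linux/percpu.h>

static void example_work_func(struct irq_work *work)
{
	/* Runs in interrupt context on the CPU that queued the work. */
}

static DEFINE_PER_CPU(struct irq_work, example_work) = {
	.func = example_work_func,
};

static void example_kick_self(void)
{
	/* Raise a self-interrupt; example_work_func() runs soon after. */
	irq_work_queue(&__get_cpu_var(example_work));
}

By contrast, kicking remote CPUs (tick_nohz_full_kick_all() below) goes through smp_call_function_many(), which is why that path wraps the call in preempt_disable()/preempt_enable().
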
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 369b5769fc9..2bcad5b904d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -147,6 +147,57 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 static cpumask_var_t nohz_full_mask;
 bool have_nohz_full_mask;
 
+/*
+ * Re-evaluate the need for the tick on the current CPU
+ * and restart it if necessary.
+ */
+static void tick_nohz_full_check(void)
+{
+	/*
+	 * STUB for now, will be filled in by the full tick stop/restart
+	 * infrastructure patches
+	 */
+}
+
+static void nohz_full_kick_work_func(struct irq_work *work)
+{
+	tick_nohz_full_check();
+}
+
+static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
+	.func = nohz_full_kick_work_func,
+};
+
+/*
+ * Kick the current CPU if it's full dynticks in order to force it to
+ * re-evaluate its dependency on the tick and restart it if necessary.
+ */
+void tick_nohz_full_kick(void)
+{
+	if (tick_nohz_full_cpu(smp_processor_id()))
+		irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+}
+
+static void nohz_full_kick_ipi(void *info)
+{
+	tick_nohz_full_check();
+}
+
+/*
+ * Kick all full dynticks CPUs in order to force them to re-evaluate
+ * their dependency on the tick and restart it if necessary.
+ */
+void tick_nohz_full_kick_all(void)
+{
+	if (!have_nohz_full_mask)
+		return;
+
+	preempt_disable();
+	smp_call_function_many(nohz_full_mask,
+			       nohz_full_kick_ipi, NULL, false);
+	preempt_enable();
+}
+
 int tick_nohz_full_cpu(int cpu)
 {
 	if (!have_nohz_full_mask)