Diffstat (limited to 'kernel/time/timer.c')
-rw-r--r--  kernel/time/timer.c  104
1 file changed, 87 insertions(+), 17 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index bbc5d1114583..fee8682c209e 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -80,6 +80,9 @@ struct tvec_root {
struct tvec_base {
spinlock_t lock;
struct timer_list *running_timer;
+#ifdef CONFIG_PREEMPT_RT_FULL
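+ /* RT: del_timer_sync() sleeps here until a running callback completes */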
+ wait_queue_head_t wait_for_running_timer;
+#endif
unsigned long timer_jiffies;
unsigned long next_timer;
unsigned long active_timers;
@@ -777,6 +780,39 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
cpu_relax();
}
}

+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
+ struct tvec_base *old,
+ struct tvec_base *new)
+{
+ /*
+ * We cannot use the TIMER_MIGRATING dance of the !RT variant
+ * below: on RT we may be preempted while the flag is set, and
+ * anyone spinning in lock_timer_base() would then loop forever
+ * against the preempted migrator. Take the new lock with a
+ * trylock instead and fall back to the old base on contention.
+ */
+ if (spin_trylock(&new->lock)) {
+ WRITE_ONCE(timer->flags,
+ (timer->flags & ~TIMER_BASEMASK) | new->cpu);
+ spin_unlock(&old->lock);
+ return new;
+ }
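+ /* Trylock failed: leave the timer on its current base rather than block */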
+ return old;
+}
+
+#else
+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
+ struct tvec_base *old,
+ struct tvec_base *new)
+{
+ /* See the comment in lock_timer_base() */
+ timer->flags |= TIMER_MIGRATING;
+
+ spin_unlock(&old->lock);
+ spin_lock(&new->lock);
+ WRITE_ONCE(timer->flags,
+ (timer->flags & ~TIMER_BASEMASK) | new->cpu);
+ return new;
+}
+#endif

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
@@ -807,16 +843,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
* handler yet has not finished. This also guarantees that
* the timer is serialized wrt itself.
*/
- if (likely(base->running_timer != timer)) {
- /* See the comment in lock_timer_base() */
- timer->flags |= TIMER_MIGRATING;
-
- spin_unlock(&base->lock);
- base = new_base;
- spin_lock(&base->lock);
- WRITE_ONCE(timer->flags,
- (timer->flags & ~TIMER_BASEMASK) | base->cpu);
- }
+ if (likely(base->running_timer != timer))
+ base = switch_timer_base(timer, base, new_base);
}

timer->expires = expires;
@@ -1006,6 +1034,33 @@ void add_timer_on(struct timer_list *timer, int cpu)
}
EXPORT_SYMBOL_GPL(add_timer_on);

+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * Wait until the currently executing callback of @timer completes.
+ */
+static void wait_for_running_timer(struct timer_list *timer)
+{
+ struct tvec_base *base;
+ u32 tf = timer->flags;
+
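+ /* A timer being migrated cannot be the one whose callback is running */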
+ if (tf & TIMER_MIGRATING)
+ return;
+
+ base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
+ wait_event(base->wait_for_running_timer,
+ base->running_timer != timer);
+}
+
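+/* Wake del_timer_sync() waiters once __run_timers() has finished its batch */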
+# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
+#else
+static inline void wait_for_running_timer(struct timer_list *timer)
+{
+ cpu_relax();
+}
+
+# define wakeup_timer_waiters(b) do { } while (0)
+#endif
+
/**
* del_timer - deactivate a timer.
* @timer: the timer to be deactivated
@@ -1063,7 +1118,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
}
EXPORT_SYMBOL(try_to_del_timer_sync);

-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
/**
* del_timer_sync - deactivate a timer and wait for the handler to finish.
* @timer: the timer to be deactivated
@@ -1123,7 +1178,7 @@ int del_timer_sync(struct timer_list *timer)
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
return ret;
- cpu_relax();
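+ /* On RT, sleep on the base's waitqueue instead of busy-spinning */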
+ wait_for_running_timer(timer);
}
}
EXPORT_SYMBOL(del_timer_sync);
@@ -1248,15 +1303,17 @@ static inline void __run_timers(struct tvec_base *base)
if (irqsafe) {
spin_unlock(&base->lock);
call_timer_fn(timer, fn, data);
+ base->running_timer = NULL;
spin_lock(&base->lock);
} else {
spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn, data);
+ base->running_timer = NULL;
spin_lock_irq(&base->lock);
}
}
}
- base->running_timer = NULL;
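+ /* All expired callbacks have run; release del_timer_sync() waiters */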
+ wakeup_timer_waiters(base);
spin_unlock_irq(&base->lock);
}
@@ -1390,6 +1447,14 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
if (cpu_is_offline(smp_processor_id()))
return expires;

+#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * On PREEMPT_RT we cannot sleep here. As a result we can't take
+ * the base lock to check when the next timer is pending and so
+ * we assume the next jiffy.
+ */
+ return basem + TICK_NSEC;
+#endif
spin_lock(&base->lock);
if (base->active_timers) {
if (time_before_eq(base->next_timer, base->timer_jiffies))
@@ -1416,13 +1481,13 @@ void update_process_times(int user_tick)
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
+ scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
-#ifdef CONFIG_IRQ_WORK
+#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
if (in_irq())
irq_work_tick();
#endif
- scheduler_tick();
run_posix_cpu_timers(p);
}
@@ -1433,6 +1498,8 @@ static void run_timer_softirq(struct softirq_action *h)
{
struct tvec_base *base = this_cpu_ptr(&tvec_bases);

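+ /* RT pushes irq_work into softirq context; run the deferred work here */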
+ irq_work_tick_soft();
+
if (time_after_eq(jiffies, base->timer_jiffies))
__run_timers(base);
}
@@ -1589,7 +1656,7 @@ static void migrate_timers(int cpu)
BUG_ON(cpu_online(cpu));
old_base = per_cpu_ptr(&tvec_bases, cpu);
- new_base = get_cpu_ptr(&tvec_bases);
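+ /* get_local_ptr() uses migrate_disable() instead of disabling preemption on RT */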
+ new_base = get_local_ptr(&tvec_bases);

/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
@@ -1613,7 +1680,7 @@ static void migrate_timers(int cpu)
spin_unlock(&old_base->lock);
spin_unlock_irq(&new_base->lock);
- put_cpu_ptr(&tvec_bases);
+ put_local_ptr(&tvec_bases);
}

static int timer_cpu_notify(struct notifier_block *self,
@@ -1645,6 +1712,9 @@ static void __init init_timer_cpu(int cpu)
base->cpu = cpu;
spin_lock_init(&base->lock);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ init_waitqueue_head(&base->wait_for_running_timer);
+#endif
base->timer_jiffies = jiffies;
base->next_timer = base->timer_jiffies;