Diffstat (limited to 'kernel/rcupreempt.c')
-rw-r--r--  kernel/rcupreempt.c  48
1 file changed, 44 insertions, 4 deletions
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 5d59e850fb7..ce97a4df64d 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -147,7 +147,51 @@ struct rcu_ctrlblk {
	wait_queue_head_t sched_wq;	/* Place for rcu_sched to sleep. */
};
+struct rcu_dyntick_sched {
+	int dynticks;
+	int dynticks_snap;
+	int sched_qs;
+	int sched_qs_snap;
+	int sched_dynticks_snap;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
+	.dynticks = 1,
+};
+
+void rcu_qsctr_inc(int cpu)
+{
+	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
+
+	rdssp->sched_qs++;
+}
+
+#ifdef CONFIG_NO_HZ
+
+void rcu_enter_nohz(void)
+{
+	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
+}
+
+void rcu_exit_nohz(void)
+{
+	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+	WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
+			  &rs);
+}
+
+#endif /* CONFIG_NO_HZ */
+
+
static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+
static struct rcu_ctrlblk rcu_ctrlblk = {
	.fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
	.completed = 0,
@@ -427,10 +471,6 @@ static void __rcu_advance_callbacks(struct rcu_data *rdp)
}
}
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
-	.dynticks = 1,
-};
-
#ifdef CONFIG_NO_HZ
static DEFINE_PER_CPU(int, rcu_update_flag);
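
The functions added above maintain an even/odd convention on the per-CPU ->dynticks counter: it starts at 1 (odd) while the CPU is active, rcu_enter_nohz() increments it to an even value on the way into dynticks-idle, and rcu_exit_nohz() increments it back to odd on the way out; the smp_mb() calls order the counter update against the RCU read-side critical sections on either side, exactly as the in-line comments say. The following standalone sketch is illustrative only: it is not part of the patch and not the kernel's grace-period code. It is a toy, single-threaded model of how a grace-period checker could snapshot the counter and later decide that a CPU has been through dynticks-idle since the grace period began. The names cpu_state, gp_snapshot() and gp_cpu_done() are invented for the example.

/*
 * Illustrative sketch only: a toy, single-threaded model of the even/odd
 * dynticks convention, not the kernel's actual grace-period machinery.
 * All names below (cpu_state, gp_snapshot, gp_cpu_done) are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
	int dynticks;		/* odd: CPU active, even: in dynticks-idle */
	int dynticks_snap;	/* value captured when the grace period began */
};

static void enter_nohz(struct cpu_state *c) { c->dynticks++; /* becomes even */ }
static void exit_nohz(struct cpu_state *c)  { c->dynticks++; /* becomes odd  */ }

/* Capture the counter at the start of a grace period. */
static void gp_snapshot(struct cpu_state *c)
{
	c->dynticks_snap = c->dynticks;
}

/*
 * May the grace period stop waiting on this CPU?  Yes if it was already
 * in dynticks-idle when snapshotted (even snapshot), or if the counter
 * has advanced since the snapshot: either way the CPU has been idle at
 * some point after the grace period began, so it cannot still be inside
 * a read-side critical section that predates the grace period.
 */
static bool gp_cpu_done(const struct cpu_state *c)
{
	return (c->dynticks_snap & 0x1) == 0 ||
	       c->dynticks != c->dynticks_snap;
}

int main(void)
{
	struct cpu_state c = { .dynticks = 1 };	/* same initial value as the patch */

	gp_snapshot(&c);
	printf("busy, never idle since snapshot: done=%d\n", gp_cpu_done(&c));	/* 0 */

	enter_nohz(&c);
	printf("entered dynticks-idle:           done=%d\n", gp_cpu_done(&c));	/* 1 */

	exit_nohz(&c);
	printf("left dynticks-idle again:        done=%d\n", gp_cpu_done(&c));	/* 1 */
	return 0;
}

The dynticks_snap and sched_qs_snap fields added by the patch are evidently the kernel-side counterparts of the snapshot used here, though the real comparison logic has to tolerate counters that are sampled concurrently from another CPU and is therefore more involved than this model.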