Diffstat (limited to 'kernel')
 kernel/sched/fair.c  | 5 ++++-
 kernel/sched/sched.h | 7 +++++++
 2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1fc9994c6f7..470b4d8de03 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5663,9 +5663,12 @@ static unsigned long scale_rt_capacity(int cpu)
 	 */
 	age_stamp = ACCESS_ONCE(rq->age_stamp);
 	avg = ACCESS_ONCE(rq->rt_avg);
+	delta = __rq_clock_broken(rq) - age_stamp;
+	if (unlikely(delta < 0))
+		delta = 0;
 
-	total = sched_avg_period() + (rq_clock(rq) - age_stamp);
+	total = sched_avg_period() + delta;
 
 	used = div_u64(avg, total);
 
 	if (likely(used < SCHED_CAPACITY_SCALE))
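
A note on the clamp above: rq->clock is read here without rq->lock held (hence the __rq_clock_broken() accessor), so the clock can lag age_stamp and the subtraction can go negative; delta is presumably declared with a signed type in a part of the diff not shown here. The following is a minimal user-space sketch of the same pattern, assuming stand-in typedefs and an illustrative period constant; it is not the kernel code.

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t u64;
	typedef int64_t s64;

	/* Illustrative stand-in for sched_avg_period(); not the kernel value. */
	static const u64 avg_period = 1000000000ULL;

	static u64 total_for(u64 clock, u64 age_stamp)
	{
		/* An unserialized clock may lag age_stamp, so the difference
		 * can be negative once interpreted as a signed value. */
		s64 delta = (s64)(clock - age_stamp);

		if (delta < 0)		/* the clamp added by the patch */
			delta = 0;
		return avg_period + (u64)delta;
	}

	int main(void)
	{
		printf("stale clock:  %llu\n", (unsigned long long)total_for(100, 200));
		printf("normal clock: %llu\n", (unsigned long long)total_for(300, 200));
		return 0;
	}

Without the clamp, a stale clock would wrap the unsigned subtraction to a huge value, inflating total and driving used toward zero.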
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e8889e934c5..68e1d7d98a2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -548,13 +548,20 @@ DECLARE_PER_CPU(struct rq, runqueues);
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		(&__raw_get_cpu_var(runqueues))
 
+static inline u64 __rq_clock_broken(struct rq *rq)
+{
+	return ACCESS_ONCE(rq->clock);
+}
+
 static inline u64 rq_clock(struct rq *rq)
 {
+	lockdep_assert_held(&rq->lock);
 	return rq->clock;
 }
 
 static inline u64 rq_clock_task(struct rq *rq)
 {
+	lockdep_assert_held(&rq->lock);
 	return rq->clock_task;
 }
 
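
For context on the convention these helpers establish, a hypothetical caller might look like the kernel-style sketch below (illustration only, not part of the patch): rq_clock() and rq_clock_task() now trip lockdep unless rq->lock is held, while __rq_clock_broken() marks a deliberately unserialized read at the call site.

	/* Hypothetical caller, for illustration only. */
	static void example_read_clocks(struct rq *rq)
	{
		u64 now;

		raw_spin_lock(&rq->lock);
		now = rq_clock(rq);		/* serialized: lockdep stays quiet */
		raw_spin_unlock(&rq->lock);

		/* Lockless path: the "broken" accessor makes the lack of
		 * serialization explicit, as in scale_rt_capacity() above. */
		now = __rq_clock_broken(rq);
		(void)now;
	}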