diff options
author | Alex Shi <alex.shi@linaro.org> | 2018-03-20 02:25:07 +0000 |
---|---|---|
committer | Alex Shi <alex.shi@linaro.org> | 2018-03-20 02:25:07 +0000 |
commit | 031f431679b03ba7e791dd051fc13f51f806fcd7 (patch) | |
tree | 9018f8a4a6c04feaf0180cae915dfe804913ce34 /kernel/sched/rt.c | |
parent | f7901365aa48b20b25f8402e0fd1eea4ac6c55e7 (diff) | |
parent | 91db57e54eb9be1560454d6a370d67847b05735f (diff) |
Merge remote-tracking branch 'rt-stable/v4.4-rt' into linux-linaro-lsk-v4.4-rt (tag: lsk-v4.4-18.03-rt)
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r-- | kernel/sched/rt.c | 24 |
1 file changed, 15 insertions, 9 deletions
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 14f2d740edab..05837f42e994 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1834,9 +1834,8 @@ static void push_rt_tasks(struct rq *rq) * the rt_loop_next will cause the iterator to perform another scan. * */ -static int rto_next_cpu(struct rq *rq) +static int rto_next_cpu(struct root_domain *rd) { - struct root_domain *rd = rq->rd; int next; int cpu; @@ -1912,19 +1911,24 @@ static void tell_cpu_to_push(struct rq *rq) * Otherwise it is finishing up and an ipi needs to be sent. */ if (rq->rd->rto_cpu < 0) - cpu = rto_next_cpu(rq); + cpu = rto_next_cpu(rq->rd); raw_spin_unlock(&rq->rd->rto_lock); rto_start_unlock(&rq->rd->rto_loop_start); - if (cpu >= 0) + if (cpu >= 0) { + /* Make sure the rd does not get freed while pushing */ + sched_get_rd(rq->rd); irq_work_queue_on(&rq->rd->rto_push_work, cpu); + } } /* Called from hardirq context */ void rto_push_irq_work_func(struct irq_work *work) { + struct root_domain *rd = + container_of(work, struct root_domain, rto_push_work); struct rq *rq; int cpu; @@ -1940,18 +1944,20 @@ void rto_push_irq_work_func(struct irq_work *work) raw_spin_unlock(&rq->lock); } - raw_spin_lock(&rq->rd->rto_lock); + raw_spin_lock(&rd->rto_lock); /* Pass the IPI to the next rt overloaded queue */ - cpu = rto_next_cpu(rq); + cpu = rto_next_cpu(rd); - raw_spin_unlock(&rq->rd->rto_lock); + raw_spin_unlock(&rd->rto_lock); - if (cpu < 0) + if (cpu < 0) { + sched_put_rd(rd); return; + } /* Try the next RT overloaded CPU */ - irq_work_queue_on(&rq->rd->rto_push_work, cpu); + irq_work_queue_on(&rd->rto_push_work, cpu); } #endif /* HAVE_RT_PUSH_IPI */ |