Diffstat (limited to 'block'):

 block/blk-core.c    | 19
 block/blk-ioc.c     |  5
 block/blk-iopoll.c  |  3
 block/blk-mq-cpu.c  | 17
 block/blk-mq.c      | 42
 block/blk-mq.h      |  9
 block/blk-softirq.c |  3
 block/bounce.c      |  4
 8 files changed, 65 insertions(+), 37 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 03b5f8d77f37..4e7dded643a2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -100,6 +100,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->timeout_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
rq->cpu = -1;
rq->q = q;
rq->__sector = (sector_t) -1;
@@ -194,7 +197,7 @@ EXPORT_SYMBOL(blk_delay_queue);
**/
void blk_start_queue(struct request_queue *q)
{
- WARN_ON(!irqs_disabled());
+ WARN_ON_NONRT(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
@@ -661,7 +664,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
- init_waitqueue_head(&q->mq_freeze_wq);
+ init_swait_head(&q->mq_freeze_wq);
if (blkcg_init_queue(q))
goto fail_bdi;
@@ -3077,7 +3080,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
blk_run_queue_async(q);
else
__blk_run_queue(q);
- spin_unlock(q->queue_lock);
+ spin_unlock_irq(q->queue_lock);
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -3125,7 +3128,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
- unsigned long flags;
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
@@ -3145,11 +3147,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
q = NULL;
depth = 0;
- /*
- * Save and disable interrupts here, to avoid doing it for every
- * queue lock we have to take.
- */
- local_irq_save(flags);
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
@@ -3162,7 +3159,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
- spin_lock(q->queue_lock);
+ spin_lock_irq(q->queue_lock);
}
/*
@@ -3189,8 +3186,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
*/
if (q)
queue_unplugged(q, depth, from_schedule);
-
- local_irq_restore(flags);
}
void blk_finish_plug(struct blk_plug *plug)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 1a27f45ec776..28f467e636cc 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -7,6 +7,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include "blk.h"
@@ -109,7 +110,7 @@ static void ioc_release_fn(struct work_struct *work)
spin_unlock(q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_relax();
+ cpu_chill();
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
}
}
@@ -187,7 +188,7 @@ retry:
spin_unlock(icq->q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_relax();
+ cpu_chill();
goto retry;
}
}
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 0736729d6494..3e21e31d0d7e 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -35,6 +35,7 @@ void blk_iopoll_sched(struct blk_iopoll *iop)
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
EXPORT_SYMBOL(blk_iopoll_sched);
@@ -132,6 +133,7 @@ static void blk_iopoll_softirq(struct softirq_action *h)
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_enable();
+ preempt_check_resched_rt();
}
/**
@@ -201,6 +203,7 @@ static int blk_iopoll_cpu_notify(struct notifier_block *self,
this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_enable();
+ preempt_check_resched_rt();
}
return NOTIFY_OK;
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index bb3ed488f7b5..628c6c13c482 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -16,7 +16,7 @@
#include "blk-mq.h"
static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
static int blk_mq_main_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
@@ -25,7 +25,10 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
struct blk_mq_cpu_notifier *notify;
int ret = NOTIFY_OK;
- raw_spin_lock(&blk_mq_cpu_notify_lock);
+ if (action != CPU_POST_DEAD)
+ return NOTIFY_OK;
+
+ spin_lock(&blk_mq_cpu_notify_lock);
list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
ret = notify->notify(notify->data, action, cpu);
@@ -33,7 +36,7 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
break;
}
- raw_spin_unlock(&blk_mq_cpu_notify_lock);
+ spin_unlock(&blk_mq_cpu_notify_lock);
return ret;
}
@@ -41,16 +44,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
BUG_ON(!notifier->notify);
- raw_spin_lock(&blk_mq_cpu_notify_lock);
+ spin_lock(&blk_mq_cpu_notify_lock);
list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
- raw_spin_unlock(&blk_mq_cpu_notify_lock);
+ spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
- raw_spin_lock(&blk_mq_cpu_notify_lock);
+ spin_lock(&blk_mq_cpu_notify_lock);
list_del(&notifier->list);
- raw_spin_unlock(&blk_mq_cpu_notify_lock);
+ spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2dc1fd6c5bdb..c473bd192a41 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -88,7 +88,7 @@ static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
if (!(gfp & __GFP_WAIT))
return -EBUSY;
- ret = wait_event_interruptible(q->mq_freeze_wq,
+ ret = swait_event_interruptible(q->mq_freeze_wq,
!q->mq_freeze_depth || blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
struct request_queue *q =
container_of(ref, struct request_queue, mq_usage_counter);
- wake_up_all(&q->mq_freeze_wq);
+ swait_wake_all(&q->mq_freeze_wq);
}
void blk_mq_freeze_queue_start(struct request_queue *q)
@@ -127,7 +127,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
}
/*
@@ -151,7 +151,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
spin_unlock_irq(q->queue_lock);
if (wake) {
percpu_ref_reinit(&q->mq_usage_counter);
- wake_up_all(&q->mq_freeze_wq);
+ swait_wake_all(&q->mq_freeze_wq);
}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
@@ -170,7 +170,7 @@ void blk_mq_wake_waiters(struct request_queue *q)
* dying, we need to ensure that processes currently waiting on
* the queue are notified as well.
*/
- wake_up_all(&q->mq_freeze_wq);
+ swait_wake_all(&q->mq_freeze_wq);
}
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
@@ -217,6 +217,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
rq->resid_len = 0;
rq->sense = NULL;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
@@ -346,6 +349,17 @@ void blk_mq_end_request(struct request *rq, int error)
}
EXPORT_SYMBOL(blk_mq_end_request);
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+void __blk_mq_complete_request_remote_work(struct work_struct *work)
+{
+ struct request *rq = container_of(work, struct request, work);
+
+ rq->q->softirq_done_fn(rq);
+}
+
+#else
+
static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
@@ -353,6 +367,8 @@ static void __blk_mq_complete_request_remote(void *data)
rq->q->softirq_done_fn(rq);
}
+#endif
+
static void blk_mq_ipi_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
@@ -364,19 +380,23 @@ static void blk_mq_ipi_complete_request(struct request *rq)
return;
}
- cpu = get_cpu();
+ cpu = get_cpu_light();
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
shared = cpus_share_cache(cpu, ctx->cpu);
if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+ schedule_work_on(ctx->cpu, &rq->work);
+#else
rq->csd.func = __blk_mq_complete_request_remote;
rq->csd.info = rq;
rq->csd.flags = 0;
smp_call_function_single_async(ctx->cpu, &rq->csd);
+#endif
} else {
rq->q->softirq_done_fn(rq);
}
- put_cpu();
+ put_cpu_light();
}
void __blk_mq_complete_request(struct request *rq)
@@ -905,14 +925,14 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
return;
if (!async) {
- int cpu = get_cpu();
+ int cpu = get_cpu_light();
if (cpumask_test_cpu(cpu, hctx->cpumask)) {
__blk_mq_run_hw_queue(hctx);
- put_cpu();
+ put_cpu_light();
return;
}
- put_cpu();
+ put_cpu_light();
}
kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
@@ -1589,7 +1609,7 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
{
struct blk_mq_hw_ctx *hctx = data;
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
+ if (action == CPU_POST_DEAD)
return blk_mq_hctx_cpu_offline(hctx, cpu);
/*
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 6a48c4c0d8a2..4b7cbf0e6e82 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -76,7 +76,10 @@ struct blk_align_bitmap {
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
{
- return per_cpu_ptr(q->queue_ctx, cpu);
+ struct blk_mq_ctx *ctx;
+
+ ctx = per_cpu_ptr(q->queue_ctx, cpu);
+ return ctx;
}
/*
@@ -87,12 +90,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
*/
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
- return __blk_mq_get_ctx(q, get_cpu());
+ return __blk_mq_get_ctx(q, get_cpu_light());
}
static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
- put_cpu();
+ put_cpu_light();
}
struct blk_mq_alloc_data {
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 53b1737e978d..81c3c0a62edf 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
/*
@@ -93,6 +94,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
this_cpu_ptr(&blk_cpu_done));
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_enable();
+ preempt_check_resched_rt();
}
return NOTIFY_OK;
@@ -150,6 +152,7 @@ do_local:
goto do_local;
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
/**
diff --git a/block/bounce.c b/block/bounce.c
index ed9dd8067120..39d123e0a989 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -54,11 +54,11 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
unsigned long flags;
unsigned char *vto;
- local_irq_save(flags);
+ local_irq_save_nort(flags);
vto = kmap_atomic(to->bv_page);
memcpy(vto + to->bv_offset, vfrom, to->bv_len);
kunmap_atomic(vto);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
}
#else /* CONFIG_HIGHMEM */