author    Linux Build Service Account <lnxbuild@localhost>    2018-11-26 02:23:07 -0800
committer Gerrit - the friendly Code Review server <code-review@localhost>    2018-11-26 02:23:06 -0800
commit    c92eaf63c10f29df83bf24f32f25ec2030ffccb8 (patch)
tree      de09e912ee9e074bbb080508e18723a6937e7c92
parent    a81e0f2639542300f8754692bddccdd740bdf24a (diff)
parent    7f7b479154ad805a96945cfb232a428288a9c6fb (diff)
Merge changes Ia9edf908,I22a138c0,Ia590e36d,Ie29a02ce,Ic3b7ed55 into kernel.lnx.4.9.r12-rel (LA.UM.7.6.2.r1-04300-89xx.0)
* changes:
  mmc: cmdq: Add timeout in case of mmc_cmdq_halt_on_empty_queue()
  mmc: add more trace logs and debug information
  mmc: host: cmdq: Check if tag info extracted from CQTERRI is valid
  mmc: CMDQ Error handling context synchronization
  mmc: Fix error handling path when there is an active Discard (DCMD)
-rw-r--r--  drivers/mmc/card/block.c    | 153
-rw-r--r--  drivers/mmc/card/queue.c    |   8
-rw-r--r--  drivers/mmc/core/core.c     |  69
-rw-r--r--  drivers/mmc/core/core.h     |   3
-rw-r--r--  drivers/mmc/core/debugfs.c  |   6
-rw-r--r--  drivers/mmc/core/host.c     |   4
-rw-r--r--  drivers/mmc/core/mmc.c      |   2
-rw-r--r--  drivers/mmc/host/cmdq_hci.c |  79
-rw-r--r--  drivers/mmc/host/cmdq_hci.h |   4
-rw-r--r--  drivers/mmc/host/sdhci.c    |  12
-rw-r--r--  include/linux/mmc/core.h    |   7
-rw-r--r--  include/linux/mmc/host.h    |   4
12 files changed, 276 insertions(+), 75 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a1488fe62e98..04ac554ccd90 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1214,7 +1214,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
mmc_get_card(card);
if (mmc_card_cmdq(card)) {
- err = mmc_cmdq_halt_on_empty_queue(card->host);
+ err = mmc_cmdq_halt_on_empty_queue(card->host, 0);
if (err) {
pr_err("%s: halt failed while doing %s err (%d)\n",
mmc_hostname(card->host),
@@ -1867,11 +1867,6 @@ static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq,
struct mmc_cmdq_req *cmdq_req;
struct mmc_queue_req *active_mqrq;
- BUG_ON(req->tag > card->ext_csd.cmdq_depth);
- BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
-
- set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
-
active_mqrq = &mq->mqrq_cmdq[req->tag];
active_mqrq->req = req;
@@ -1879,6 +1874,17 @@ static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq,
cmdq_req->cmdq_req_flags |= QBR;
cmdq_req->mrq.cmd = &cmdq_req->cmd;
cmdq_req->tag = req->tag;
+
+ /*
+ * To avoid potential race condition with the error handler work,
+ * do the following:
+ * 1. set init_completion() only once
+ * 2. set the CMDQ_STATE_DCMD_ACTIVE only after its tag is set
+ */
+ init_completion(&cmdq_req->mrq.completion);
+ WARN_ON(req->tag > card->ext_csd.cmdq_depth);
+ WARN_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+ set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
return cmdq_req;
}
@@ -1922,8 +1928,21 @@ static int mmc_blk_cmdq_issue_discard_rq(struct mmc_queue *mq,
}
err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
clear_dcmd:
- mmc_host_clk_hold(card->host);
- blk_complete_request(req);
+ /*
+ * If some other request got an error while there is a DCMD request
+ * in the command queue, then err will be updated with -EAGAIN by the
+ * error handler, which indicates that the caller must not call
+ * blk_complete_request() and must let the request be handled by the
+ * error handler. In all other cases, the caller must call
+ * blk_complete_request().
+ */
+ if (err != -EAGAIN) {
+ mmc_host_clk_hold(card->host);
+ blk_complete_request(req);
+ } else {
+ pr_err("%s: err(%d) handled by cmdq-error handler\n",
+ __func__, err);
+ }
out:
return err ? 1 : 0;
}
@@ -2028,8 +2047,13 @@ static int mmc_blk_cmdq_issue_secdiscard_rq(struct mmc_queue *mq,
MMC_SECURE_TRIM2_ARG);
}
clear_dcmd:
- mmc_host_clk_hold(card->host);
- blk_complete_request(req);
+ if (err != -EAGAIN) {
+ mmc_host_clk_hold(card->host);
+ blk_complete_request(req);
+ } else {
+ pr_err("%s: err(%d) handled by cmdq-error handler\n",
+ __func__, err);
+ }
out:
return err ? 1 : 0;
}
@@ -3212,8 +3236,14 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
struct mmc_cmdq_req *mc_rq;
u8 active_small_sector_read = 0;
int ret = 0;
+ unsigned long timeout_ms = 10000; /* 10 sec safe timeout */
+
+ mmc_cmdq_up_rwsem(host);
+ mmc_deferred_scaling(host, timeout_ms);
+ ret = mmc_cmdq_down_rwsem(host, req);
+ if (ret)
+ return ret;
- mmc_deferred_scaling(host);
mmc_cmdq_clk_scaling_start_busy(host, true);
BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
@@ -3246,9 +3276,18 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
* empty faster and we will be able to scale up to Nominal frequency
* when needed.
*/
- if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW))
- wait_event_interruptible(ctx->queue_empty_wq,
- (!ctx->active_reqs));
+
+ if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW)) {
+
+ ret = wait_event_interruptible_timeout(ctx->queue_empty_wq,
+ (!ctx->active_reqs &&
+ !test_bit(CMDQ_STATE_ERR, &ctx->curr_state)),
+ msecs_to_jiffies(5000));
+ if (!ret)
+ pr_err("%s: queue_empty_wq timeout case? ret = (%d)\n",
+ __func__, ret);
+ ret = 0;
+ }
if (ret) {
/* clear pending request */
@@ -3339,20 +3378,17 @@ out:
}
/**
- * is_cmdq_dcmd_req - Checks if tag belongs to DCMD request.
+ * get_cmdq_req_by_tag - returns cmdq_rq based on tag.
* @q: request_queue pointer.
* @tag: tag number of request to check.
*
- * This function checks if the request with tag number "tag"
- * is a DCMD request or not based on cmdq_req_flags set.
- *
- * returns true if DCMD req, otherwise false.
*/
-static bool is_cmdq_dcmd_req(struct request_queue *q, int tag)
+static struct mmc_cmdq_req *get_cmdq_req_by_tag(struct request_queue *q,
+ int tag)
{
struct request *req;
struct mmc_queue_req *mq_rq;
- struct mmc_cmdq_req *cmdq_req;
+ struct mmc_cmdq_req *cmdq_req = NULL;
req = blk_queue_find_tag(q, tag);
if (WARN_ON(!req))
@@ -3361,9 +3397,8 @@ static bool is_cmdq_dcmd_req(struct request_queue *q, int tag)
if (WARN_ON(!mq_rq))
goto out;
cmdq_req = &(mq_rq->cmdq_req);
- return (cmdq_req->cmdq_req_flags & DCMD);
out:
- return -ENOENT;
+ return cmdq_req;
}
/**
@@ -3383,7 +3418,9 @@ static void mmc_blk_cmdq_reset_all(struct mmc_host *host, int err)
struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
struct request_queue *q;
int itag = 0;
- int ret = 0;
+ struct mmc_cmdq_req *cmdq_req = NULL;
+ struct mmc_request *dcmd_mrq;
+ bool is_err_mrq_dcmd = false;
if (WARN_ON(!mrq))
return;
@@ -3399,18 +3436,31 @@ static void mmc_blk_cmdq_reset_all(struct mmc_host *host, int err)
mmc_blk_cmdq_reset(host, false);
+ if (mrq->cmdq_req->cmdq_req_flags & DCMD)
+ is_err_mrq_dcmd = true;
+
for_each_set_bit(itag, &ctx_info->active_reqs,
host->num_cq_slots) {
- ret = is_cmdq_dcmd_req(q, itag);
- if (WARN_ON(ret == -ENOENT))
+ cmdq_req = get_cmdq_req_by_tag(q, itag);
+ if (WARN_ON(!cmdq_req))
continue;
- if (!ret) {
+ if (!(cmdq_req->cmdq_req_flags & DCMD)) {
WARN_ON(!test_and_clear_bit(itag,
&ctx_info->data_active_reqs));
mmc_cmdq_post_req(host, itag, err);
} else {
- clear_bit(CMDQ_STATE_DCMD_ACTIVE,
- &ctx_info->curr_state);
+ dcmd_mrq = &cmdq_req->mrq;
+ WARN_ON(!test_and_clear_bit(CMDQ_STATE_DCMD_ACTIVE,
+ &ctx_info->curr_state));
+ pr_debug("%s: cmd(%u), req_op(%llu)\n", __func__,
+ dcmd_mrq->cmd->opcode, req_op(dcmd_mrq->req));
+ if (!is_err_mrq_dcmd && !dcmd_mrq->cmd->error &&
+ (req_op(dcmd_mrq->req) == REQ_OP_SECURE_ERASE ||
+ req_op(dcmd_mrq->req) == REQ_OP_DISCARD)) {
+ dcmd_mrq->cmd->error = -EAGAIN;
+ complete(&dcmd_mrq->completion);
+ }
+
}
WARN_ON(!test_and_clear_bit(itag,
&ctx_info->active_reqs));
@@ -3538,6 +3588,7 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
if (WARN_ON(!mrq))
return;
+ down_write(&ctx_info->err_rwsem);
q = mrq->req->q;
err = mmc_cmdq_halt(host, true);
if (err) {
@@ -3590,6 +3641,24 @@ reset:
host->err_mrq = NULL;
clear_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
WARN_ON(!test_and_clear_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+
+#ifdef CONFIG_MMC_CLKGATE
+ pr_err("%s: clk-rqs(%d), claim-cnt(%d), claimed(%d), claimer(%s)\n",
+ __func__, host->clk_requests, host->claim_cnt, host->claimed,
+ host->claimer->comm);
+#else
+ pr_err("%s: claim-cnt(%d), claimed(%d), claimer(%s)\n", __func__,
+ host->claim_cnt, host->claimed, host->claimer->comm);
+#endif
+ sched_show_task(mq->thread);
+ if (host->claimed && host->claimer)
+ sched_show_task(host->claimer);
+#ifdef CONFIG_MMC_CLKGATE
+ WARN_ON(host->clk_requests < 0);
+#endif
+ WARN_ON(host->claim_cnt < 0);
+
+ up_write(&ctx_info->err_rwsem);
wake_up(&ctx_info->wait);
}
@@ -3604,6 +3673,16 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
int err = 0;
bool is_dcmd = false;
+ bool err_rwsem = false;
+
+ if (down_read_trylock(&ctx_info->err_rwsem)) {
+ err_rwsem = true;
+ } else {
+ pr_err("%s: err_rwsem lock failed to acquire => err handler active\n",
+ __func__);
+ WARN_ON_ONCE(!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+ goto out;
+ }
if (mrq->cmd && mrq->cmd->error)
err = mrq->cmd->error;
@@ -3625,12 +3704,6 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
}
goto out;
}
- /*
- * In case of error CMDQ is expected to be either in halted
- * or disable state so cannot receive any completion of
- * other requests.
- */
- WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
/* clear pending request */
BUG_ON(!test_and_clear_bit(cmdq_req->tag,
@@ -3664,9 +3737,10 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
out:
mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
- if (!(err || cmdq_req->resp_err)) {
+ if (err_rwsem && !(err || cmdq_req->resp_err)) {
mmc_host_clk_release(host);
wake_up(&ctx_info->wait);
+ host->last_completed_rq_time = ktime_get();
mmc_put_card(host->card);
}
@@ -3676,6 +3750,8 @@ out:
if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
complete(&mq->cmdq_shutdown_complete);
+ if (err_rwsem)
+ up_read(&ctx_info->err_rwsem);
return;
}
@@ -4056,6 +4132,7 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
if (mmc_req_is_special(req) &&
(card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
ctx->active_small_sector_read_reqs) {
+ mmc_cmdq_up_rwsem(host);
ret = wait_event_interruptible(ctx->queue_empty_wq,
!ctx->active_reqs);
if (ret) {
@@ -4064,6 +4141,10 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
__func__, ret);
BUG_ON(1);
}
+ ret = mmc_cmdq_down_rwsem(host, req);
+ if (ret)
+ return ret;
+
/* clear the counter now */
ctx->active_small_sector_read_reqs = 0;
/*
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 66165d979173..21e4fbcfb0c4 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -133,7 +133,14 @@ static int mmc_cmdq_thread(void *d)
if (kthread_should_stop())
break;
+ ret = mmc_cmdq_down_rwsem(host, mq->cmdq_req_peeked);
+ if (ret) {
+ mmc_cmdq_up_rwsem(host);
+ continue;
+ }
ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
+ mmc_cmdq_up_rwsem(host);
+
/*
* Don't requeue if issue_fn fails.
* Recovery will be come by completion softirq
@@ -645,6 +652,7 @@ int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card)
init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq);
init_waitqueue_head(&card->host->cmdq_ctx.wait);
+ init_rwsem(&card->host->cmdq_ctx.err_rwsem);
mq->mqrq_cmdq = kzalloc(
sizeof(struct mmc_queue_req) * q_depth, GFP_KERNEL);
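
The dispatch-thread hunk above is the read side of the same semaphore: it is taken around each issue, and mmc_cmdq_down_rwsem() re-checks REQ_QUEUED because the error handler may have called blk_queue_invalidate_tags() between the peek and the issue. A sketch of that revalidation loop, with peek_request()/issue_request() as stand-in helpers:

	while (!kthread_should_stop()) {
		struct request *rq = peek_request();	/* stand-in helper */

		down_read(&err_rwsem);
		if (!(rq->cmd_flags & REQ_QUEUED)) {
			/* Tag was invalidated by recovery after the peek:
			 * drop the lock and pick the request up again. */
			up_read(&err_rwsem);
			continue;
		}
		issue_request(rq);			/* stand-in helper */
		up_read(&err_rwsem);
	}
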
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 69465f8a58f3..eb9ff362184e 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -137,6 +137,34 @@ static bool mmc_is_data_request(struct mmc_request *mmc_request)
}
}
+void mmc_cmdq_up_rwsem(struct mmc_host *host)
+{
+ struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+ up_read(&ctx->err_rwsem);
+}
+EXPORT_SYMBOL(mmc_cmdq_up_rwsem);
+
+int mmc_cmdq_down_rwsem(struct mmc_host *host, struct request *rq)
+{
+ struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+ down_read(&ctx->err_rwsem);
+ /*
+ * This is to prevent a case where issue context has already
+ * called blk_queue_start_tag(), immediately after which error
+ * handler work has run and called blk_queue_invalidate_tags().
+ * In this case, the issue context should check for REQ_QUEUED
+ * before proceeding with that request. It should ideally call
+ * blk_queue_start_tag() again on the requeued request.
+ */
+ if (!(rq->cmd_flags & REQ_QUEUED))
+ return -EINVAL;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(mmc_cmdq_down_rwsem);
+
static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
{
struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
@@ -345,12 +373,23 @@ static bool mmc_is_valid_state_for_clk_scaling(struct mmc_host *host)
return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
}
-int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host)
+int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host, unsigned long timeout)
{
int err = 0;
- err = wait_event_interruptible(host->cmdq_ctx.queue_empty_wq,
- (!host->cmdq_ctx.active_reqs));
+ if (!timeout) {
+ err = wait_event_interruptible(host->cmdq_ctx.queue_empty_wq,
+ (!host->cmdq_ctx.active_reqs));
+ } else {
+ err = wait_event_interruptible_timeout(
+ host->cmdq_ctx.queue_empty_wq,
+ (!host->cmdq_ctx.active_reqs),
+ msecs_to_jiffies(timeout));
+ if (!err)
+ pr_err("%s: halt_on_empty_queue timeout case: err(%d)\n",
+ __func__, err);
+ }
+
if (host->cmdq_ctx.active_reqs) {
pr_err("%s: %s: unexpected active requests (%lu)\n",
mmc_hostname(host), __func__,
@@ -371,7 +410,8 @@ out:
EXPORT_SYMBOL(mmc_cmdq_halt_on_empty_queue);
int mmc_clk_update_freq(struct mmc_host *host,
- unsigned long freq, enum mmc_load state)
+ unsigned long freq, enum mmc_load state,
+ unsigned long timeout)
{
int err = 0;
bool cmdq_mode;
@@ -413,7 +453,7 @@ int mmc_clk_update_freq(struct mmc_host *host,
}
if (cmdq_mode) {
- err = mmc_cmdq_halt_on_empty_queue(host);
+ err = mmc_cmdq_halt_on_empty_queue(host, timeout);
if (err) {
pr_err("%s: %s: failed halting queue (%d)\n",
mmc_hostname(host), __func__, err);
@@ -427,12 +467,16 @@ int mmc_clk_update_freq(struct mmc_host *host,
goto invalid_state;
}
+ MMC_TRACE(host, "clock scale state %d freq %lu\n",
+ state, freq);
err = host->bus_ops->change_bus_speed(host, &freq);
if (!err)
host->clk_scaling.curr_freq = freq;
else
pr_err("%s: %s: failed (%d) at freq=%lu\n",
mmc_hostname(host), __func__, err, freq);
+ MMC_TRACE(host, "clock scale state %d freq %lu done with err %d\n",
+ state, freq, err);
invalid_state:
if (cmdq_mode) {
@@ -542,7 +586,7 @@ static int mmc_devfreq_set_target(struct device *dev,
clk_scaling->need_freq_change = false;
mmc_host_clk_hold(host);
- err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
+ err = mmc_clk_update_freq(host, *freq, clk_scaling->state, 0);
if (err && err != -EAGAIN) {
pr_err("%s: clock scale to %lu failed with error %d\n",
mmc_hostname(host), *freq, err);
@@ -568,7 +612,7 @@ out:
* This function does clock scaling in case "need_freq_change" flag was set
* by the clock scaling logic.
*/
-void mmc_deferred_scaling(struct mmc_host *host)
+void mmc_deferred_scaling(struct mmc_host *host, unsigned long timeout)
{
unsigned long target_freq;
int err;
@@ -598,7 +642,7 @@ void mmc_deferred_scaling(struct mmc_host *host)
target_freq, current->comm);
err = mmc_clk_update_freq(host, target_freq,
- host->clk_scaling.state);
+ host->clk_scaling.state, timeout);
if (err && err != -EAGAIN) {
pr_err("%s: failed on deferred scale clocks (%d)\n",
mmc_hostname(host), err);
@@ -1204,7 +1248,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
led_trigger_event(host->led, LED_FULL);
if (mmc_is_data_request(mrq)) {
- mmc_deferred_scaling(host);
+ mmc_deferred_scaling(host, 0);
mmc_clk_scaling_start_busy(host, true);
}
@@ -1844,14 +1888,15 @@ int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
struct mmc_command *cmd = mrq->cmd;
int err = 0;
- init_completion(&mrq->completion);
mrq->done = mmc_cmdq_dcmd_req_done;
err = mmc_cmdq_start_req(host, cmdq_req);
if (err)
return err;
+ mmc_cmdq_up_rwsem(host);
wait_for_completion_io(&mrq->completion);
- if (cmd->error) {
+ err = mmc_cmdq_down_rwsem(host, mrq->req);
+ if (err || cmd->error) {
pr_err("%s: DCMD %d failed with err %d\n",
mmc_hostname(host), cmd->opcode,
cmd->error);
@@ -3720,7 +3765,7 @@ static int mmc_cmdq_send_erase_cmd(struct mmc_cmdq_req *cmdq_req,
if (err) {
pr_err("mmc_erase: group start error %d, status %#x\n",
err, cmd->resp[0]);
- return -EIO;
+ return err;
}
return 0;
}
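
The new timeout argument threads from mmc_deferred_scaling() through mmc_clk_update_freq() down to mmc_cmdq_halt_on_empty_queue(), where zero keeps the old unbounded wait and a non-zero value (in ms) bounds it; note that wait_event_interruptible_timeout() returns 0 when it times out with the condition still false, which is why the code above logs on !err. A usage sketch of the two call styles seen in this patch:

	/* ioctl/debugfs paths: wait indefinitely, as before */
	err = mmc_cmdq_halt_on_empty_queue(card->host, 0);

	/* clock-scaling path: bounded wait, 10 s chosen as a safe cap */
	err = mmc_cmdq_halt_on_empty_queue(card->host, 10000);
	if (err)
		pr_err("%s: halt failed (%d)\n",
		       mmc_hostname(card->host), err);
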
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 2adf42c914cb..240dbd39c314 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -26,7 +26,8 @@ void mmc_init_erase(struct mmc_card *card);
void mmc_set_chip_select(struct mmc_host *host, int mode);
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
int mmc_clk_update_freq(struct mmc_host *host,
- unsigned long freq, enum mmc_load state);
+ unsigned long freq, enum mmc_load state,
+ unsigned long timeout);
void mmc_gate_clock(struct mmc_host *host);
void mmc_ungate_clock(struct mmc_host *host);
void mmc_set_ungated(struct mmc_host *host);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 15c3e9e30e97..c3183c18d596 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -275,7 +275,7 @@ static int mmc_scale_set(void *data, u64 val)
mmc_host_clk_hold(host);
/* change frequency from sysfs manually */
- err = mmc_clk_update_freq(host, val, host->clk_scaling.state);
+ err = mmc_clk_update_freq(host, val, host->clk_scaling.state, 0);
if (err == -EAGAIN)
err = 0;
else if (err)
@@ -547,7 +547,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
mmc_get_card(card);
if (mmc_card_cmdq(card)) {
- ret = mmc_cmdq_halt_on_empty_queue(card->host);
+ ret = mmc_cmdq_halt_on_empty_queue(card->host, 0);
if (ret) {
pr_err("%s: halt failed while doing %s err (%d)\n",
mmc_hostname(card->host), __func__,
@@ -589,7 +589,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
mmc_get_card(card);
if (mmc_card_cmdq(card)) {
- err = mmc_cmdq_halt_on_empty_queue(card->host);
+ err = mmc_cmdq_halt_on_empty_queue(card->host, 0);
if (err) {
pr_err("%s: halt failed while doing %s err (%d)\n",
mmc_hostname(card->host), __func__,
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 45df01b50154..edb0a554ac31 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -155,6 +155,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
mmc_gate_clock(host);
spin_lock_irqsave(&host->clk_lock, flags);
pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+ MMC_TRACE(host, "clocks are gated\n");
}
spin_unlock_irqrestore(&host->clk_lock, flags);
mutex_unlock(&host->clk_gate_mutex);
@@ -193,6 +194,7 @@ void mmc_host_clk_hold(struct mmc_host *host)
spin_lock_irqsave(&host->clk_lock, flags);
pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+ MMC_TRACE(host, "clocks are ungated\n");
}
host->clk_requests++;
spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -765,7 +767,7 @@ static ssize_t store_enable(struct device *dev,
host->clk_scaling.state = MMC_LOAD_HIGH;
/* Set to max. frequency when disabling */
mmc_clk_update_freq(host, host->card->clk_scaling_highest,
- host->clk_scaling.state);
+ host->clk_scaling.state, 0);
} else if (value) {
/* Unmask host capability and resume scaling */
host->caps2 |= MMC_CAP2_CLK_SCALE;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 99165f7ac961..ed6ca15bf5a9 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -3087,7 +3087,7 @@ static int mmc_pre_hibernate(struct mmc_host *host)
host->caps2 &= ~MMC_CAP2_CLK_SCALE;
host->clk_scaling.state = MMC_LOAD_HIGH;
ret = mmc_clk_update_freq(host, host->card->clk_scaling_highest,
- host->clk_scaling.state);
+ host->clk_scaling.state, 0);
if (ret)
pr_err("%s: %s: Setting clk frequency to max failed: %d\n",
mmc_hostname(host), __func__, ret);
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index 214871744305..87fc883e8a73 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -864,6 +864,33 @@ out:
return err;
}
+static int cmdq_get_first_valid_tag(struct cmdq_host *cq_host)
+{
+ u32 dbr_set = 0, tag = 0;
+
+ dbr_set = cmdq_readl(cq_host, CQTDBR);
+ if (!dbr_set) {
+ pr_err("%s: spurious/force error interrupt\n",
+ mmc_hostname(cq_host->mmc));
+ cmdq_halt_poll(cq_host->mmc, false);
+ mmc_host_clr_halt(cq_host->mmc);
+ return -EINVAL;
+ }
+
+ tag = ffs(dbr_set) - 1;
+ pr_err("%s: error tag selected: tag = %d\n",
+ mmc_hostname(cq_host->mmc), tag);
+ return tag;
+}
+
+static bool cmdq_is_valid_tag(struct mmc_host *mmc, unsigned int tag)
+{
+ struct mmc_cmdq_context_info *ctx_info = &mmc->cmdq_ctx;
+
+ return
+ (!!(ctx_info->data_active_reqs & (1 << tag)) || tag == DCMD_SLOT);
+}
+
static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
{
struct mmc_request *mrq;
@@ -897,7 +924,7 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
mrq->done(mrq);
}
-irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
+irqreturn_t cmdq_irq(struct mmc_host *mmc, int err, bool is_cmd_err)
{
u32 status;
unsigned long tag = 0, comp_status;
@@ -959,18 +986,10 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
* have caused such error, so check for any first
* bit set in doorbell and proceed with an error.
*/
- dbr_set = cmdq_readl(cq_host, CQTDBR);
- if (!dbr_set) {
- pr_err("%s: spurious/force error interrupt\n",
- mmc_hostname(mmc));
- cmdq_halt_poll(mmc, false);
- mmc_host_clr_halt(mmc);
- return IRQ_HANDLED;
- }
+ tag = cmdq_get_first_valid_tag(cq_host);
+ if (tag == -EINVAL)
+ goto hac;
- tag = ffs(dbr_set) - 1;
- pr_err("%s: error tag selected: tag = %lu\n",
- mmc_hostname(mmc), tag);
mrq = get_req_by_tag(cq_host, tag);
if (mrq->data)
mrq->data->error = err;
@@ -985,10 +1004,24 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
goto skip_cqterri;
}
- if (err_info & CQ_RMEFV) {
+ if (is_cmd_err && (err_info & CQ_RMEFV)) {
tag = GET_CMD_ERR_TAG(err_info);
pr_err("%s: CMD err tag: %lu\n", __func__, tag);
+ /*
+ * In some cases CQTERRI is not providing reliable tag
+ * info. If the tag is not valid, complete the request
+ * with any valid tag so that all tags will get
+ * requeued.
+ */
+ if (!cmdq_is_valid_tag(mmc, tag)) {
+ pr_err("%s: CMD err tag is invalid: %lu\n",
+ __func__, tag);
+ tag = cmdq_get_first_valid_tag(cq_host);
+ if (tag == -EINVAL)
+ goto hac;
+ }
+
mrq = get_req_by_tag(cq_host, tag);
/* CMD44/45/46/47 will not have a valid cmd */
if (mrq->cmd)
@@ -998,8 +1031,26 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
} else {
tag = GET_DAT_ERR_TAG(err_info);
pr_err("%s: Dat err tag: %lu\n", __func__, tag);
+
+ /*
+ * In some cases CQTERRI is not providing reliable tag
+ * info. If the tag is not valid, complete the request
+ * with any valid tag so that all tags will get
+ * requeued.
+ */
+ if (!cmdq_is_valid_tag(mmc, tag)) {
+ pr_err("%s: CMD err tag is invalid: %lu\n",
+ __func__, tag);
+ tag = cmdq_get_first_valid_tag(cq_host);
+ if (tag == -EINVAL)
+ goto hac;
+ }
mrq = get_req_by_tag(cq_host, tag);
- mrq->data->error = err;
+
+ if (mrq->data)
+ mrq->data->error = err;
+ else
+ mrq->cmd->error = err;
}
skip_cqterri:
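
cmdq_get_first_valid_tag() leans on the 1-based ffs() convention: the lowest set bit of the CQTDBR doorbell maps to tag ffs(dbr_set) - 1, and a zero doorbell means the error interrupt was spurious. A plain userspace illustration of the bit math (the kernel's ffs() has the same 1-based semantics):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int dbr_set = 0x28;	/* tags 3 and 5 still pending */

		if (!dbr_set)
			puts("spurious error interrupt: no tag to recover");
		else
			printf("first pending tag = %d\n",
			       ffs(dbr_set) - 1);	/* prints 3 */
		return 0;
	}
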
diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h
index 1aabce98d199..0349989e4126 100644
--- a/drivers/mmc/host/cmdq_hci.h
+++ b/drivers/mmc/host/cmdq_hci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -244,7 +244,7 @@ static inline u32 cmdq_readl(struct cmdq_host *host, int reg)
return readl_relaxed(host->mmio + reg);
}
-extern irqreturn_t cmdq_irq(struct mmc_host *mmc, int err);
+extern irqreturn_t cmdq_irq(struct mmc_host *mmc, int err, bool is_cmd_err);
extern int cmdq_init(struct cmdq_host *cq_host, struct mmc_host *mmc,
bool dma64);
extern struct cmdq_host *cmdq_pltfm_init(struct platform_device *pdev);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 36aecd2f8228..d9321d00dd68 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -149,6 +149,7 @@ static void sdhci_dumpregs(struct sdhci_host *host)
}
host->mmc->err_occurred = true;
+ host->mmc->last_failed_rq_time = ktime_get();
if (host->ops->dump_vendor_regs)
host->ops->dump_vendor_regs(host);
@@ -3300,13 +3301,18 @@ static irqreturn_t sdhci_cmdq_irq(struct sdhci_host *host, u32 intmask)
int err = 0;
u32 mask = 0;
irqreturn_t ret;
+ bool is_cmd_err = false;
- if (intmask & SDHCI_INT_CMD_MASK)
+ if (intmask & SDHCI_INT_CMD_MASK) {
err = sdhci_get_cmd_err(host, intmask);
- else if (intmask & SDHCI_INT_DATA_MASK)
+ is_cmd_err = true;
+ } else if (intmask & SDHCI_INT_DATA_MASK) {
err = sdhci_get_data_err(host, intmask);
+ if (intmask & SDHCI_INT_DATA_TIMEOUT)
+ is_cmd_err = sdhci_card_busy(host->mmc);
+ }
- ret = cmdq_irq(host->mmc, err);
+ ret = cmdq_irq(host->mmc, err, is_cmd_err);
if (err) {
/* Clear the error interrupts */
mask = intmask & SDHCI_INT_ERROR_MASK;
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 48b7281a5e25..bc771bfb28c9 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -148,7 +148,8 @@ struct mmc_cmdq_req;
extern int mmc_cmdq_discard_queue(struct mmc_host *host, u32 tasks);
extern int mmc_cmdq_halt(struct mmc_host *host, bool enable);
-extern int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host);
+extern int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host,
+ unsigned long timeout);
extern void mmc_cmdq_post_req(struct mmc_host *host, int tag, int err);
extern int mmc_cmdq_start_req(struct mmc_host *host,
struct mmc_cmdq_req *cmdq_req);
@@ -231,12 +232,14 @@ extern int mmc_detect_card_removed(struct mmc_host *host);
extern void mmc_blk_init_bkops_statistics(struct mmc_card *card);
-extern void mmc_deferred_scaling(struct mmc_host *host);
+extern void mmc_deferred_scaling(struct mmc_host *host, unsigned long timeout);
extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
bool lock_needed);
extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
bool lock_needed, bool is_cmdq_dcmd);
extern int mmc_recovery_fallback_lower_speed(struct mmc_host *host);
+extern void mmc_cmdq_up_rwsem(struct mmc_host *host);
+extern int mmc_cmdq_down_rwsem(struct mmc_host *host, struct request *rq);
/**
* mmc_claim_host - exclusively claim a host
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 149b71810477..9ad76f2590fe 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -286,6 +286,7 @@ struct mmc_slot {
* @wait waiting for all conditions described in
* mmc_cmdq_ready_wait to be satisified before
* issuing the new request to LLD.
+ * @err_rwsem synchronizes issue/completion/error-handler ctx
*/
struct mmc_cmdq_context_info {
unsigned long active_reqs; /* in-flight requests */
@@ -299,6 +300,7 @@ struct mmc_cmdq_context_info {
wait_queue_head_t queue_empty_wq;
wait_queue_head_t wait;
int active_small_sector_read_reqs;
+ struct rw_semaphore err_rwsem;
};
/**
@@ -582,6 +584,8 @@ struct mmc_host {
bool err_occurred;
u32 err_stats[MMC_ERR_MAX];
+ ktime_t last_failed_rq_time;
+ ktime_t last_completed_rq_time;
struct mmc_async_req *areq; /* active async req */
struct mmc_context_info context_info; /* async synchronization info */