aboutsummaryrefslogtreecommitdiff
path: root/drivers/mmc/core/block.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/mmc/core/block.c')
-rw-r--r-- drivers/mmc/core/block.c | 16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 672ab90c4b2d..35292e36a1fb 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2409,8 +2409,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct mmc_blk_data *md;
int devidx, ret;
char cap_str[10];
- bool cache_enabled = false;
- bool fua_enabled = false;
+ bool cache_enabled, avoid_fua, fua_enabled = false;
devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
if (devidx < 0) {
@@ -2494,11 +2493,20 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
card->ext_csd.rel_sectors)) {
md->flags |= MMC_BLK_REL_WR;
+ }
+
+ /*
+ * REQ_FUA is supported through eMMC reliable writes, which has been
+ * reported to be a bit costly for some eMMCs. In these cases, let's
+ * rely on the flush requests (REQ_OP_FLUSH) instead, if we can use the
+ * cache-control feature too.
+ */
+ cache_enabled = mmc_cache_enabled(card->host);
+ avoid_fua = cache_enabled && mmc_card_avoid_rel_write(card);
+ if (md->flags & MMC_BLK_REL_WR && !avoid_fua) {
fua_enabled = true;
cache_enabled = true;
}
- if (mmc_cache_enabled(card->host))
- cache_enabled = true;
blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);