aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinaro CI <ci_notify@linaro.org>2020-09-11 11:16:17 +0000
committerLinaro CI <ci_notify@linaro.org>2020-09-11 11:16:17 +0000
commit39e5a2a20deb0338a661be3fe2f72ec4e4052b1f (patch)
treeb6b5099cff4a593d814f2cbbb4a89e41c103af32
parentb94cf041ee43653e39e6d31bb7444560db3de197 (diff)
parent3e542ba3b96bfe990116a41943999bc440e50e2d (diff)
Merge remote-tracking branch 'iommu/tracking-qcomlt-iommu' into integration-linux-qcomlt
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c48
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c124
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h21
3 files changed, 175 insertions, 18 deletions
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index be4318044f96..147af11049eb 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -23,6 +23,53 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ }
};
+static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
+{
+ unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
+ u32 smr;
+ u32 reg;
+ int i;
+
+ /*
+ * With some firmware writes to S2CR of type FAULT are ignored, and
+ * writing BYPASS will end up as FAULT in the register. Perform a write
+ * to S2CR to detect if this is the case with the current firmware.
+ */
+ arm_smmu_gr0_write(smmu, last_s2cr, FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
+ FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
+ FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT));
+ reg = arm_smmu_gr0_read(smmu, last_s2cr);
+ if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
+ smmu->qcom_bypass_quirk = true;
+
+ /*
+ * With firmware ignoring writes of type FAULT, booting the
+ * Linux kernel with disable_bypass disabled (i.e. "enable
+ * bypass") the initialization during probe will leave mappings
+ * in an inconsistent state. Avoid this by configuring all
+ * S2CRs to BYPASS.
+ */
+ for (i = 0; i < smmu->num_mapping_groups; i++) {
+ smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
+ smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
+ smmu->s2crs[i].cbndx = 0xff;
+ smmu->s2crs[i].count = 0;
+ }
+ }
+
+ for (i = 0; i < smmu->num_mapping_groups; i++) {
+ smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+
+ if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
+ smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
+ smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
+ smmu->smrs[i].valid = true;
+ }
+ }
+
+ return 0;
+}
+
static int qcom_smmu_def_domain_type(struct device *dev)
{
const struct of_device_id *match =
@@ -61,6 +108,7 @@ static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
}
static const struct arm_smmu_impl qcom_smmu_impl = {
+ .cfg_probe = qcom_smmu_cfg_probe,
.def_domain_type = qcom_smmu_def_domain_type,
.reset = qcom_smmu500_reset,
};
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 09c42af9f31e..cbff8e5589f2 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -65,24 +65,10 @@ module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
-struct arm_smmu_s2cr {
- struct iommu_group *group;
- int count;
- enum arm_smmu_s2cr_type type;
- enum arm_smmu_s2cr_privcfg privcfg;
- u8 cbndx;
-};
-
#define s2cr_init_val (struct arm_smmu_s2cr){ \
.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
}
-struct arm_smmu_smr {
- u16 mask;
- u16 id;
- bool valid;
-};
-
struct arm_smmu_cb {
u64 ttbr[2];
u32 tcr[2];
@@ -651,7 +637,9 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
/* SCTLR */
reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
- ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
+ ARM_SMMU_SCTLR_TRE;
+ if (cfg->m)
+ reg |= ARM_SMMU_SCTLR_M;
if (stage1)
reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
@@ -661,7 +649,8 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
- struct arm_smmu_device *smmu)
+ struct arm_smmu_device *smmu,
+ bool boot_domain)
{
int irq, start, ret = 0;
unsigned long ias, oas;
@@ -676,7 +665,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ /*
+	 * Nothing to do for IDENTITY domains, unless disabled context banks are
+ * used to emulate bypass mappings on Qualcomm platforms.
+ */
+ if (domain->type == IOMMU_DOMAIN_IDENTITY && !smmu->qcom_bypass_quirk) {
smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
smmu_domain->smmu = smmu;
goto out_unlock;
@@ -776,6 +769,15 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ret = -EINVAL;
goto out_unlock;
}
+
+ /*
+ * Use the last context bank for identity mappings during boot, to
+ * avoid overwriting in-use bank configuration while we're setting up
+ * the new mappings.
+ */
+ if (boot_domain)
+ start = smmu->num_context_banks - 1;
+
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
smmu->num_context_banks);
if (ret < 0)
@@ -824,6 +826,10 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
domain->geometry.aperture_end = (1UL << ias) - 1;
domain->geometry.force_aperture = true;
+ /* Enable translation for non-identity context banks */
+ if (domain->type != IOMMU_DOMAIN_IDENTITY)
+ cfg->m = true;
+
/* Initialise the context bank with our page table cfg */
arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
arm_smmu_write_context_bank(smmu, cfg->cbndx);
@@ -1157,7 +1163,10 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_master_cfg *cfg;
struct arm_smmu_device *smmu;
+ bool free_identity_domain = false;
+ int idx;
int ret;
+ int i;
if (!fwspec || fwspec->ops != &arm_smmu_ops) {
dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
@@ -1182,7 +1191,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return ret;
/* Ensure that the domain is finalised */
- ret = arm_smmu_init_domain_context(domain, smmu);
+ ret = arm_smmu_init_domain_context(domain, smmu, false);
if (ret < 0)
goto rpm_put;
@@ -1198,10 +1207,35 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto rpm_put;
}
+ /* Decrement use counter for any references to the identity domain */
+ mutex_lock(&smmu->stream_map_mutex);
+ if (smmu->identity) {
+ struct arm_smmu_domain *identity = to_smmu_domain(smmu->identity);
+
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
+ dev_err(smmu->dev, "%s() %#x\n", __func__, smmu->smrs[idx].id);
+ if (smmu->s2crs[idx].cbndx == identity->cfg.cbndx) {
+ smmu->num_identity_masters--;
+ if (smmu->num_identity_masters == 0)
+ free_identity_domain = true;
+ }
+ }
+ }
+ mutex_unlock(&smmu->stream_map_mutex);
+
/* Looks ok, so add the device to the domain */
ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
/*
+ * The last stream map to reference the identity domain has been
+ * overwritten, so it's now okay to free it.
+ */
+ if (free_identity_domain) {
+ arm_smmu_domain_free(smmu->identity);
+ smmu->identity = NULL;
+ }
+
+ /*
* Setup an autosuspend delay to avoid bouncing runpm state.
* Otherwise, if a driver for a suspended consumer device
* unmaps buffers, it will runpm resume/suspend for each one.
@@ -1931,6 +1965,56 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return 0;
}
+int arm_smmu_setup_identity(struct arm_smmu_device *smmu)
+{
+ struct device *dev = smmu->dev;
+ int cbndx = 0xff;
+ int type = S2CR_TYPE_BYPASS;
+ int ret;
+ int i;
+
+ if (smmu->qcom_bypass_quirk) {
+ /* Create a IDENTITY domain to use for all inherited streams */
+ smmu->identity = arm_smmu_domain_alloc(IOMMU_DOMAIN_IDENTITY);
+ if (!smmu->identity) {
+ dev_err(dev, "failed to create identity domain\n");
+ return -ENOMEM;
+ }
+
+ smmu->identity->pgsize_bitmap = smmu->pgsize_bitmap;
+ smmu->identity->type = IOMMU_DOMAIN_IDENTITY;
+ smmu->identity->ops = &arm_smmu_ops;
+
+ ret = arm_smmu_init_domain_context(smmu->identity, smmu, true);
+ if (ret < 0) {
+ dev_err(dev, "failed to initialize identity domain: %d\n", ret);
+ return ret;
+ }
+
+ type = S2CR_TYPE_TRANS;
+ cbndx = to_smmu_domain(smmu->identity)->cfg.cbndx;
+ }
+
+ for (i = 0; i < smmu->num_mapping_groups; i++) {
+ if (smmu->smrs[i].valid) {
+ smmu->s2crs[i].type = type;
+ smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
+ smmu->s2crs[i].cbndx = cbndx;
+ smmu->s2crs[i].count++;
+
+ smmu->num_identity_masters++;
+ }
+ }
+
+	/* If no mappings were found, free the identity domain again */
+ if (smmu->identity && !smmu->num_identity_masters) {
+ arm_smmu_domain_free(smmu->identity);
+ smmu->identity = NULL;
+ }
+
+ return 0;
+}
+
struct arm_smmu_match_data {
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
@@ -2190,6 +2274,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (err)
return err;
+ err = arm_smmu_setup_identity(smmu);
+ if (err)
+ return err;
+
if (smmu->version == ARM_SMMU_V2) {
if (smmu->num_context_banks > smmu->num_context_irqs) {
dev_err(dev,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index d890a4a968e8..31b42060e824 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -254,6 +254,21 @@ enum arm_smmu_implementation {
QCOM_SMMUV2,
};
+struct arm_smmu_s2cr {
+ struct iommu_group *group;
+ int count;
+ enum arm_smmu_s2cr_type type;
+ enum arm_smmu_s2cr_privcfg privcfg;
+ u8 cbndx;
+};
+
+struct arm_smmu_smr {
+ u16 mask;
+ u16 id;
+ bool valid;
+ bool pinned;
+};
+
struct arm_smmu_device {
struct device *dev;
@@ -308,6 +323,11 @@ struct arm_smmu_device {
/* IOMMU core code handle */
struct iommu_device iommu;
+
+ struct iommu_domain *identity;
+ unsigned int num_identity_masters;
+
+ bool qcom_bypass_quirk;
};
enum arm_smmu_context_fmt {
@@ -326,6 +346,7 @@ struct arm_smmu_cfg {
};
enum arm_smmu_cbar_type cbar;
enum arm_smmu_context_fmt fmt;
+ bool m;
};
#define ARM_SMMU_INVALID_IRPTNDX 0xff