author     Linaro CI <ci_notify@linaro.org>  2019-09-04 10:39:50 +0000
committer  Linaro CI <ci_notify@linaro.org>  2019-09-04 10:39:50 +0000
commit     92031a5f6d699b098919a6aac55ec31f02c89ddb (patch)
tree       50c9251a6cf3dfe48a86816888c35c0d17ae0961
parent     96f41e6e746180d1606cc31e4b8cc5602c852bda (diff)
parent     6e79043bc13226542ffa347bf52d23a20aee4956 (diff)
Merge remote-tracking branch 'iommu/tracking-qcomlt-iommu' into integration-linux-qcomlt
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm845.dtsi  |   1
-rw-r--r--  drivers/firmware/qcom_scm-32.c        |  17
-rw-r--r--  drivers/firmware/qcom_scm-64.c        | 181
-rw-r--r--  drivers/firmware/qcom_scm.c           |  18
-rw-r--r--  drivers/firmware/qcom_scm.h           |   9
-rw-r--r--  drivers/iommu/arm-smmu-regs.h         |   4
-rw-r--r--  drivers/iommu/arm-smmu.c              | 250
-rw-r--r--  include/linux/qcom_scm.h              |   6
8 files changed, 431 insertions(+), 55 deletions(-)
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 4babff5f19b5..71291064cb6e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -2502,6 +2502,7 @@
<GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,smmu-500-fw-impl-errata;
};
lpasscc: clock-controller@17014000 {
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 215061c581e1..e64654d7dda7 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -614,3 +614,20 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
addr, val);
}
+
+int __qcom_scm_io_readl_atomic(struct device *dev, phys_addr_t addr,
+ unsigned int *val)
+{
+ return -ENODEV;
+}
+
+int __qcom_scm_io_writel_atomic(struct device *dev, phys_addr_t addr,
+ unsigned int val)
+{
+ return -ENODEV;
+}
+
+int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable)
+{
+ return -ENODEV;
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index 91d5ad7cf58b..cb4712579188 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -62,32 +62,71 @@ static DEFINE_MUTEX(qcom_scm_lock);
#define FIRST_EXT_ARG_IDX 3
#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
-/**
- * qcom_scm_call() - Invoke a syscall in the secure world
- * @dev: device
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @desc: Descriptor structure containing arguments and return values
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This should *only* be called in pre-emptible context.
-*/
-static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
- const struct qcom_scm_desc *desc,
- struct arm_smccc_res *res)
+static void __qcom_scm_call_do(const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, u32 fn_id,
+ u64 x5, u32 type)
+{
+ u64 cmd;
+ struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6};
+
+ cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention,
+ ARM_SMCCC_OWNER_SIP, fn_id);
+
+ quirk.state.a6 = 0;
+
+ do {
+ arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2], x5,
+ quirk.state.a6, 0, res, &quirk);
+
+ if (res->a0 == QCOM_SCM_INTERRUPTED)
+ cmd = res->a0;
+
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+static void qcom_scm_call_do(const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, u32 fn_id,
+ u64 x5, bool atomic)
+{
+ int retry_count = 0;
+
+ if (!atomic) {
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ __qcom_scm_call_do(desc, res, fn_id, x5,
+ ARM_SMCCC_STD_CALL);
+
+ mutex_unlock(&qcom_scm_lock);
+
+ if (res->a0 == QCOM_SCM_V2_EBUSY) {
+ if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+ break;
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ }
+ } while (res->a0 == QCOM_SCM_V2_EBUSY);
+ } else {
+ __qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL);
+ }
+}
+
+static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
- int retry_count = 0, i;
+ int i;
u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
- u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX];
+ u64 x5 = desc->args[FIRST_EXT_ARG_IDX];
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
- struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6};
+ gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
if (unlikely(arglen > N_REGISTER_ARGS)) {
alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
- args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+ args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
if (!args_virt)
return -ENOMEM;
@@ -117,33 +156,7 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
x5 = args_phys;
}
- do {
- mutex_lock(&qcom_scm_lock);
-
- cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
- qcom_smccc_convention,
- ARM_SMCCC_OWNER_SIP, fn_id);
-
- quirk.state.a6 = 0;
-
- do {
- arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
- desc->args[1], desc->args[2], x5,
- quirk.state.a6, 0, res, &quirk);
-
- if (res->a0 == QCOM_SCM_INTERRUPTED)
- cmd = res->a0;
-
- } while (res->a0 == QCOM_SCM_INTERRUPTED);
-
- mutex_unlock(&qcom_scm_lock);
-
- if (res->a0 == QCOM_SCM_V2_EBUSY) {
- if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
- break;
- msleep(QCOM_SCM_EBUSY_WAIT_MS);
- }
- } while (res->a0 == QCOM_SCM_V2_EBUSY);
+ qcom_scm_call_do(desc, res, fn_id, x5, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
@@ -157,6 +170,41 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
}
/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ */
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res)
+{
+ return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false);
+}
+
+/**
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should be called in atomic context only.
+ */
+static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res)
+{
+ return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true);
+}
+
+/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus
* @cpus: The cpumask of cpus that will use the entry point
@@ -502,3 +550,48 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
&desc, &res);
}
+
+int __qcom_scm_io_readl_atomic(struct device *dev, phys_addr_t addr,
+ unsigned int *val)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+ int ret;
+
+ desc.args[0] = addr;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call_atomic(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ,
+ &desc, &res);
+ if (ret >= 0)
+ *val = res.a1;
+
+ return ret < 0 ? ret : 0;
+}
+
+int __qcom_scm_io_writel_atomic(struct device *dev, phys_addr_t addr,
+ unsigned int val)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = addr;
+ desc.args[1] = val;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
+ &desc, &res);
+}
+
+int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL;
+ desc.args[1] = en;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM,
+ QCOM_SCM_CONFIG_ERRATA1, &desc, &res);
+}
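
The split above means a caller picks a variant by context: the sleeping
path serialises on qcom_scm_lock and retries QCOM_SCM_V2_EBUSY with
msleep(), while the atomic path issues an ARM_SMCCC_FAST_CALL with
GFP_ATOMIC allocation and no retry loop. A minimal sketch of how a driver
might choose between the wrappers this series exports; the register
address and the function itself are hypothetical, not part of the patch:

#include <linux/qcom_scm.h>

/* Hypothetical secure-world-managed register, for illustration only. */
#define EXAMPLE_SECURE_REG	0x1000

static int example_read_secure_reg(unsigned int *val, bool in_atomic_ctx)
{
	/*
	 * qcom_scm_io_readl() may sleep (mutex plus msleep() retry on
	 * EBUSY); under a spinlock or in IRQ context the new _atomic
	 * variant must be used instead.
	 */
	if (in_atomic_ctx)
		return qcom_scm_io_readl_atomic(EXAMPLE_SECURE_REG, val);

	return qcom_scm_io_readl(EXAMPLE_SECURE_REG, val);
}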
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 2ddc118dba1b..eb4d979e8309 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -344,6 +344,12 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+{
+ return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en);
+}
+EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
return __qcom_scm_io_readl(__scm->dev, addr, val);
@@ -356,6 +362,18 @@ int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
}
EXPORT_SYMBOL(qcom_scm_io_writel);
+int qcom_scm_io_readl_atomic(phys_addr_t addr, unsigned int *val)
+{
+ return __qcom_scm_io_readl_atomic(__scm->dev, addr, val);
+}
+EXPORT_SYMBOL(qcom_scm_io_readl_atomic);
+
+int qcom_scm_io_writel_atomic(phys_addr_t addr, unsigned int val)
+{
+ return __qcom_scm_io_writel_atomic(__scm->dev, addr, val);
+}
+EXPORT_SYMBOL(qcom_scm_io_writel_atomic);
+
static void qcom_scm_set_download_mode(bool enable)
{
bool avail;
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 99506bd873c0..5b386dceaa3e 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -29,6 +29,10 @@ extern void __qcom_scm_cpu_power_down(u32 flags);
#define QCOM_SCM_IO_WRITE 0x2
extern int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, unsigned int *val);
extern int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val);
+extern int __qcom_scm_io_readl_atomic(struct device *dev, phys_addr_t addr,
+ unsigned int *val);
+extern int __qcom_scm_io_writel_atomic(struct device *dev, phys_addr_t addr,
+ unsigned int val);
#define QCOM_SCM_SVC_INFO 0x6
#define QCOM_IS_CALL_AVAIL_CMD 0x1
@@ -91,10 +95,15 @@ extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
u32 spare);
#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE 3
#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT 4
+#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
+#define QCOM_SCM_CONFIG_ERRATA1 0x3
+#define QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
size_t *size);
extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
u32 size, u32 spare);
+extern int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev,
+ bool enable);
#define QCOM_MEM_PROT_ASSIGN_ID 0x16
extern int __qcom_scm_assign_mem(struct device *dev,
phys_addr_t mem_region, size_t mem_sz,
diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h
index 1c278f7ae888..12ec8c955b8f 100644
--- a/drivers/iommu/arm-smmu-regs.h
+++ b/drivers/iommu/arm-smmu-regs.h
@@ -93,7 +93,9 @@
#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
#define SMR_VALID (1 << 31)
#define SMR_MASK_SHIFT 16
+#define SMR_MASK_MASK 0x7fff
#define SMR_ID_SHIFT 0
+#define SMR_ID_MASK 0xffff
#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT 0
@@ -167,6 +169,8 @@ enum arm_smmu_s2cr_privcfg {
#define ARM_SMMU_CB_ATS1PR 0x800
#define ARM_SMMU_CB_ATSR 0x8f0
+#define ARM_SMMU_GID_QCOM_CUSTOM_CFG 0x300
+
#define SCTLR_S1_ASIDPNE (1 << 12)
#define SCTLR_CFCFG (1 << 7)
#define SCTLR_CFIE (1 << 6)
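
The two new masks allow the ID and MASK fields to be pulled back out of a
raw SMR value, which is how arm_smmu_read_smr_state() consumes them
further down. A small illustrative decoder, assuming only the definitions
above:

struct smr_fields {
	bool valid;	/* SMR[31]    */
	u16 mask;	/* SMR[30:16] */
	u16 id;		/* SMR[15:0]  */
};

static struct smr_fields smr_decode(u32 smr)
{
	return (struct smr_fields){
		.valid = !!(smr & SMR_VALID),
		.mask  = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK,
		.id    = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK,
	};
}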
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 64977c131ee6..854a8773dc46 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -39,6 +39,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -132,6 +133,7 @@ struct arm_smmu_s2cr {
enum arm_smmu_s2cr_type type;
enum arm_smmu_s2cr_privcfg privcfg;
u8 cbndx;
+ bool handoff;
};
#define s2cr_init_val (struct arm_smmu_s2cr){ \
@@ -185,7 +187,8 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_EXIDS (1 << 12)
u32 features;
-#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+#define ARM_SMMU_OPT_QCOM_FW_IMPL_ERRATA (1 << 1)
u32 options;
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
@@ -271,6 +274,7 @@ static bool using_legacy_binding, using_generic_binding;
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
+ { ARM_SMMU_OPT_QCOM_FW_IMPL_ERRATA, "qcom,smmu-500-fw-impl-errata" },
{ 0, NULL},
};
@@ -396,9 +400,22 @@ static int arm_smmu_register_legacy_master(struct device *dev,
return err;
}
-static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
+static int __arm_smmu_alloc_cb(struct arm_smmu_device *smmu, int start,
+ struct device *dev)
{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ unsigned long *map = smmu->context_map;
+ int end = smmu->num_context_banks;
+ int cbndx;
int idx;
+ int i;
+
+ for_each_cfg_sme(fwspec, i, idx) {
+ if (smmu->s2crs[idx].handoff) {
+ cbndx = smmu->s2crs[idx].cbndx;
+ goto found_handoff;
+ }
+ }
do {
idx = find_next_zero_bit(map, end, start);
@@ -407,6 +424,18 @@ static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
} while (test_and_set_bit(idx, map));
return idx;
+
+found_handoff:
+
+ for (i = 0; i < smmu->num_mapping_groups; i++) {
+ if (smmu->s2crs[i].cbndx == cbndx) {
+ smmu->s2crs[i].cbndx = 0;
+ smmu->s2crs[i].handoff = false;
+ smmu->s2crs[i].count -= 1;
+ }
+ }
+
+ return cbndx;
}
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
@@ -547,12 +576,134 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}
+#define CUSTOM_CFG_MDP_SAFE_ENABLE BIT(15)
+#define CUSTOM_CFG_IFE1_SAFE_ENABLE BIT(14)
+#define CUSTOM_CFG_IFE0_SAFE_ENABLE BIT(13)
+
+static int __qsmmu500_wait_safe_toggle(struct arm_smmu_device *smmu, int en)
+{
+ int ret;
+ u32 val, gid_phys_base;
+ phys_addr_t reg;
+ struct vm_struct *vm;
+
+ /* We need the physical address of the SMMU, so look up its vm_area */
+ vm = find_vm_area(smmu->base);
+
+ /*
+ * GID (implementation defined address space) is located at
+ * SMMU_BASE + (2 × PAGESIZE).
+ */
+ gid_phys_base = vm->phys_addr + (2 << (smmu)->pgshift);
+ reg = gid_phys_base + ARM_SMMU_GID_QCOM_CUSTOM_CFG;
+
+ ret = qcom_scm_io_readl_atomic(reg, &val);
+ if (ret)
+ return ret;
+
+ if (en)
+ val |= CUSTOM_CFG_MDP_SAFE_ENABLE |
+ CUSTOM_CFG_IFE0_SAFE_ENABLE |
+ CUSTOM_CFG_IFE1_SAFE_ENABLE;
+ else
+ val &= ~(CUSTOM_CFG_MDP_SAFE_ENABLE |
+ CUSTOM_CFG_IFE0_SAFE_ENABLE |
+ CUSTOM_CFG_IFE1_SAFE_ENABLE);
+
+ ret = qcom_scm_io_writel_atomic(reg, val);
+
+ return ret;
+}
+
+static int qsmmu500_wait_safe_toggle(struct arm_smmu_device *smmu,
+ int en, bool is_fw_impl)
+{
+ if (is_fw_impl)
+ return qcom_scm_qsmmu500_wait_safe_toggle(en);
+ else
+ return __qsmmu500_wait_safe_toggle(smmu, en);
+}
+
+static void qcom_errata_tlb_sync(struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+ bool is_fw_impl;
+ u32 val;
+
+ writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
+
+ if (!readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
+ !(val & sTLBGSTATUS_GSACTIVE), 0, 100))
+ return;
+
+ is_fw_impl = smmu->options & ARM_SMMU_OPT_QCOM_FW_IMPL_ERRATA ?
+ true : false;
+
+ /* SCM call here to disable the wait-for-safe logic. */
+ if (WARN(qsmmu500_wait_safe_toggle(smmu, false, is_fw_impl),
+ "Failed to disable wait-safe logic, bad hw state\n"))
+ return;
+
+ if (!readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
+ !(val & sTLBGSTATUS_GSACTIVE), 0, 10000))
+ return;
+
+ /* SCM call here to re-enable the wait-for-safe logic. */
+ WARN(qsmmu500_wait_safe_toggle(smmu, true, is_fw_impl),
+ "Failed to re-enable wait-safe logic, bad hw state\n");
+
+ dev_err_ratelimited(smmu->dev,
+ "TLB sync timed out -- SMMU in bad state\n");
+}
+
+static void qcom_errata_tlb_sync_context(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+ qcom_errata_tlb_sync(smmu_domain);
+ spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
+}
+
+static void qcom_errata_tlb_inv_context_s1(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
+ unsigned long flags;
+
+ spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+ writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+ qcom_errata_tlb_sync(cookie);
+ spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
+}
+
+static void qcom_errata_tlb_inv_range_nosync(unsigned long iova, size_t size,
+ size_t granule, bool leaf,
+ void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+ arm_smmu_tlb_inv_range_nosync(iova, size, granule, leaf, cookie);
+ spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
+}
+
static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context_s1,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync_context,
};
+static const struct iommu_gather_ops qcom_errata_s1_tlb_ops = {
+ .tlb_flush_all = qcom_errata_tlb_inv_context_s1,
+ .tlb_add_flush = qcom_errata_tlb_inv_range_nosync,
+ .tlb_sync = qcom_errata_tlb_sync_context,
+};
+
static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
.tlb_flush_all = arm_smmu_tlb_inv_context_s2,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
@@ -756,7 +907,8 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
- struct arm_smmu_device *smmu)
+ struct arm_smmu_device *smmu,
+ struct device *dev)
{
int irq, start, ret = 0;
unsigned long ias, oas;
@@ -842,7 +994,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ias = min(ias, 32UL);
oas = min(oas, 32UL);
}
- smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
+ if (of_device_is_compatible(smmu->dev->of_node,
+ "qcom,sdm845-smmu-500"))
+ smmu_domain->tlb_ops = &qcom_errata_s1_tlb_ops;
+ else
+ smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
@@ -870,8 +1026,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ret = -EINVAL;
goto out_unlock;
}
- ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
- smmu->num_context_banks);
+ ret = __arm_smmu_alloc_cb(smmu, start, dev);
if (ret < 0)
goto out_unlock;
@@ -1054,23 +1209,35 @@ static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
u32 smr;
+ int idx;
if (!smmu->smrs)
return;
+ for (idx = 0; idx < smmu->num_mapping_groups; idx++) {
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(idx));
+ if (!(smr & SMR_VALID))
+ break;
+ }
+
+ if (idx == smmu->num_mapping_groups) {
+ dev_err(smmu->dev, "Unable to compute streamid_mask\n");
+ return;
+ }
+
/*
* SMR.ID bits may not be preserved if the corresponding MASK
* bits are set, so check each one separately. We can reject
* masters later if they try to claim IDs outside these masks.
*/
smr = smmu->streamid_mask << SMR_ID_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(idx));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(idx));
smmu->streamid_mask = smr >> SMR_ID_SHIFT;
smr = smmu->streamid_mask << SMR_MASK_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(idx));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(idx));
smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}
@@ -1259,7 +1426,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return ret;
/* Ensure that the domain is finalised */
- ret = arm_smmu_init_domain_context(domain, smmu);
+ ret = arm_smmu_init_domain_context(domain, smmu, dev);
if (ret < 0)
goto rpm_put;
@@ -1793,6 +1960,49 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
+static int arm_smmu_read_smr_state(struct arm_smmu_device *smmu)
+{
+ u32 smr, s2cr;
+ u32 mask;
+ u32 type;
+ u32 cbndx;
+ u32 privcfg;
+ u32 id;
+ int i;
+
+ for (i = 0; i < smmu->num_mapping_groups; i++) {
+ smr = readl_relaxed(ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(i));
+ mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
+ id = smr & SMR_ID_MASK;
+ if (!(smr & SMR_VALID))
+ continue;
+
+ s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(i));
+ type = (s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
+ cbndx = (s2cr >> S2CR_CBNDX_SHIFT) & S2CR_CBNDX_MASK;
+ privcfg = (s2cr >> S2CR_PRIVCFG_SHIFT) & S2CR_PRIVCFG_MASK;
+ if (type != S2CR_TYPE_TRANS)
+ continue;
+
+ smmu->smrs[i].mask = mask;
+ smmu->smrs[i].id = id;
+ smmu->smrs[i].valid = true;
+
+ smmu->s2crs[i].group = NULL;
+ smmu->s2crs[i].count = 1;
+ smmu->s2crs[i].type = type;
+ smmu->s2crs[i].privcfg = privcfg;
+ smmu->s2crs[i].cbndx = cbndx;
+ smmu->s2crs[i].handoff = true;
+
+ bitmap_set(smmu->context_map, cbndx, 1);
+
+ dev_err(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n", smr, s2cr, cbndx);
+ }
+
+ return 0;
+}
+
static int arm_smmu_id_size_to_bits(int size)
{
switch (size) {
@@ -2018,6 +2228,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
smmu->ipa_size, smmu->pa_size);
+ arm_smmu_read_smr_state(smmu);
+
return 0;
}
@@ -2178,6 +2390,22 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
+ void __iomem *mdp_intf;
+
+ /*
+ * HACK: Booting the SDM845 with splash screen means that the MDP is
+ * pulling data from the framebuffer, which is remapped in the SMMU.
+ * Initializing the SMMU will remove this mapping and cause a sudden
+ * restart. The dependencies to start the display clocks are not yet in
+ * place. This hack just stops the MDP before initializing the SMMU,
+ * allowing us to rely on the bootloader to initialize the necessary
+ * clocks.
+ */
+ if (of_device_is_compatible(dev->of_node, "qcom,sdm845-smmu-500")) {
+ mdp_intf = ioremap(0xae6b800, 0x300);
+ writel(0, mdp_intf + 0);
+ iounmap(mdp_intf);
+ }
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
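
For reference, the recovery path in qcom_errata_tlb_sync() above is:
issue the sync, poll TLBSTATUS briefly, and only on timeout ask the
secure world to drop the wait-for-safe logic, poll again with a much
larger budget, then restore it. A condensed sketch of that sequence built
on the exported toggle, assuming arm-smmu driver context; the poll helper
is hypothetical, and unlike the patch this sketch restores the logic on
both outcomes:

#include <linux/io.h>
#include <linux/qcom_scm.h>

/* Hypothetical: poll TLBSTATUS until idle, returning 0 on success. */
extern int example_poll_tlb_sync(void __iomem *cb_base, unsigned int us);

static void example_errata_tlb_sync(void __iomem *cb_base)
{
	writel_relaxed(0, cb_base + ARM_SMMU_CB_TLBSYNC);

	if (!example_poll_tlb_sync(cb_base, 100))
		return;		/* completed within the normal window */

	/* The sync is stuck behind "safe" waiters: disable the logic... */
	if (WARN_ON(qcom_scm_qsmmu500_wait_safe_toggle(false)))
		return;

	/* ...give the sync a 10ms window to drain... */
	if (example_poll_tlb_sync(cb_base, 10000))
		pr_err_ratelimited("TLB sync timed out\n");

	/* ...and restore the wait-for-safe behaviour. */
	WARN_ON(qcom_scm_qsmmu500_wait_safe_toggle(true));
}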
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 3f12cc77fb58..55bd31df2171 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -57,8 +57,11 @@ extern int qcom_scm_set_remote_state(u32 state, u32 id);
extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
+extern int qcom_scm_io_readl_atomic(phys_addr_t addr, unsigned int *val);
+extern int qcom_scm_io_writel_atomic(phys_addr_t addr, unsigned int val);
#else
#include <linux/errno.h>
@@ -96,7 +99,10 @@ qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; }
static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; }
static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; }
static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; }
+static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { return -ENODEV; }
static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return -ENODEV; }
static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { return -ENODEV; }
+static inline int qcom_scm_io_readl_atomic(phys_addr_t addr, unsigned int *val) { return -ENODEV; }
+static inline int qcom_scm_io_writel_atomic(phys_addr_t addr, unsigned int val) { return -ENODEV; }
#endif
#endif
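
Taken together, the header now exposes three new entry points, each
stubbed to return -ENODEV when CONFIG_QCOM_SCM is disabled, so callers
can fall back gracefully. A short illustrative consumer; the function
name and the warning text are examples, not part of the patch:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/qcom_scm.h>

static int example_disable_wait_for_safe(void)
{
	int ret = qcom_scm_qsmmu500_wait_safe_toggle(false);

	/* With CONFIG_QCOM_SCM=n the inline stub above returns -ENODEV. */
	if (ret == -ENODEV)
		pr_warn("SCM unavailable; wait-for-safe left untouched\n");

	return ret;
}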