aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSrinivas Kandagatla <srinivas.kandagatla@linaro.org>2015-10-16 14:08:34 +0100
committerSrinivas Kandagatla <srinivas.kandagatla@linaro.org>2015-10-16 14:08:34 +0100
commit01f09c96a05020891955b7e47e63cc3e4be65687 (patch)
tree1a5cedf351d6f0ac305789695a3497f4ece4c6bd
parent36d257d29ad2d65bb348c9b6dcde92f9257994f5 (diff)
parent36b6eec76370678da376140bb08378665310fe6b (diff)
Merge branch 'tracking-qcomlt-iommu' into integration-linux-qcomlt
* tracking-qcomlt-iommu: (24 commits) iommu/msm: Fix "scheduling while atomic" bug drm/msm: temp: Add a check to be compatible against both iommuv0/v1 iommu: of: Handle IOMMU lookup failure with deferred probing or error iommu: of: Document the of_iommu_configure() function drivers: platform: Configure dma operations at probe time of: dma: Split of_configure_dma() into mask and ops configuration of: dma: Make of_dma_deconfigure() public of: dma: Move range size workaround to of_dma_get_range() arm: dma-mapping: Don't override dma_ops in arch_setup_dma_ops() temp: Add dummy msm_iommu_get_ctx and fix broken build iommu/msm: Set cacheability attributes without tex remap iommu/msm: Add support for generic master bindings iommu/msm: Move the contents from msm_iommu_dev.c to msm_iommu.c iommu/msm: Add DT adaptation DOWNSTREAM: drm/msm: use downstream iommu qcom: iommu: Make use of domain_alloc and domain_free arm64: provide dma cache routines with same API as 32 bit iommu: msm: Invalidate properly from iommu_unmap iommu: qcom: v1: fix wrong sg interator iommu: qcom: v1: rework secure part and build ...
-rw-r--r--Documentation/devicetree/bindings/iommu/msm,iommu-v0.txt59
-rw-r--r--arch/arm/mm/dma-mapping.c9
-rw-r--r--arch/arm64/include/asm/cacheflush.h6
-rw-r--r--arch/arm64/mm/cache.S4
-rw-r--r--drivers/base/platform.c9
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c43
-rw-r--r--drivers/iommu/Kconfig3
-rw-r--r--drivers/iommu/Makefile3
-rw-r--r--drivers/iommu/msm_iommu.c532
-rw-r--r--drivers/iommu/msm_iommu.h73
-rw-r--r--drivers/iommu/msm_iommu_dev.c392
-rw-r--r--drivers/iommu/msm_iommu_hw-8xxx.h6
-rw-r--r--drivers/iommu/of_iommu.c29
-rw-r--r--drivers/iommu/qcom/Kconfig43
-rw-r--r--drivers/iommu/qcom/Makefile7
-rw-r--r--drivers/iommu/qcom/msm_iommu-v1.c1538
-rw-r--r--drivers/iommu/qcom/msm_iommu.c206
-rw-r--r--drivers/iommu/qcom/msm_iommu_dev-v1.c627
-rw-r--r--drivers/iommu/qcom/msm_iommu_hw-v1.h2320
-rw-r--r--drivers/iommu/qcom/msm_iommu_pagetable.c645
-rw-r--r--drivers/iommu/qcom/msm_iommu_pagetable.h33
-rw-r--r--drivers/iommu/qcom/msm_iommu_perfmon.h233
-rw-r--r--drivers/iommu/qcom/msm_iommu_priv.h71
-rw-r--r--drivers/iommu/qcom/msm_iommu_sec.c876
-rw-r--r--drivers/of/address.c20
-rw-r--r--drivers/of/device.c77
-rw-r--r--drivers/of/of_pci.c3
-rw-r--r--drivers/of/platform.c16
-rw-r--r--include/dt-bindings/msm/msm-bus-ids.h629
-rw-r--r--include/linux/msm-bus-board.h198
-rw-r--r--include/linux/msm-bus.h139
-rw-r--r--include/linux/msm_iommu_domains.h239
-rw-r--r--include/linux/of_device.h14
-rw-r--r--include/linux/qcom_iommu.h388
-rw-r--r--include/soc/qcom/socinfo.h610
35 files changed, 9442 insertions, 658 deletions
diff --git a/Documentation/devicetree/bindings/iommu/msm,iommu-v0.txt b/Documentation/devicetree/bindings/iommu/msm,iommu-v0.txt
new file mode 100644
index 000000000000..21bfbfcc5ae3
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/msm,iommu-v0.txt
@@ -0,0 +1,59 @@
+* QCOM IOMMU
+
+The QCOM IOMMU is an implementation compatible with the ARM VMSA short
+descriptor page tables. It provides address translation for bus masters outside
+of the CPU, each connected to the IOMMU through a port called micro-TLB.
+
+Required Properties:
+
+ - compatible: Must contain "qcom,iommu-v0".
+ - reg: Base address and size of the IOMMU registers.
+ - interrupts: Specifiers for the MMU fault interrupts. For instances that
+ support secure mode two interrupts must be specified, for non-secure and
+ secure mode, in that order. For instances that don't support secure mode a
+ single interrupt must be specified.
+ - #iommu-cells: This is the total number of stream ids that a master would
+ use during transactions which will be specified as a list
+ as a part of iommus property below.
+ - ncb: The total number of context banks in the IOMMU.
+ - clocks : List of clocks to be used during SMMU register access. See
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+ for information about the format. For each clock specified
+ here, there must be a corresponding entry in clock-names
+ (see below).
+
+ - clock-names : List of clock names corresponding to the clocks specified in
+ the "clocks" property (above). See
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+ for more info.
+
+Each bus master connected to an IOMMU must reference the IOMMU in its device
+node with the following property:
+
+ - iommus: A reference to the IOMMU in multiple cells. The first cell is a
+ phandle to the IOMMU and the second cell is the list of the
+ stream ids used by the device.
+
+Example: mdp iommu and its bus master
+
+ mdp_port0: qcom,iommu@7500000 {
+		compatible = "qcom,iommu-v0";
+ #iommu-cells = <2>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc MDP_AXI_CLK>;
+ reg = <0x07500000 0x100000>;
+ interrupts =
+ <GIC_SPI 63 0>,
+ <GIC_SPI 64 0>;
+ ncb = <2>;
+ };
+
+ mdp: qcom,mdp@5100000 {
+ compatible = "qcom,mdp";
+ ...
+ iommus = <&mdp_port0 0 2>;
+ };
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1a7815e5421b..7e2b75f8b046 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2126,6 +2126,15 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct dma_map_ops *dma_ops;
dev->archdata.dma_coherent = coherent;
+
+ /*
+ * Don't override the dma_ops if they have already been set. Ideally
+ * this should be the only location where dma_ops are set, remove this
+ * check when all other callers of set_dma_ops will have disappeared.
+ */
+ if (dev->archdata.dma_ops)
+ return;
+
if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
dma_ops = arm_get_iommu_dma_map_ops(coherent);
else
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index c75b8d027eb1..12137e615cf9 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -85,6 +85,12 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_range(const void *, const void *);
+extern void __dma_inv_range(const void *, const void *);
+extern void __dma_clean_range(const void *, const void *);
+
+#define dmac_flush_range __dma_flush_range
+#define dmac_inv_range __dma_inv_range
+#define dmac_clean_range __dma_clean_range
/*
* Copy user data from/to a page which is mapped into a different
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index eb48d5df4a0f..a17430174087 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -113,7 +113,7 @@ ENTRY(__inval_cache_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-__dma_inv_range:
+ENTRY(__dma_inv_range)
dcache_line_size x2, x3
sub x3, x2, #1
tst x1, x3 // end cache line aligned?
@@ -139,7 +139,7 @@ ENDPROC(__dma_inv_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-__dma_clean_range:
+ENTRY(__dma_clean_range)
dcache_line_size x2, x3
sub x3, x2, #1
bic x0, x0, x3
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f80aaaf9f610..af0dd0284675 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -512,6 +512,10 @@ static int platform_drv_probe(struct device *_dev)
if (ret < 0)
return ret;
+ ret = of_dma_configure_ops(_dev, _dev->of_node);
+ if (ret < 0)
+ goto done;
+
ret = dev_pm_domain_attach(_dev, true);
if (ret != -EPROBE_DEFER) {
ret = drv->probe(dev);
@@ -519,6 +523,10 @@ static int platform_drv_probe(struct device *_dev)
dev_pm_domain_detach(_dev, true);
}
+ if (ret)
+ of_dma_deconfigure(_dev);
+
+done:
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
dev_warn(_dev, "probe deferral not supported\n");
ret = -ENXIO;
@@ -540,6 +548,7 @@ static int platform_drv_remove(struct device *_dev)
ret = drv->remove(dev);
dev_pm_domain_detach(_dev, true);
+ of_dma_deconfigure(_dev);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 7ac2f1997e4a..f033d48cd1f3 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -18,6 +18,8 @@
#include "msm_drv.h"
#include "msm_mmu.h"
+#define DUMMY_CONTEXT 0x1
+
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
@@ -33,14 +35,51 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
+ struct device *dev = mmu->dev;
struct msm_iommu *iommu = to_msm_iommu(mmu);
- return iommu_attach_device(iommu->domain, mmu->dev);
+ int i, ret;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (IS_ERR_OR_NULL(ctx)) {
+ dev_warn(dev, "couldn't get %s context", names[i]);
+ continue;
+ }
+
+ if (ctx == (struct device *)DUMMY_CONTEXT) {
+ return iommu_attach_device(iommu->domain, mmu->dev);
+ } else {
+ ret = iommu_attach_device(iommu->domain, ctx);
+ }
+
+ if (ret) {
+ dev_warn(dev, "could not attach iommu to %s", names[i]);
+ return ret;
+ }
+ }
+
+ return 0;
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- iommu_detach_device(iommu->domain, mmu->dev);
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (IS_ERR_OR_NULL(ctx))
+ continue;
+
+ if (ctx == (struct device *)DUMMY_CONTEXT) {
+ iommu_detach_device(iommu->domain, mmu->dev);
+ break;
+ } else {
+ iommu_detach_device(iommu->domain, ctx);
+ }
+ }
}
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d9da766719c8..f0c75fc36029 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -65,7 +65,6 @@ config MSM_IOMMU
bool "MSM IOMMU Support"
depends on ARM
depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
- depends on BROKEN
select IOMMU_API
help
Support for the IOMMUs found on certain Qualcomm SOCs.
@@ -78,6 +77,8 @@ config IOMMU_PGTABLES_L2
def_bool y
depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
+source "drivers/iommu/qcom/Kconfig"
+
# AMD IOMMU support
config AMD_IOMMU
bool "AMD IOMMU support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index c6dcc513d711..c665bbb5704d 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -5,7 +5,8 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
-obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
+obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
+obj-$(CONFIG_QCOM_IOMMU_V1) += qcom/
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 123a71c31e39..cf871ffc2aa3 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -27,6 +27,8 @@
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of_iommu.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
@@ -39,15 +41,12 @@ __asm__ __volatile__ ( \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))
-#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
-#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
-
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
-
-static int msm_iommu_tex_class[4];
+#define DUMMY_CONTEXT 1
DEFINE_SPINLOCK(msm_iommu_lock);
+static LIST_HEAD(qcom_iommu_devices);
struct msm_priv {
unsigned long *pgtable;
@@ -55,40 +54,87 @@ struct msm_priv {
struct iommu_domain domain;
};
+struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ return (struct device *) DUMMY_CONTEXT;
+}
+EXPORT_SYMBOL(msm_iommu_get_ctx);
+
static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
return container_of(dom, struct msm_priv, domain);
}
-static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
+static int __enable_clocks(struct msm_iommu_dev *iommu)
{
int ret;
- ret = clk_enable(drvdata->pclk);
+ ret = clk_enable(iommu->pclk);
if (ret)
goto fail;
- if (drvdata->clk) {
- ret = clk_enable(drvdata->clk);
+ if (iommu->clk) {
+ ret = clk_enable(iommu->clk);
if (ret)
- clk_disable(drvdata->pclk);
+ clk_disable(iommu->pclk);
}
fail:
return ret;
}
-static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
+static void __disable_clocks(struct msm_iommu_dev *iommu)
{
- clk_disable(drvdata->clk);
- clk_disable(drvdata->pclk);
+ if (iommu->clk)
+ clk_disable(iommu->clk);
+ clk_disable(iommu->pclk);
+}
+
+static void msm_iommu_reset(void __iomem *base, int ncb)
+{
+ int ctx;
+
+ SET_RPUE(base, 0);
+ SET_RPUEIE(base, 0);
+ SET_ESRRESTORE(base, 0);
+ SET_TBE(base, 0);
+ SET_CR(base, 0);
+ SET_SPDMBE(base, 0);
+ SET_TESTBUSCR(base, 0);
+ SET_TLBRSW(base, 0);
+ SET_GLOBAL_TLBIALL(base, 0);
+ SET_RPU_ACR(base, 0);
+ SET_TLBLKCRWE(base, 1);
+
+ for (ctx = 0; ctx < ncb; ctx++) {
+ SET_BPRCOSH(base, ctx, 0);
+ SET_BPRCISH(base, ctx, 0);
+ SET_BPRCNSH(base, ctx, 0);
+ SET_BPSHCFG(base, ctx, 0);
+ SET_BPMTCFG(base, ctx, 0);
+ SET_ACTLR(base, ctx, 0);
+ SET_SCTLR(base, ctx, 0);
+ SET_FSRRESTORE(base, ctx, 0);
+ SET_TTBR0(base, ctx, 0);
+ SET_TTBR1(base, ctx, 0);
+ SET_TTBCR(base, ctx, 0);
+ SET_BFBCR(base, ctx, 0);
+ SET_PAR(base, ctx, 0);
+ SET_FAR(base, ctx, 0);
+ SET_CTX_TLBIALL(base, ctx, 0);
+ SET_TLBFLPTER(base, ctx, 0);
+ SET_TLBSLPTER(base, ctx, 0);
+ SET_TLBLKCR(base, ctx, 0);
+ SET_CONTEXTIDR(base, ctx, 0);
+ }
}
static int __flush_iotlb(struct iommu_domain *domain)
{
struct msm_priv *priv = to_msm_priv(domain);
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_dev *iommu = NULL;
+ struct msm_iommu_ctx_dev *master;
int ret = 0;
+
#ifndef CONFIG_IOMMU_PGTABLES_L2
unsigned long *fl_table = priv->pgtable;
int i;
@@ -105,24 +151,67 @@ static int __flush_iotlb(struct iommu_domain *domain)
}
#endif
- list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
-
- BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
-
- iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
- BUG_ON(!iommu_drvdata);
-
- ret = __enable_clocks(iommu_drvdata);
+ list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
- SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
- __disable_clocks(iommu_drvdata);
+ list_for_each_entry(master, &iommu->ctx_list, list)
+ SET_CTX_TLBIALL(iommu->base, master->num, 0);
+
+ __disable_clocks(iommu);
}
fail:
return ret;
}
+static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
+{
+ int idx;
+
+ do {
+ idx = find_next_zero_bit(map, end, start);
+ if (idx == end)
+ return -ENOSPC;
+ } while (test_and_set_bit(idx, map));
+
+ return idx;
+}
+
+static void msm_iommu_free_ctx(unsigned long *map, int idx)
+{
+ clear_bit(idx, map);
+}
+
+static void config_mids(struct msm_iommu_dev *iommu,
+ struct msm_iommu_ctx_dev *master)
+{
+ int mid, ctx, i;
+
+ for (i = 0; i < master->num_mids; i++) {
+ mid = master->mids[i];
+ ctx = master->num;
+
+ SET_M2VCBR_N(iommu->base, mid, 0);
+ SET_CBACR_N(iommu->base, ctx, 0);
+
+ /* Set VMID = 0 */
+ SET_VMID(iommu->base, mid, 0);
+
+ /* Set the context number for that MID to this context */
+ SET_CBNDX(iommu->base, mid, ctx);
+
+ /* Set MID associated with this context bank to 0*/
+ SET_CBVMID(iommu->base, ctx, 0);
+
+ /* Set the ASID for TLB tagging for this context */
+ SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);
+
+ /* Set security bit override to be Non-secure */
+ SET_NSCFG(iommu->base, mid, 3);
+ }
+}
+
static void __reset_context(void __iomem *base, int ctx)
{
SET_BPRCOSH(base, ctx, 0);
@@ -143,13 +232,10 @@ static void __reset_context(void __iomem *base, int ctx)
SET_TLBFLPTER(base, ctx, 0);
SET_TLBSLPTER(base, ctx, 0);
SET_TLBLKCR(base, ctx, 0);
- SET_PRRR(base, ctx, 0);
- SET_NMRR(base, ctx, 0);
}
static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
- unsigned int prrr, nmrr;
__reset_context(base, ctx);
/* Set up HTW mode */
@@ -179,15 +265,6 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
SET_RCOSH(base, ctx, 1);
SET_RCNSH(base, ctx, 1);
- /* Turn on TEX Remap */
- SET_TRE(base, ctx, 1);
-
- /* Set TEX remap attributes */
- RCP15_PRRR(prrr);
- RCP15_NMRR(nmrr);
- SET_PRRR(base, ctx, prrr);
- SET_NMRR(base, ctx, nmrr);
-
/* Turn on BFB prefetch */
SET_BFBDFE(base, ctx, 1);
@@ -272,94 +349,76 @@ static void msm_iommu_domain_free(struct iommu_domain *domain)
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct msm_priv *priv;
- struct msm_iommu_ctx_dev *ctx_dev;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- struct msm_iommu_ctx_drvdata *tmp_drvdata;
int ret = 0;
unsigned long flags;
+ struct msm_iommu_dev *iommu;
+ struct msm_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_ctx_dev *master;
spin_lock_irqsave(&msm_iommu_lock, flags);
-
- priv = to_msm_priv(domain);
-
- if (!dev) {
- ret = -EINVAL;
- goto fail;
- }
-
- iommu_drvdata = dev_get_drvdata(dev->parent);
- ctx_drvdata = dev_get_drvdata(dev);
- ctx_dev = dev->platform_data;
-
- if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
- ret = -EINVAL;
- goto fail;
- }
-
- if (!list_empty(&ctx_drvdata->attached_elm)) {
- ret = -EBUSY;
- goto fail;
- }
-
- list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
- if (tmp_drvdata == ctx_drvdata) {
- ret = -EBUSY;
- goto fail;
+ list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
+ master = list_first_entry(&iommu->ctx_list,
+ struct msm_iommu_ctx_dev,
+ list);
+ if (master->of_node == dev->of_node) {
+ ret = __enable_clocks(iommu);
+ if (ret)
+ goto fail;
+
+ list_for_each_entry(master, &iommu->ctx_list, list) {
+ if (master->num) {
+ dev_err(dev, "domain already attached");
+ ret = -EEXIST;
+ goto fail;
+ }
+ master->num =
+ msm_iommu_alloc_ctx(iommu->context_map,
+ 0, iommu->ncb);
+ if (IS_ERR_VALUE(master->num)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+ config_mids(iommu, master);
+ __program_context(iommu->base, master->num,
+ __pa(priv->pgtable));
+ }
+ __disable_clocks(iommu);
+ list_add(&iommu->dom_node, &priv->list_attached);
}
+ }
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
-
- __program_context(iommu_drvdata->base, ctx_dev->num,
- __pa(priv->pgtable));
-
- __disable_clocks(iommu_drvdata);
- list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
ret = __flush_iotlb(domain);
-
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
return ret;
}
static void msm_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct msm_priv *priv;
- struct msm_iommu_ctx_dev *ctx_dev;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
+ struct msm_iommu_dev *iommu;
+ struct msm_iommu_ctx_dev *master;
int ret;
spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = to_msm_priv(domain);
-
- if (!dev)
- goto fail;
-
- iommu_drvdata = dev_get_drvdata(dev->parent);
- ctx_drvdata = dev_get_drvdata(dev);
- ctx_dev = dev->platform_data;
-
- if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
- goto fail;
-
ret = __flush_iotlb(domain);
if (ret)
goto fail;
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
-
- __reset_context(iommu_drvdata->base, ctx_dev->num);
- __disable_clocks(iommu_drvdata);
- list_del_init(&ctx_drvdata->attached_elm);
+ list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+ ret = __enable_clocks(iommu);
+ if (ret)
+ goto fail;
+ list_for_each_entry(master, &iommu->ctx_list, list) {
+ msm_iommu_free_ctx(iommu->context_map, master->num);
+ __reset_context(iommu->base, master->num);
+ }
+ __disable_clocks(iommu);
+ }
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
@@ -376,12 +435,14 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
unsigned long *sl_pte;
unsigned long sl_offset;
unsigned int pgprot;
- int ret = 0, tex, sh;
+ int ret = 0, tex = 0, sh;
spin_lock_irqsave(&msm_iommu_lock, flags);
sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
- tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
+
+ if (prot & IOMMU_CACHE)
+ tex = FL_BUFFERABLE | FL_CACHEABLE;
if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
ret = -EINVAL;
@@ -555,47 +616,46 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t va)
{
struct msm_priv *priv;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_dev *iommu;
+ struct msm_iommu_ctx_dev *master;
unsigned int par;
unsigned long flags;
- void __iomem *base;
phys_addr_t ret = 0;
- int ctx;
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = to_msm_priv(domain);
- if (list_empty(&priv->list_attached))
- goto fail;
+ iommu = list_first_entry(&priv->list_attached,
+ struct msm_iommu_dev, dom_node);
- ctx_drvdata = list_entry(priv->list_attached.next,
- struct msm_iommu_ctx_drvdata, attached_elm);
- iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ if (list_empty(&iommu->ctx_list))
+ goto fail;
- base = iommu_drvdata->base;
- ctx = ctx_drvdata->num;
+ master = list_first_entry(&iommu->ctx_list,
+ struct msm_iommu_ctx_dev, list);
+ if (!master)
+ goto fail;
- ret = __enable_clocks(iommu_drvdata);
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
/* Invalidate context TLB */
- SET_CTX_TLBIALL(base, ctx, 0);
- SET_V2PPR(base, ctx, va & V2Pxx_VA);
+ SET_CTX_TLBIALL(iommu->base, master->num, 0);
+ SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);
- par = GET_PAR(base, ctx);
+ par = GET_PAR(iommu->base, master->num);
/* We are dealing with a supersection */
- if (GET_NOFAULT_SS(base, ctx))
+ if (GET_NOFAULT_SS(iommu->base, master->num))
ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
else /* Upper 20 bits from PAR, lower 12 from VA */
ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
- if (GET_FAULT(base, ctx))
+ if (GET_FAULT(iommu->base, master->num))
ret = 0;
- __disable_clocks(iommu_drvdata);
+ __disable_clocks(iommu);
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
return ret;
@@ -629,49 +689,82 @@ static void print_ctx_regs(void __iomem *base, int ctx)
GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
pr_err("SCTLR = %08x ACTLR = %08x\n",
GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
- pr_err("PRRR = %08x NMRR = %08x\n",
- GET_PRRR(base, ctx), GET_NMRR(base, ctx));
+}
+
+static void insert_iommu_master(struct device *dev,
+ struct msm_iommu_dev *iommu,
+ struct of_phandle_args *spec)
+{
+ struct msm_iommu_ctx_dev *master;
+ int sid;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ master->of_node = dev->of_node;
+ list_add(&master->list, &iommu->ctx_list);
+
+ for (sid = 0; sid < spec->args_count; sid++)
+ master->mids[sid] = spec->args[sid];
+
+ master->num_mids = spec->args_count;
+}
+
+static int qcom_iommu_of_xlate(struct device *dev,
+ struct of_phandle_args *spec)
+{
+ struct msm_iommu_dev *iommu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+ list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
+ if (iommu->dev->of_node == spec->np)
+ break;
+ }
+
+ if (!iommu || (iommu->dev->of_node != spec->np))
+ return -ENODEV;
+
+ insert_iommu_master(dev, iommu, spec);
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
+ return 0;
}
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
- struct msm_iommu_drvdata *drvdata = dev_id;
- void __iomem *base;
+ struct msm_iommu_dev *iommu = dev_id;
unsigned int fsr;
int i, ret;
spin_lock(&msm_iommu_lock);
- if (!drvdata) {
+ if (!iommu) {
pr_err("Invalid device ID in context interrupt handler\n");
goto fail;
}
- base = drvdata->base;
-
pr_err("Unexpected IOMMU page fault!\n");
- pr_err("base = %08x\n", (unsigned int) base);
+ pr_err("base = %08x\n", (unsigned int)iommu->base);
- ret = __enable_clocks(drvdata);
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
- for (i = 0; i < drvdata->ncb; i++) {
- fsr = GET_FSR(base, i);
+ for (i = 0; i < iommu->ncb; i++) {
+ fsr = GET_FSR(iommu->base, i);
if (fsr) {
pr_err("Fault occurred in context %d.\n", i);
pr_err("Interesting registers:\n");
- print_ctx_regs(base, i);
- SET_FSR(base, i, 0x4000000F);
+ print_ctx_regs(iommu->base, i);
+ SET_FSR(iommu->base, i, 0x4000000F);
}
}
- __disable_clocks(drvdata);
+ __disable_clocks(iommu);
fail:
spin_unlock(&msm_iommu_lock);
return 0;
}
-static const struct iommu_ops msm_iommu_ops = {
+static struct iommu_ops msm_iommu_ops = {
.capable = msm_iommu_capable,
.domain_alloc = msm_iommu_domain_alloc,
.domain_free = msm_iommu_domain_free,
@@ -682,54 +775,163 @@ static const struct iommu_ops msm_iommu_ops = {
.map_sg = default_iommu_map_sg,
.iova_to_phys = msm_iommu_iova_to_phys,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
+ .of_xlate = qcom_iommu_of_xlate,
};
-static int __init get_tex_class(int icp, int ocp, int mt, int nos)
+static int msm_iommu_probe(struct platform_device *pdev)
{
- int i = 0;
- unsigned int prrr = 0;
- unsigned int nmrr = 0;
- int c_icp, c_ocp, c_mt, c_nos;
-
- RCP15_PRRR(prrr);
- RCP15_NMRR(nmrr);
-
- for (i = 0; i < NUM_TEX_CLASS; i++) {
- c_nos = PRRR_NOS(prrr, i);
- c_mt = PRRR_MT(prrr, i);
- c_icp = NMRR_ICP(nmrr, i);
- c_ocp = NMRR_OCP(nmrr, i);
-
- if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
- return i;
+ struct resource *r;
+ struct msm_iommu_dev *iommu;
+ int ret, par, val;
+
+ iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return -ENODEV;
+
+ iommu->dev = &pdev->dev;
+ INIT_LIST_HEAD(&iommu->ctx_list);
+
+ iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
+ if (IS_ERR(iommu->pclk)) {
+ dev_err(iommu->dev, "could not get smmu_pclk\n");
+ return PTR_ERR(iommu->pclk);
+ }
+ ret = clk_prepare(iommu->pclk);
+ if (ret) {
+ dev_err(iommu->dev, "could not prepare smmu_pclk\n");
+ return ret;
+ }
+
+ iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
+ if (IS_ERR(iommu->clk)) {
+ dev_err(iommu->dev, "could not get iommu_clk\n");
+ clk_unprepare(iommu->pclk);
+ return PTR_ERR(iommu->clk);
+ }
+ ret = clk_prepare(iommu->clk);
+ if (ret) {
+ dev_err(iommu->dev, "could not prepare iommu_clk\n");
+ clk_unprepare(iommu->pclk);
+ return ret;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iommu->base = devm_ioremap_resource(iommu->dev, r);
+ if (IS_ERR(iommu->base)) {
+ dev_err(iommu->dev, "could not get iommu base\n");
+ ret = PTR_ERR(iommu->base);
+ goto fail;
+ }
+
+ iommu->irq = platform_get_irq(pdev, 0);
+ if (iommu->irq < 0) {
+ dev_err(iommu->dev, "could not get iommu irq\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = of_property_read_u32(iommu->dev->of_node, "ncb", &val);
+ if (ret) {
+ dev_err(iommu->dev, "could not get ncb\n");
+ goto fail;
+ }
+ iommu->ncb = val;
+
+ msm_iommu_reset(iommu->base, iommu->ncb);
+ SET_M(iommu->base, 0, 1);
+ SET_PAR(iommu->base, 0, 0);
+ SET_V2PCFG(iommu->base, 0, 1);
+ SET_V2PPR(iommu->base, 0, 0);
+ par = GET_PAR(iommu->base, 0);
+ SET_V2PCFG(iommu->base, 0, 0);
+ SET_M(iommu->base, 0, 0);
+
+ if (!par) {
+ pr_err("Invalid PAR value detected\n");
+ ret = -ENODEV;
+ goto fail;
}
- return -ENODEV;
+ ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
+ msm_iommu_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_secure_irpt_handler",
+ iommu);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
+ goto fail;
+ }
+
+ list_add(&iommu->dev_node, &qcom_iommu_devices);
+ of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);
+
+ platform_set_drvdata(pdev, iommu);
+
+ pr_info("device mapped at %p, irq %d with %d ctx banks\n",
+ iommu->base, iommu->irq, iommu->ncb);
+
+ return 0;
+fail:
+ clk_unprepare(iommu->clk);
+ clk_unprepare(iommu->pclk);
+ return ret;
}
-static void __init setup_iommu_tex_classes(void)
+static const struct of_device_id msm_iommu_dt_match[] = {
+ { .compatible = "qcom,iommu-v0"},
+ {}
+};
+
+static int msm_iommu_remove(struct platform_device *pdev)
+{
+ struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);
+
+ clk_unprepare(iommu->clk);
+ clk_unprepare(iommu->pclk);
+ return 0;
+}
+
+static struct platform_driver msm_iommu_driver = {
+ .driver = {
+ .name = "msm_iommu",
+ .of_match_table = msm_iommu_dt_match,
+ },
+ .probe = msm_iommu_probe,
+ .remove = msm_iommu_remove,
+};
+
+static int __init msm_iommu_driver_init(void)
{
- msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
- get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
+ int ret;
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
- get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
+ ret = platform_driver_register(&msm_iommu_driver);
+ if (ret != 0)
+ pr_err("Failed to register IOMMU driver\n");
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
- get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
+ return ret;
+}
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
- get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
+static void __exit msm_iommu_driver_exit(void)
+{
+ platform_driver_unregister(&msm_iommu_driver);
}
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
static int __init msm_iommu_init(void)
{
- setup_iommu_tex_classes();
bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
return 0;
}
-subsys_initcall(msm_iommu_init);
+static int __init msm_iommu_of_setup(struct device_node *np)
+{
+ msm_iommu_init();
+ return 0;
+}
+
+IOMMU_OF_DECLARE(msm_iommu_of, "qcom,iommu-v0", msm_iommu_of_setup);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/drivers/iommu/msm_iommu.h b/drivers/iommu/msm_iommu.h
index 5c7c955e6d25..4ca25d50d679 100644
--- a/drivers/iommu/msm_iommu.h
+++ b/drivers/iommu/msm_iommu.h
@@ -42,74 +42,53 @@
*/
#define MAX_NUM_MIDS 32
+/* Maximum number of context banks that can be present in IOMMU */
+#define IOMMU_MAX_CBS 128
+
/**
* struct msm_iommu_dev - a single IOMMU hardware instance
- * name Human-readable name given to this IOMMU HW instance
* ncb Number of context banks present on this IOMMU HW instance
+ * dev: IOMMU device
+ * irq: Interrupt number
+ * clk: The bus clock for this IOMMU hardware instance
+ * pclk: The clock for the IOMMU bus interconnect
+ * dev_node: list head in qcom_iommu_device_list
+ * dom_node: list head for domain
+ * ctx_list: list of 'struct msm_iommu_ctx_dev'
+ * context_map: Bitmap to track allocated context banks
*/
struct msm_iommu_dev {
- const char *name;
+ void __iomem *base;
int ncb;
+ struct device *dev;
+ int irq;
+ struct clk *clk;
+ struct clk *pclk;
+ struct list_head dev_node;
+ struct list_head dom_node;
+ struct list_head ctx_list;
+ DECLARE_BITMAP(context_map, IOMMU_MAX_CBS);
};
/**
* struct msm_iommu_ctx_dev - an IOMMU context bank instance
- * name Human-readable name given to this context bank
+ * of_node node ptr of client device
* num Index of this context bank within the hardware
* mids List of Machine IDs that are to be mapped into this context
* bank, terminated by -1. The MID is a set of signals on the
* AXI bus that identifies the function associated with a specific
* memory request. (See ARM spec).
+ * num_mids Total number of mids
+ * node list head in ctx_list
*/
struct msm_iommu_ctx_dev {
- const char *name;
+ struct device_node *of_node;
int num;
int mids[MAX_NUM_MIDS];
+ int num_mids;
+ struct list_head list;
};
-
-/**
- * struct msm_iommu_drvdata - A single IOMMU hardware instance
- * @base: IOMMU config port base address (VA)
- * @ncb The number of contexts on this IOMMU
- * @irq: Interrupt number
- * @clk: The bus clock for this IOMMU hardware instance
- * @pclk: The clock for the IOMMU bus interconnect
- *
- * A msm_iommu_drvdata holds the global driver data about a single piece
- * of an IOMMU hardware instance.
- */
-struct msm_iommu_drvdata {
- void __iomem *base;
- int irq;
- int ncb;
- struct clk *clk;
- struct clk *pclk;
-};
-
-/**
- * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance
- * @num: Hardware context number of this context
- * @pdev: Platform device associated wit this HW instance
- * @attached_elm: List element for domains to track which devices are
- * attached to them
- *
- * A msm_iommu_ctx_drvdata holds the driver data for a single context bank
- * within each IOMMU hardware instance
- */
-struct msm_iommu_ctx_drvdata {
- int num;
- struct platform_device *pdev;
- struct list_head attached_elm;
-};
-
-/*
- * Look up an IOMMU context device by its context name. NULL if none found.
- * Useful for testing and drivers that do not yet fully have IOMMU stuff in
- * their platform devices.
- */
-struct device *msm_iommu_get_ctx(const char *ctx_name);
-
/*
* Interrupt handler for the IOMMU context fault interrupt. Hooking the
* interrupt is not supported in the API yet, but this will print an error
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
deleted file mode 100644
index b6d01f97e537..000000000000
--- a/drivers/iommu/msm_iommu_dev.c
+++ /dev/null
@@ -1,392 +0,0 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/iommu.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-
-#include "msm_iommu_hw-8xxx.h"
-#include "msm_iommu.h"
-
-struct iommu_ctx_iter_data {
- /* input */
- const char *name;
-
- /* output */
- struct device *dev;
-};
-
-static struct platform_device *msm_iommu_root_dev;
-
-static int each_iommu_ctx(struct device *dev, void *data)
-{
- struct iommu_ctx_iter_data *res = data;
- struct msm_iommu_ctx_dev *c = dev->platform_data;
-
- if (!res || !c || !c->name || !res->name)
- return -EINVAL;
-
- if (!strcmp(res->name, c->name)) {
- res->dev = dev;
- return 1;
- }
- return 0;
-}
-
-static int each_iommu(struct device *dev, void *data)
-{
- return device_for_each_child(dev, data, each_iommu_ctx);
-}
-
-struct device *msm_iommu_get_ctx(const char *ctx_name)
-{
- struct iommu_ctx_iter_data r;
- int found;
-
- if (!msm_iommu_root_dev) {
- pr_err("No root IOMMU device.\n");
- goto fail;
- }
-
- r.name = ctx_name;
- found = device_for_each_child(&msm_iommu_root_dev->dev, &r, each_iommu);
-
- if (!found) {
- pr_err("Could not find context <%s>\n", ctx_name);
- goto fail;
- }
-
- return r.dev;
-fail:
- return NULL;
-}
-EXPORT_SYMBOL(msm_iommu_get_ctx);
-
-static void msm_iommu_reset(void __iomem *base, int ncb)
-{
- int ctx;
-
- SET_RPUE(base, 0);
- SET_RPUEIE(base, 0);
- SET_ESRRESTORE(base, 0);
- SET_TBE(base, 0);
- SET_CR(base, 0);
- SET_SPDMBE(base, 0);
- SET_TESTBUSCR(base, 0);
- SET_TLBRSW(base, 0);
- SET_GLOBAL_TLBIALL(base, 0);
- SET_RPU_ACR(base, 0);
- SET_TLBLKCRWE(base, 1);
-
- for (ctx = 0; ctx < ncb; ctx++) {
- SET_BPRCOSH(base, ctx, 0);
- SET_BPRCISH(base, ctx, 0);
- SET_BPRCNSH(base, ctx, 0);
- SET_BPSHCFG(base, ctx, 0);
- SET_BPMTCFG(base, ctx, 0);
- SET_ACTLR(base, ctx, 0);
- SET_SCTLR(base, ctx, 0);
- SET_FSRRESTORE(base, ctx, 0);
- SET_TTBR0(base, ctx, 0);
- SET_TTBR1(base, ctx, 0);
- SET_TTBCR(base, ctx, 0);
- SET_BFBCR(base, ctx, 0);
- SET_PAR(base, ctx, 0);
- SET_FAR(base, ctx, 0);
- SET_CTX_TLBIALL(base, ctx, 0);
- SET_TLBFLPTER(base, ctx, 0);
- SET_TLBSLPTER(base, ctx, 0);
- SET_TLBLKCR(base, ctx, 0);
- SET_PRRR(base, ctx, 0);
- SET_NMRR(base, ctx, 0);
- SET_CONTEXTIDR(base, ctx, 0);
- }
-}
-
-static int msm_iommu_probe(struct platform_device *pdev)
-{
- struct resource *r;
- struct clk *iommu_clk;
- struct clk *iommu_pclk;
- struct msm_iommu_drvdata *drvdata;
- struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev);
- void __iomem *regs_base;
- int ret, irq, par;
-
- if (pdev->id == -1) {
- msm_iommu_root_dev = pdev;
- return 0;
- }
-
- drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
-
- if (!drvdata) {
- ret = -ENOMEM;
- goto fail;
- }
-
- if (!iommu_dev) {
- ret = -ENODEV;
- goto fail;
- }
-
- iommu_pclk = clk_get(NULL, "smmu_pclk");
- if (IS_ERR(iommu_pclk)) {
- ret = -ENODEV;
- goto fail;
- }
-
- ret = clk_prepare_enable(iommu_pclk);
- if (ret)
- goto fail_enable;
-
- iommu_clk = clk_get(&pdev->dev, "iommu_clk");
-
- if (!IS_ERR(iommu_clk)) {
- if (clk_get_rate(iommu_clk) == 0)
- clk_set_rate(iommu_clk, 1);
-
- ret = clk_prepare_enable(iommu_clk);
- if (ret) {
- clk_put(iommu_clk);
- goto fail_pclk;
- }
- } else
- iommu_clk = NULL;
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
- regs_base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(regs_base)) {
- ret = PTR_ERR(regs_base);
- goto fail_clk;
- }
-
- irq = platform_get_irq_byname(pdev, "secure_irq");
- if (irq < 0) {
- ret = -ENODEV;
- goto fail_clk;
- }
-
- msm_iommu_reset(regs_base, iommu_dev->ncb);
-
- SET_M(regs_base, 0, 1);
- SET_PAR(regs_base, 0, 0);
- SET_V2PCFG(regs_base, 0, 1);
- SET_V2PPR(regs_base, 0, 0);
- par = GET_PAR(regs_base, 0);
- SET_V2PCFG(regs_base, 0, 0);
- SET_M(regs_base, 0, 0);
-
- if (!par) {
- pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
- ret = -ENODEV;
- goto fail_clk;
- }
-
- ret = request_irq(irq, msm_iommu_fault_handler, 0,
- "msm_iommu_secure_irpt_handler", drvdata);
- if (ret) {
- pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
- goto fail_clk;
- }
-
-
- drvdata->pclk = iommu_pclk;
- drvdata->clk = iommu_clk;
- drvdata->base = regs_base;
- drvdata->irq = irq;
- drvdata->ncb = iommu_dev->ncb;
-
- pr_info("device %s mapped at %p, irq %d with %d ctx banks\n",
- iommu_dev->name, regs_base, irq, iommu_dev->ncb);
-
- platform_set_drvdata(pdev, drvdata);
-
- clk_disable(iommu_clk);
-
- clk_disable(iommu_pclk);
-
- return 0;
-fail_clk:
- if (iommu_clk) {
- clk_disable(iommu_clk);
- clk_put(iommu_clk);
- }
-fail_pclk:
- clk_disable_unprepare(iommu_pclk);
-fail_enable:
- clk_put(iommu_pclk);
-fail:
- kfree(drvdata);
- return ret;
-}
-
-static int msm_iommu_remove(struct platform_device *pdev)
-{
- struct msm_iommu_drvdata *drv = NULL;
-
- drv = platform_get_drvdata(pdev);
- if (drv) {
- if (drv->clk) {
- clk_unprepare(drv->clk);
- clk_put(drv->clk);
- }
- clk_unprepare(drv->pclk);
- clk_put(drv->pclk);
- memset(drv, 0, sizeof(*drv));
- kfree(drv);
- }
- return 0;
-}
-
-static int msm_iommu_ctx_probe(struct platform_device *pdev)
-{
- struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev);
- struct msm_iommu_drvdata *drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- int i, ret;
-
- if (!c || !pdev->dev.parent)
- return -EINVAL;
-
- drvdata = dev_get_drvdata(pdev->dev.parent);
- if (!drvdata)
- return -ENODEV;
-
- ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL);
- if (!ctx_drvdata)
- return -ENOMEM;
-
- ctx_drvdata->num = c->num;
- ctx_drvdata->pdev = pdev;
-
- INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
- platform_set_drvdata(pdev, ctx_drvdata);
-
- ret = clk_prepare_enable(drvdata->pclk);
- if (ret)
- goto fail;
-
- if (drvdata->clk) {
- ret = clk_prepare_enable(drvdata->clk);
- if (ret) {
- clk_disable_unprepare(drvdata->pclk);
- goto fail;
- }
- }
-
- /* Program the M2V tables for this context */
- for (i = 0; i < MAX_NUM_MIDS; i++) {
- int mid = c->mids[i];
- if (mid == -1)
- break;
-
- SET_M2VCBR_N(drvdata->base, mid, 0);
- SET_CBACR_N(drvdata->base, c->num, 0);
-
- /* Set VMID = 0 */
- SET_VMID(drvdata->base, mid, 0);
-
- /* Set the context number for that MID to this context */
- SET_CBNDX(drvdata->base, mid, c->num);
-
- /* Set MID associated with this context bank to 0*/
- SET_CBVMID(drvdata->base, c->num, 0);
-
- /* Set the ASID for TLB tagging for this context */
- SET_CONTEXTIDR_ASID(drvdata->base, c->num, c->num);
-
- /* Set security bit override to be Non-secure */
- SET_NSCFG(drvdata->base, mid, 3);
- }
-
- clk_disable(drvdata->clk);
- clk_disable(drvdata->pclk);
-
- dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);
- return 0;
-fail:
- kfree(ctx_drvdata);
- return ret;
-}
-
-static int msm_iommu_ctx_remove(struct platform_device *pdev)
-{
- struct msm_iommu_ctx_drvdata *drv = NULL;
- drv = platform_get_drvdata(pdev);
- if (drv) {
- memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata));
- kfree(drv);
- }
- return 0;
-}
-
-static struct platform_driver msm_iommu_driver = {
- .driver = {
- .name = "msm_iommu",
- },
- .probe = msm_iommu_probe,
- .remove = msm_iommu_remove,
-};
-
-static struct platform_driver msm_iommu_ctx_driver = {
- .driver = {
- .name = "msm_iommu_ctx",
- },
- .probe = msm_iommu_ctx_probe,
- .remove = msm_iommu_ctx_remove,
-};
-
-static int __init msm_iommu_driver_init(void)
-{
- int ret;
- ret = platform_driver_register(&msm_iommu_driver);
- if (ret != 0) {
- pr_err("Failed to register IOMMU driver\n");
- goto error;
- }
-
- ret = platform_driver_register(&msm_iommu_ctx_driver);
- if (ret != 0) {
- platform_driver_unregister(&msm_iommu_driver);
- pr_err("Failed to register IOMMU context driver\n");
- goto error;
- }
-
-error:
- return ret;
-}
-
-static void __exit msm_iommu_driver_exit(void)
-{
- platform_driver_unregister(&msm_iommu_ctx_driver);
- platform_driver_unregister(&msm_iommu_driver);
-}
-
-subsys_initcall(msm_iommu_driver_init);
-module_exit(msm_iommu_driver_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/drivers/iommu/msm_iommu_hw-8xxx.h b/drivers/iommu/msm_iommu_hw-8xxx.h
index fc160101dead..84ba573927d1 100644
--- a/drivers/iommu/msm_iommu_hw-8xxx.h
+++ b/drivers/iommu/msm_iommu_hw-8xxx.h
@@ -61,8 +61,9 @@ do { \
#define FL_TYPE_TABLE (1 << 0)
#define FL_TYPE_SECT (2 << 0)
#define FL_SUPERSECTION (1 << 18)
-#define FL_AP_WRITE (1 << 10)
-#define FL_AP_READ (1 << 11)
+#define FL_AP0 (1 << 10)
+#define FL_AP1 (1 << 11)
+#define FL_AP2 (1 << 15)
#define FL_SHARED (1 << 16)
#define FL_BUFFERABLE (1 << 2)
#define FL_CACHEABLE (1 << 3)
@@ -77,6 +78,7 @@ do { \
#define SL_TYPE_SMALL (2 << 0)
#define SL_AP0 (1 << 4)
#define SL_AP1 (2 << 4)
+#define SL_AP2 (1 << 9)
#define SL_SHARED (1 << 10)
#define SL_BUFFERABLE (1 << 2)
#define SL_CACHEABLE (1 << 3)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 60ba238090d9..37b7c3e4c0b6 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -133,6 +133,19 @@ struct iommu_ops *of_iommu_get_ops(struct device_node *np)
return ops;
}
+/**
+ * of_iommu_configure - Configure and return the IOMMU for a device
+ * @dev: device for which to configure the IOMMU
+ * @master_np: device node of the bus master connected to the IOMMU
+ *
+ * The master_np parameter specifies the device node of the bus master seen by
+ * the IOMMU. This is usually the device node of the dev device, but can be the
+ * device node of a bridge when the device is dynamically discovered and
+ * instantiated and thus has no device node (such as PCI devices for instance).
+ *
+ * Return a pointer to the iommu_ops for the device, NULL if the device isn't
+ * connected to an IOMMU, or a negative value if an error occurs.
+ */
struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
@@ -159,8 +172,18 @@ struct iommu_ops *of_iommu_configure(struct device *dev,
np = iommu_spec.np;
ops = of_iommu_get_ops(np);
- if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
+ if (!ops) {
+ const struct of_device_id *oid;
+
+ oid = of_match_node(&__iommu_of_table, np);
+ ops = oid ? ERR_PTR(-EPROBE_DEFER) : NULL;
goto err_put_node;
+ }
+
+ if (!ops->of_xlate || ops->of_xlate(dev, &iommu_spec)) {
+ ops = NULL;
+ goto err_put_node;
+ }
of_node_put(np);
idx++;
@@ -170,7 +193,7 @@ struct iommu_ops *of_iommu_configure(struct device *dev,
err_put_node:
of_node_put(np);
- return NULL;
+ return ops;
}
void __init of_iommu_init(void)
@@ -181,7 +204,7 @@ void __init of_iommu_init(void)
for_each_matching_node_and_match(np, matches, &match) {
const of_iommu_init_fn init_fn = match->data;
- if (init_fn(np))
+ if (init_fn && init_fn(np))
pr_err("Failed to initialise IOMMU %s\n",
of_node_full_name(np));
}
diff --git a/drivers/iommu/qcom/Kconfig b/drivers/iommu/qcom/Kconfig
new file mode 100644
index 000000000000..a0e41bb305af
--- /dev/null
+++ b/drivers/iommu/qcom/Kconfig
@@ -0,0 +1,43 @@
+# Qualcomm IOMMU support
+
+# QCOM IOMMUv1 support
+config QCOM_IOMMU_V1
+ bool "Qualcomm IOMMUv1 Support"
+ depends on ARCH_QCOM
+ select IOMMU_API
+ select ARM_DMA_USE_IOMMU if ARM
+ select ARM64_DMA_USE_IOMMU if ARM64
+ help
+ Support for the IOMMUs (v1) found on certain Qualcomm SOCs.
+ These IOMMUs allow virtualization of the address space used by most
+ cores within the multimedia subsystem.
+
+ If unsure, say N here.
+
+config MMU500_ACTIVE_PREFETCH_BUG_WITH_SECTION_MAPPING
+ bool "Don't align virtual address at 1MB boundary"
+ depends on QCOM_IOMMU_V1
+ help
+ Say Y here if the MMU500 revision has a bug in active prefetch
+ which can cause TLB corruptions due to 1MB alignment of a buffer.
+ Here is the sequence which will surface this BUG.
+ 1) Create a 2-level mapping in v7S format for 1MB buffer. Start of
+ the buffer should be at even MB boundary.
+ 2) Create a section mapping for 1MB buffer adjacent to previous
+ mapping in step 1.
+ 3) Access last page from 2 level mapping followed by an access into
+ section mapped area.
+ 4) Step 3 will result into TLB corruption and this corruption can
+ lead to any misbehavior (like Permission fault) for sub-sequent
+ transactions.
+
+ If unsure, say Y here if IOMMU mapping will not exhaust the VA space.
+
+config IOMMU_PGTABLES_L2
+ bool "Allow SMMU page tables in the L2 cache (Experimental)"
+ depends on QCOM_IOMMU_V1 && MMU && SMP && CPU_DCACHE_DISABLE=n
+ help
+ Improves TLB miss latency at the expense of potential L2 pollution.
+ However, with large multimedia buffers, the TLB should mostly contain
+ section mappings and TLB misses should be quite infrequent.
+ Most people can probably say Y here.
diff --git a/drivers/iommu/qcom/Makefile b/drivers/iommu/qcom/Makefile
new file mode 100644
index 000000000000..e0b1159227be
--- /dev/null
+++ b/drivers/iommu/qcom/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_QCOM_IOMMU_V1) += qcom-iommu.o
+
+qcom-iommu-y += msm_iommu.o
+qcom-iommu-y += msm_iommu-v1.o
+qcom-iommu-y += msm_iommu_dev-v1.o
+qcom-iommu-y += msm_iommu_sec.o
+qcom-iommu-y += msm_iommu_pagetable.o
diff --git a/drivers/iommu/qcom/msm_iommu-v1.c b/drivers/iommu/qcom/msm_iommu-v1.c
new file mode 100644
index 000000000000..82c7f2f16658
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu-v1.c
@@ -0,0 +1,1538 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/msm-bus.h>
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/notifier.h>
+#include <linux/iopoll.h>
+#include <linux/qcom_iommu.h>
+#include <asm/sizes.h>
+
+#include "msm_iommu_hw-v1.h"
+#include "msm_iommu_priv.h"
+#include "msm_iommu_perfmon.h"
+#include "msm_iommu_pagetable.h"
+
+#ifdef CONFIG_IOMMU_LPAE
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_2M | SZ_32M | SZ_1G)
+#else
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+#endif
+
+#define IOMMU_MSEC_STEP 10
+#define IOMMU_MSEC_TIMEOUT 5000
+
+static DEFINE_MUTEX(msm_iommu_lock);
+struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS];
+
+static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
+{
+ ++drvdata->powered_on;
+
+ return 0;
+}
+
+static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
+{
+ --drvdata->powered_on;
+}
+
+static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote)
+{
+ int ret = 0;
+
+ if (drvdata->bus_client) {
+ ret = msm_bus_scale_client_update_request(drvdata->bus_client,
+ vote);
+ if (ret)
+ pr_err("%s: Failed to vote for bus: %d\n", __func__,
+ vote);
+ }
+
+ return ret;
+}
+
+static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->iface);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(drvdata->core);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ clk_disable_unprepare(drvdata->iface);
+ return ret;
+}
+
+static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+ clk_disable_unprepare(drvdata->core);
+ clk_disable_unprepare(drvdata->iface);
+}
+
+static void _iommu_lock_acquire(unsigned int need_extra_lock)
+{
+ mutex_lock(&msm_iommu_lock);
+}
+
+static void _iommu_lock_release(unsigned int need_extra_lock)
+{
+ mutex_unlock(&msm_iommu_lock);
+}
+
+struct iommu_access_ops iommu_access_ops_v1 = {
+ .iommu_power_on = __enable_regulators,
+ .iommu_power_off = __disable_regulators,
+ .iommu_bus_vote = apply_bus_vote,
+ .iommu_clk_on = __enable_clocks,
+ .iommu_clk_off = __disable_clocks,
+ .iommu_lock_acquire = _iommu_lock_acquire,
+ .iommu_lock_release = _iommu_lock_release,
+};
+
+static BLOCKING_NOTIFIER_HEAD(msm_iommu_notifier_list);
+
+void msm_iommu_register_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_register(&msm_iommu_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_iommu_register_notify);
+
+#ifdef CONFIG_MSM_IOMMU_VBIF_CHECK
+
+#define VBIF_XIN_HALT_CTRL0 0x200
+#define VBIF_XIN_HALT_CTRL1 0x204
+#define VBIF_AXI_HALT_CTRL0 0x208
+#define VBIF_AXI_HALT_CTRL1 0x20C
+
+static void __halt_vbif_xin(void __iomem *vbif_base)
+{
+ pr_err("Halting VBIF_XIN\n");
+ writel_relaxed(0xFFFFFFFF, vbif_base + VBIF_XIN_HALT_CTRL0);
+}
+
+static void __dump_vbif_state(void __iomem *base, void __iomem *vbif_base)
+{
+ unsigned int reg_val;
+
+ reg_val = readl_relaxed(base + MICRO_MMU_CTRL);
+ pr_err("Value of SMMU_IMPLDEF_MICRO_MMU_CTRL = 0x%x\n", reg_val);
+
+ reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL0);
+ pr_err("Value of VBIF_XIN_HALT_CTRL0 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL1);
+ pr_err("Value of VBIF_XIN_HALT_CTRL1 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL0);
+ pr_err("Value of VBIF_AXI_HALT_CTRL0 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL1);
+ pr_err("Value of VBIF_AXI_HALT_CTRL1 = 0x%x\n", reg_val);
+}
+
+static int __check_vbif_state(struct msm_iommu_drvdata const *drvdata)
+{
+ phys_addr_t addr = (phys_addr_t) (drvdata->phys_base
+ - (phys_addr_t) 0x4000);
+ void __iomem *base = ioremap(addr, 0x1000);
+ int ret = 0;
+
+ if (base) {
+ __dump_vbif_state(drvdata->base, base);
+ __halt_vbif_xin(base);
+ __dump_vbif_state(drvdata->base, base);
+ iounmap(base);
+ } else {
+ pr_err("%s: Unable to ioremap\n", __func__);
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
+{
+ int res;
+ unsigned int val;
+ void __iomem *base = drvdata->base;
+ char const *name = drvdata->name;
+
+ pr_err("Timed out waiting for IOMMU halt to complete for %s\n", name);
+ res = __check_vbif_state(drvdata);
+ if (res)
+ BUG();
+
+ pr_err("Checking if IOMMU halt completed for %s\n", name);
+
+ res = readl_poll_timeout(GLB_REG(MICRO_MMU_CTRL, base), val,
+ (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 0, 5000000);
+
+ if (res) {
+ pr_err("Timed out (again) waiting for IOMMU halt to complete for %s\n",
+ name);
+ } else {
+ pr_err("IOMMU halt completed. VBIF FIFO most likely not getting drained by master\n");
+ }
+ BUG();
+}
+
+static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
+ int ctx, struct msm_iommu_priv *priv)
+{
+ int res;
+ unsigned int val;
+ void __iomem *base = drvdata->cb_base;
+ char const *name = drvdata->name;
+
+ pr_err("Timed out waiting for TLB SYNC to complete for %s (client: %s)\n",
+ name, priv->client_name);
+ blocking_notifier_call_chain(&msm_iommu_notifier_list, TLB_SYNC_TIMEOUT,
+ (void *) priv->client_name);
+ res = __check_vbif_state(drvdata);
+ if (res)
+ BUG();
+
+ pr_err("Checking if TLB sync completed for %s\n", name);
+
+ res = readl_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+ (val & CB_TLBSTATUS_SACTIVE) == 0, 0, 5000000);
+ if (res) {
+ pr_err("Timed out (again) waiting for TLB SYNC to complete for %s\n",
+ name);
+ } else {
+ pr_err("TLB Sync completed. VBIF FIFO most likely not getting drained by master\n");
+ }
+ BUG();
+}
+
+#else
+
+/*
+ * For targets without VBIF or for targets with the VBIF check disabled
+ * we directly just crash to capture the issue
+ */
+static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
+{
+ BUG();
+}
+
+static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
+ int ctx, struct msm_iommu_priv *priv)
+{
+ BUG();
+}
+
+#endif
+
+void iommu_halt(struct msm_iommu_drvdata const *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+ unsigned int val;
+ int res;
+
+ if (!iommu_drvdata->halt_enabled)
+ return;
+
+ SET_MICRO_MMU_CTRL_HALT_REQ(base, 1);
+ res = readl_poll_timeout(GLB_REG(MICRO_MMU_CTRL, base), val,
+ (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 0, 5000000);
+ if (res)
+ check_halt_state(iommu_drvdata);
+
+ /* Ensure device is idle before continuing */
+ mb();
+}
+
+void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata)
+{
+ if (!iommu_drvdata->halt_enabled)
+ return;
+
+ /* Ensure transactions have completed before releasing the halt */
+ mb();
+
+ SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 0);
+
+ /*
+ * Ensure write is complete before continuing to ensure
+ * we don't turn off clocks while transaction is still
+ * pending.
+ */
+ mb();
+}
+
+static void __sync_tlb(struct msm_iommu_drvdata *iommu_drvdata, int ctx,
+ struct msm_iommu_priv *priv)
+{
+ void __iomem *base = iommu_drvdata->cb_base;
+ unsigned int val;
+ unsigned int res;
+
+ SET_TLBSYNC(base, ctx, 0);
+ /* No barrier needed due to read dependency */
+
+ res = readl_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+ (val & CB_TLBSTATUS_SACTIVE) == 0, 0, 5000000);
+ if (res)
+ check_tlb_sync_state(iommu_drvdata, ctx, priv);
+}
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = 0;
+
+ list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+ BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ BUG_ON(!iommu_drvdata);
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ SET_TLBIVA(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid | (va & CB_TLBIVA_VA));
+ mb();
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ __disable_clocks(iommu_drvdata);
+ }
+
+fail:
+ return ret;
+}
+#endif
+
+static int __flush_iotlb(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = 0;
+
+ list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+ BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ BUG_ON(!iommu_drvdata);
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ __disable_clocks(iommu_drvdata);
+ }
+
+fail:
+ return ret;
+}
+
+/*
+ * May only be called for non-secure iommus
+ */
+static void __reset_iommu(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+ int i, smt_size, res;
+ unsigned long val;
+
+ /* SMMU_ACR is an implementation defined register.
+ * Resetting is not required for some implementation.
+ */
+ if (iommu_drvdata->model != MMU_500)
+ SET_ACR(base, 0);
+ SET_CR2(base, 0);
+ SET_GFAR(base, 0);
+ SET_GFSRRESTORE(base, 0);
+
+ /* Invalidate the entire non-secure TLB */
+ SET_TLBIALLNSNH(base, 0);
+ SET_TLBGSYNC(base, 0);
+ res = readl_poll_timeout(GLB_REG(TLBGSTATUS, base), val,
+ (val & TLBGSTATUS_GSACTIVE) == 0, 0, 5000000);
+ if (res)
+ BUG();
+
+ smt_size = GET_IDR0_NUMSMRG(base);
+
+ for (i = 0; i < smt_size; i++)
+ SET_SMR_VALID(base, i, 0);
+
+ mb();
+}
+
+static void __reset_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+
+ if (iommu_drvdata->model != MMU_500)
+ SET_NSACR(base, 0);
+ SET_NSCR2(base, 0);
+ SET_NSGFAR(base, 0);
+ SET_NSGFSRRESTORE(base, 0);
+ mb();
+}
+
+static void __program_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+
+ if (iommu_drvdata->model == MMU_500) {
+ SET_NSACR_SMTNMC_BPTLBEN(base, 1);
+ SET_NSACR_MMUDIS_BPTLBEN(base, 1);
+ SET_NSACR_S2CR_BPTLBEN(base, 1);
+ }
+ SET_NSCR0_SMCFCFG(base, 1);
+ SET_NSCR0_USFCFG(base, 1);
+ SET_NSCR0_STALLD(base, 1);
+ SET_NSCR0_GCFGFIE(base, 1);
+ SET_NSCR0_GCFGFRE(base, 1);
+ SET_NSCR0_GFIE(base, 1);
+ SET_NSCR0_GFRE(base, 1);
+ SET_NSCR0_CLIENTPD(base, 0);
+}
+
+/*
+ * May only be called for non-secure iommus
+ */
+static void __program_iommu(struct msm_iommu_drvdata *drvdata)
+{
+ __reset_iommu(drvdata);
+
+ if (!msm_iommu_get_scm_call_avail())
+ __reset_iommu_secure(drvdata);
+
+ if (drvdata->model == MMU_500) {
+ SET_ACR_SMTNMC_BPTLBEN(drvdata->base, 1);
+ SET_ACR_MMUDIS_BPTLBEN(drvdata->base, 1);
+ SET_ACR_S2CR_BPTLBEN(drvdata->base, 1);
+ }
+ SET_CR0_SMCFCFG(drvdata->base, 1);
+ SET_CR0_USFCFG(drvdata->base, 1);
+ SET_CR0_STALLD(drvdata->base, 1);
+ SET_CR0_GCFGFIE(drvdata->base, 1);
+ SET_CR0_GCFGFRE(drvdata->base, 1);
+ SET_CR0_GFIE(drvdata->base, 1);
+ SET_CR0_GFRE(drvdata->base, 1);
+ SET_CR0_CLIENTPD(drvdata->base, 0);
+
+ if (!msm_iommu_get_scm_call_avail())
+ __program_iommu_secure(drvdata);
+
+ if (drvdata->smmu_local_base)
+ writel_relaxed(0xFFFFFFFF,
+ drvdata->smmu_local_base + SMMU_INTR_SEL_NS);
+
+ mb(); /* Make sure writes complete before returning */
+}
+
+void program_iommu_bfb_settings(void __iomem *base,
+ const struct msm_iommu_bfb_settings *bfb_settings)
+{
+ unsigned int i;
+
+ if (bfb_settings)
+ for (i = 0; i < bfb_settings->length; i++)
+ SET_GLOBAL_REG(base, bfb_settings->regs[i],
+ bfb_settings->data[i]);
+
+ /* Make sure writes complete before returning */
+ mb();
+}
+
+static void __reset_context(struct msm_iommu_drvdata *iommu_drvdata, int ctx)
+{
+ void __iomem *base = iommu_drvdata->cb_base;
+
+ /* Don't set ACTLR to zero because if context bank is in
+ * bypass mode (say after iommu_detach), still this ACTLR
+ * value matters for micro-TLB caching.
+ */
+ if (iommu_drvdata->model != MMU_500)
+ SET_ACTLR(base, ctx, 0);
+ SET_FAR(base, ctx, 0);
+ SET_FSRRESTORE(base, ctx, 0);
+ SET_NMRR(base, ctx, 0);
+ SET_PAR(base, ctx, 0);
+ SET_PRRR(base, ctx, 0);
+ SET_SCTLR(base, ctx, 0);
+ SET_TTBCR(base, ctx, 0);
+ SET_TTBR0(base, ctx, 0);
+ SET_TTBR1(base, ctx, 0);
+ mb();
+}
+
+static void __release_smg(void __iomem *base)
+{
+ int i, smt_size;
+ smt_size = GET_IDR0_NUMSMRG(base);
+
+ /* Invalidate all SMGs */
+ for (i = 0; i < smt_size; i++)
+ if (GET_SMR_VALID(base, i))
+ SET_SMR_VALID(base, i, 0);
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
+ unsigned int asid)
+{
+ SET_CB_TTBR0_ASID(base, ctx_num, asid);
+}
+#else
+static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
+ unsigned int asid)
+{
+ SET_CB_CONTEXTIDR_ASID(base, ctx_num, asid);
+}
+#endif
+
+static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *curr_ctx,
+ struct msm_iommu_priv *priv)
+{
+ void __iomem *cb_base = iommu_drvdata->cb_base;
+
+ curr_ctx->asid = curr_ctx->num;
+ msm_iommu_set_ASID(cb_base, curr_ctx->num, curr_ctx->asid);
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
+{
+ SET_CB_TTBCR_EAE(base, ctx, 1); /* Extended Address Enable (EAE) */
+}
+
+static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx)
+{
+ SET_CB_MAIR0(base, ctx, msm_iommu_get_mair0());
+ SET_CB_MAIR1(base, ctx, msm_iommu_get_mair1());
+}
+
+static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
+{
+ /*
+ * Configure page tables as inner-cacheable and shareable to reduce
+ * the TLB miss penalty.
+ */
+ SET_CB_TTBCR_SH0(base, ctx, 3); /* Inner shareable */
+ SET_CB_TTBCR_ORGN0(base, ctx, 1); /* outer cachable*/
+ SET_CB_TTBCR_IRGN0(base, ctx, 1); /* inner cachable*/
+ SET_CB_TTBCR_T0SZ(base, ctx, 0); /* 0GB-4GB */
+
+
+ SET_CB_TTBCR_SH1(base, ctx, 3); /* Inner shareable */
+ SET_CB_TTBCR_ORGN1(base, ctx, 1); /* outer cachable*/
+ SET_CB_TTBCR_IRGN1(base, ctx, 1); /* inner cachable*/
+ SET_CB_TTBCR_T1SZ(base, ctx, 0); /* TTBR1 not used */
+}
+
+#else
+
+static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
+{
+ /* Turn on TEX Remap */
+ SET_CB_SCTLR_TRE(base, ctx, 1);
+}
+
+static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx)
+{
+ SET_PRRR(base, ctx, msm_iommu_get_prrr());
+ SET_NMRR(base, ctx, msm_iommu_get_nmrr());
+}
+
+static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
+{
+ /* Configure page tables as inner-cacheable and shareable to reduce
+ * the TLB miss penalty.
+ */
+ SET_CB_TTBR0_S(base, ctx, 1);
+ SET_CB_TTBR0_NOS(base, ctx, 1);
+ SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
+ SET_CB_TTBR0_IRGN0(base, ctx, 1);
+ SET_CB_TTBR0_RGN(base, ctx, 1); /* WB, WA */
+}
+
+#endif
+
+static int program_m2v_table(struct device *dev, void __iomem *base)
+{
+ struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_get_drvdata(dev);
+ u32 *sids = ctx_drvdata->sids;
+ u32 *sid_mask = ctx_drvdata->sid_mask;
+ unsigned int ctx = ctx_drvdata->num;
+ int num = 0, i, smt_size;
+ int len = ctx_drvdata->nsid;
+
+ smt_size = GET_IDR0_NUMSMRG(base);
+
+ /* Program the M2V tables for this context */
+ for (i = 0; i < len / sizeof(*sids); i++) {
+ for (; num < smt_size; num++)
+ if (GET_SMR_VALID(base, num) == 0)
+ break;
+ BUG_ON(num >= smt_size);
+
+ SET_SMR_VALID(base, num, 1);
+ SET_SMR_MASK(base, num, sid_mask[i]);
+ SET_SMR_ID(base, num, sids[i]);
+
+ SET_S2CR_N(base, num, 0);
+ SET_S2CR_CBNDX(base, num, ctx);
+ SET_S2CR_MEMATTR(base, num, 0x0A);
+ /* Set security bit override to be Non-secure */
+ SET_S2CR_NSCFG(base, num, 3);
+ }
+
+ return 0;
+}
+
+static void program_all_m2v_tables(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ device_for_each_child(iommu_drvdata->dev, iommu_drvdata->base,
+ program_m2v_table);
+}
+
+static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ struct msm_iommu_priv *priv, bool is_secure,
+ bool program_m2v)
+{
+ phys_addr_t pn;
+ void __iomem *base = iommu_drvdata->base;
+ void __iomem *cb_base = iommu_drvdata->cb_base;
+ unsigned int ctx = ctx_drvdata->num;
+ phys_addr_t pgtable = __pa(priv->pt.fl_table);
+
+ __reset_context(iommu_drvdata, ctx);
+ msm_iommu_setup_ctx(cb_base, ctx);
+
+ if (priv->pt.redirect)
+ msm_iommu_setup_pg_l2_redirect(cb_base, ctx);
+
+ msm_iommu_setup_memory_remap(cb_base, ctx);
+
+ pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
+ SET_CB_TTBR0_ADDR(cb_base, ctx, pn);
+
+ /* Enable context fault interrupt */
+ SET_CB_SCTLR_CFIE(cb_base, ctx, 1);
+
+ if (iommu_drvdata->model != MMU_500) {
+ /* Redirect all cacheable requests to L2 slave port. */
+ SET_CB_ACTLR_BPRCISH(cb_base, ctx, 1);
+ SET_CB_ACTLR_BPRCOSH(cb_base, ctx, 1);
+ SET_CB_ACTLR_BPRCNSH(cb_base, ctx, 1);
+ }
+
+ /* Enable private ASID namespace */
+ SET_CB_SCTLR_ASIDPNE(cb_base, ctx, 1);
+
+ if (!is_secure) {
+ if (program_m2v)
+ program_all_m2v_tables(iommu_drvdata);
+
+ SET_CBAR_N(base, ctx, 0);
+
+ /* Stage 1 Context with Stage 2 bypass */
+ SET_CBAR_TYPE(base, ctx, 1);
+
+ /* Route page faults to the non-secure interrupt */
+ SET_CBAR_IRPTNDX(base, ctx, 1);
+
+ /* Set VMID to non-secure HLOS */
+ SET_CBAR_VMID(base, ctx, 3);
+
+ /* Bypass is treated as inner-shareable */
+ SET_CBAR_BPSHCFG(base, ctx, 2);
+
+ /* Do not downgrade memory attributes */
+ SET_CBAR_MEMATTR(base, ctx, 0x0A);
+
+ }
+
+ msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv);
+
+ /* Ensure that the ASID assignment has completed before we use
+ * the ASID for TLB invalidation. Here, mb() is required because
+ * the two registers are separated by more than 1KB. */
+ mb();
+
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+
+ /* Enable the MMU */
+ SET_CB_SCTLR_M(cb_base, ctx, 1);
+ mb();
+}
+
+#ifdef CONFIG_IOMMU_PGTABLES_L2
+#define INITIAL_REDIRECT_VAL 1
+#else
+#define INITIAL_REDIRECT_VAL 0
+#endif
+
+static struct iommu_domain * msm_iommu_domain_alloc(unsigned type)
+{
+ struct msm_iommu_priv *priv;
+ int ret = -ENOMEM;
+ struct iommu_domain *domain;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto fail_nomem;
+
+ priv->pt.redirect = INITIAL_REDIRECT_VAL;
+
+ INIT_LIST_HEAD(&priv->list_attached);
+
+ ret = msm_iommu_pagetable_alloc(&priv->pt);
+ if (ret)
+ goto fail_nomem;
+
+ domain = &priv->domain;
+
+ return domain;
+
+fail_nomem:
+ kfree(priv);
+ return 0;
+}
+
+static void msm_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv;
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+
+ if (priv)
+ msm_iommu_pagetable_free(&priv->pt);
+
+ kfree(priv);
+ mutex_unlock(&msm_iommu_lock);
+}
+
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_ctx_drvdata *tmp_drvdata;
+ int ret = 0;
+ int is_secure;
+ bool set_m2v = false;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (!priv || !dev) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ++ctx_drvdata->attach_count;
+
+ if (ctx_drvdata->attach_count > 1)
+ goto already_attached;
+
+ if (!list_empty(&ctx_drvdata->attached_elm)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
+ if (tmp_drvdata == ctx_drvdata) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ is_secure = iommu_drvdata->sec_id != -1;
+
+ ret = __enable_regulators(iommu_drvdata);
+ if (ret)
+ goto unlock;
+
+ ret = apply_bus_vote(iommu_drvdata, 1);
+ if (ret)
+ goto unlock;
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret) {
+ __disable_regulators(iommu_drvdata);
+ goto unlock;
+ }
+
+ /* We can only do this once */
+ if (!iommu_drvdata->ctx_attach_count) {
+ if (!is_secure) {
+ iommu_halt(iommu_drvdata);
+ __program_iommu(iommu_drvdata);
+ iommu_resume(iommu_drvdata);
+ } else {
+ ret = msm_iommu_sec_program_iommu(iommu_drvdata,
+ ctx_drvdata);
+ if (ret) {
+ __disable_regulators(iommu_drvdata);
+ __disable_clocks(iommu_drvdata);
+ goto unlock;
+ }
+ }
+ program_iommu_bfb_settings(iommu_drvdata->base,
+ iommu_drvdata->bfb_settings);
+ set_m2v = true;
+ }
+
+ iommu_halt(iommu_drvdata);
+
+ __program_context(iommu_drvdata, ctx_drvdata, priv, is_secure, set_m2v);
+
+ iommu_resume(iommu_drvdata);
+
+ /* Ensure TLB is clear */
+ if (iommu_drvdata->model != MMU_500) {
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ }
+
+ __disable_clocks(iommu_drvdata);
+
+ list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+ ctx_drvdata->attached_domain = domain;
+ ++iommu_drvdata->ctx_attach_count;
+
+already_attached:
+ mutex_unlock(&msm_iommu_lock);
+
+ msm_iommu_attached(dev->parent);
+ return ret;
+unlock:
+ mutex_unlock(&msm_iommu_lock);
+ return ret;
+}
+
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret;
+ int is_secure;
+
+ if (!dev)
+ return;
+
+ msm_iommu_detached(dev->parent);
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+ if (!priv)
+ goto unlock;
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
+ goto unlock;
+
+ --ctx_drvdata->attach_count;
+ BUG_ON(ctx_drvdata->attach_count < 0);
+
+ if (ctx_drvdata->attach_count > 0)
+ goto unlock;
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto unlock;
+
+ is_secure = iommu_drvdata->sec_id != -1;
+
+ if (iommu_drvdata->model == MMU_500) {
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ }
+
+ ctx_drvdata->asid = -1;
+
+ __reset_context(iommu_drvdata, ctx_drvdata->num);
+
+ /*
+ * Only reset the M2V tables on the very last detach */
+ if (!is_secure && iommu_drvdata->ctx_attach_count == 1) {
+ iommu_halt(iommu_drvdata);
+ __release_smg(iommu_drvdata->base);
+ iommu_resume(iommu_drvdata);
+ }
+
+ __disable_clocks(iommu_drvdata);
+
+ apply_bus_vote(iommu_drvdata, 0);
+
+ __disable_regulators(iommu_drvdata);
+
+ list_del_init(&ctx_drvdata->attached_elm);
+ ctx_drvdata->attached_domain = NULL;
+ BUG_ON(iommu_drvdata->ctx_attach_count == 0);
+ --iommu_drvdata->ctx_attach_count;
+unlock:
+ mutex_unlock(&msm_iommu_lock);
+}
+
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+ phys_addr_t pa, size_t len, int prot)
+{
+ struct msm_iommu_priv *priv;
+ int ret = 0;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (!priv) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
+ if (ret)
+ goto fail;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+ ret = __flush_iotlb_va(domain, va);
+#endif
+
+fail:
+ mutex_unlock(&msm_iommu_lock);
+ return ret;
+}
+
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+ size_t len)
+{
+ struct msm_iommu_priv *priv;
+ int ret = -ENODEV;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (!priv)
+ goto fail;
+
+ ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
+ if (ret < 0)
+ goto fail;
+
+ ret = __flush_iotlb(domain);
+
+ msm_iommu_pagetable_free_tables(&priv->pt, va, len);
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ /* the IOMMU API requires us to return how many bytes were unmapped */
+ len = ret ? 0 : len;
+ return len;
+}
+
+static size_t msm_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int prot)
+{
+ struct msm_iommu_priv *priv;
+ struct scatterlist *tmp;
+ unsigned int len = 0;
+ int ret, i;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (!priv) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ for_each_sg(sg, tmp, nents, i)
+ len += tmp->length;
+
+ ret = msm_iommu_pagetable_map_range(&priv->pt, iova, sg, len, prot);
+ if (ret)
+ goto fail;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+ __flush_iotlb(domain);
+#endif
+
+fail:
+ mutex_unlock(&msm_iommu_lock);
+ return ret;
+}
+
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+ unsigned int len)
+{
+ struct msm_iommu_priv *priv;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ msm_iommu_pagetable_unmap_range(&priv->pt, va, len);
+
+ __flush_iotlb(domain);
+
+ msm_iommu_pagetable_free_tables(&priv->pt, va, len);
+ mutex_unlock(&msm_iommu_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
+{
+ phys_addr_t phy;
+
+ /* Upper 28 bits from PAR, lower 12 from VA */
+ phy = (par & 0xFFFFFFF000ULL) | (va & 0x00000FFF);
+
+ return phy;
+}
+#else
+static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
+{
+ phys_addr_t phy;
+
+ /* We are dealing with a supersection */
+ if (par & CB_PAR_SS)
+ phy = (par & 0xFF000000) | (va & 0x00FFFFFF);
+ else /* Upper 20 bits from PAR, lower 12 from VA */
+ phy = (par & 0xFFFFF000) | (va & 0x00000FFF);
+
+ return phy;
+}
+#endif
+
+static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
+ phys_addr_t va)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ u64 par;
+ void __iomem *base;
+ phys_addr_t ret = 0;
+ int ctx;
+ int i;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (list_empty(&priv->list_attached))
+ goto fail;
+
+ ctx_drvdata = list_entry(priv->list_attached.next,
+ struct msm_iommu_ctx_drvdata, attached_elm);
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+
+ if (iommu_drvdata->model == MMU_500) {
+ ret = msm_iommu_iova_to_phys_soft(domain, va);
+ mutex_unlock(&msm_iommu_lock);
+ return ret;
+ }
+
+ base = iommu_drvdata->cb_base;
+ ctx = ctx_drvdata->num;
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret) {
+ ret = 0; /* 0 indicates translation failed */
+ goto fail;
+ }
+
+ SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
+ mb();
+ for (i = 0; i < IOMMU_MSEC_TIMEOUT; i += IOMMU_MSEC_STEP)
+ if (GET_CB_ATSR_ACTIVE(base, ctx) == 0)
+ break;
+ else
+ msleep(IOMMU_MSEC_STEP);
+
+ if (i >= IOMMU_MSEC_TIMEOUT) {
+ pr_err("%s: iova to phys timed out on %pa for %s (%s)\n",
+ __func__, &va, iommu_drvdata->name, ctx_drvdata->name);
+ ret = 0;
+ goto fail;
+ }
+
+ par = GET_PAR(base, ctx);
+ __disable_clocks(iommu_drvdata);
+
+ if (par & CB_PAR_F) {
+ unsigned int level = (par & CB_PAR_PLVL) >> CB_PAR_PLVL_SHIFT;
+ pr_err("IOMMU translation fault!\n");
+ pr_err("name = %s\n", iommu_drvdata->name);
+ pr_err("context = %s (%d)\n", ctx_drvdata->name,
+ ctx_drvdata->num);
+ pr_err("Interesting registers:\n");
+ pr_err("PAR = %16llx [%s%s%s%s%s%s%s%sPLVL%u %s]\n", par,
+ (par & CB_PAR_F) ? "F " : "",
+ (par & CB_PAR_TF) ? "TF " : "",
+ (par & CB_PAR_AFF) ? "AFF " : "",
+ (par & CB_PAR_PF) ? "PF " : "",
+ (par & CB_PAR_EF) ? "EF " : "",
+ (par & CB_PAR_TLBMCF) ? "TLBMCF " : "",
+ (par & CB_PAR_TLBLKF) ? "TLBLKF " : "",
+ (par & CB_PAR_ATOT) ? "ATOT " : "",
+ level,
+ (par & CB_PAR_STAGE) ? "S2 " : "S1 ");
+ ret = 0;
+ } else {
+ ret = msm_iommu_get_phy_from_PAR(va, par);
+ }
+
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ return ret;
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[])
+{
+ pr_err("MAIR0 = %08x MAIR1 = %08x\n",
+ regs[DUMP_REG_MAIR0].val, regs[DUMP_REG_MAIR1].val);
+}
+#else
+static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[])
+{
+ pr_err("PRRR = %08x NMRR = %08x\n",
+ regs[DUMP_REG_PRRR].val, regs[DUMP_REG_NMRR].val);
+}
+#endif
+
+void print_ctx_regs(struct msm_iommu_context_reg regs[])
+{
+ uint32_t fsr = regs[DUMP_REG_FSR].val;
+ u64 ttbr;
+ enum dump_reg iter;
+
+ pr_err("FAR = %016llx\n",
+ COMBINE_DUMP_REG(
+ regs[DUMP_REG_FAR1].val,
+ regs[DUMP_REG_FAR0].val));
+ pr_err("PAR = %016llx\n",
+ COMBINE_DUMP_REG(
+ regs[DUMP_REG_PAR1].val,
+ regs[DUMP_REG_PAR0].val));
+ pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
+ (fsr & 0x02) ? "TF " : "",
+ (fsr & 0x04) ? "AFF " : "",
+ (fsr & 0x08) ? "PF " : "",
+ (fsr & 0x10) ? "EF " : "",
+ (fsr & 0x20) ? "TLBMCF " : "",
+ (fsr & 0x40) ? "TLBLKF " : "",
+ (fsr & 0x80) ? "MHF " : "",
+ (fsr & 0x40000000) ? "SS " : "",
+ (fsr & 0x80000000) ? "MULTI " : "");
+
+ pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
+ regs[DUMP_REG_FSYNR0].val, regs[DUMP_REG_FSYNR1].val);
+
+ ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR0_1].val,
+ regs[DUMP_REG_TTBR0_0].val);
+ if (regs[DUMP_REG_TTBR0_1].valid)
+ pr_err("TTBR0 = %016llx\n", ttbr);
+ else
+ pr_err("TTBR0 = %016llx (32b)\n", ttbr);
+
+ ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR1_1].val,
+ regs[DUMP_REG_TTBR1_0].val);
+
+ if (regs[DUMP_REG_TTBR1_1].valid)
+ pr_err("TTBR1 = %016llx\n", ttbr);
+ else
+ pr_err("TTBR1 = %016llx (32b)\n", ttbr);
+
+ pr_err("SCTLR = %08x ACTLR = %08x\n",
+ regs[DUMP_REG_SCTLR].val, regs[DUMP_REG_ACTLR].val);
+ pr_err("CBAR = %08x CBFRSYNRA = %08x\n",
+ regs[DUMP_REG_CBAR_N].val, regs[DUMP_REG_CBFRSYNRA_N].val);
+ print_ctx_mem_attr_regs(regs);
+
+ for (iter = DUMP_REG_FIRST; iter < MAX_DUMP_REGS; ++iter)
+ if (!regs[iter].valid)
+ pr_err("NOTE: Value actually unknown for %s\n",
+ dump_regs_tbl[iter].name);
+}
+
+static void __print_ctx_regs(struct msm_iommu_drvdata *drvdata, int ctx,
+ unsigned int fsr)
+{
+ void __iomem *base = drvdata->base;
+ void __iomem *cb_base = drvdata->cb_base;
+ bool is_secure = drvdata->sec_id != -1;
+
+ struct msm_iommu_context_reg regs[MAX_DUMP_REGS];
+ unsigned int i;
+ memset(regs, 0, sizeof(regs));
+
+ for (i = DUMP_REG_FIRST; i < MAX_DUMP_REGS; ++i) {
+ struct msm_iommu_context_reg *r = &regs[i];
+ unsigned long regaddr = dump_regs_tbl[i].reg_offset;
+ if (is_secure &&
+ dump_regs_tbl[i].dump_reg_type != DRT_CTX_REG) {
+ r->valid = 0;
+ continue;
+ }
+ r->valid = 1;
+ switch (dump_regs_tbl[i].dump_reg_type) {
+ case DRT_CTX_REG:
+ r->val = GET_CTX_REG(regaddr, cb_base, ctx);
+ break;
+ case DRT_GLOBAL_REG:
+ r->val = GET_GLOBAL_REG(regaddr, base);
+ break;
+ case DRT_GLOBAL_REG_N:
+ r->val = GET_GLOBAL_REG_N(regaddr, ctx, base);
+ break;
+ default:
+ pr_info("Unknown dump_reg_type...\n");
+ r->valid = 0;
+ break;
+ }
+ }
+ print_ctx_regs(regs);
+}
+
+static void print_global_regs(void __iomem *base, unsigned int gfsr)
+{
+ pr_err("GFAR = %016llx\n", GET_GFAR(base));
+
+ pr_err("GFSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", gfsr,
+ (gfsr & 0x01) ? "ICF " : "",
+ (gfsr & 0x02) ? "USF " : "",
+ (gfsr & 0x04) ? "SMCF " : "",
+ (gfsr & 0x08) ? "UCBF " : "",
+ (gfsr & 0x10) ? "UCIF " : "",
+ (gfsr & 0x20) ? "CAF " : "",
+ (gfsr & 0x40) ? "EF " : "",
+ (gfsr & 0x80) ? "PF " : "",
+ (gfsr & 0x40000000) ? "SS " : "",
+ (gfsr & 0x80000000) ? "MULTI " : "");
+
+ pr_err("GFSYNR0 = %08x\n", GET_GFSYNR0(base));
+ pr_err("GFSYNR1 = %08x\n", GET_GFSYNR1(base));
+ pr_err("GFSYNR2 = %08x\n", GET_GFSYNR2(base));
+}
+
+irqreturn_t msm_iommu_global_fault_handler(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct msm_iommu_drvdata *drvdata;
+ unsigned int gfsr;
+ int ret;
+
+ mutex_lock(&msm_iommu_lock);
+ BUG_ON(!pdev);
+
+ drvdata = dev_get_drvdata(&pdev->dev);
+ BUG_ON(!drvdata);
+
+ if (!drvdata->powered_on) {
+ pr_err("Unexpected IOMMU global fault !!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("Power is OFF. Can't read global fault information\n");
+ ret = IRQ_HANDLED;
+ goto fail;
+ }
+
+ if (drvdata->sec_id != -1) {
+ pr_err("NON-secure interrupt from secure %s\n", drvdata->name);
+ ret = IRQ_HANDLED;
+ goto fail;
+ }
+
+ ret = __enable_clocks(drvdata);
+ if (ret) {
+ ret = IRQ_NONE;
+ goto fail;
+ }
+
+ gfsr = GET_GFSR(drvdata->base);
+ if (gfsr) {
+ pr_err("Unexpected %s global fault !!\n", drvdata->name);
+ print_global_regs(drvdata->base, gfsr);
+ SET_GFSR(drvdata->base, gfsr);
+ ret = IRQ_HANDLED;
+ } else
+ ret = IRQ_NONE;
+
+ __disable_clocks(drvdata);
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ return ret;
+}
+
+irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct msm_iommu_drvdata *drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ unsigned int fsr;
+ int ret;
+
+ phys_addr_t pagetable_phys;
+ u64 faulty_iova = 0;
+
+ mutex_lock(&msm_iommu_lock);
+
+ BUG_ON(!pdev);
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+ BUG_ON(!drvdata);
+
+ ctx_drvdata = dev_get_drvdata(&pdev->dev);
+ BUG_ON(!ctx_drvdata);
+
+ if (!drvdata->powered_on) {
+ pr_err("Unexpected IOMMU page fault!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("Power is OFF. Unable to read page fault information\n");
+ /*
+ * We cannot determine which context bank caused the issue so
+ * we just return handled here to ensure IRQ handler code is
+ * happy
+ */
+ ret = IRQ_HANDLED;
+ goto fail;
+ }
+
+ ret = __enable_clocks(drvdata);
+ if (ret) {
+ ret = IRQ_NONE;
+ goto fail;
+ }
+
+ fsr = GET_FSR(drvdata->cb_base, ctx_drvdata->num);
+ if (fsr) {
+ if (!ctx_drvdata->attached_domain) {
+ pr_err("Bad domain in interrupt handler\n");
+ ret = -ENOSYS;
+ } else {
+ faulty_iova =
+ GET_FAR(drvdata->cb_base, ctx_drvdata->num);
+ ret = report_iommu_fault(ctx_drvdata->attached_domain,
+ &ctx_drvdata->pdev->dev,
+ faulty_iova, 0);
+
+ }
+ if (ret == -ENOSYS) {
+ pr_err("Unexpected IOMMU page fault!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("context = %s (%d)\n", ctx_drvdata->name,
+ ctx_drvdata->num);
+ pr_err("Interesting registers:\n");
+ __print_ctx_regs(drvdata,
+ ctx_drvdata->num, fsr);
+
+ if (ctx_drvdata->attached_domain) {
+ pagetable_phys = msm_iommu_iova_to_phys_soft(
+ ctx_drvdata->attached_domain,
+ faulty_iova);
+ pr_err("Page table in DDR shows PA = %x\n",
+ (unsigned int) pagetable_phys);
+ }
+ }
+
+ if (ret != -EBUSY)
+ SET_FSR(drvdata->cb_base, ctx_drvdata->num, fsr);
+ ret = IRQ_HANDLED;
+ } else
+ ret = IRQ_NONE;
+
+ __disable_clocks(drvdata);
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ return ret;
+}
+
+static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+
+ return __pa(priv->pt.fl_table);
+}
+
+#define DUMP_REG_INIT(dump_reg, cb_reg, mbp, drt) \
+ do { \
+ dump_regs_tbl[dump_reg].reg_offset = cb_reg; \
+ dump_regs_tbl[dump_reg].name = #cb_reg; \
+ dump_regs_tbl[dump_reg].must_be_present = mbp; \
+ dump_regs_tbl[dump_reg].dump_reg_type = drt; \
+ } while (0)
+
+static void msm_iommu_build_dump_regs_table(void)
+{
+ DUMP_REG_INIT(DUMP_REG_FAR0, CB_FAR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FAR1, CB_FAR + 4, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_PAR0, CB_PAR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_PAR1, CB_PAR + 4, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FSR, CB_FSR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FSYNR0, CB_FSYNR0, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FSYNR1, CB_FSYNR1, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR0_0, CB_TTBR0, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR0_1, CB_TTBR0 + 4, 0, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR1_0, CB_TTBR1, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR1_1, CB_TTBR1 + 4, 0, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_SCTLR, CB_SCTLR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_ACTLR, CB_ACTLR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_PRRR, CB_PRRR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_NMRR, CB_NMRR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_CBAR_N, CBAR, 1, DRT_GLOBAL_REG_N);
+ DUMP_REG_INIT(DUMP_REG_CBFRSYNRA_N, CBFRSYNRA, 1, DRT_GLOBAL_REG_N);
+}
+
+#ifdef CONFIG_IOMMU_PGTABLES_L2
+static void __do_set_redirect(struct iommu_domain *domain, void *data)
+{
+ struct msm_iommu_priv *priv;
+ int *no_redirect = data;
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+ priv->pt.redirect = !(*no_redirect);
+ mutex_unlock(&msm_iommu_lock);
+}
+
+static void __do_get_redirect(struct iommu_domain *domain, void *data)
+{
+ struct msm_iommu_priv *priv;
+ int *no_redirect = data;
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+ *no_redirect = !priv->pt.redirect;
+ mutex_unlock(&msm_iommu_lock);
+}
+
+#else
+
+static void __do_set_redirect(struct iommu_domain *domain, void *data)
+{
+}
+
+static void __do_get_redirect(struct iommu_domain *domain, void *data)
+{
+}
+#endif
+
+static int msm_iommu_domain_set_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (attr) {
+ case DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE:
+ __do_set_redirect(domain, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int msm_iommu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (attr) {
+ case DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE:
+ __do_get_redirect(domain, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct iommu_ops msm_iommu_ops = {
+ .domain_alloc = msm_iommu_domain_alloc,
+ .domain_free = msm_iommu_domain_free,
+ .attach_dev = msm_iommu_attach_dev,
+ .detach_dev = msm_iommu_detach_dev,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .map_sg = msm_iommu_map_sg,
+/* .unmap_range = msm_iommu_unmap_range,*/
+ .iova_to_phys = msm_iommu_iova_to_phys,
+/* .get_pt_base_addr = msm_iommu_get_pt_base_addr,*/
+ .pgsize_bitmap = MSM_IOMMU_PGSIZES,
+ .domain_set_attr = msm_iommu_domain_set_attr,
+ .domain_get_attr = msm_iommu_domain_get_attr,
+};
+
+static int __init msm_iommu_init(void)
+{
+ int ret;
+
+ msm_iommu_pagetable_init();
+
+ ret = bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
+ if (ret)
+ return ret;
+
+ msm_iommu_build_dump_regs_table();
+
+ return 0;
+}
+
+subsys_initcall(msm_iommu_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU v2 Driver");
diff --git a/drivers/iommu/qcom/msm_iommu.c b/drivers/iommu/qcom/msm_iommu.c
new file mode 100644
index 000000000000..08d8d10da2c4
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu.c
@@ -0,0 +1,206 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/export.h>
+#include <linux/iommu.h>
+#include <linux/qcom_iommu.h>
+
+static DEFINE_MUTEX(iommu_list_lock);
+static LIST_HEAD(iommu_list);
+
+#define MRC(reg, processor, op1, crn, crm, op2) \
+__asm__ __volatile__ ( \
+" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
+: "=r" (reg))
+
+#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+#define RCP15_MAIR0(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_MAIR1(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+/* These values come from proc-v7-2level.S */
+#define PRRR_VALUE 0xff0a81a8
+#define NMRR_VALUE 0x40e040e0
+
+/* These values come from proc-v7-3level.S */
+#define MAIR0_VALUE 0xeeaa4400
+#define MAIR1_VALUE 0xff000004
+
+static struct iommu_access_ops *iommu_access_ops;
+
+struct bus_type msm_iommu_sec_bus_type = {
+ .name = "msm_iommu_sec_bus",
+};
+
+void msm_set_iommu_access_ops(struct iommu_access_ops *ops)
+{
+ iommu_access_ops = ops;
+}
+
+struct iommu_access_ops *msm_get_iommu_access_ops()
+{
+ BUG_ON(iommu_access_ops == NULL);
+ return iommu_access_ops;
+}
+EXPORT_SYMBOL(msm_get_iommu_access_ops);
+
+void msm_iommu_add_drv(struct msm_iommu_drvdata *drv)
+{
+ mutex_lock(&iommu_list_lock);
+ list_add(&drv->list, &iommu_list);
+ mutex_unlock(&iommu_list_lock);
+}
+
+void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv)
+{
+ mutex_lock(&iommu_list_lock);
+ list_del(&drv->list);
+ mutex_unlock(&iommu_list_lock);
+}
+
+static int find_iommu_ctx(struct device *dev, void *data)
+{
+ struct msm_iommu_ctx_drvdata *c;
+
+ c = dev_get_drvdata(dev);
+ if (!c || !c->name)
+ return 0;
+
+ return !strcmp(data, c->name);
+}
+
+static struct device *find_context(struct device *dev, const char *name)
+{
+ return device_find_child(dev, (void *)name, find_iommu_ctx);
+}
+
+struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ struct msm_iommu_drvdata *drv;
+ struct device *dev = NULL;
+
+ mutex_lock(&iommu_list_lock);
+ list_for_each_entry(drv, &iommu_list, list) {
+ dev = find_context(drv->dev, ctx_name);
+ if (dev)
+ break;
+ }
+ mutex_unlock(&iommu_list_lock);
+
+ put_device(dev);
+
+ if (!dev || !dev_get_drvdata(dev)) {
+ pr_debug("Could not find context <%s>\n", ctx_name);
+ dev = ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL(msm_iommu_get_ctx);
+
+#ifdef CONFIG_ARM
+#ifdef CONFIG_IOMMU_LPAE
+#ifdef CONFIG_ARM_LPAE
+/*
+ * If CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are enabled we can use the MAIR
+ * register directly
+ */
+u32 msm_iommu_get_mair0(void)
+{
+ unsigned int mair0;
+
+ RCP15_MAIR0(mair0);
+ return mair0;
+}
+
+u32 msm_iommu_get_mair1(void)
+{
+ unsigned int mair1;
+
+ RCP15_MAIR1(mair1);
+ return mair1;
+}
+#else
+/*
+ * However, if CONFIG_ARM_LPAE is not enabled but CONFIG_IOMMU_LPAE is enabled
+ * we'll just use the hard-coded values directly.
+ */
+u32 msm_iommu_get_mair0(void)
+{
+ return MAIR0_VALUE;
+}
+
+u32 msm_iommu_get_mair1(void)
+{
+ return MAIR1_VALUE;
+}
+#endif
+
+#else
+#ifdef CONFIG_ARM_LPAE
+/*
+ * If CONFIG_ARM_LPAE is enabled AND CONFIG_IOMMU_LPAE is disabled
+ * we must use the hardcoded values.
+ */
+u32 msm_iommu_get_prrr(void)
+{
+ return PRRR_VALUE;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ return NMRR_VALUE;
+}
+#else
+/*
+ * If both CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are disabled
+ * we can use the registers directly.
+ */
+#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+u32 msm_iommu_get_prrr(void)
+{
+ u32 prrr;
+
+ RCP15_PRRR(prrr);
+ return prrr;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ u32 nmrr;
+
+ RCP15_NMRR(nmrr);
+ return nmrr;
+}
+#endif
+#endif
+#endif
+#ifdef CONFIG_ARM64
+u32 msm_iommu_get_prrr(void)
+{
+ return PRRR_VALUE;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ return NMRR_VALUE;
+}
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_dev-v1.c b/drivers/iommu/qcom/msm_iommu_dev-v1.c
new file mode 100644
index 000000000000..9520cc296e3f
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_dev-v1.c
@@ -0,0 +1,627 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+
+#include "msm_iommu_hw-v1.h"
+#include <linux/qcom_iommu.h>
+#include "msm_iommu_perfmon.h"
+
+/* Forward declaration; the table itself is defined below the ctx driver. */
+static const struct of_device_id msm_iommu_ctx_match_table[];
+
+/*
+ * DT property names for the BFB reg/data tuning tables; LPAE builds read
+ * differently-named properties than non-LPAE builds.
+ */
+#ifdef CONFIG_IOMMU_LPAE
+static const char *BFB_REG_NODE_NAME = "qcom,iommu-lpae-bfb-regs";
+static const char *BFB_DATA_NODE_NAME = "qcom,iommu-lpae-bfb-data";
+#else
+static const char *BFB_REG_NODE_NAME = "qcom,iommu-bfb-regs";
+static const char *BFB_DATA_NODE_NAME = "qcom,iommu-bfb-data";
+#endif
+
+/*
+ * Read the optional BFB reg/data property pair from the IOMMU's DT node
+ * into a devm-allocated msm_iommu_bfb_settings.
+ *
+ * Returns 0 if the properties are absent (BFB tuning is optional) or were
+ * parsed successfully; -EINVAL if only one of the pair is present, the
+ * arrays are oversized or of unequal length; -ENOMEM on allocation failure.
+ */
+static int msm_iommu_parse_bfb_settings(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ struct msm_iommu_bfb_settings *bfb_settings;
+ u32 nreg, nval;
+ int ret;
+
+ /*
+ * It is not valid for a device to have the BFB_REG_NODE_NAME
+ * property but not the BFB_DATA_NODE_NAME property, and vice versa.
+ */
+ if (!of_get_property(pdev->dev.of_node, BFB_REG_NODE_NAME, &nreg)) {
+ if (of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME,
+ &nval))
+ return -EINVAL;
+ return 0;
+ }
+
+ if (!of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME, &nval))
+ return -EINVAL;
+
+ /*
+ * nreg/nval are property lengths in BYTES (as reported by
+ * of_get_property()).  NOTE(review): '>=' rejects a property that
+ * exactly fills the regs[]/data[] arrays — confirm whether '>' was
+ * intended.
+ */
+ if (nreg >= sizeof(bfb_settings->regs))
+ return -EINVAL;
+
+ if (nval >= sizeof(bfb_settings->data))
+ return -EINVAL;
+
+ if (nval != nreg)
+ return -EINVAL;
+
+ bfb_settings = devm_kzalloc(&pdev->dev, sizeof(*bfb_settings),
+ GFP_KERNEL);
+ if (!bfb_settings)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ BFB_REG_NODE_NAME,
+ bfb_settings->regs,
+ nreg / sizeof(*bfb_settings->regs));
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ BFB_DATA_NODE_NAME,
+ bfb_settings->data,
+ nval / sizeof(*bfb_settings->data));
+ if (ret)
+ return ret;
+
+ /* Number of reg/data pairs, i.e. byte length / element size. */
+ bfb_settings->length = nreg / sizeof(*bfb_settings->regs);
+
+ drvdata->bfb_settings = bfb_settings;
+
+ return 0;
+}
+
+/*
+ * Register a bus-scaling client for this IOMMU if its DT node requests one
+ * (presence of "qcom,msm-bus,name").  Bus scaling is optional: absence of
+ * the property, or msm_bus_cl_get_pdata() returning no table, is not an
+ * error.  Returns 0 on success or when bus scaling is not configured,
+ * -EINVAL if client registration fails.
+ */
+static int __get_bus_vote_client(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ struct msm_bus_scale_pdata *bs_table;
+ const char *dummy;
+ int ret;
+
+ /* Check whether bus scaling has been specified for this node */
+ ret = of_property_read_string(pdev->dev.of_node, "qcom,msm-bus,name",
+ &dummy);
+ if (ret)
+ return 0;
+
+ bs_table = msm_bus_cl_get_pdata(pdev);
+ if (!bs_table)
+ return 0;
+
+ drvdata->bus_client = msm_bus_scale_register_client(bs_table);
+ /*
+ * msm_bus_scale_register_client() returns a handle that is 0 on
+ * failure, not an ERR_PTR.  The previous IS_ERR(&drvdata->bus_client)
+ * tested the ADDRESS of the member, which is never in the ERR_PTR
+ * range, so the failure path could never trigger.
+ */
+ if (!drvdata->bus_client) {
+ pr_err("%s(): Bus client register failed.\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Release the bus-scaling client acquired by __get_bus_vote_client() and
+ * clear the handle so a later remove/unwind cannot double-unregister. */
+static void __put_bus_vote_client(struct msm_iommu_drvdata *drvdata)
+{
+ msm_bus_scale_unregister_client(drvdata->bus_client);
+ drvdata->bus_client = 0;
+}
+
+/*
+ * CONFIG_IOMMU_NON_SECURE allows us to override the secure
+ * designation of SMMUs in device tree. With this config enabled
+ * all SMMUs will be programmed by this driver.
+ */
+#ifdef CONFIG_IOMMU_NON_SECURE
+/* No-op: leave drvdata->sec_id at its caller-set default (-1). */
+static inline void get_secure_id(struct device_node *node,
+ struct msm_iommu_drvdata *drvdata)
+{
+}
+
+/* Force every context bank to be treated as non-secure. */
+static inline void get_secure_ctx(struct device_node *node,
+ struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ ctx_drvdata->secure_context = 0;
+}
+#else
+/* Read "qcom,iommu-secure-id" into drvdata->sec_id, but only when secure
+ * (SCM) calls are available; otherwise sec_id keeps its -1 default. */
+static void get_secure_id(struct device_node *node,
+ struct msm_iommu_drvdata *drvdata)
+{
+ if (msm_iommu_get_scm_call_avail())
+ of_property_read_u32(node, "qcom,iommu-secure-id",
+ &drvdata->sec_id);
+}
+
+/* Mark the context bank secure iff SCM calls are available AND the DT node
+ * carries the boolean "qcom,secure-context" property. */
+static void get_secure_ctx(struct device_node *node,
+ struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ u32 secure_ctx = 0;
+
+ if (msm_iommu_get_scm_call_avail())
+ secure_ctx = of_property_read_bool(node, "qcom,secure-context");
+
+ ctx_drvdata->secure_context = secure_ctx;
+}
+#endif
+
+/*
+ * Parse the IOMMU's own DT node: bus-scaling client, optional BFB tuning,
+ * context-bank count (one per available child node), mandatory "label",
+ * secure id, and halt support.  On success the drvdata is added to the
+ * driver's global list via msm_iommu_add_drv().
+ *
+ * On failure only the bus-vote client is unwound here; the BFB settings
+ * are devm-managed and freed with the device.
+ */
+static int msm_iommu_parse_dt(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ struct device_node *child;
+ int ret;
+
+ drvdata->dev = &pdev->dev;
+
+ ret = __get_bus_vote_client(pdev, drvdata);
+ if (ret)
+ goto fail;
+
+ ret = msm_iommu_parse_bfb_settings(pdev, drvdata);
+ if (ret)
+ goto fail;
+
+ /* Each available child node describes one context bank. */
+ for_each_available_child_of_node(pdev->dev.of_node, child)
+ drvdata->ncb++;
+
+ ret = of_property_read_string(pdev->dev.of_node, "label",
+ &drvdata->name);
+ if (ret)
+ goto fail;
+
+ /* -1 means "no secure id"; get_secure_id() may overwrite it. */
+ drvdata->sec_id = -1;
+ get_secure_id(pdev->dev.of_node, drvdata);
+
+ drvdata->halt_enabled = of_property_read_bool(pdev->dev.of_node,
+ "qcom,iommu-enable-halt");
+
+ msm_iommu_add_drv(drvdata);
+
+ return 0;
+
+fail:
+ __put_bus_vote_client(drvdata);
+ return ret;
+}
+
+/*
+ * Parse the performance-monitor properties of the IOMMU node: the PMU event
+ * IRQ, group/counter counts, and the supported event-class array (devm
+ * allocated).  Returns 0 on success, a negative errno on error.
+ */
+static int msm_iommu_pmon_parse_dt(struct platform_device *pdev,
+ struct iommu_pmon *pmon_info)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int cls_prop_size;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ /*
+ * NOTE(review): when irq == 0 this returns 0 (success to the caller)
+ * while evt_irq is left at -1 and nothing else is parsed — confirm
+ * the caller is expected to register a pmon without an IRQ in that
+ * case.
+ */
+ if (irq <= 0) {
+ pmon_info->iommu.evt_irq = -1;
+ return irq;
+ }
+
+ pmon_info->iommu.evt_irq = irq;
+
+ ret = of_property_read_u32(np, "qcom,iommu-pmu-ngroups",
+ &pmon_info->num_groups);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-ngroups\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,iommu-pmu-ncounters",
+ &pmon_info->num_counters);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-ncounters\n");
+ return ret;
+ }
+
+ /* cls_prop_size is the property length in bytes. */
+ if (!of_get_property(np, "qcom,iommu-pmu-event-classes",
+ &cls_prop_size)) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n");
+ return -EINVAL;
+ }
+
+ pmon_info->event_cls_supported = devm_kzalloc(dev, cls_prop_size,
+ GFP_KERNEL);
+ if (!pmon_info->event_cls_supported) {
+ dev_err(dev, "Unable to get memory for event class array\n");
+ return -ENOMEM;
+ }
+
+ pmon_info->nevent_cls_supported = cls_prop_size / sizeof(u32);
+
+ ret = of_property_read_u32_array(np, "qcom,iommu-pmu-event-classes",
+ pmon_info->event_cls_supported,
+ pmon_info->nevent_cls_supported);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Probe one IOMMU instance: map its register regions, acquire clocks,
+ * parse the DT node, optionally register a performance monitor, request
+ * the global fault IRQs, and finally populate the child context-bank
+ * devices (matched via msm_iommu_ctx_match_table).
+ *
+ * All resources are devm-managed; error returns need no manual unwind
+ * except what msm_iommu_parse_dt() handles internally.
+ */
+static int msm_iommu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct iommu_pmon *pmon_info;
+ struct msm_iommu_drvdata *drvdata;
+ struct resource *res;
+ int ret;
+ int global_cfg_irq, global_client_irq;
+ u32 temp;
+ unsigned long rate;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iommu_base");
+ drvdata->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+ /* res is known non-NULL here: devm_ioremap_resource() would have
+ * returned an ERR_PTR for a NULL resource. */
+ drvdata->glb_base = drvdata->base;
+ drvdata->phys_base = res->start;
+
+ /*
+ * smmu_local_base is optional.  NOTE(review): on -EPROBE_DEFER the
+ * ERR_PTR is kept in smmu_local_base instead of deferring the probe —
+ * confirm downstream users check IS_ERR before dereferencing.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smmu_local_base");
+ drvdata->smmu_local_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drvdata->smmu_local_base) &&
+ PTR_ERR(drvdata->smmu_local_base) != -EPROBE_DEFER)
+ drvdata->smmu_local_base = NULL;
+
+ if (of_device_is_compatible(np, "qcom,msm-mmu-500"))
+ drvdata->model = MMU_500;
+
+ drvdata->iface = devm_clk_get(dev, "iface_clk");
+ if (IS_ERR(drvdata->iface))
+ return PTR_ERR(drvdata->iface);
+
+ drvdata->core = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(drvdata->core))
+ return PTR_ERR(drvdata->core);
+
+ /* Context banks start at the DT-provided offset, default 0x8000. */
+ if (!of_property_read_u32(np, "qcom,cb-base-offset", &temp))
+ drvdata->cb_base = drvdata->base + temp;
+ else
+ drvdata->cb_base = drvdata->base + 0x8000;
+
+ /* If the core clock has no rate yet, pick the lowest usable one. */
+ rate = clk_get_rate(drvdata->core);
+ if (!rate) {
+ rate = clk_round_rate(drvdata->core, 1000);
+ clk_set_rate(drvdata->core, rate);
+ }
+
+ dev_info(&pdev->dev, "iface: %lu, core: %lu\n",
+ clk_get_rate(drvdata->iface), clk_get_rate(drvdata->core));
+
+ ret = msm_iommu_parse_dt(pdev, drvdata);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "device %s (model: %d) mapped at %p, with %d ctx banks\n",
+ drvdata->name, drvdata->model, drvdata->base, drvdata->ncb);
+
+ platform_set_drvdata(pdev, drvdata);
+
+ /* Performance monitoring is best-effort: failures only log. */
+ pmon_info = msm_iommu_pm_alloc(dev);
+ if (pmon_info) {
+ ret = msm_iommu_pmon_parse_dt(pdev, pmon_info);
+ if (ret) {
+ msm_iommu_pm_free(dev);
+ dev_info(dev, "%s: pmon not available\n",
+ drvdata->name);
+ } else {
+ pmon_info->iommu.base = drvdata->base;
+ pmon_info->iommu.ops = msm_get_iommu_access_ops();
+ pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v1();
+ pmon_info->iommu.iommu_name = drvdata->name;
+ ret = msm_iommu_pm_iommu_register(pmon_info);
+ if (ret) {
+ dev_err(dev, "%s iommu register fail\n",
+ drvdata->name);
+ msm_iommu_pm_free(dev);
+ } else {
+ dev_dbg(dev, "%s iommu registered for pmon\n",
+ pmon_info->iommu.iommu_name);
+ }
+ }
+ }
+
+ /* Global fault IRQs are optional; request failures only log. */
+ global_cfg_irq = platform_get_irq_byname(pdev, "global_cfg_NS_irq");
+ if (global_cfg_irq < 0 && global_cfg_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (global_cfg_irq > 0) {
+ ret = devm_request_threaded_irq(dev, global_cfg_irq,
+ NULL,
+ msm_iommu_global_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED |
+ IRQF_TRIGGER_RISING,
+ "msm_iommu_global_cfg_irq",
+ pdev);
+ if (ret < 0)
+ dev_err(dev, "Request Global CFG IRQ %d failed with ret=%d\n",
+ global_cfg_irq, ret);
+ }
+
+ global_client_irq =
+ platform_get_irq_byname(pdev, "global_client_NS_irq");
+ if (global_client_irq < 0 && global_client_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (global_client_irq > 0) {
+ ret = devm_request_threaded_irq(dev, global_client_irq,
+ NULL,
+ msm_iommu_global_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED |
+ IRQF_TRIGGER_RISING,
+ "msm_iommu_global_client_irq",
+ pdev);
+ if (ret < 0)
+ dev_err(dev, "Request Global Client IRQ %d failed with ret=%d\n",
+ global_client_irq, ret);
+ }
+
+ /* Create one child platform device per context-bank DT node. */
+ ret = of_platform_populate(np, msm_iommu_ctx_match_table, NULL, dev);
+ if (ret)
+ dev_err(dev, "Failed to create iommu context device\n");
+
+ return ret;
+}
+
+/*
+ * Tear down one IOMMU instance: unregister/free the performance monitor,
+ * release the bus-vote client, and remove the drvdata from the global
+ * driver list.  Mapped regions, clocks and IRQs are devm-managed.
+ */
+static int msm_iommu_remove(struct platform_device *pdev)
+{
+ struct msm_iommu_drvdata *drv;
+
+ msm_iommu_pm_iommu_unregister(&pdev->dev);
+ msm_iommu_pm_free(&pdev->dev);
+
+ drv = platform_get_drvdata(pdev);
+ if (drv) {
+ __put_bus_vote_client(drv);
+ msm_iommu_remove_drv(drv);
+ platform_set_drvdata(pdev, NULL);
+ }
+
+ return 0;
+}
+
+/*
+ * Parse one context-bank child node: request its (secure or non-secure)
+ * fault IRQ, derive the context-bank number from the MMIO addresses,
+ * and read the SID list, optional SID mask, and label.
+ *
+ * Returns 0 on success or a negative errno.  All exits funnel through
+ * 'out'; note the optional-sid-mask path deliberately leaves ret == 0.
+ */
+static int msm_iommu_ctx_parse_dt(struct platform_device *pdev,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ struct resource *r, rp;
+ int irq = 0, ret = 0;
+ struct msm_iommu_drvdata *drvdata;
+ u32 nsid;
+ u32 n_sid_mask;
+ unsigned long cb_offset;
+
+ /*
+ * NOTE(review): the parent's drvdata is dereferenced below without a
+ * NULL check — confirm the parent IOMMU device has always completed
+ * probe before its context children are probed.
+ */
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+
+ get_secure_ctx(pdev->dev.of_node, drvdata, ctx_drvdata);
+
+ if (ctx_drvdata->secure_context) {
+ /* Secure contexts use IRQ index 1. */
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ msm_iommu_secure_fault_handler_v2,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_secure_irq", pdev);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n",
+ irq, ret);
+ return ret;
+ }
+ }
+ } else {
+ /* Non-secure contexts use IRQ index 0. */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ msm_iommu_fault_handler_v2,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_nonsecure_irq", pdev);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n",
+ irq, ret);
+ goto out;
+ }
+ }
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_address_to_resource(pdev->dev.parent->of_node, 0, &rp);
+ if (ret)
+ goto out;
+
+ /* Calculate the context bank number using the base addresses.
+ * Typically CB0 base address is 0x8000 pages away if the number
+ * of CBs are <=8. So, assume the offset 0x8000 until mentioned
+ * explicitely.
+ */
+ cb_offset = drvdata->cb_base - drvdata->base;
+ ctx_drvdata->num = (r->start - rp.start - cb_offset) >> CTX_SHIFT;
+
+ /* Fall back to the device name when no "label" is given. */
+ if (of_property_read_string(pdev->dev.of_node, "label",
+ &ctx_drvdata->name))
+ ctx_drvdata->name = dev_name(&pdev->dev);
+
+ /* nsid is the property length in BYTES (see of_get_property()). */
+ if (!of_get_property(pdev->dev.of_node, "qcom,iommu-ctx-sids", &nsid)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (nsid >= sizeof(ctx_drvdata->sids)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-ctx-sids",
+ ctx_drvdata->sids,
+ nsid / sizeof(*ctx_drvdata->sids))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx_drvdata->nsid = nsid;
+ /* -1: no ASID assigned yet. */
+ ctx_drvdata->asid = -1;
+
+ /* The SID mask is optional; absence means an all-zero mask. */
+ if (!of_get_property(pdev->dev.of_node, "qcom,iommu-sid-mask",
+ &n_sid_mask)) {
+ memset(ctx_drvdata->sid_mask, 0, MAX_NUM_SMR);
+ goto out;
+ }
+
+ /* When present, the mask must pair one entry per SID. */
+ if (n_sid_mask != nsid) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-sid-mask",
+ ctx_drvdata->sid_mask,
+ n_sid_mask / sizeof(*ctx_drvdata->sid_mask))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx_drvdata->n_sid_mask = n_sid_mask;
+
+out:
+ return ret;
+}
+
+/*
+ * Probe one context-bank child device.  Requires a parent (the IOMMU
+ * instance); allocates the per-context drvdata, parses the child DT node
+ * and stores the result as the platform drvdata.
+ */
+static int msm_iommu_ctx_probe(struct platform_device *pdev)
+{
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret;
+
+ if (!pdev->dev.parent)
+ return -EINVAL;
+
+ ctx_drvdata = devm_kzalloc(&pdev->dev, sizeof(*ctx_drvdata),
+ GFP_KERNEL);
+ if (!ctx_drvdata)
+ return -ENOMEM;
+
+ ctx_drvdata->pdev = pdev;
+ INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
+
+ ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, ctx_drvdata);
+
+ dev_info(&pdev->dev, "context %s using bank %d\n",
+ ctx_drvdata->name, ctx_drvdata->num);
+
+ return 0;
+}
+
+/* Context-bank teardown: drvdata memory and IRQs are devm-managed, so only
+ * the drvdata pointer needs clearing. */
+static int msm_iommu_ctx_remove(struct platform_device *pdev)
+{
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+/* Compatible strings for the top-level SMMU instances. */
+static const struct of_device_id msm_iommu_match_table[] = {
+ { .compatible = "qcom,msm-smmu-v1", },
+ { .compatible = "qcom,msm-smmu-v2", },
+ {}
+};
+
+static struct platform_driver msm_iommu_driver = {
+ .driver = {
+ .name = "msm_iommu",
+ .of_match_table = msm_iommu_match_table,
+ },
+ .probe = msm_iommu_probe,
+ .remove = msm_iommu_remove,
+};
+
+/* Compatible strings for the per-context-bank child devices created by
+ * of_platform_populate() in msm_iommu_probe(). */
+static const struct of_device_id msm_iommu_ctx_match_table[] = {
+ { .compatible = "qcom,msm-smmu-v1-ctx", },
+ { .compatible = "qcom,msm-smmu-v2-ctx", },
+ {}
+};
+
+static struct platform_driver msm_iommu_ctx_driver = {
+ .driver = {
+ .name = "msm_iommu_ctx",
+ .of_match_table = msm_iommu_ctx_match_table,
+ },
+ .probe = msm_iommu_ctx_probe,
+ .remove = msm_iommu_ctx_remove,
+};
+
+/*
+ * Module init: detect SCM (secure monitor) call availability, install the
+ * v1 access ops, then register the IOMMU driver followed by the context
+ * driver.  The first registration is rolled back if the second fails.
+ */
+static int __init msm_iommu_driver_init(void)
+{
+ int ret;
+
+ msm_iommu_check_scm_call_avail();
+ msm_set_iommu_access_ops(&iommu_access_ops_v1);
+ msm_iommu_sec_set_access_ops(&iommu_access_ops_v1);
+
+ ret = platform_driver_register(&msm_iommu_driver);
+ if (ret) {
+ pr_err("Failed to register IOMMU driver\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&msm_iommu_ctx_driver);
+ if (ret) {
+ pr_err("Failed to register IOMMU context driver\n");
+ platform_driver_unregister(&msm_iommu_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Module exit: unregister in reverse order of registration. */
+static void __exit msm_iommu_driver_exit(void)
+{
+ platform_driver_unregister(&msm_iommu_ctx_driver);
+ platform_driver_unregister(&msm_iommu_driver);
+}
+
+/* subsys_initcall: IOMMUs must be up before client drivers probe. */
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/qcom/msm_iommu_hw-v1.h b/drivers/iommu/qcom/msm_iommu_hw-v1.h
new file mode 100644
index 000000000000..53e2f4874adb
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_hw-v1.h
@@ -0,0 +1,2320 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Guard renamed from __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H: this file is the
+ * v1 hardware header (msm_iommu_hw-v1.h); the V2 name was a copy-paste
+ * left-over and risks colliding with an actual v2 header.  The matching
+ * bare #endif needs no change.
+ */
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_HW_V1_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_HW_V1_H
+
+/* Each context bank occupies a (1 << CTX_SHIFT)-byte MMIO region. */
+#define CTX_SHIFT 12
+
+/* Address helpers: compute the MMIO address of a global or per-context
+ * register.  'N'-variants index word-sized (4-byte) register arrays. */
+#define CTX_REG(reg, base, ctx) \
+ ((base) + (reg) + ((ctx) << CTX_SHIFT))
+#define GLB_REG(reg, base) \
+ ((base) + (reg))
+/* NOTE(review): GLB_REG_N passes (b, r + 4n) into GLB_REG's (reg, base)
+ * slots — the names are swapped, but addition commutes so the computed
+ * address (b + r + 4n) is still correct. */
+#define GLB_REG_N(b, n, r) GLB_REG(b, ((r) + ((n) << 2)))
+#define GLB_FIELD(b, r) ((b) + (r))
+#define GLB_CTX_FIELD(b, c, r) (GLB_FIELD(b, r) + ((c) << CTX_SHIFT))
+#define GLB_FIELD_N(b, n, r) (GLB_FIELD(b, r) + ((n) << 2))
+
+
+/* Whole-register accessors, 32-bit (readl/writel) and 64-bit
+ * (readq/writeq), all with relaxed ordering. */
+#define GET_GLOBAL_REG(reg, base) (readl_relaxed(GLB_REG(reg, base)))
+#define GET_GLOBAL_REG_Q(reg, base) (readq_relaxed(GLB_REG(reg, base)))
+#define GET_CTX_REG(reg, base, ctx) (readl_relaxed(CTX_REG(reg, base, ctx)))
+#define GET_CTX_REG_Q(reg, base, ctx) (readq_relaxed(CTX_REG(reg, base, ctx)))
+
+#define SET_GLOBAL_REG(reg, base, val) writel_relaxed((val), GLB_REG(reg, base))
+#define SET_GLOBAL_REG_Q(reg, base, val) \
+ (writeq_relaxed((val), GLB_REG(reg, base)))
+
+#define SET_CTX_REG(reg, base, ctx, val) \
+ writel_relaxed((val), (CTX_REG(reg, base, ctx)))
+#define SET_CTX_REG_Q(reg, base, ctx, val) \
+ writeq_relaxed((val), CTX_REG(reg, base, ctx))
+
+/* Wrappers for numbered registers */
+#define SET_GLOBAL_REG_N(b, n, r, v) writel_relaxed(((v)), GLB_REG_N(b, n, r))
+#define GET_GLOBAL_REG_N(b, n, r) (readl_relaxed(GLB_REG_N(b, n, r)))
+
+/* Field wrappers: the register name 'r' is token-pasted with the field
+ * name 'F' to select the r##_##F##_MASK / _SHIFT constants. */
+#define GET_GLOBAL_FIELD(b, r, F) \
+ GET_FIELD(GLB_FIELD(b, r), r##_##F##_MASK, r##_##F##_SHIFT)
+#define GET_CONTEXT_FIELD(b, c, r, F) \
+ GET_FIELD(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT)
+#define GET_CONTEXT_FIELD_Q(b, c, r, F) \
+ GET_FIELD_Q(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT)
+
+#define SET_GLOBAL_FIELD(b, r, F, v) \
+ SET_FIELD(GLB_FIELD(b, r), r##_##F##_MASK, r##_##F##_SHIFT, (v))
+#define SET_CONTEXT_FIELD(b, c, r, F, v) \
+ SET_FIELD(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT, (v))
+#define SET_CONTEXT_FIELD_Q(b, c, r, F, v) \
+ SET_FIELD_Q(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT, (v))
+
+/* Wrappers for numbered field registers */
+#define SET_GLOBAL_FIELD_N(b, n, r, F, v) \
+ SET_FIELD(GLB_FIELD_N(b, n, r), r##_##F##_MASK, r##_##F##_SHIFT, v)
+#define GET_GLOBAL_FIELD_N(b, n, r, F) \
+ GET_FIELD(GLB_FIELD_N(b, n, r), r##_##F##_MASK, r##_##F##_SHIFT)
+
+/* Extract a field: shift down, then mask. */
+#define GET_FIELD(addr, mask, shift) ((readl_relaxed(addr) >> (shift)) & (mask))
+#define GET_FIELD_Q(addr, mask, shift) \
+ ((readq_relaxed(addr) >> (shift)) & (mask))
+
+/*
+ * Read-modify-write a bit field at 'addr': clear (mask << shift), then OR
+ * in the new value.  Fixes vs. the previous version: the 32-bit temporary
+ * is now u32 ('int' gave an implementation-defined conversion for register
+ * reads with bit 31 set), and the field is merged with bitwise '|' instead
+ * of arithmetic '+' (numerically identical here since the field was just
+ * cleared, but '|' states the intent and cannot carry).
+ */
+#define SET_FIELD(addr, mask, shift, v) \
+do { \
+ u32 t = readl_relaxed(addr); \
+ writel_relaxed((t & ~((mask) << (shift))) | (((v) & \
+ (mask)) << (shift)), addr); \
+} while (0)
+
+#define SET_FIELD_Q(addr, mask, shift, v) \
+do { \
+ u64 t = readq_relaxed(addr); \
+ writeq_relaxed((t & ~(((u64) mask) << (shift))) | (((v) & \
+ ((u64) mask)) << (shift)), addr); \
+} while (0)
+
+
+/* Global register space 0 setters / getters */
+#define SET_CR0(b, v) SET_GLOBAL_REG(CR0, (b), (v))
+#define SET_SCR1(b, v) SET_GLOBAL_REG(SCR1, (b), (v))
+#define SET_CR2(b, v) SET_GLOBAL_REG(CR2, (b), (v))
+#define SET_ACR(b, v) SET_GLOBAL_REG(ACR, (b), (v))
+#define SET_IDR0(b, N, v) SET_GLOBAL_REG(IDR0, (b), (v))
+#define SET_IDR1(b, N, v) SET_GLOBAL_REG(IDR1, (b), (v))
+#define SET_IDR2(b, N, v) SET_GLOBAL_REG(IDR2, (b), (v))
+#define SET_IDR7(b, N, v) SET_GLOBAL_REG(IDR7, (b), (v))
+#define SET_GFAR(b, v) SET_GLOBAL_REG_Q(GFAR, (b), (v))
+#define SET_GFSR(b, v) SET_GLOBAL_REG(GFSR, (b), (v))
+#define SET_GFSRRESTORE(b, v) SET_GLOBAL_REG(GFSRRESTORE, (b), (v))
+#define SET_GFSYNR0(b, v) SET_GLOBAL_REG(GFSYNR0, (b), (v))
+#define SET_GFSYNR1(b, v) SET_GLOBAL_REG(GFSYNR1, (b), (v))
+#define SET_GFSYNR2(b, v) SET_GLOBAL_REG(GFSYNR2, (b), (v))
+#define SET_TLBIVMID(b, v) SET_GLOBAL_REG(TLBIVMID, (b), (v))
+#define SET_TLBIALLNSNH(b, v) SET_GLOBAL_REG(TLBIALLNSNH, (b), (v))
+#define SET_TLBIALLH(b, v) SET_GLOBAL_REG(TLBIALLH, (b), (v))
+#define SET_TLBGSYNC(b, v) SET_GLOBAL_REG(TLBGSYNC, (b), (v))
+#define SET_TLBGSTATUS(b, v) SET_GLOBAL_REG(TLBGSTATUS, (b), (v))
+#define SET_TLBIVAH(b, v) SET_GLOBAL_REG(TLBIVAH, (b), (v))
+#define SET_GATS1UR(b, v) SET_GLOBAL_REG(GATS1UR, (b), (v))
+#define SET_GATS1UW(b, v) SET_GLOBAL_REG(GATS1UW, (b), (v))
+#define SET_GATS1PR(b, v) SET_GLOBAL_REG(GATS1PR, (b), (v))
+#define SET_GATS1PW(b, v) SET_GLOBAL_REG(GATS1PW, (b), (v))
+#define SET_GATS12UR(b, v) SET_GLOBAL_REG(GATS12UR, (b), (v))
+#define SET_GATS12UW(b, v) SET_GLOBAL_REG(GATS12UW, (b), (v))
+#define SET_GATS12PR(b, v) SET_GLOBAL_REG(GATS12PR, (b), (v))
+#define SET_GATS12PW(b, v) SET_GLOBAL_REG(GATS12PW, (b), (v))
+#define SET_GPAR(b, v) SET_GLOBAL_REG(GPAR, (b), (v))
+#define SET_GATSR(b, v) SET_GLOBAL_REG(GATSR, (b), (v))
+#define SET_NSCR0(b, v) SET_GLOBAL_REG(NSCR0, (b), (v))
+#define SET_NSCR2(b, v) SET_GLOBAL_REG(NSCR2, (b), (v))
+#define SET_NSACR(b, v) SET_GLOBAL_REG(NSACR, (b), (v))
+#define SET_NSGFAR(b, v) SET_GLOBAL_REG(NSGFAR, (b), (v))
+#define SET_NSGFSRRESTORE(b, v) SET_GLOBAL_REG(NSGFSRRESTORE, (b), (v))
+#define SET_PMCR(b, v) SET_GLOBAL_REG(PMCR, (b), (v))
+#define SET_SMR_N(b, N, v) SET_GLOBAL_REG_N(SMR, N, (b), (v))
+#define SET_S2CR_N(b, N, v) SET_GLOBAL_REG_N(S2CR, N, (b), (v))
+
+#define GET_CR0(b) GET_GLOBAL_REG(CR0, (b))
+#define GET_SCR1(b) GET_GLOBAL_REG(SCR1, (b))
+#define GET_CR2(b) GET_GLOBAL_REG(CR2, (b))
+#define GET_ACR(b) GET_GLOBAL_REG(ACR, (b))
+#define GET_IDR0(b, N) GET_GLOBAL_REG(IDR0, (b))
+#define GET_IDR1(b, N) GET_GLOBAL_REG(IDR1, (b))
+#define GET_IDR2(b, N) GET_GLOBAL_REG(IDR2, (b))
+#define GET_IDR7(b, N) GET_GLOBAL_REG(IDR7, (b))
+#define GET_GFAR(b) GET_GLOBAL_REG_Q(GFAR, (b))
+#define GET_GFSR(b) GET_GLOBAL_REG(GFSR, (b))
+#define GET_GFSRRESTORE(b) GET_GLOBAL_REG(GFSRRESTORE, (b))
+#define GET_GFSYNR0(b) GET_GLOBAL_REG(GFSYNR0, (b))
+#define GET_GFSYNR1(b) GET_GLOBAL_REG(GFSYNR1, (b))
+#define GET_GFSYNR2(b) GET_GLOBAL_REG(GFSYNR2, (b))
+#define GET_TLBIVMID(b) GET_GLOBAL_REG(TLBIVMID, (b))
+#define GET_TLBIALLNSNH(b) GET_GLOBAL_REG(TLBIALLNSNH, (b))
+#define GET_TLBIALLH(b) GET_GLOBAL_REG(TLBIALLH, (b))
+#define GET_TLBGSYNC(b) GET_GLOBAL_REG(TLBGSYNC, (b))
+#define GET_TLBGSTATUS(b) GET_GLOBAL_REG(TLBGSTATUS, (b))
+#define GET_TLBIVAH(b) GET_GLOBAL_REG(TLBIVAH, (b))
+#define GET_GATS1UR(b) GET_GLOBAL_REG(GATS1UR, (b))
+#define GET_GATS1UW(b) GET_GLOBAL_REG(GATS1UW, (b))
+#define GET_GATS1PR(b) GET_GLOBAL_REG(GATS1PR, (b))
+#define GET_GATS1PW(b) GET_GLOBAL_REG(GATS1PW, (b))
+#define GET_GATS12UR(b) GET_GLOBAL_REG(GATS12UR, (b))
+#define GET_GATS12UW(b) GET_GLOBAL_REG(GATS12UW, (b))
+#define GET_GATS12PR(b) GET_GLOBAL_REG(GATS12PR, (b))
+#define GET_GATS12PW(b) GET_GLOBAL_REG(GATS12PW, (b))
+#define GET_GPAR(b) GET_GLOBAL_REG(GPAR, (b))
+#define GET_GATSR(b) GET_GLOBAL_REG(GATSR, (b))
+#define GET_NSCR0(b) GET_GLOBAL_REG(NSCR0, (b))
+#define GET_NSCR2(b) GET_GLOBAL_REG(NSCR2, (b))
+#define GET_NSACR(b) GET_GLOBAL_REG(NSACR, (b))
+#define GET_PMCR(b) GET_GLOBAL_REG(PMCR, (b))
+#define GET_SMR_N(b, N) GET_GLOBAL_REG_N(SMR, N, (b))
+#define GET_S2CR_N(b, N) GET_GLOBAL_REG_N(S2CR, N, (b))
+
+/* Global register space 1 setters / getters */
+#define SET_CBAR_N(b, N, v) SET_GLOBAL_REG_N(CBAR, N, (b), (v))
+#define SET_CBFRSYNRA_N(b, N, v) SET_GLOBAL_REG_N(CBFRSYNRA, N, (b), (v))
+
+#define GET_CBAR_N(b, N) GET_GLOBAL_REG_N(CBAR, N, (b))
+#define GET_CBFRSYNRA_N(b, N) GET_GLOBAL_REG_N(CBFRSYNRA, N, (b))
+
+/* Implementation defined register setters/getters */
+#define SET_MICRO_MMU_CTRL_HALT_REQ(b, v) \
+ SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, HALT_REQ, v)
+#define GET_MICRO_MMU_CTRL_IDLE(b) \
+ GET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, IDLE)
+#define SET_MICRO_MMU_CTRL_RESERVED(b, v) \
+ SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, RESERVED, v)
+
+#define MMU_CTRL_IDLE (MICRO_MMU_CTRL_IDLE_MASK << MICRO_MMU_CTRL_IDLE_SHIFT)
+
+#define SET_PREDICTIONDIS0(b, v) SET_GLOBAL_REG(PREDICTIONDIS0, (b), (v))
+#define SET_PREDICTIONDIS1(b, v) SET_GLOBAL_REG(PREDICTIONDIS1, (b), (v))
+#define SET_S1L1BFBLP0(b, v) SET_GLOBAL_REG(S1L1BFBLP0, (b), (v))
+
+/* SSD register setters/getters */
+#define SET_SSDR_N(b, N, v) SET_GLOBAL_REG_N(SSDR_N, N, (b), (v))
+
+#define GET_SSDR_N(b, N) GET_GLOBAL_REG_N(SSDR_N, N, (b))
+
+/* Context bank register setters/getters */
+#define SET_SCTLR(b, c, v) SET_CTX_REG(CB_SCTLR, (b), (c), (v))
+#define SET_ACTLR(b, c, v) SET_CTX_REG(CB_ACTLR, (b), (c), (v))
+#define SET_RESUME(b, c, v) SET_CTX_REG(CB_RESUME, (b), (c), (v))
+#define SET_TTBCR(b, c, v) SET_CTX_REG(CB_TTBCR, (b), (c), (v))
+#define SET_CONTEXTIDR(b, c, v) SET_CTX_REG(CB_CONTEXTIDR, (b), (c), (v))
+#define SET_PRRR(b, c, v) SET_CTX_REG(CB_PRRR, (b), (c), (v))
+#define SET_NMRR(b, c, v) SET_CTX_REG(CB_NMRR, (b), (c), (v))
+#define SET_PAR(b, c, v) SET_CTX_REG(CB_PAR, (b), (c), (v))
+#define SET_FSR(b, c, v) SET_CTX_REG(CB_FSR, (b), (c), (v))
+#define SET_FSRRESTORE(b, c, v) SET_CTX_REG(CB_FSRRESTORE, (b), (c), (v))
+#define SET_FAR(b, c, v) SET_CTX_REG(CB_FAR, (b), (c), (v))
+#define SET_FSYNR0(b, c, v) SET_CTX_REG(CB_FSYNR0, (b), (c), (v))
+#define SET_FSYNR1(b, c, v) SET_CTX_REG(CB_FSYNR1, (b), (c), (v))
+#define SET_TLBIVA(b, c, v) SET_CTX_REG(CB_TLBIVA, (b), (c), (v))
+#define SET_TLBIVAA(b, c, v) SET_CTX_REG(CB_TLBIVAA, (b), (c), (v))
+#define SET_TLBIASID(b, c, v) SET_CTX_REG(CB_TLBIASID, (b), (c), (v))
+#define SET_TLBIALL(b, c, v) SET_CTX_REG(CB_TLBIALL, (b), (c), (v))
+#define SET_TLBIVAL(b, c, v) SET_CTX_REG(CB_TLBIVAL, (b), (c), (v))
+#define SET_TLBIVAAL(b, c, v) SET_CTX_REG(CB_TLBIVAAL, (b), (c), (v))
+#define SET_TLBSYNC(b, c, v) SET_CTX_REG(CB_TLBSYNC, (b), (c), (v))
+#define SET_TLBSTATUS(b, c, v) SET_CTX_REG(CB_TLBSTATUS, (b), (c), (v))
+#define SET_ATS1PR(b, c, v) SET_CTX_REG(CB_ATS1PR, (b), (c), (v))
+#define SET_ATS1PW(b, c, v) SET_CTX_REG(CB_ATS1PW, (b), (c), (v))
+#define SET_ATS1UR(b, c, v) SET_CTX_REG(CB_ATS1UR, (b), (c), (v))
+#define SET_ATS1UW(b, c, v) SET_CTX_REG(CB_ATS1UW, (b), (c), (v))
+#define SET_ATSR(b, c, v) SET_CTX_REG(CB_ATSR, (b), (c), (v))
+
+#define GET_SCTLR(b, c) GET_CTX_REG(CB_SCTLR, (b), (c))
+#define GET_ACTLR(b, c) GET_CTX_REG(CB_ACTLR, (b), (c))
+#define GET_RESUME(b, c) GET_CTX_REG(CB_RESUME, (b), (c))
+#define GET_TTBR0(b, c) GET_CTX_REG(CB_TTBR0, (b), (c))
+#define GET_TTBR1(b, c) GET_CTX_REG(CB_TTBR1, (b), (c))
+#define GET_TTBCR(b, c) GET_CTX_REG(CB_TTBCR, (b), (c))
+#define GET_CONTEXTIDR(b, c) GET_CTX_REG(CB_CONTEXTIDR, (b), (c))
+#define GET_PRRR(b, c) GET_CTX_REG(CB_PRRR, (b), (c))
+#define GET_NMRR(b, c) GET_CTX_REG(CB_NMRR, (b), (c))
+#define GET_PAR(b, c) GET_CTX_REG_Q(CB_PAR, (b), (c))
+#define GET_FSR(b, c) GET_CTX_REG(CB_FSR, (b), (c))
+#define GET_FSRRESTORE(b, c) GET_CTX_REG(CB_FSRRESTORE, (b), (c))
+#define GET_FAR(b, c) GET_CTX_REG_Q(CB_FAR, (b), (c))
+#define GET_FSYNR0(b, c) GET_CTX_REG(CB_FSYNR0, (b), (c))
+#define GET_FSYNR1(b, c) GET_CTX_REG(CB_FSYNR1, (b), (c))
+#define GET_TLBIVA(b, c) GET_CTX_REG(CB_TLBIVA, (b), (c))
+#define GET_TLBIVAA(b, c) GET_CTX_REG(CB_TLBIVAA, (b), (c))
+#define GET_TLBIASID(b, c) GET_CTX_REG(CB_TLBIASID, (b), (c))
+#define GET_TLBIALL(b, c) GET_CTX_REG(CB_TLBIALL, (b), (c))
+#define GET_TLBIVAL(b, c) GET_CTX_REG(CB_TLBIVAL, (b), (c))
+#define GET_TLBIVAAL(b, c) GET_CTX_REG(CB_TLBIVAAL, (b), (c))
+#define GET_TLBSYNC(b, c) GET_CTX_REG(CB_TLBSYNC, (b), (c))
+#define GET_TLBSTATUS(b, c) GET_CTX_REG(CB_TLBSTATUS, (b), (c))
+#define GET_ATS1PR(b, c) GET_CTX_REG(CB_ATS1PR, (b), (c))
+#define GET_ATS1PW(b, c) GET_CTX_REG(CB_ATS1PW, (b), (c))
+#define GET_ATS1UR(b, c) GET_CTX_REG(CB_ATS1UR, (b), (c))
+#define GET_ATS1UW(b, c) GET_CTX_REG(CB_ATS1UW, (b), (c))
+#define GET_ATSR(b, c) GET_CTX_REG(CB_ATSR, (b), (c))
+
+/* Global Register field setters / getters */
+/* Configuration Register: CR0/NSCR0 */
+#define SET_CR0_NSCFG(b, v) SET_GLOBAL_FIELD(b, CR0, NSCFG, v)
+#define SET_CR0_WACFG(b, v) SET_GLOBAL_FIELD(b, CR0, WACFG, v)
+#define SET_CR0_RACFG(b, v) SET_GLOBAL_FIELD(b, CR0, RACFG, v)
+#define SET_CR0_SHCFG(b, v) SET_GLOBAL_FIELD(b, CR0, SHCFG, v)
+#define SET_CR0_SMCFCFG(b, v) SET_GLOBAL_FIELD(b, CR0, SMCFCFG, v)
+#define SET_NSCR0_SMCFCFG(b, v) SET_GLOBAL_FIELD(b, NSCR0, SMCFCFG, v)
+#define SET_CR0_MTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, MTCFG, v)
+#define SET_CR0_BSU(b, v) SET_GLOBAL_FIELD(b, CR0, BSU, v)
+#define SET_CR0_FB(b, v) SET_GLOBAL_FIELD(b, CR0, FB, v)
+#define SET_CR0_PTM(b, v) SET_GLOBAL_FIELD(b, CR0, PTM, v)
+#define SET_CR0_VMIDPNE(b, v) SET_GLOBAL_FIELD(b, CR0, VMIDPNE, v)
+#define SET_CR0_USFCFG(b, v) SET_GLOBAL_FIELD(b, CR0, USFCFG, v)
+#define SET_NSCR0_USFCFG(b, v) SET_GLOBAL_FIELD(b, NSCR0, USFCFG, v)
+#define SET_CR0_GSE(b, v) SET_GLOBAL_FIELD(b, CR0, GSE, v)
+#define SET_CR0_STALLD(b, v) SET_GLOBAL_FIELD(b, CR0, STALLD, v)
+#define SET_NSCR0_STALLD(b, v) SET_GLOBAL_FIELD(b, NSCR0, STALLD, v)
+#define SET_CR0_TRANSIENTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG, v)
+#define SET_CR0_GCFGFIE(b, v) SET_GLOBAL_FIELD(b, CR0, GCFGFIE, v)
+#define SET_NSCR0_GCFGFIE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GCFGFIE, v)
+#define SET_CR0_GCFGFRE(b, v) SET_GLOBAL_FIELD(b, CR0, GCFGFRE, v)
+#define SET_NSCR0_GCFGFRE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GCFGFRE, v)
+#define SET_CR0_GFIE(b, v) SET_GLOBAL_FIELD(b, CR0, GFIE, v)
+#define SET_NSCR0_GFIE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GFIE, v)
+#define SET_CR0_GFRE(b, v) SET_GLOBAL_FIELD(b, CR0, GFRE, v)
+#define SET_NSCR0_GFRE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GFRE, v)
+#define SET_CR0_CLIENTPD(b, v) SET_GLOBAL_FIELD(b, CR0, CLIENTPD, v)
+#define SET_NSCR0_CLIENTPD(b, v) SET_GLOBAL_FIELD(b, NSCR0, CLIENTPD, v)
+
+#define SET_ACR_SMTNMC_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, SMTNMC_BPTLBEN, v)
+#define SET_ACR_MMUDIS_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, MMUDIS_BPTLBEN, v)
+#define SET_ACR_S2CR_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, S2CR_BPTLBEN, v)
+
+#define SET_NSACR_SMTNMC_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, SMTNMC_BPTLBEN, v)
+#define SET_NSACR_MMUDIS_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, MMUDIS_BPTLBEN, v)
+#define SET_NSACR_S2CR_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, S2CR_BPTLBEN, v)
+
+#define GET_CR0_NSCFG(b) GET_GLOBAL_FIELD(b, CR0, NSCFG)
+#define GET_CR0_WACFG(b) GET_GLOBAL_FIELD(b, CR0, WACFG)
+#define GET_CR0_RACFG(b) GET_GLOBAL_FIELD(b, CR0, RACFG)
+#define GET_CR0_SHCFG(b) GET_GLOBAL_FIELD(b, CR0, SHCFG)
+#define GET_CR0_SMCFCFG(b) GET_GLOBAL_FIELD(b, CR0, SMCFCFG)
+#define GET_CR0_MTCFG(b) GET_GLOBAL_FIELD(b, CR0, MTCFG)
+#define GET_CR0_BSU(b) GET_GLOBAL_FIELD(b, CR0, BSU)
+#define GET_CR0_FB(b) GET_GLOBAL_FIELD(b, CR0, FB)
+#define GET_CR0_PTM(b) GET_GLOBAL_FIELD(b, CR0, PTM)
+#define GET_CR0_VMIDPNE(b) GET_GLOBAL_FIELD(b, CR0, VMIDPNE)
+#define GET_CR0_USFCFG(b) GET_GLOBAL_FIELD(b, CR0, USFCFG)
+#define GET_CR0_GSE(b) GET_GLOBAL_FIELD(b, CR0, GSE)
+#define GET_CR0_STALLD(b) GET_GLOBAL_FIELD(b, CR0, STALLD)
+#define GET_CR0_TRANSIENTCFG(b) GET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG)
+#define GET_CR0_GCFGFIE(b) GET_GLOBAL_FIELD(b, CR0, GCFGFIE)
+#define GET_CR0_GCFGFRE(b) GET_GLOBAL_FIELD(b, CR0, GCFGFRE)
+#define GET_CR0_GFIE(b) GET_GLOBAL_FIELD(b, CR0, GFIE)
+#define GET_CR0_GFRE(b) GET_GLOBAL_FIELD(b, CR0, GFRE)
+#define GET_CR0_CLIENTPD(b) GET_GLOBAL_FIELD(b, CR0, CLIENTPD)
+
+/* Configuration Register: CR2 */
+#define SET_CR2_BPVMID(b, v) SET_GLOBAL_FIELD(b, CR2, BPVMID, v)
+
+#define GET_CR2_BPVMID(b) GET_GLOBAL_FIELD(b, CR2, BPVMID)
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define SET_GATS1PR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1PR, ADDR, v)
+#define SET_GATS1PR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1PR, NDX, v)
+
+#define GET_GATS1PR_ADDR(b) GET_GLOBAL_FIELD(b, GATS1PR, ADDR)
+#define GET_GATS1PR_NDX(b) GET_GLOBAL_FIELD(b, GATS1PR, NDX)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define SET_GATS1PW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1PW, ADDR, v)
+#define SET_GATS1PW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1PW, NDX, v)
+
+#define GET_GATS1PW_ADDR(b) GET_GLOBAL_FIELD(b, GATS1PW, ADDR)
+#define GET_GATS1PW_NDX(b) GET_GLOBAL_FIELD(b, GATS1PW, NDX)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define SET_GATS1UR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1UR, ADDR, v)
+#define SET_GATS1UR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1UR, NDX, v)
+
+#define GET_GATS1UR_ADDR(b) GET_GLOBAL_FIELD(b, GATS1UR, ADDR)
+#define GET_GATS1UR_NDX(b) GET_GLOBAL_FIELD(b, GATS1UR, NDX)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define SET_GATS1UW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1UW, ADDR, v)
+#define SET_GATS1UW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1UW, NDX, v)
+
+#define GET_GATS1UW_ADDR(b) GET_GLOBAL_FIELD(b, GATS1UW, ADDR)
+#define GET_GATS1UW_NDX(b) GET_GLOBAL_FIELD(b, GATS1UW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define SET_GATS12PR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12PR, ADDR, v)
+#define SET_GATS12PR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12PR, NDX, v)
+
+#define GET_GATS12PR_ADDR(b) GET_GLOBAL_FIELD(b, GATS12PR, ADDR)
+#define GET_GATS12PR_NDX(b) GET_GLOBAL_FIELD(b, GATS12PR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define SET_GATS12PW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12PW, ADDR, v)
+#define SET_GATS12PW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12PW, NDX, v)
+
+#define GET_GATS12PW_ADDR(b) GET_GLOBAL_FIELD(b, GATS12PW, ADDR)
+#define GET_GATS12PW_NDX(b) GET_GLOBAL_FIELD(b, GATS12PW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define SET_GATS12UR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12UR, ADDR, v)
+#define SET_GATS12UR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12UR, NDX, v)
+
+#define GET_GATS12UR_ADDR(b) GET_GLOBAL_FIELD(b, GATS12UR, ADDR)
+#define GET_GATS12UR_NDX(b) GET_GLOBAL_FIELD(b, GATS12UR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define SET_GATS12UW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12UW, ADDR, v)
+#define SET_GATS12UW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12UW, NDX, v)
+
+#define GET_GATS12UW_ADDR(b) GET_GLOBAL_FIELD(b, GATS12UW, ADDR)
+#define GET_GATS12UW_NDX(b) GET_GLOBAL_FIELD(b, GATS12UW, NDX)
+
+/* Global Address Translation Status Register: GATSR */
+#define SET_GATSR_ACTIVE(b, v) SET_GLOBAL_FIELD(b, GATSR, ACTIVE, v)
+
+#define GET_GATSR_ACTIVE(b) GET_GLOBAL_FIELD(b, GATSR, ACTIVE)
+
+/* Global Fault Address Register: GFAR (FADDR = faulting address) */
+#define SET_GFAR_FADDR(b, v) SET_GLOBAL_FIELD(b, GFAR, FADDR, v)
+
+#define GET_GFAR_FADDR(b) GET_GLOBAL_FIELD(b, GFAR, FADDR)
+
+/* Global Fault Status Register: GFSR (one flag field per fault type) */
+#define SET_GFSR_ICF(b, v) SET_GLOBAL_FIELD(b, GFSR, ICF, v)
+#define SET_GFSR_USF(b, v) SET_GLOBAL_FIELD(b, GFSR, USF, v)
+#define SET_GFSR_SMCF(b, v) SET_GLOBAL_FIELD(b, GFSR, SMCF, v)
+#define SET_GFSR_UCBF(b, v) SET_GLOBAL_FIELD(b, GFSR, UCBF, v)
+#define SET_GFSR_UCIF(b, v) SET_GLOBAL_FIELD(b, GFSR, UCIF, v)
+#define SET_GFSR_CAF(b, v) SET_GLOBAL_FIELD(b, GFSR, CAF, v)
+#define SET_GFSR_EF(b, v) SET_GLOBAL_FIELD(b, GFSR, EF, v)
+#define SET_GFSR_PF(b, v) SET_GLOBAL_FIELD(b, GFSR, PF, v)
+#define SET_GFSR_MULTI(b, v) SET_GLOBAL_FIELD(b, GFSR, MULTI, v)
+
+#define GET_GFSR_ICF(b) GET_GLOBAL_FIELD(b, GFSR, ICF)
+#define GET_GFSR_USF(b) GET_GLOBAL_FIELD(b, GFSR, USF)
+#define GET_GFSR_SMCF(b) GET_GLOBAL_FIELD(b, GFSR, SMCF)
+#define GET_GFSR_UCBF(b) GET_GLOBAL_FIELD(b, GFSR, UCBF)
+#define GET_GFSR_UCIF(b) GET_GLOBAL_FIELD(b, GFSR, UCIF)
+#define GET_GFSR_CAF(b) GET_GLOBAL_FIELD(b, GFSR, CAF)
+#define GET_GFSR_EF(b) GET_GLOBAL_FIELD(b, GFSR, EF)
+#define GET_GFSR_PF(b) GET_GLOBAL_FIELD(b, GFSR, PF)
+#define GET_GFSR_MULTI(b) GET_GLOBAL_FIELD(b, GFSR, MULTI)
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define SET_GFSYNR0_NESTED(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NESTED, v)
+#define SET_GFSYNR0_WNR(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, WNR, v)
+#define SET_GFSYNR0_PNU(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, PNU, v)
+#define SET_GFSYNR0_IND(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, IND, v)
+#define SET_GFSYNR0_NSSTATE(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE, v)
+#define SET_GFSYNR0_NSATTR(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSATTR, v)
+
+#define GET_GFSYNR0_NESTED(b) GET_GLOBAL_FIELD(b, GFSYNR0, NESTED)
+#define GET_GFSYNR0_WNR(b) GET_GLOBAL_FIELD(b, GFSYNR0, WNR)
+#define GET_GFSYNR0_PNU(b) GET_GLOBAL_FIELD(b, GFSYNR0, PNU)
+#define GET_GFSYNR0_IND(b) GET_GLOBAL_FIELD(b, GFSYNR0, IND)
+#define GET_GFSYNR0_NSSTATE(b) GET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE)
+#define GET_GFSYNR0_NSATTR(b) GET_GLOBAL_FIELD(b, GFSYNR0, NSATTR)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define SET_GFSYNR1_SID(b, v) SET_GLOBAL_FIELD(b, GFSYNR1, SID, v)
+
+#define GET_GFSYNR1_SID(b) GET_GLOBAL_FIELD(b, GFSYNR1, SID)
+
+/*
+ * Global Physical Address Register: GPAR -- holds the result/status of
+ * the GATS* global address translation operations defined above.
+ */
+#define SET_GPAR_F(b, v) SET_GLOBAL_FIELD(b, GPAR, F, v)
+#define SET_GPAR_SS(b, v) SET_GLOBAL_FIELD(b, GPAR, SS, v)
+#define SET_GPAR_OUTER(b, v) SET_GLOBAL_FIELD(b, GPAR, OUTER, v)
+#define SET_GPAR_INNER(b, v) SET_GLOBAL_FIELD(b, GPAR, INNER, v)
+#define SET_GPAR_SH(b, v) SET_GLOBAL_FIELD(b, GPAR, SH, v)
+#define SET_GPAR_NS(b, v) SET_GLOBAL_FIELD(b, GPAR, NS, v)
+#define SET_GPAR_NOS(b, v) SET_GLOBAL_FIELD(b, GPAR, NOS, v)
+#define SET_GPAR_PA(b, v) SET_GLOBAL_FIELD(b, GPAR, PA, v)
+#define SET_GPAR_TF(b, v) SET_GLOBAL_FIELD(b, GPAR, TF, v)
+#define SET_GPAR_AFF(b, v) SET_GLOBAL_FIELD(b, GPAR, AFF, v)
+#define SET_GPAR_PF(b, v) SET_GLOBAL_FIELD(b, GPAR, PF, v)
+#define SET_GPAR_EF(b, v) SET_GLOBAL_FIELD(b, GPAR, EF, v)
+#define SET_GPAR_TLCMCF(b, v) SET_GLOBAL_FIELD(b, GPAR, TLCMCF, v)
+#define SET_GPAR_TLBLKF(b, v) SET_GLOBAL_FIELD(b, GPAR, TLBLKF, v)
+#define SET_GPAR_UCBF(b, v) SET_GLOBAL_FIELD(b, GPAR, UCBF, v)
+
+#define GET_GPAR_F(b) GET_GLOBAL_FIELD(b, GPAR, F)
+#define GET_GPAR_SS(b) GET_GLOBAL_FIELD(b, GPAR, SS)
+#define GET_GPAR_OUTER(b) GET_GLOBAL_FIELD(b, GPAR, OUTER)
+#define GET_GPAR_INNER(b) GET_GLOBAL_FIELD(b, GPAR, INNER)
+#define GET_GPAR_SH(b) GET_GLOBAL_FIELD(b, GPAR, SH)
+#define GET_GPAR_NS(b) GET_GLOBAL_FIELD(b, GPAR, NS)
+#define GET_GPAR_NOS(b) GET_GLOBAL_FIELD(b, GPAR, NOS)
+#define GET_GPAR_PA(b) GET_GLOBAL_FIELD(b, GPAR, PA)
+#define GET_GPAR_TF(b) GET_GLOBAL_FIELD(b, GPAR, TF)
+#define GET_GPAR_AFF(b) GET_GLOBAL_FIELD(b, GPAR, AFF)
+#define GET_GPAR_PF(b) GET_GLOBAL_FIELD(b, GPAR, PF)
+#define GET_GPAR_EF(b) GET_GLOBAL_FIELD(b, GPAR, EF)
+#define GET_GPAR_TLCMCF(b) GET_GLOBAL_FIELD(b, GPAR, TLCMCF)
+#define GET_GPAR_TLBLKF(b) GET_GLOBAL_FIELD(b, GPAR, TLBLKF)
+#define GET_GPAR_UCBF(b) GET_GLOBAL_FIELD(b, GPAR, UCBF)
+
+/* Identification Register: IDR0 */
+#define SET_IDR0_NUMSMRG(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMSMRG, v)
+#define SET_IDR0_NUMSIDB(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMSIDB, v)
+#define SET_IDR0_BTM(b, v) SET_GLOBAL_FIELD(b, IDR0, BTM, v)
+#define SET_IDR0_CTTW(b, v) SET_GLOBAL_FIELD(b, IDR0, CTTW, v)
+#define SET_IDR0_NUMIRPT(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMIRPT, v)
+#define SET_IDR0_PTFS(b, v) SET_GLOBAL_FIELD(b, IDR0, PTFS, v)
+#define SET_IDR0_SMS(b, v) SET_GLOBAL_FIELD(b, IDR0, SMS, v)
+#define SET_IDR0_NTS(b, v) SET_GLOBAL_FIELD(b, IDR0, NTS, v)
+#define SET_IDR0_S2TS(b, v) SET_GLOBAL_FIELD(b, IDR0, S2TS, v)
+#define SET_IDR0_S1TS(b, v) SET_GLOBAL_FIELD(b, IDR0, S1TS, v)
+#define SET_IDR0_SES(b, v) SET_GLOBAL_FIELD(b, IDR0, SES, v)
+
+#define GET_IDR0_NUMSMRG(b) GET_GLOBAL_FIELD(b, IDR0, NUMSMRG)
+#define GET_IDR0_NUMSIDB(b) GET_GLOBAL_FIELD(b, IDR0, NUMSIDB)
+#define GET_IDR0_BTM(b) GET_GLOBAL_FIELD(b, IDR0, BTM)
+#define GET_IDR0_CTTW(b) GET_GLOBAL_FIELD(b, IDR0, CTTW)
+#define GET_IDR0_NUMIRPT(b) GET_GLOBAL_FIELD(b, IDR0, NUMIRPT)
+#define GET_IDR0_PTFS(b) GET_GLOBAL_FIELD(b, IDR0, PTFS)
+#define GET_IDR0_SMS(b) GET_GLOBAL_FIELD(b, IDR0, SMS)
+#define GET_IDR0_NTS(b) GET_GLOBAL_FIELD(b, IDR0, NTS)
+#define GET_IDR0_S2TS(b) GET_GLOBAL_FIELD(b, IDR0, S2TS)
+#define GET_IDR0_S1TS(b) GET_GLOBAL_FIELD(b, IDR0, S1TS)
+#define GET_IDR0_SES(b) GET_GLOBAL_FIELD(b, IDR0, SES)
+
+/* Identification Register: IDR1 */
+#define SET_IDR1_NUMCB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMCB, v)
+#define SET_IDR1_NUMSSDNDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB, v)
+#define SET_IDR1_SSDTP(b, v) SET_GLOBAL_FIELD(b, IDR1, SSDTP, v)
+#define SET_IDR1_SMCD(b, v) SET_GLOBAL_FIELD(b, IDR1, SMCD, v)
+#define SET_IDR1_NUMS2CB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMS2CB, v)
+#define SET_IDR1_NUMPAGENDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB, v)
+#define SET_IDR1_PAGESIZE(b, v) SET_GLOBAL_FIELD(b, IDR1, PAGESIZE, v)
+
+#define GET_IDR1_NUMCB(b) GET_GLOBAL_FIELD(b, IDR1, NUMCB)
+#define GET_IDR1_NUMSSDNDXB(b) GET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB)
+#define GET_IDR1_SSDTP(b) GET_GLOBAL_FIELD(b, IDR1, SSDTP)
+#define GET_IDR1_SMCD(b) GET_GLOBAL_FIELD(b, IDR1, SMCD)
+#define GET_IDR1_NUMS2CB(b) GET_GLOBAL_FIELD(b, IDR1, NUMS2CB)
+#define GET_IDR1_NUMPAGENDXB(b) GET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB)
+#define GET_IDR1_PAGESIZE(b) GET_GLOBAL_FIELD(b, IDR1, PAGESIZE)
+
+/* Identification Register: IDR2 (IAS/OAS: input/output address sizes) */
+#define SET_IDR2_IAS(b, v) SET_GLOBAL_FIELD(b, IDR2, IAS, v)
+#define SET_IDR2_OAS(b, v) SET_GLOBAL_FIELD(b, IDR2, OAS, v)
+
+#define GET_IDR2_IAS(b) GET_GLOBAL_FIELD(b, IDR2, IAS)
+#define GET_IDR2_OAS(b) GET_GLOBAL_FIELD(b, IDR2, OAS)
+
+/* Identification Register: IDR7 (MAJOR/MINOR implementation revision) */
+#define SET_IDR7_MINOR(b, v) SET_GLOBAL_FIELD(b, IDR7, MINOR, v)
+#define SET_IDR7_MAJOR(b, v) SET_GLOBAL_FIELD(b, IDR7, MAJOR, v)
+
+#define GET_IDR7_MINOR(b) GET_GLOBAL_FIELD(b, IDR7, MINOR)
+#define GET_IDR7_MAJOR(b) GET_GLOBAL_FIELD(b, IDR7, MAJOR)
+
+/*
+ * The *_N accessors below take an additional index n and use
+ * SET/GET_GLOBAL_FIELD_N to address the n-th instance of the register
+ * (one per stream-mapping entry or per context bank).
+ */
+
+/* Stream to Context Register: S2CR_N */
+#define SET_S2CR_CBNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX, v)
+#define SET_S2CR_SHCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG, v)
+#define SET_S2CR_MTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG, v)
+#define SET_S2CR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR, v)
+#define SET_S2CR_TYPE(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, TYPE, v)
+#define SET_S2CR_NSCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG, v)
+#define SET_S2CR_RACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, RACFG, v)
+#define SET_S2CR_WACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, WACFG, v)
+#define SET_S2CR_PRIVCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG, v)
+#define SET_S2CR_INSTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG, v)
+#define SET_S2CR_TRANSIENTCFG(b, n, v) \
+ SET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG, v)
+#define SET_S2CR_VMID(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, VMID, v)
+#define SET_S2CR_BSU(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, BSU, v)
+#define SET_S2CR_FB(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, FB, v)
+
+#define GET_S2CR_CBNDX(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX)
+#define GET_S2CR_SHCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG)
+#define GET_S2CR_MTCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG)
+#define GET_S2CR_MEMATTR(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR)
+#define GET_S2CR_TYPE(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, TYPE)
+#define GET_S2CR_NSCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG)
+#define GET_S2CR_RACFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, RACFG)
+#define GET_S2CR_WACFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, WACFG)
+#define GET_S2CR_PRIVCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG)
+#define GET_S2CR_INSTCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG)
+#define GET_S2CR_TRANSIENTCFG(b, n) \
+ GET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG)
+#define GET_S2CR_VMID(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, VMID)
+#define GET_S2CR_BSU(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, BSU)
+#define GET_S2CR_FB(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, FB)
+
+/* Stream Match Register: SMR_N */
+#define SET_SMR_ID(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, ID, v)
+#define SET_SMR_MASK(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, MASK, v)
+#define SET_SMR_VALID(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, VALID, v)
+
+#define GET_SMR_ID(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, ID)
+#define GET_SMR_MASK(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, MASK)
+#define GET_SMR_VALID(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, VALID)
+
+/* Global TLB Status: TLBGSTATUS */
+#define SET_TLBGSTATUS_GSACTIVE(b, v) \
+ SET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE, v)
+
+#define GET_TLBGSTATUS_GSACTIVE(b) \
+ GET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE)
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define SET_TLBIVAH_ADDR(b, v) SET_GLOBAL_FIELD(b, TLBIVAH, ADDR, v)
+
+#define GET_TLBIVAH_ADDR(b) GET_GLOBAL_FIELD(b, TLBIVAH, ADDR)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define SET_TLBIVMID_VMID(b, v) SET_GLOBAL_FIELD(b, TLBIVMID, VMID, v)
+
+#define GET_TLBIVMID_VMID(b) GET_GLOBAL_FIELD(b, TLBIVMID, VMID)
+
+/* Global Register Space 1 Field setters/getters*/
+/* Context Bank Attribute Register: CBAR_N */
+#define SET_CBAR_VMID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, VMID, v)
+#define SET_CBAR_CBNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX, v)
+#define SET_CBAR_BPSHCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG, v)
+#define SET_CBAR_HYPC(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, HYPC, v)
+#define SET_CBAR_FB(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, FB, v)
+#define SET_CBAR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR, v)
+#define SET_CBAR_TYPE(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, TYPE, v)
+#define SET_CBAR_BSU(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, BSU, v)
+#define SET_CBAR_RACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, RACFG, v)
+#define SET_CBAR_WACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, WACFG, v)
+#define SET_CBAR_IRPTNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX, v)
+
+#define GET_CBAR_VMID(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, VMID)
+#define GET_CBAR_CBNDX(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX)
+#define GET_CBAR_BPSHCFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG)
+#define GET_CBAR_HYPC(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, HYPC)
+#define GET_CBAR_FB(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, FB)
+#define GET_CBAR_MEMATTR(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR)
+#define GET_CBAR_TYPE(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, TYPE)
+#define GET_CBAR_BSU(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, BSU)
+#define GET_CBAR_RACFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, RACFG)
+#define GET_CBAR_WACFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, WACFG)
+#define GET_CBAR_IRPTNDX(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA_N */
+#define SET_CBFRSYNRA_SID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID, v)
+
+#define GET_CBFRSYNRA_SID(b, n) GET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID)
+
+/* Stage 1 Context Bank Format Fields */
+/*
+ * Fix: SET_CB_ACTLR_REQPRIORITY and GET_CB_ACTLR_REQPRIORITY previously
+ * had a space between the macro name and the parameter list, which made
+ * them object-like macros expanding to "(b, c, v) SET_CONTEXT_FIELD(...)".
+ * Any function-style use would then expand incorrectly and fail to
+ * compile. The space is removed so they are function-like, matching the
+ * other CB_ACTLR accessors.
+ */
+#define SET_CB_ACTLR_REQPRIORITY(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY, v)
+#define SET_CB_ACTLR_REQPRIORITYCFG(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG, v)
+#define SET_CB_ACTLR_PRIVCFG(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG, v)
+#define SET_CB_ACTLR_BPRCOSH(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH, v)
+#define SET_CB_ACTLR_BPRCISH(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH, v)
+#define SET_CB_ACTLR_BPRCNSH(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH, v)
+
+#define GET_CB_ACTLR_REQPRIORITY(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY)
+#define GET_CB_ACTLR_REQPRIORITYCFG(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG)
+#define GET_CB_ACTLR_PRIVCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG)
+#define GET_CB_ACTLR_BPRCOSH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH)
+#define GET_CB_ACTLR_BPRCISH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH)
+#define GET_CB_ACTLR_BPRCNSH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH)
+
+/*
+ * Per-context-bank (CB_*) accessors: these take a context bank index c
+ * and use SET/GET_CONTEXT_FIELD rather than the global-register helpers.
+ */
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define SET_CB_ATS1PR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR, v)
+
+#define GET_CB_ATS1PR_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define SET_CB_ATS1PW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR, v)
+
+#define GET_CB_ATS1PW_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define SET_CB_ATS1UR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR, v)
+
+#define GET_CB_ATS1UR_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define SET_CB_ATS1UW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR, v)
+
+#define GET_CB_ATS1UW_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR)
+
+/* Address Translation Status Register: CB_ATSR */
+#define SET_CB_ATSR_ACTIVE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE, v)
+
+#define GET_CB_ATSR_ACTIVE(b, c) GET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define SET_CB_CONTEXTIDR_ASID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID, v)
+#define SET_CB_CONTEXTIDR_PROCID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID, v)
+
+#define GET_CB_CONTEXTIDR_ASID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID)
+#define GET_CB_CONTEXTIDR_PROCID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID)
+
+/* Fault Address Register: CB_FAR */
+#define SET_CB_FAR_FADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FAR, FADDR, v)
+
+#define GET_CB_FAR_FADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_FAR, FADDR)
+
+/* Fault Status Register: CB_FSR */
+#define SET_CB_FSR_TF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TF, v)
+#define SET_CB_FSR_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, AFF, v)
+#define SET_CB_FSR_PF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, PF, v)
+#define SET_CB_FSR_EF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, EF, v)
+#define SET_CB_FSR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF, v)
+#define SET_CB_FSR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF, v)
+#define SET_CB_FSR_SS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, SS, v)
+#define SET_CB_FSR_MULTI(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, MULTI, v)
+
+#define GET_CB_FSR_TF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TF)
+#define GET_CB_FSR_AFF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, AFF)
+#define GET_CB_FSR_PF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, PF)
+#define GET_CB_FSR_EF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, EF)
+#define GET_CB_FSR_TLBMCF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF)
+#define GET_CB_FSR_TLBLKF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF)
+#define GET_CB_FSR_SS(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, SS)
+#define GET_CB_FSR_MULTI(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, MULTI)
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define SET_CB_FSYNR0_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL, v)
+#define SET_CB_FSYNR0_S1PTWF(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF, v)
+#define SET_CB_FSYNR0_WNR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR, v)
+#define SET_CB_FSYNR0_PNU(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU, v)
+#define SET_CB_FSYNR0_IND(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND, v)
+#define SET_CB_FSYNR0_NSSTATE(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE, v)
+#define SET_CB_FSYNR0_NSATTR(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR, v)
+#define SET_CB_FSYNR0_ATOF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF, v)
+#define SET_CB_FSYNR0_PTWF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF, v)
+#define SET_CB_FSYNR0_AFR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR, v)
+#define SET_CB_FSYNR0_S1CBNDX(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX, v)
+
+#define GET_CB_FSYNR0_PLVL(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL)
+#define GET_CB_FSYNR0_S1PTWF(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF)
+#define GET_CB_FSYNR0_WNR(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR)
+#define GET_CB_FSYNR0_PNU(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU)
+#define GET_CB_FSYNR0_IND(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND)
+#define GET_CB_FSYNR0_NSSTATE(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE)
+#define GET_CB_FSYNR0_NSATTR(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR)
+#define GET_CB_FSYNR0_ATOF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF)
+#define GET_CB_FSYNR0_PTWF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF)
+#define GET_CB_FSYNR0_AFR(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR)
+#define GET_CB_FSYNR0_S1CBNDX(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX)
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define SET_CB_NMRR_IR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR0, v)
+#define SET_CB_NMRR_IR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR1, v)
+#define SET_CB_NMRR_IR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR2, v)
+#define SET_CB_NMRR_IR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR3, v)
+#define SET_CB_NMRR_IR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR4, v)
+#define SET_CB_NMRR_IR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR5, v)
+#define SET_CB_NMRR_IR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR6, v)
+#define SET_CB_NMRR_IR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR7, v)
+#define SET_CB_NMRR_OR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR0, v)
+#define SET_CB_NMRR_OR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR1, v)
+#define SET_CB_NMRR_OR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR2, v)
+#define SET_CB_NMRR_OR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR3, v)
+#define SET_CB_NMRR_OR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR4, v)
+#define SET_CB_NMRR_OR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR5, v)
+#define SET_CB_NMRR_OR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR6, v)
+#define SET_CB_NMRR_OR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR7, v)
+
+#define GET_CB_NMRR_IR0(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR0)
+#define GET_CB_NMRR_IR1(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR1)
+#define GET_CB_NMRR_IR2(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR2)
+#define GET_CB_NMRR_IR3(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR3)
+#define GET_CB_NMRR_IR4(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR4)
+#define GET_CB_NMRR_IR5(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR5)
+#define GET_CB_NMRR_IR6(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR6)
+#define GET_CB_NMRR_IR7(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR7)
+#define GET_CB_NMRR_OR0(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR0)
+#define GET_CB_NMRR_OR1(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR1)
+#define GET_CB_NMRR_OR2(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR2)
+#define GET_CB_NMRR_OR3(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR3)
+#define GET_CB_NMRR_OR4(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR4)
+#define GET_CB_NMRR_OR5(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR5)
+/* OR6/OR7 getters added for symmetry with the setters above. */
+#define GET_CB_NMRR_OR6(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR6)
+#define GET_CB_NMRR_OR7(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR7)
+
+/*
+ * Physical Address Register: CB_PAR -- holds the result/status of the
+ * CB_ATS1* address translation operations defined above.
+ */
+#define SET_CB_PAR_F(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, F, v)
+#define SET_CB_PAR_SS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, SS, v)
+#define SET_CB_PAR_OUTER(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, OUTER, v)
+#define SET_CB_PAR_INNER(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, INNER, v)
+#define SET_CB_PAR_SH(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, SH, v)
+#define SET_CB_PAR_NS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, NS, v)
+#define SET_CB_PAR_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, NOS, v)
+#define SET_CB_PAR_PA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PA, v)
+#define SET_CB_PAR_TF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TF, v)
+#define SET_CB_PAR_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, AFF, v)
+#define SET_CB_PAR_PF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PF, v)
+#define SET_CB_PAR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF, v)
+#define SET_CB_PAR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF, v)
+#define SET_CB_PAR_ATOT(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, ATOT, v)
+#define SET_CB_PAR_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PLVL, v)
+#define SET_CB_PAR_STAGE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, STAGE, v)
+
+#define GET_CB_PAR_F(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, F)
+#define GET_CB_PAR_SS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, SS)
+#define GET_CB_PAR_OUTER(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, OUTER)
+#define GET_CB_PAR_INNER(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, INNER)
+#define GET_CB_PAR_SH(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, SH)
+#define GET_CB_PAR_NS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, NS)
+#define GET_CB_PAR_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, NOS)
+#define GET_CB_PAR_PA(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PA)
+#define GET_CB_PAR_TF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TF)
+#define GET_CB_PAR_AFF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, AFF)
+#define GET_CB_PAR_PF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PF)
+#define GET_CB_PAR_TLBMCF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF)
+#define GET_CB_PAR_TLBLKF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF)
+#define GET_CB_PAR_ATOT(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, ATOT)
+#define GET_CB_PAR_PLVL(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PLVL)
+#define GET_CB_PAR_STAGE(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, STAGE)
+
+/* Primary Region Remap Register: CB_PRRR */
+#define SET_CB_PRRR_TR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR0, v)
+#define SET_CB_PRRR_TR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR1, v)
+#define SET_CB_PRRR_TR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR2, v)
+#define SET_CB_PRRR_TR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR3, v)
+#define SET_CB_PRRR_TR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR4, v)
+#define SET_CB_PRRR_TR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR5, v)
+#define SET_CB_PRRR_TR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR6, v)
+#define SET_CB_PRRR_TR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR7, v)
+#define SET_CB_PRRR_DS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, DS0, v)
+#define SET_CB_PRRR_DS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, DS1, v)
+#define SET_CB_PRRR_NS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NS0, v)
+#define SET_CB_PRRR_NS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NS1, v)
+#define SET_CB_PRRR_NOS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0, v)
+#define SET_CB_PRRR_NOS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1, v)
+#define SET_CB_PRRR_NOS2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2, v)
+#define SET_CB_PRRR_NOS3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3, v)
+#define SET_CB_PRRR_NOS4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4, v)
+#define SET_CB_PRRR_NOS5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5, v)
+#define SET_CB_PRRR_NOS6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6, v)
+#define SET_CB_PRRR_NOS7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7, v)
+
+#define GET_CB_PRRR_TR0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR0)
+#define GET_CB_PRRR_TR1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR1)
+#define GET_CB_PRRR_TR2(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR2)
+#define GET_CB_PRRR_TR3(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR3)
+#define GET_CB_PRRR_TR4(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR4)
+#define GET_CB_PRRR_TR5(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR5)
+#define GET_CB_PRRR_TR6(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR6)
+#define GET_CB_PRRR_TR7(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR7)
+#define GET_CB_PRRR_DS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, DS0)
+#define GET_CB_PRRR_DS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, DS1)
+#define GET_CB_PRRR_NS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NS0)
+#define GET_CB_PRRR_NS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NS1)
+#define GET_CB_PRRR_NOS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0)
+#define GET_CB_PRRR_NOS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1)
+#define GET_CB_PRRR_NOS2(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2)
+#define GET_CB_PRRR_NOS3(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3)
+#define GET_CB_PRRR_NOS4(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4)
+#define GET_CB_PRRR_NOS5(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5)
+#define GET_CB_PRRR_NOS6(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6)
+#define GET_CB_PRRR_NOS7(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7)
+
+/* Transaction Resume: CB_RESUME */
+#define SET_CB_RESUME_TNR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_RESUME, TNR, v)
+
+#define GET_CB_RESUME_TNR(b, c) GET_CONTEXT_FIELD(b, c, CB_RESUME, TNR)
+
+/*
+ * System Control Register: CB_SCTLR -- per-context translation control.
+ * NOTE(review): M presumably enables translation for the context bank,
+ * CF* fields configure context-fault reporting -- verify field semantics
+ * against the SMMU specification.
+ */
+#define SET_CB_SCTLR_M(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, M, v)
+#define SET_CB_SCTLR_TRE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE, v)
+#define SET_CB_SCTLR_AFE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE, v)
+#define SET_CB_SCTLR_AFFD(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD, v)
+#define SET_CB_SCTLR_E(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, E, v)
+#define SET_CB_SCTLR_CFRE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE, v)
+#define SET_CB_SCTLR_CFIE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE, v)
+#define SET_CB_SCTLR_CFCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG, v)
+#define SET_CB_SCTLR_HUPCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF, v)
+#define SET_CB_SCTLR_WXN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN, v)
+#define SET_CB_SCTLR_UWXN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN, v)
+#define SET_CB_SCTLR_ASIDPNE(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE, v)
+#define SET_CB_SCTLR_TRANSIENTCFG(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG, v)
+#define SET_CB_SCTLR_MEMATTR(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR, v)
+#define SET_CB_SCTLR_MTCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG, v)
+#define SET_CB_SCTLR_SHCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG, v)
+#define SET_CB_SCTLR_RACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG, v)
+#define SET_CB_SCTLR_WACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG, v)
+#define SET_CB_SCTLR_NSCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG, v)
+
+#define GET_CB_SCTLR_M(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, M)
+#define GET_CB_SCTLR_TRE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE)
+#define GET_CB_SCTLR_AFE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE)
+#define GET_CB_SCTLR_AFFD(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD)
+#define GET_CB_SCTLR_E(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, E)
+#define GET_CB_SCTLR_CFRE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE)
+#define GET_CB_SCTLR_CFIE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE)
+#define GET_CB_SCTLR_CFCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG)
+#define GET_CB_SCTLR_HUPCF(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF)
+#define GET_CB_SCTLR_WXN(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN)
+#define GET_CB_SCTLR_UWXN(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN)
+#define GET_CB_SCTLR_ASIDPNE(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE)
+#define GET_CB_SCTLR_TRANSIENTCFG(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG)
+#define GET_CB_SCTLR_MEMATTR(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR)
+#define GET_CB_SCTLR_MTCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG)
+#define GET_CB_SCTLR_SHCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG)
+#define GET_CB_SCTLR_RACFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG)
+#define GET_CB_SCTLR_WACFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG)
+#define GET_CB_SCTLR_NSCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG)
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define SET_CB_TLBIASID_ASID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID, v)
+
+#define GET_CB_TLBIASID_ASID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define SET_CB_TLBIVA_ASID(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID, v)
+#define SET_CB_TLBIVA_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA, v)
+
+#define GET_CB_TLBIVA_ASID(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID)
+#define GET_CB_TLBIVA_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define SET_CB_TLBIVAA_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA, v)
+
+#define GET_CB_TLBIVAA_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define SET_CB_TLBIVAAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA, v)
+
+#define GET_CB_TLBIVAAL_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define SET_CB_TLBIVAL_ASID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID, v)
+#define SET_CB_TLBIVAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA, v)
+
+#define GET_CB_TLBIVAL_ASID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID)
+#define GET_CB_TLBIVAL_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA)
+
+/* TLB Status: CB_TLBSTATUS */
+#define SET_CB_TLBSTATUS_SACTIVE(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE, v)
+
+#define GET_CB_TLBSTATUS_SACTIVE(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+/* These are shared between VMSA and LPAE */
+#define GET_CB_TTBCR_EAE(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE)
+#define SET_CB_TTBCR_EAE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE, v)
+
+#define SET_CB_TTBCR_NSCFG0(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0, v)
+#define SET_CB_TTBCR_NSCFG1(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1, v)
+
+#define GET_CB_TTBCR_NSCFG0(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0)
+#define GET_CB_TTBCR_NSCFG1(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1)
+
+#ifdef CONFIG_IOMMU_LPAE
+
+/* LPAE format */
+
+/* Translation Table Base Register 0: CB_TTBR */
+#define SET_TTBR0(b, c, v) SET_CTX_REG_Q(CB_TTBR0, (b), (c), (v))
+#define SET_TTBR1(b, c, v) SET_CTX_REG_Q(CB_TTBR1, (b), (c), (v))
+
+#define SET_CB_TTBR0_ASID(b, c, v) SET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ASID, v)
+#define SET_CB_TTBR0_ADDR(b, c, v) SET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR, v)
+
+#define GET_CB_TTBR0_ASID(b, c) GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ASID)
+#define GET_CB_TTBR0_ADDR(b, c) GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR)
+#define GET_CB_TTBR0(b, c) GET_CTX_REG_Q(CB_TTBR0, (b), (c))
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define SET_CB_TTBCR_T0SZ(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ, v)
+#define SET_CB_TTBCR_T1SZ(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ, v)
+#define SET_CB_TTBCR_EPD0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD0, v)
+#define SET_CB_TTBCR_EPD1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD1, v)
+#define SET_CB_TTBCR_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN0, v)
+#define SET_CB_TTBCR_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN1, v)
+#define SET_CB_TTBCR_ORGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN0, v)
+#define SET_CB_TTBCR_ORGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN1, v)
+#define SET_CB_TTBCR_NSCFG0(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0, v)
+#define SET_CB_TTBCR_NSCFG1(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1, v)
+
+#define SET_CB_TTBCR_SH0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, SH0, v)
+#define SET_CB_TTBCR_SH1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, SH1, v)
+#define SET_CB_TTBCR_A1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, A1, v)
+
+#define GET_CB_TTBCR_T0SZ(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ)
+#define GET_CB_TTBCR_T1SZ(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ)
+#define GET_CB_TTBCR_EPD0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD0)
+#define GET_CB_TTBCR_EPD1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD1)
+#define GET_CB_TTBCR_IRGN0(b, c, v) GET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN0)
+#define GET_CB_TTBCR_IRGN1(b, c, v) GET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN1)
+#define GET_CB_TTBCR_ORGN0(b, c, v) GET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN0)
+#define GET_CB_TTBCR_ORGN1(b, c, v) GET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN1)
+
+#define SET_CB_MAIR0(b, c, v) SET_CTX_REG(CB_MAIR0, (b), (c), (v))
+#define SET_CB_MAIR1(b, c, v) SET_CTX_REG(CB_MAIR1, (b), (c), (v))
+
+#define GET_CB_MAIR0(b, c) GET_CTX_REG(CB_MAIR0, (b), (c))
+#define GET_CB_MAIR1(b, c) GET_CTX_REG(CB_MAIR1, (b), (c))
+#else
+#define SET_TTBR0(b, c, v) SET_CTX_REG(CB_TTBR0, (b), (c), (v))
+#define SET_TTBR1(b, c, v) SET_CTX_REG(CB_TTBR1, (b), (c), (v))
+
+#define SET_CB_TTBCR_PD0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD0, v)
+#define SET_CB_TTBCR_PD1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD1, v)
+
+#define SET_CB_TTBR0_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1, v)
+#define SET_CB_TTBR0_S(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, S, v)
+#define SET_CB_TTBR0_RGN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN, v)
+#define SET_CB_TTBR0_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS, v)
+#define SET_CB_TTBR0_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0, v)
+#define SET_CB_TTBR0_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR, v)
+
+#define GET_CB_TTBR0_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1)
+#define GET_CB_TTBR0_S(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, S)
+#define GET_CB_TTBR0_RGN(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN)
+#define GET_CB_TTBR0_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS)
+#define GET_CB_TTBR0_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0)
+#define GET_CB_TTBR0_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR)
+#endif
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+#define SET_CB_TTBR1_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1, v)
+#define SET_CB_TTBR1_0S(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, S, v)
+#define SET_CB_TTBR1_RGN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN, v)
+#define SET_CB_TTBR1_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS, v)
+#define SET_CB_TTBR1_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0, v)
+#define SET_CB_TTBR1_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR, v)
+
+#define GET_CB_TTBR1_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1)
+#define GET_CB_TTBR1_0S(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, S)
+#define GET_CB_TTBR1_RGN(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN)
+#define GET_CB_TTBR1_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS)
+#define GET_CB_TTBR1_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0)
+#define GET_CB_TTBR1_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR)
+
+/* Global Register Space 0 */
+#define CR0 (0x0000)
+#define SCR1 (0x0004)
+#define CR2 (0x0008)
+#define ACR (0x0010)
+#define IDR0 (0x0020)
+#define IDR1 (0x0024)
+#define IDR2 (0x0028)
+#define IDR7 (0x003C)
+#define GFAR (0x0040)
+#define GFSR (0x0048)
+#define GFSRRESTORE (0x004C)
+#define GFSYNR0 (0x0050)
+#define GFSYNR1 (0x0054)
+#define GFSYNR2 (0x0058)
+#define TLBIVMID (0x0064)
+#define TLBIALLNSNH (0x0068)
+#define TLBIALLH (0x006C)
+#define TLBGSYNC (0x0070)
+#define TLBGSTATUS (0x0074)
+#define TLBIVAH (0x0078)
+#define GATS1UR (0x0100)
+#define GATS1UW (0x0108)
+#define GATS1PR (0x0110)
+#define GATS1PW (0x0118)
+#define GATS12UR (0x0120)
+#define GATS12UW (0x0128)
+#define GATS12PR (0x0130)
+#define GATS12PW (0x0138)
+#define GPAR (0x0180)
+#define GATSR (0x0188)
+#define NSCR0 (0x0400)
+#define NSCR2 (0x0408)
+#define NSACR (0x0410)
+#define NSGFAR (0x0440)
+#define NSGFSRRESTORE (0x044C)
+#define SMR (0x0800)
+#define S2CR (0x0C00)
+
+/* SMMU_LOCAL */
+#define SMMU_INTR_SEL_NS (0x2000)
+
+/* Global Register Space 1 */
+#define CBAR (0x1000)
+#define CBFRSYNRA (0x1400)
+
+/* Implementation defined Register Space */
+#define MICRO_MMU_CTRL (0x2000)
+#define PREDICTIONDIS0 (0x204C)
+#define PREDICTIONDIS1 (0x2050)
+#define S1L1BFBLP0 (0x215C)
+
+/* Performance Monitoring Register Space */
+#define PMEVCNTR_N (0x3000)
+#define PMEVTYPER_N (0x3400)
+#define PMCGCR_N (0x3800)
+#define PMCGSMR_N (0x3A00)
+#define PMCNTENSET_N (0x3C00)
+#define PMCNTENCLR_N (0x3C20)
+#define PMINTENSET_N (0x3C40)
+#define PMINTENCLR_N (0x3C60)
+#define PMOVSCLR_N (0x3C80)
+#define PMOVSSET_N (0x3CC0)
+#define PMCFGR (0x3E00)
+#define PMCR (0x3E04)
+#define PMCEID0 (0x3E20)
+#define PMCEID1 (0x3E24)
+#define PMAUTHSTATUS (0x3FB8)
+#define PMDEVTYPE (0x3FCC)
+
+/* Secure Status Determination Address Space */
+#define SSDR_N (0x4000)
+
+/* Stage 1 Context Bank Format */
+#define CB_SCTLR (0x000)
+#define CB_ACTLR (0x004)
+#define CB_RESUME (0x008)
+#define CB_TTBR0 (0x020)
+#define CB_TTBR1 (0x028)
+#define CB_TTBCR (0x030)
+#define CB_CONTEXTIDR (0x034)
+#define CB_PRRR (0x038)
+#define CB_MAIR0 (0x038)
+#define CB_NMRR (0x03C)
+#define CB_MAIR1 (0x03C)
+#define CB_PAR (0x050)
+#define CB_FSR (0x058)
+#define CB_FSRRESTORE (0x05C)
+#define CB_FAR (0x060)
+#define CB_FSYNR0 (0x068)
+#define CB_FSYNR1 (0x06C)
+#define CB_TLBIVA (0x600)
+#define CB_TLBIVAA (0x608)
+#define CB_TLBIASID (0x610)
+#define CB_TLBIALL (0x618)
+#define CB_TLBIVAL (0x620)
+#define CB_TLBIVAAL (0x628)
+#define CB_TLBSYNC (0x7F0)
+#define CB_TLBSTATUS (0x7F4)
+#define CB_ATS1PR (0x800)
+#define CB_ATS1PW (0x808)
+#define CB_ATS1UR (0x810)
+#define CB_ATS1UW (0x818)
+#define CB_ATSR (0x8F0)
+#define CB_PMXEVCNTR_N (0xE00)
+#define CB_PMXEVTYPER_N (0xE80)
+#define CB_PMCFGR (0xF00)
+#define CB_PMCR (0xF04)
+#define CB_PMCEID0 (0xF20)
+#define CB_PMCEID1 (0xF24)
+#define CB_PMCNTENSET (0xF40)
+#define CB_PMCNTENCLR (0xF44)
+#define CB_PMINTENSET (0xF48)
+#define CB_PMINTENCLR (0xF4C)
+#define CB_PMOVSCLR (0xF50)
+#define CB_PMOVSSET (0xF58)
+#define CB_PMAUTHSTATUS (0xFB8)
+
+/* Global Register Fields */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG (CR0_NSCFG_MASK << CR0_NSCFG_SHIFT)
+#define CR0_WACFG (CR0_WACFG_MASK << CR0_WACFG_SHIFT)
+#define CR0_RACFG (CR0_RACFG_MASK << CR0_RACFG_SHIFT)
+#define CR0_SHCFG (CR0_SHCFG_MASK << CR0_SHCFG_SHIFT)
+#define CR0_SMCFCFG (CR0_SMCFCFG_MASK << CR0_SMCFCFG_SHIFT)
+#define CR0_MTCFG (CR0_MTCFG_MASK << CR0_MTCFG_SHIFT)
+#define CR0_MEMATTR (CR0_MEMATTR_MASK << CR0_MEMATTR_SHIFT)
+#define CR0_BSU (CR0_BSU_MASK << CR0_BSU_SHIFT)
+#define CR0_FB (CR0_FB_MASK << CR0_FB_SHIFT)
+#define CR0_PTM (CR0_PTM_MASK << CR0_PTM_SHIFT)
+#define CR0_VMIDPNE (CR0_VMIDPNE_MASK << CR0_VMIDPNE_SHIFT)
+#define CR0_USFCFG (CR0_USFCFG_MASK << CR0_USFCFG_SHIFT)
+#define CR0_GSE (CR0_GSE_MASK << CR0_GSE_SHIFT)
+#define CR0_STALLD (CR0_STALLD_MASK << CR0_STALLD_SHIFT)
+#define CR0_TRANSIENTCFG (CR0_TRANSIENTCFG_MASK << CR0_TRANSIENTCFG_SHIFT)
+#define CR0_GCFGFIE (CR0_GCFGFIE_MASK << CR0_GCFGFIE_SHIFT)
+#define CR0_GCFGFRE (CR0_GCFGFRE_MASK << CR0_GCFGFRE_SHIFT)
+#define CR0_GFIE (CR0_GFIE_MASK << CR0_GFIE_SHIFT)
+#define CR0_GFRE (CR0_GFRE_MASK << CR0_GFRE_SHIFT)
+#define CR0_CLIENTPD (CR0_CLIENTPD_MASK << CR0_CLIENTPD_SHIFT)
+
+/* Configuration Register: CR2 */
+#define CR2_BPVMID (CR2_BPVMID_MASK << CR2_BPVMID_SHIFT)
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR (GATS1PR_ADDR_MASK << GATS1PR_ADDR_SHIFT)
+#define GATS1PR_NDX (GATS1PR_NDX_MASK << GATS1PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR (GATS1PW_ADDR_MASK << GATS1PW_ADDR_SHIFT)
+#define GATS1PW_NDX (GATS1PW_NDX_MASK << GATS1PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR (GATS1UR_ADDR_MASK << GATS1UR_ADDR_SHIFT)
+#define GATS1UR_NDX (GATS1UR_NDX_MASK << GATS1UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR (GATS1UW_ADDR_MASK << GATS1UW_ADDR_SHIFT)
+#define GATS1UW_NDX (GATS1UW_NDX_MASK << GATS1UW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR (GATS12PR_ADDR_MASK << GATS12PR_ADDR_SHIFT)
+#define GATS12PR_NDX (GATS12PR_NDX_MASK << GATS12PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR (GATS12PW_ADDR_MASK << GATS12PW_ADDR_SHIFT)
+#define GATS12PW_NDX (GATS12PW_NDX_MASK << GATS12PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR (GATS12UR_ADDR_MASK << GATS12UR_ADDR_SHIFT)
+#define GATS12UR_NDX (GATS12UR_NDX_MASK << GATS12UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR (GATS12UW_ADDR_MASK << GATS12UW_ADDR_SHIFT)
+#define GATS12UW_NDX (GATS12UW_NDX_MASK << GATS12UW_NDX_SHIFT)
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE (GATSR_ACTIVE_MASK << GATSR_ACTIVE_SHIFT)
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR (GFAR_FADDR_MASK << GFAR_FADDR_SHIFT)
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF (GFSR_ICF_MASK << GFSR_ICF_SHIFT)
+#define GFSR_USF (GFSR_USF_MASK << GFSR_USF_SHIFT)
+#define GFSR_SMCF (GFSR_SMCF_MASK << GFSR_SMCF_SHIFT)
+#define GFSR_UCBF (GFSR_UCBF_MASK << GFSR_UCBF_SHIFT)
+#define GFSR_UCIF (GFSR_UCIF_MASK << GFSR_UCIF_SHIFT)
+#define GFSR_CAF (GFSR_CAF_MASK << GFSR_CAF_SHIFT)
+#define GFSR_EF (GFSR_EF_MASK << GFSR_EF_SHIFT)
+#define GFSR_PF (GFSR_PF_MASK << GFSR_PF_SHIFT)
+#define GFSR_MULTI (GFSR_MULTI_MASK << GFSR_MULTI_SHIFT)
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED (GFSYNR0_NESTED_MASK << GFSYNR0_NESTED_SHIFT)
+#define GFSYNR0_WNR (GFSYNR0_WNR_MASK << GFSYNR0_WNR_SHIFT)
+#define GFSYNR0_PNU (GFSYNR0_PNU_MASK << GFSYNR0_PNU_SHIFT)
+#define GFSYNR0_IND (GFSYNR0_IND_MASK << GFSYNR0_IND_SHIFT)
+#define GFSYNR0_NSSTATE (GFSYNR0_NSSTATE_MASK << GFSYNR0_NSSTATE_SHIFT)
+#define GFSYNR0_NSATTR (GFSYNR0_NSATTR_MASK << GFSYNR0_NSATTR_SHIFT)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID (GFSYNR1_SID_MASK << GFSYNR1_SID_SHIFT)
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F (GPAR_F_MASK << GPAR_F_SHIFT)
+#define GPAR_SS (GPAR_SS_MASK << GPAR_SS_SHIFT)
+#define GPAR_OUTER (GPAR_OUTER_MASK << GPAR_OUTER_SHIFT)
+#define GPAR_INNER (GPAR_INNER_MASK << GPAR_INNER_SHIFT)
+#define GPAR_SH (GPAR_SH_MASK << GPAR_SH_SHIFT)
+#define GPAR_NS (GPAR_NS_MASK << GPAR_NS_SHIFT)
+#define GPAR_NOS (GPAR_NOS_MASK << GPAR_NOS_SHIFT)
+#define GPAR_PA (GPAR_PA_MASK << GPAR_PA_SHIFT)
+#define GPAR_TF (GPAR_TF_MASK << GPAR_TF_SHIFT)
+#define GPAR_AFF (GPAR_AFF_MASK << GPAR_AFF_SHIFT)
+#define GPAR_PF (GPAR_PF_MASK << GPAR_PF_SHIFT)
+#define GPAR_EF (GPAR_EF_MASK << GPAR_EF_SHIFT)
+#define GPAR_TLCMCF (GPAR_TLBMCF_MASK << GPAR_TLCMCF_SHIFT)
+#define GPAR_TLBLKF (GPAR_TLBLKF_MASK << GPAR_TLBLKF_SHIFT)
+#define GPAR_UCBF (GPAR_UCBF_MASK << GPAR_UCBF_SHIFT) /* was GFAR_UCBF_SHIFT */
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG (IDR0_NUMSMRG_MASK << IDR0_NUMSMRG_SHIFT) /* was NUMSMGR */
+#define IDR0_NUMSIDB (IDR0_NUMSIDB_MASK << IDR0_NUMSIDB_SHIFT)
+#define IDR0_BTM (IDR0_BTM_MASK << IDR0_BTM_SHIFT)
+#define IDR0_CTTW (IDR0_CTTW_MASK << IDR0_CTTW_SHIFT)
+#define IDR0_NUMIRPT (IDR0_NUMIPRT_MASK << IDR0_NUMIRPT_SHIFT)
+#define IDR0_PTFS (IDR0_PTFS_MASK << IDR0_PTFS_SHIFT)
+#define IDR0_SMS (IDR0_SMS_MASK << IDR0_SMS_SHIFT)
+#define IDR0_NTS (IDR0_NTS_MASK << IDR0_NTS_SHIFT)
+#define IDR0_S2TS (IDR0_S2TS_MASK << IDR0_S2TS_SHIFT)
+#define IDR0_S1TS (IDR0_S1TS_MASK << IDR0_S1TS_SHIFT)
+#define IDR0_SES (IDR0_SES_MASK << IDR0_SES_SHIFT)
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB (IDR1_NUMCB_MASK << IDR1_NUMCB_SHIFT)
+#define IDR1_NUMSSDNDXB (IDR1_NUMSSDNDXB_MASK << IDR1_NUMSSDNDXB_SHIFT)
+#define IDR1_SSDTP (IDR1_SSDTP_MASK << IDR1_SSDTP_SHIFT)
+#define IDR1_SMCD (IDR1_SMCD_MASK << IDR1_SMCD_SHIFT)
+#define IDR1_NUMS2CB (IDR1_NUMS2CB_MASK << IDR1_NUMS2CB_SHIFT)
+#define IDR1_NUMPAGENDXB (IDR1_NUMPAGENDXB_MASK << IDR1_NUMPAGENDXB_SHIFT)
+#define IDR1_PAGESIZE (IDR1_PAGESIZE_MASK << IDR1_PAGESIZE_SHIFT)
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS (IDR2_IAS_MASK << IDR2_IAS_SHIFT)
+#define IDR1_OAS (IDR2_OAS_MASK << IDR2_OAS_SHIFT)
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR (IDR7_MINOR_MASK << IDR7_MINOR_SHIFT)
+#define IDR7_MAJOR (IDR7_MAJOR_MASK << IDR7_MAJOR_SHIFT)
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX (S2CR_CBNDX_MASK << S2CR_CBNDX_SHIFT) /* was S2cR_ typo */
+#define S2CR_SHCFG (S2CR_SHCFG_MASK << S2CR_SHCFG_SHIFT) /* was s2CR_ typo */
+#define S2CR_MTCFG (S2CR_MTCFG_MASK << S2CR_MTCFG_SHIFT)
+#define S2CR_MEMATTR (S2CR_MEMATTR_MASK << S2CR_MEMATTR_SHIFT)
+#define S2CR_TYPE (S2CR_TYPE_MASK << S2CR_TYPE_SHIFT)
+#define S2CR_NSCFG (S2CR_NSCFG_MASK << S2CR_NSCFG_SHIFT)
+#define S2CR_RACFG (S2CR_RACFG_MASK << S2CR_RACFG_SHIFT)
+#define S2CR_WACFG (S2CR_WACFG_MASK << S2CR_WACFG_SHIFT)
+#define S2CR_PRIVCFG (S2CR_PRIVCFG_MASK << S2CR_PRIVCFG_SHIFT)
+#define S2CR_INSTCFG (S2CR_INSTCFG_MASK << S2CR_INSTCFG_SHIFT)
+#define S2CR_TRANSIENTCFG (S2CR_TRANSIENTCFG_MASK << S2CR_TRANSIENTCFG_SHIFT)
+#define S2CR_VMID (S2CR_VMID_MASK << S2CR_VMID_SHIFT)
+#define S2CR_BSU (S2CR_BSU_MASK << S2CR_BSU_SHIFT)
+#define S2CR_FB (S2CR_FB_MASK << S2CR_FB_SHIFT)
+
+/* Stream Match Register: SMR */
+#define SMR_ID (SMR_ID_MASK << SMR_ID_SHIFT)
+#define SMR_MASK (SMR_MASK_MASK << SMR_MASK_SHIFT)
+#define SMR_VALID (SMR_VALID_MASK << SMR_VALID_SHIFT)
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE (TLBGSTATUS_GSACTIVE_MASK << \
+ TLBGSTATUS_GSACTIVE_SHIFT)
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR (TLBIVAH_ADDR_MASK << TLBIVAH_ADDR_SHIFT)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID (TLBIVMID_VMID_MASK << TLBIVMID_VMID_SHIFT)
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID (CBAR_VMID_MASK << CBAR_VMID_SHIFT)
+#define CBAR_CBNDX (CBAR_CBNDX_MASK << CBAR_CBNDX_SHIFT)
+#define CBAR_BPSHCFG (CBAR_BPSHCFG_MASK << CBAR_BPSHCFG_SHIFT)
+#define CBAR_HYPC (CBAR_HYPC_MASK << CBAR_HYPC_SHIFT)
+#define CBAR_FB (CBAR_FB_MASK << CBAR_FB_SHIFT)
+#define CBAR_MEMATTR (CBAR_MEMATTR_MASK << CBAR_MEMATTR_SHIFT)
+#define CBAR_TYPE (CBAR_TYPE_MASK << CBAR_TYPE_SHIFT)
+#define CBAR_BSU (CBAR_BSU_MASK << CBAR_BSU_SHIFT)
+#define CBAR_RACFG (CBAR_RACFG_MASK << CBAR_RACFG_SHIFT)
+#define CBAR_WACFG (CBAR_WACFG_MASK << CBAR_WACFG_SHIFT)
+#define CBAR_IRPTNDX (CBAR_IRPTNDX_MASK << CBAR_IRPTNDX_SHIFT)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID (CBFRSYNRA_SID_MASK << CBFRSYNRA_SID_SHIFT)
+
+/* Performance Monitoring Register Fields */
+
+/* Stage 1 Context Bank Format Fields */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY \
+ (CB_ACTLR_REQPRIORITY_MASK << CB_ACTLR_REQPRIORITY_SHIFT)
+#define CB_ACTLR_REQPRIORITYCFG \
+ (CB_ACTLR_REQPRIORITYCFG_MASK << CB_ACTLR_REQPRIORITYCFG_SHIFT)
+#define CB_ACTLR_PRIVCFG (CB_ACTLR_PRIVCFG_MASK << CB_ACTLR_PRIVCFG_SHIFT)
+#define CB_ACTLR_BPRCOSH (CB_ACTLR_BPRCOSH_MASK << CB_ACTLR_BPRCOSH_SHIFT)
+#define CB_ACTLR_BPRCISH (CB_ACTLR_BPRCISH_MASK << CB_ACTLR_BPRCISH_SHIFT)
+#define CB_ACTLR_BPRCNSH (CB_ACTLR_BPRCNSH_MASK << CB_ACTLR_BPRCNSH_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR (CB_ATS1PR_ADDR_MASK << CB_ATS1PR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR (CB_ATS1PW_ADDR_MASK << CB_ATS1PW_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR (CB_ATS1UR_ADDR_MASK << CB_ATS1UR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR (CB_ATS1UW_ADDR_MASK << CB_ATS1UW_ADDR_SHIFT)
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE (CB_ATSR_ACTIVE_MASK << CB_ATSR_ACTIVE_SHIFT)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID (CB_CONTEXTIDR_ASID_MASK << \
+ CB_CONTEXTIDR_ASID_SHIFT)
+#define CB_CONTEXTIDR_PROCID (CB_CONTEXTIDR_PROCID_MASK << \
+ CB_CONTEXTIDR_PROCID_SHIFT)
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR (CB_FAR_FADDR_MASK << CB_FAR_FADDR_SHIFT)
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF (CB_FSR_TF_MASK << CB_FSR_TF_SHIFT)
+#define CB_FSR_AFF (CB_FSR_AFF_MASK << CB_FSR_AFF_SHIFT)
+#define CB_FSR_PF (CB_FSR_PF_MASK << CB_FSR_PF_SHIFT)
+#define CB_FSR_EF (CB_FSR_EF_MASK << CB_FSR_EF_SHIFT)
+#define CB_FSR_TLBMCF (CB_FSR_TLBMCF_MASK << CB_FSR_TLBMCF_SHIFT)
+#define CB_FSR_TLBLKF (CB_FSR_TLBLKF_MASK << CB_FSR_TLBLKF_SHIFT)
+#define CB_FSR_SS (CB_FSR_SS_MASK << CB_FSR_SS_SHIFT)
+#define CB_FSR_MULTI (CB_FSR_MULTI_MASK << CB_FSR_MULTI_SHIFT)
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL (CB_FSYNR0_PLVL_MASK << CB_FSYNR0_PLVL_SHIFT)
+#define CB_FSYNR0_S1PTWF (CB_FSYNR0_S1PTWF_MASK << CB_FSYNR0_S1PTWF_SHIFT)
+#define CB_FSYNR0_WNR (CB_FSYNR0_WNR_MASK << CB_FSYNR0_WNR_SHIFT)
+#define CB_FSYNR0_PNU (CB_FSYNR0_PNU_MASK << CB_FSYNR0_PNU_SHIFT)
+#define CB_FSYNR0_IND (CB_FSYNR0_IND_MASK << CB_FSYNR0_IND_SHIFT)
+#define CB_FSYNR0_NSSTATE (CB_FSYNR0_NSSTATE_MASK << CB_FSYNR0_NSSTATE_SHIFT)
+#define CB_FSYNR0_NSATTR (CB_FSYNR0_NSATTR_MASK << CB_FSYNR0_NSATTR_SHIFT)
+#define CB_FSYNR0_ATOF (CB_FSYNR0_ATOF_MASK << CB_FSYNR0_ATOF_SHIFT)
+#define CB_FSYNR0_PTWF (CB_FSYNR0_PTWF_MASK << CB_FSYNR0_PTWF_SHIFT)
+#define CB_FSYNR0_AFR (CB_FSYNR0_AFR_MASK << CB_FSYNR0_AFR_SHIFT)
+#define CB_FSYNR0_S1CBNDX (CB_FSYNR0_S1CBNDX_MASK << CB_FSYNR0_S1CBNDX_SHIFT)
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0 (CB_NMRR_IR0_MASK << CB_NMRR_IR0_SHIFT)
+#define CB_NMRR_IR1 (CB_NMRR_IR1_MASK << CB_NMRR_IR1_SHIFT)
+#define CB_NMRR_IR2 (CB_NMRR_IR2_MASK << CB_NMRR_IR2_SHIFT)
+#define CB_NMRR_IR3 (CB_NMRR_IR3_MASK << CB_NMRR_IR3_SHIFT)
+#define CB_NMRR_IR4 (CB_NMRR_IR4_MASK << CB_NMRR_IR4_SHIFT)
+#define CB_NMRR_IR5 (CB_NMRR_IR5_MASK << CB_NMRR_IR5_SHIFT)
+#define CB_NMRR_IR6 (CB_NMRR_IR6_MASK << CB_NMRR_IR6_SHIFT)
+#define CB_NMRR_IR7 (CB_NMRR_IR7_MASK << CB_NMRR_IR7_SHIFT)
+#define CB_NMRR_OR0 (CB_NMRR_OR0_MASK << CB_NMRR_OR0_SHIFT)
+#define CB_NMRR_OR1 (CB_NMRR_OR1_MASK << CB_NMRR_OR1_SHIFT)
+#define CB_NMRR_OR2 (CB_NMRR_OR2_MASK << CB_NMRR_OR2_SHIFT)
+#define CB_NMRR_OR3 (CB_NMRR_OR3_MASK << CB_NMRR_OR3_SHIFT)
+#define CB_NMRR_OR4 (CB_NMRR_OR4_MASK << CB_NMRR_OR4_SHIFT)
+#define CB_NMRR_OR5 (CB_NMRR_OR5_MASK << CB_NMRR_OR5_SHIFT)
+#define CB_NMRR_OR6 (CB_NMRR_OR6_MASK << CB_NMRR_OR6_SHIFT)
+#define CB_NMRR_OR7 (CB_NMRR_OR7_MASK << CB_NMRR_OR7_SHIFT)
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F (CB_PAR_F_MASK << CB_PAR_F_SHIFT)
+#define CB_PAR_SS (CB_PAR_SS_MASK << CB_PAR_SS_SHIFT)
+#define CB_PAR_OUTER (CB_PAR_OUTER_MASK << CB_PAR_OUTER_SHIFT)
+#define CB_PAR_INNER (CB_PAR_INNER_MASK << CB_PAR_INNER_SHIFT)
+#define CB_PAR_SH (CB_PAR_SH_MASK << CB_PAR_SH_SHIFT)
+#define CB_PAR_NS (CB_PAR_NS_MASK << CB_PAR_NS_SHIFT)
+#define CB_PAR_NOS (CB_PAR_NOS_MASK << CB_PAR_NOS_SHIFT)
+#define CB_PAR_PA (CB_PAR_PA_MASK << CB_PAR_PA_SHIFT)
+#define CB_PAR_TF (CB_PAR_TF_MASK << CB_PAR_TF_SHIFT)
+#define CB_PAR_AFF (CB_PAR_AFF_MASK << CB_PAR_AFF_SHIFT)
+#define CB_PAR_PF (CB_PAR_PF_MASK << CB_PAR_PF_SHIFT)
+#define CB_PAR_EF (CB_PAR_EF_MASK << CB_PAR_EF_SHIFT)
+#define CB_PAR_TLBMCF (CB_PAR_TLBMCF_MASK << CB_PAR_TLBMCF_SHIFT)
+#define CB_PAR_TLBLKF (CB_PAR_TLBLKF_MASK << CB_PAR_TLBLKF_SHIFT)
+#define CB_PAR_ATOT (CB_PAR_ATOT_MASK << CB_PAR_ATOT_SHIFT)
+#define CB_PAR_PLVL (CB_PAR_PLVL_MASK << CB_PAR_PLVL_SHIFT)
+#define CB_PAR_STAGE (CB_PAR_STAGE_MASK << CB_PAR_STAGE_SHIFT)
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0 (CB_PRRR_TR0_MASK << CB_PRRR_TR0_SHIFT)
+#define CB_PRRR_TR1 (CB_PRRR_TR1_MASK << CB_PRRR_TR1_SHIFT)
+#define CB_PRRR_TR2 (CB_PRRR_TR2_MASK << CB_PRRR_TR2_SHIFT)
+#define CB_PRRR_TR3 (CB_PRRR_TR3_MASK << CB_PRRR_TR3_SHIFT)
+#define CB_PRRR_TR4 (CB_PRRR_TR4_MASK << CB_PRRR_TR4_SHIFT)
+#define CB_PRRR_TR5 (CB_PRRR_TR5_MASK << CB_PRRR_TR5_SHIFT)
+#define CB_PRRR_TR6 (CB_PRRR_TR6_MASK << CB_PRRR_TR6_SHIFT)
+#define CB_PRRR_TR7 (CB_PRRR_TR7_MASK << CB_PRRR_TR7_SHIFT)
+#define CB_PRRR_DS0 (CB_PRRR_DS0_MASK << CB_PRRR_DS0_SHIFT)
+#define CB_PRRR_DS1 (CB_PRRR_DS1_MASK << CB_PRRR_DS1_SHIFT)
+#define CB_PRRR_NS0 (CB_PRRR_NS0_MASK << CB_PRRR_NS0_SHIFT)
+#define CB_PRRR_NS1 (CB_PRRR_NS1_MASK << CB_PRRR_NS1_SHIFT)
+#define CB_PRRR_NOS0 (CB_PRRR_NOS0_MASK << CB_PRRR_NOS0_SHIFT)
+#define CB_PRRR_NOS1 (CB_PRRR_NOS1_MASK << CB_PRRR_NOS1_SHIFT)
+#define CB_PRRR_NOS2 (CB_PRRR_NOS2_MASK << CB_PRRR_NOS2_SHIFT)
+#define CB_PRRR_NOS3 (CB_PRRR_NOS3_MASK << CB_PRRR_NOS3_SHIFT)
+#define CB_PRRR_NOS4 (CB_PRRR_NOS4_MASK << CB_PRRR_NOS4_SHIFT)
+#define CB_PRRR_NOS5 (CB_PRRR_NOS5_MASK << CB_PRRR_NOS5_SHIFT)
+#define CB_PRRR_NOS6 (CB_PRRR_NOS6_MASK << CB_PRRR_NOS6_SHIFT)
+#define CB_PRRR_NOS7 (CB_PRRR_NOS7_MASK << CB_PRRR_NOS7_SHIFT)
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR (CB_RESUME_TNR_MASK << CB_RESUME_TNR_SHIFT)
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M (CB_SCTLR_M_MASK << CB_SCTLR_M_SHIFT)
+#define CB_SCTLR_TRE (CB_SCTLR_TRE_MASK << CB_SCTLR_TRE_SHIFT)
+#define CB_SCTLR_AFE (CB_SCTLR_AFE_MASK << CB_SCTLR_AFE_SHIFT)
+#define CB_SCTLR_AFFD (CB_SCTLR_AFFD_MASK << CB_SCTLR_AFFD_SHIFT)
+#define CB_SCTLR_E (CB_SCTLR_E_MASK << CB_SCTLR_E_SHIFT)
+#define CB_SCTLR_CFRE (CB_SCTLR_CFRE_MASK << CB_SCTLR_CFRE_SHIFT)
+#define CB_SCTLR_CFIE (CB_SCTLR_CFIE_MASK << CB_SCTLR_CFIE_SHIFT)
+#define CB_SCTLR_CFCFG (CB_SCTLR_CFCFG_MASK << CB_SCTLR_CFCFG_SHIFT)
+#define CB_SCTLR_HUPCF (CB_SCTLR_HUPCF_MASK << CB_SCTLR_HUPCF_SHIFT)
+#define CB_SCTLR_WXN (CB_SCTLR_WXN_MASK << CB_SCTLR_WXN_SHIFT)
+#define CB_SCTLR_UWXN (CB_SCTLR_UWXN_MASK << CB_SCTLR_UWXN_SHIFT)
+#define CB_SCTLR_ASIDPNE (CB_SCTLR_ASIDPNE_MASK << CB_SCTLR_ASIDPNE_SHIFT)
+#define CB_SCTLR_TRANSIENTCFG (CB_SCTLR_TRANSIENTCFG_MASK << \
+ CB_SCTLR_TRANSIENTCFG_SHIFT)
+#define CB_SCTLR_MEMATTR (CB_SCTLR_MEMATTR_MASK << CB_SCTLR_MEMATTR_SHIFT)
+#define CB_SCTLR_MTCFG (CB_SCTLR_MTCFG_MASK << CB_SCTLR_MTCFG_SHIFT)
+#define CB_SCTLR_SHCFG (CB_SCTLR_SHCFG_MASK << CB_SCTLR_SHCFG_SHIFT)
+#define CB_SCTLR_RACFG (CB_SCTLR_RACFG_MASK << CB_SCTLR_RACFG_SHIFT)
+#define CB_SCTLR_WACFG (CB_SCTLR_WACFG_MASK << CB_SCTLR_WACFG_SHIFT)
+#define CB_SCTLR_NSCFG (CB_SCTLR_NSCFG_MASK << CB_SCTLR_NSCFG_SHIFT)
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID (CB_TLBIASID_ASID_MASK << CB_TLBIASID_ASID_SHIFT)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID (CB_TLBIVA_ASID_MASK << CB_TLBIVA_ASID_SHIFT)
+#define CB_TLBIVA_VA (CB_TLBIVA_VA_MASK << CB_TLBIVA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA (CB_TLBIVAA_VA_MASK << CB_TLBIVAA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA (CB_TLBIVAAL_VA_MASK << CB_TLBIVAAL_VA_SHIFT)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID (CB_TLBIVAL_ASID_MASK << CB_TLBIVAL_ASID_SHIFT)
+#define CB_TLBIVAL_VA (CB_TLBIVAL_VA_MASK << CB_TLBIVAL_VA_SHIFT)
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE (CB_TLBSTATUS_SACTIVE_MASK << \
+ CB_TLBSTATUS_SACTIVE_SHIFT)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_EAE (CB_TTBCR_EAE_MASK << CB_TTBCR_EAE_SHIFT)
+
+#define CB_TTBR0_ADDR (CB_TTBR0_ADDR_MASK << CB_TTBR0_ADDR_SHIFT)
+
+#ifdef CONFIG_IOMMU_LPAE
+/* Translation Table Base Register: CB_TTBR */
+#define CB_TTBR0_ASID (CB_TTBR0_ASID_MASK << CB_TTBR0_ASID_SHIFT)
+#define CB_TTBR1_ASID (CB_TTBR1_ASID_MASK << CB_TTBR1_ASID_SHIFT)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ (CB_TTBCR_T0SZ_MASK << CB_TTBCR_T0SZ_SHIFT)
+#define CB_TTBCR_T1SZ (CB_TTBCR_T1SZ_MASK << CB_TTBCR_T1SZ_SHIFT)
+#define CB_TTBCR_EPD0 (CB_TTBCR_EPD0_MASK << CB_TTBCR_EPD0_SHIFT)
+#define CB_TTBCR_EPD1 (CB_TTBCR_EPD1_MASK << CB_TTBCR_EPD1_SHIFT)
+#define CB_TTBCR_IRGN0 (CB_TTBCR_IRGN0_MASK << CB_TTBCR_IRGN0_SHIFT)
+#define CB_TTBCR_IRGN1 (CB_TTBCR_IRGN1_MASK << CB_TTBCR_IRGN1_SHIFT)
+#define CB_TTBCR_ORGN0 (CB_TTBCR_ORGN0_MASK << CB_TTBCR_ORGN0_SHIFT)
+#define CB_TTBCR_ORGN1 (CB_TTBCR_ORGN1_MASK << CB_TTBCR_ORGN1_SHIFT)
+#define CB_TTBCR_NSCFG0 (CB_TTBCR_NSCFG0_MASK << CB_TTBCR_NSCFG0_SHIFT)
+#define CB_TTBCR_NSCFG1 (CB_TTBCR_NSCFG1_MASK << CB_TTBCR_NSCFG1_SHIFT)
+#define CB_TTBCR_SH0 (CB_TTBCR_SH0_MASK << CB_TTBCR_SH0_SHIFT)
+#define CB_TTBCR_SH1 (CB_TTBCR_SH1_MASK << CB_TTBCR_SH1_SHIFT)
+#define CB_TTBCR_A1 (CB_TTBCR_A1_MASK << CB_TTBCR_A1_SHIFT)
+
+#else
+
+/* Translation Table Base Register 0: CB_TTBR0 */
+#define CB_TTBR0_IRGN1 (CB_TTBR0_IRGN1_MASK << CB_TTBR0_IRGN1_SHIFT)
+#define CB_TTBR0_S (CB_TTBR0_S_MASK << CB_TTBR0_S_SHIFT)
+#define CB_TTBR0_RGN (CB_TTBR0_RGN_MASK << CB_TTBR0_RGN_SHIFT)
+#define CB_TTBR0_NOS (CB_TTBR0_NOS_MASK << CB_TTBR0_NOS_SHIFT)
+#define CB_TTBR0_IRGN0 (CB_TTBR0_IRGN0_MASK << CB_TTBR0_IRGN0_SHIFT)
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+#define CB_TTBR1_IRGN1 (CB_TTBR1_IRGN1_MASK << CB_TTBR1_IRGN1_SHIFT)
+#define CB_TTBR1_S (CB_TTBR1_S_MASK << CB_TTBR1_S_SHIFT)
+#define CB_TTBR1_RGN (CB_TTBR1_RGN_MASK << CB_TTBR1_RGN_SHIFT)
+#define CB_TTBR1_NOS (CB_TTBR1_NOS_MASK << CB_TTBR1_NOS_SHIFT)
+#define CB_TTBR1_IRGN0 (CB_TTBR1_IRGN0_MASK << CB_TTBR1_IRGN0_SHIFT)
+#endif
+
+/* Global Register Masks */
+/* Configuration Register 0 */
+#define CR0_NSCFG_MASK 0x03
+#define CR0_WACFG_MASK 0x03
+#define CR0_RACFG_MASK 0x03
+#define CR0_SHCFG_MASK 0x03
+#define CR0_SMCFCFG_MASK 0x01
+#define NSCR0_SMCFCFG_MASK 0x01
+#define CR0_MTCFG_MASK 0x01
+#define CR0_MEMATTR_MASK 0x0F
+#define CR0_BSU_MASK 0x03
+#define CR0_FB_MASK 0x01
+#define CR0_PTM_MASK 0x01
+#define CR0_VMIDPNE_MASK 0x01
+#define CR0_USFCFG_MASK 0x01
+#define NSCR0_USFCFG_MASK 0x01
+#define CR0_GSE_MASK 0x01
+#define CR0_STALLD_MASK 0x01
+#define NSCR0_STALLD_MASK 0x01
+#define CR0_TRANSIENTCFG_MASK 0x03
+#define CR0_GCFGFIE_MASK 0x01
+#define NSCR0_GCFGFIE_MASK 0x01
+#define CR0_GCFGFRE_MASK 0x01
+#define NSCR0_GCFGFRE_MASK 0x01
+#define CR0_GFIE_MASK 0x01
+#define NSCR0_GFIE_MASK 0x01
+#define CR0_GFRE_MASK 0x01
+#define NSCR0_GFRE_MASK 0x01
+#define CR0_CLIENTPD_MASK 0x01
+#define NSCR0_CLIENTPD_MASK 0x01
+
+/* ACR */
+#define ACR_SMTNMC_BPTLBEN_MASK 0x01
+#define ACR_MMUDIS_BPTLBEN_MASK 0x01
+#define ACR_S2CR_BPTLBEN_MASK 0x01
+
+/* NSACR */
+#define NSACR_SMTNMC_BPTLBEN_MASK 0x01
+#define NSACR_MMUDIS_BPTLBEN_MASK 0x01
+#define NSACR_S2CR_BPTLBEN_MASK 0x01
+
+/* Configuration Register 2 */
+#define CR2_BPVMID_MASK 0xFF
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_MASK 0xFFFFF
+#define GATS1PR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_MASK 0xFFFFF
+#define GATS1PW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_MASK 0xFFFFF
+#define GATS1UR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_MASK 0xFFFFF
+#define GATS1UW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR_MASK 0xFFFFF
+#define GATS12PR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR_MASK 0xFFFFF
+#define GATS12PW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR_MASK 0xFFFFF
+#define GATS12UR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR_MASK 0xFFFFF
+#define GATS12UW_NDX_MASK 0xFF
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_MASK 0x01
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_MASK 0xFFFFFFFF
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_MASK 0x01
+#define GFSR_USF_MASK 0x01
+#define GFSR_SMCF_MASK 0x01
+#define GFSR_UCBF_MASK 0x01
+#define GFSR_UCIF_MASK 0x01
+#define GFSR_CAF_MASK 0x01
+#define GFSR_EF_MASK 0x01
+#define GFSR_PF_MASK 0x01
+#define GFSR_MULTI_MASK 0x01
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_MASK 0x01
+#define GFSYNR0_WNR_MASK 0x01
+#define GFSYNR0_PNU_MASK 0x01
+#define GFSYNR0_IND_MASK 0x01
+#define GFSYNR0_NSSTATE_MASK 0x01
+#define GFSYNR0_NSATTR_MASK 0x01
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_MASK 0x7FFF
+#define GFSYNr1_SSD_IDX_MASK 0x7FFF /* NOTE(review): lowercase 'r' — likely a typo for GFSYNR1_SSD_IDX_MASK; the GET/SET field macros token-paste these names, so verify no user references this spelling before renaming */
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_MASK 0x01
+#define GPAR_SS_MASK 0x01
+#define GPAR_OUTER_MASK 0x03
+#define GPAR_INNER_MASK 0x03
+#define GPAR_SH_MASK 0x01
+#define GPAR_NS_MASK 0x01
+#define GPAR_NOS_MASK 0x01
+#define GPAR_PA_MASK 0xFFFFF
+#define GPAR_TF_MASK 0x01
+#define GPAR_AFF_MASK 0x01
+#define GPAR_PF_MASK 0x01
+#define GPAR_EF_MASK 0x01
+#define GPAR_TLBMCF_MASK 0x01
+#define GPAR_TLBLKF_MASK 0x01
+#define GPAR_UCBF_MASK 0x01
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_MASK 0xFF
+#define IDR0_NUMSIDB_MASK 0x0F
+#define IDR0_BTM_MASK 0x01
+#define IDR0_CTTW_MASK 0x01
+#define IDR0_NUMIPRT_MASK 0xFF /* NOTE(review): spelled NUMIPRT here but the matching shift below is IDR0_NUMIRPT_SHIFT — one of the two is transposed; field macros that paste MASK and SHIFT from the same field name will fail to expand for this field */
+#define IDR0_PTFS_MASK 0x01
+#define IDR0_SMS_MASK 0x01
+#define IDR0_NTS_MASK 0x01
+#define IDR0_S2TS_MASK 0x01
+#define IDR0_S1TS_MASK 0x01
+#define IDR0_SES_MASK 0x01
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_MASK 0xFF
+#define IDR1_NUMSSDNDXB_MASK 0x0F
+#define IDR1_SSDTP_MASK 0x01
+#define IDR1_SMCD_MASK 0x01
+#define IDR1_NUMS2CB_MASK 0xFF
+#define IDR1_NUMPAGENDXB_MASK 0x07
+#define IDR1_PAGESIZE_MASK 0x01
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_MASK 0x0F
+#define IDR2_OAS_MASK 0x0F
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_MASK 0x0F
+#define IDR7_MAJOR_MASK 0x0F
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_MASK 0xFF
+#define S2CR_SHCFG_MASK 0x03
+#define S2CR_MTCFG_MASK 0x01
+#define S2CR_MEMATTR_MASK 0x0F
+#define S2CR_TYPE_MASK 0x03
+#define S2CR_NSCFG_MASK 0x03
+#define S2CR_RACFG_MASK 0x03
+#define S2CR_WACFG_MASK 0x03
+#define S2CR_PRIVCFG_MASK 0x03
+#define S2CR_INSTCFG_MASK 0x03
+#define S2CR_TRANSIENTCFG_MASK 0x03
+#define S2CR_VMID_MASK 0xFF
+#define S2CR_BSU_MASK 0x03
+#define S2CR_FB_MASK 0x01
+
+/* Stream Match Register: SMR */
+#define SMR_ID_MASK 0x7FFF
+#define SMR_MASK_MASK 0x7FFF
+#define SMR_VALID_MASK 0x01
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_MASK 0x01
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_MASK 0xFFFFF
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_MASK 0xFF
+
+/* Global Register Space 1 Mask */
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_MASK 0xFF
+#define CBAR_CBNDX_MASK 0x03
+#define CBAR_BPSHCFG_MASK 0x03
+#define CBAR_HYPC_MASK 0x01
+#define CBAR_FB_MASK 0x01
+#define CBAR_MEMATTR_MASK 0x0F
+#define CBAR_TYPE_MASK 0x03
+#define CBAR_BSU_MASK 0x03
+#define CBAR_RACFG_MASK 0x03
+#define CBAR_WACFG_MASK 0x03
+#define CBAR_IRPTNDX_MASK 0xFF
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_MASK 0x7FFF
+
+/* Implementation defined register space masks */
+#define MICRO_MMU_CTRL_RESERVED_MASK 0x03
+#define MICRO_MMU_CTRL_HALT_REQ_MASK 0x01
+#define MICRO_MMU_CTRL_IDLE_MASK 0x01
+
+/* Stage 1 Context Bank Format Masks */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_MASK 0x3
+#define CB_ACTLR_REQPRIORITYCFG_MASK 0x1
+#define CB_ACTLR_PRIVCFG_MASK 0x3
+#define CB_ACTLR_BPRCOSH_MASK 0x1
+#define CB_ACTLR_BPRCISH_MASK 0x1
+#define CB_ACTLR_BPRCNSH_MASK 0x1
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_MASK 0xFFFFF
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_MASK 0x01
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_MASK 0xFF
+#define CB_CONTEXTIDR_PROCID_MASK 0xFFFFFF
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_MASK 0xFFFFFFFF
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_MASK 0x01
+#define CB_FSR_AFF_MASK 0x01
+#define CB_FSR_PF_MASK 0x01
+#define CB_FSR_EF_MASK 0x01
+#define CB_FSR_TLBMCF_MASK 0x01
+#define CB_FSR_TLBLKF_MASK 0x01
+#define CB_FSR_SS_MASK 0x01
+#define CB_FSR_MULTI_MASK 0x01
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_MASK 0x03
+#define CB_FSYNR0_S1PTWF_MASK 0x01
+#define CB_FSYNR0_WNR_MASK 0x01
+#define CB_FSYNR0_PNU_MASK 0x01
+#define CB_FSYNR0_IND_MASK 0x01
+#define CB_FSYNR0_NSSTATE_MASK 0x01
+#define CB_FSYNR0_NSATTR_MASK 0x01
+#define CB_FSYNR0_ATOF_MASK 0x01
+#define CB_FSYNR0_PTWF_MASK 0x01
+#define CB_FSYNR0_AFR_MASK 0x01
+#define CB_FSYNR0_S1CBNDX_MASK 0xFF
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_MASK 0x03
+#define CB_NMRR_IR1_MASK 0x03
+#define CB_NMRR_IR2_MASK 0x03
+#define CB_NMRR_IR3_MASK 0x03
+#define CB_NMRR_IR4_MASK 0x03
+#define CB_NMRR_IR5_MASK 0x03
+#define CB_NMRR_IR6_MASK 0x03
+#define CB_NMRR_IR7_MASK 0x03
+#define CB_NMRR_OR0_MASK 0x03
+#define CB_NMRR_OR1_MASK 0x03
+#define CB_NMRR_OR2_MASK 0x03
+#define CB_NMRR_OR3_MASK 0x03
+#define CB_NMRR_OR4_MASK 0x03
+#define CB_NMRR_OR5_MASK 0x03
+#define CB_NMRR_OR6_MASK 0x03
+#define CB_NMRR_OR7_MASK 0x03
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_MASK 0x01
+#define CB_PAR_SS_MASK 0x01
+#define CB_PAR_OUTER_MASK 0x03
+#define CB_PAR_INNER_MASK 0x07
+#define CB_PAR_SH_MASK 0x01
+#define CB_PAR_NS_MASK 0x01
+#define CB_PAR_NOS_MASK 0x01
+#define CB_PAR_PA_MASK 0xFFFFF
+#define CB_PAR_TF_MASK 0x01
+#define CB_PAR_AFF_MASK 0x01
+#define CB_PAR_PF_MASK 0x01
+#define CB_PAR_EF_MASK 0x01
+#define CB_PAR_TLBMCF_MASK 0x01
+#define CB_PAR_TLBLKF_MASK 0x01
+#define CB_PAR_ATOT_MASK 0x01ULL
+#define CB_PAR_PLVL_MASK 0x03ULL
+#define CB_PAR_STAGE_MASK 0x01ULL
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_MASK 0x03
+#define CB_PRRR_TR1_MASK 0x03
+#define CB_PRRR_TR2_MASK 0x03
+#define CB_PRRR_TR3_MASK 0x03
+#define CB_PRRR_TR4_MASK 0x03
+#define CB_PRRR_TR5_MASK 0x03
+#define CB_PRRR_TR6_MASK 0x03
+#define CB_PRRR_TR7_MASK 0x03
+#define CB_PRRR_DS0_MASK 0x01
+#define CB_PRRR_DS1_MASK 0x01
+#define CB_PRRR_NS0_MASK 0x01
+#define CB_PRRR_NS1_MASK 0x01
+#define CB_PRRR_NOS0_MASK 0x01
+#define CB_PRRR_NOS1_MASK 0x01
+#define CB_PRRR_NOS2_MASK 0x01
+#define CB_PRRR_NOS3_MASK 0x01
+#define CB_PRRR_NOS4_MASK 0x01
+#define CB_PRRR_NOS5_MASK 0x01
+#define CB_PRRR_NOS6_MASK 0x01
+#define CB_PRRR_NOS7_MASK 0x01
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_MASK 0x01
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_MASK 0x01
+#define CB_SCTLR_TRE_MASK 0x01
+#define CB_SCTLR_AFE_MASK 0x01
+#define CB_SCTLR_AFFD_MASK 0x01
+#define CB_SCTLR_E_MASK 0x01
+#define CB_SCTLR_CFRE_MASK 0x01
+#define CB_SCTLR_CFIE_MASK 0x01
+#define CB_SCTLR_CFCFG_MASK 0x01
+#define CB_SCTLR_HUPCF_MASK 0x01
+#define CB_SCTLR_WXN_MASK 0x01
+#define CB_SCTLR_UWXN_MASK 0x01
+#define CB_SCTLR_ASIDPNE_MASK 0x01
+#define CB_SCTLR_TRANSIENTCFG_MASK 0x03
+#define CB_SCTLR_MEMATTR_MASK 0x0F
+#define CB_SCTLR_MTCFG_MASK 0x01
+#define CB_SCTLR_SHCFG_MASK 0x03
+#define CB_SCTLR_RACFG_MASK 0x03
+#define CB_SCTLR_WACFG_MASK 0x03
+#define CB_SCTLR_NSCFG_MASK 0x03
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_MASK 0xFF
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_MASK 0xFF
+#define CB_TLBIVA_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_MASK 0xFF
+#define CB_TLBIVAL_VA_MASK 0xFFFFF
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_MASK 0x01
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_MASK 0x07
+#define CB_TTBCR_T1SZ_MASK 0x07
+#define CB_TTBCR_EPD0_MASK 0x01
+#define CB_TTBCR_EPD1_MASK 0x01
+#define CB_TTBCR_IRGN0_MASK 0x03
+#define CB_TTBCR_IRGN1_MASK 0x03
+#define CB_TTBCR_ORGN0_MASK 0x03
+#define CB_TTBCR_ORGN1_MASK 0x03
+#define CB_TTBCR_NSCFG0_MASK 0x01
+#define CB_TTBCR_NSCFG1_MASK 0x01
+#define CB_TTBCR_SH0_MASK 0x03
+#define CB_TTBCR_SH1_MASK 0x03
+#define CB_TTBCR_A1_MASK 0x01
+#define CB_TTBCR_EAE_MASK 0x01
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#ifdef CONFIG_IOMMU_LPAE
+#define CB_TTBR0_ADDR_MASK 0x7FFFFFFFFULL
+#define CB_TTBR0_ASID_MASK 0xFF
+#define CB_TTBR1_ASID_MASK 0xFF
+#else
+#define CB_TTBR0_IRGN1_MASK 0x01
+#define CB_TTBR0_S_MASK 0x01
+#define CB_TTBR0_RGN_MASK 0x01
+#define CB_TTBR0_NOS_MASK 0x01
+#define CB_TTBR0_IRGN0_MASK 0x01
+#define CB_TTBR0_ADDR_MASK 0xFFFFFF
+
+#define CB_TTBR1_IRGN1_MASK 0x1
+#define CB_TTBR1_S_MASK 0x1
+#define CB_TTBR1_RGN_MASK 0x1
+#define CB_TTBR1_NOS_MASK 0x1 /* was 0X1: use lowercase hex prefix, matching every other constant in this header */
+#define CB_TTBR1_IRGN0_MASK 0x1 /* was 0X1: same value, consistent casing */
+#endif
+
+/* Global Register Shifts */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG_SHIFT 28
+#define CR0_WACFG_SHIFT 26
+#define CR0_RACFG_SHIFT 24
+#define CR0_SHCFG_SHIFT 22
+#define CR0_SMCFCFG_SHIFT 21
+#define NSCR0_SMCFCFG_SHIFT 21
+#define CR0_MTCFG_SHIFT 20
+#define CR0_MEMATTR_SHIFT 16
+#define CR0_BSU_SHIFT 14
+#define CR0_FB_SHIFT 13
+#define CR0_PTM_SHIFT 12
+#define CR0_VMIDPNE_SHIFT 11
+#define CR0_USFCFG_SHIFT 10
+#define NSCR0_USFCFG_SHIFT 10
+#define CR0_GSE_SHIFT 9
+#define CR0_STALLD_SHIFT 8
+#define NSCR0_STALLD_SHIFT 8
+#define CR0_TRANSIENTCFG_SHIFT 6
+#define CR0_GCFGFIE_SHIFT 5
+#define NSCR0_GCFGFIE_SHIFT 5
+#define CR0_GCFGFRE_SHIFT 4
+#define NSCR0_GCFGFRE_SHIFT 4
+#define CR0_GFIE_SHIFT 2
+#define NSCR0_GFIE_SHIFT 2
+#define CR0_GFRE_SHIFT 1
+#define NSCR0_GFRE_SHIFT 1
+#define CR0_CLIENTPD_SHIFT 0
+#define NSCR0_CLIENTPD_SHIFT 0
+
+/* ACR */
+#define ACR_SMTNMC_BPTLBEN_SHIFT 8
+#define ACR_MMUDIS_BPTLBEN_SHIFT 9
+#define ACR_S2CR_BPTLBEN_SHIFT 10
+
+/* NSACR */
+#define NSACR_SMTNMC_BPTLBEN_SHIFT 8
+#define NSACR_MMUDIS_BPTLBEN_SHIFT 9
+#define NSACR_S2CR_BPTLBEN_SHIFT 10
+
+/* Configuration Register: CR2 */
+#define CR2_BPVMID_SHIFT 0
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_SHIFT 12
+#define GATS1PR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_SHIFT 12
+#define GATS1PW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_SHIFT 12
+#define GATS1UR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_SHIFT 12
+#define GATS1UW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR_SHIFT 12
+#define GATS12PR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR_SHIFT 12
+#define GATS12PW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR_SHIFT 12
+#define GATS12UR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR_SHIFT 12
+#define GATS12UW_NDX_SHIFT 0
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_SHIFT 0
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_SHIFT 0
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_SHIFT 0
+#define GFSR_USF_SHIFT 1
+#define GFSR_SMCF_SHIFT 2
+#define GFSR_UCBF_SHIFT 3
+#define GFSR_UCIF_SHIFT 4
+#define GFSR_CAF_SHIFT 5
+#define GFSR_EF_SHIFT 6
+#define GFSR_PF_SHIFT 7
+#define GFSR_MULTI_SHIFT 31
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_SHIFT 0
+#define GFSYNR0_WNR_SHIFT 1
+#define GFSYNR0_PNU_SHIFT 2
+#define GFSYNR0_IND_SHIFT 3
+#define GFSYNR0_NSSTATE_SHIFT 4
+#define GFSYNR0_NSATTR_SHIFT 5
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_SHIFT 0
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_SHIFT 0
+#define GPAR_SS_SHIFT 1
+#define GPAR_OUTER_SHIFT 2
+#define GPAR_INNER_SHIFT 4
+#define GPAR_SH_SHIFT 7
+#define GPAR_NS_SHIFT 9
+#define GPAR_NOS_SHIFT 10
+#define GPAR_PA_SHIFT 12
+#define GPAR_TF_SHIFT 1 /* fault-view fields from here down reuse bit positions of the success-view fields above; presumably only valid when GPAR.F is set — confirm against the SMMU spec */
+#define GPAR_AFF_SHIFT 2
+#define GPAR_PF_SHIFT 3
+#define GPAR_EF_SHIFT 4
+#define GPAR_TLCMCF_SHIFT 5 /* NOTE(review): the mask above is GPAR_TLBMCF_MASK — 'TLCMCF' looks transposed; a pasted MASK/SHIFT pair for this field cannot resolve. Verify and align the spelling */
+#define GPAR_TLBLKF_SHIFT 6
+#define GFAR_UCBF_SHIFT 30 /* NOTE(review): GFAR_ prefix inside the GPAR section, and the mask above is GPAR_UCBF_MASK — likely a typo for GPAR_UCBF_SHIFT; verify no references before renaming */
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_SHIFT 0
+#define IDR0_NUMSIDB_SHIFT 9
+#define IDR0_BTM_SHIFT 13
+#define IDR0_CTTW_SHIFT 14
+#define IDR0_NUMIRPT_SHIFT 16
+#define IDR0_PTFS_SHIFT 24
+#define IDR0_SMS_SHIFT 27
+#define IDR0_NTS_SHIFT 28
+#define IDR0_S2TS_SHIFT 29
+#define IDR0_S1TS_SHIFT 30
+#define IDR0_SES_SHIFT 31
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_SHIFT 0
+#define IDR1_NUMSSDNDXB_SHIFT 8
+#define IDR1_SSDTP_SHIFT 12
+#define IDR1_SMCD_SHIFT 15
+#define IDR1_NUMS2CB_SHIFT 16
+#define IDR1_NUMPAGENDXB_SHIFT 28
+#define IDR1_PAGESIZE_SHIFT 31
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_SHIFT 0
+#define IDR2_OAS_SHIFT 4
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_SHIFT 0
+#define IDR7_MAJOR_SHIFT 4
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_SHIFT 0
+#define s2CR_SHCFG_SHIFT 8 /* NOTE(review): lowercase 's2CR' — likely a typo for S2CR_SHCFG_SHIFT; field-access macros paste the uppercase register name, so this definition is unreachable by them. Verify before renaming */
+#define S2CR_MTCFG_SHIFT 11
+#define S2CR_MEMATTR_SHIFT 12
+#define S2CR_TYPE_SHIFT 16
+#define S2CR_NSCFG_SHIFT 18
+#define S2CR_RACFG_SHIFT 20
+#define S2CR_WACFG_SHIFT 22
+#define S2CR_PRIVCFG_SHIFT 24
+#define S2CR_INSTCFG_SHIFT 26
+#define S2CR_TRANSIENTCFG_SHIFT 28
+#define S2CR_VMID_SHIFT 0 /* VMID/BSU/FB overlap CBNDX/PRIVCFG/INSTCFG above; presumably the bypass-mode layout of S2CR vs. the translation-mode layout — confirm against the SMMU spec */
+#define S2CR_BSU_SHIFT 24
+#define S2CR_FB_SHIFT 26
+
+/* Stream Match Register: SMR */
+#define SMR_ID_SHIFT 0
+#define SMR_MASK_SHIFT 16
+#define SMR_VALID_SHIFT 31
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_SHIFT 0
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_SHIFT 12
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_SHIFT 0
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_SHIFT 0
+#define CBAR_CBNDX_SHIFT 8
+#define CBAR_BPSHCFG_SHIFT 8
+#define CBAR_HYPC_SHIFT 10
+#define CBAR_FB_SHIFT 11
+#define CBAR_MEMATTR_SHIFT 12
+#define CBAR_TYPE_SHIFT 16
+#define CBAR_BSU_SHIFT 18
+#define CBAR_RACFG_SHIFT 20
+#define CBAR_WACFG_SHIFT 22
+#define CBAR_IRPTNDX_SHIFT 24
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_SHIFT 0
+
+/* Implementation defined register space shift */
+#define MICRO_MMU_CTRL_RESERVED_SHIFT 0x00
+#define MICRO_MMU_CTRL_HALT_REQ_SHIFT 0x02
+#define MICRO_MMU_CTRL_IDLE_SHIFT 0x03
+
+/* Stage 1 Context Bank Format Shifts */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_SHIFT 0
+#define CB_ACTLR_REQPRIORITYCFG_SHIFT 4
+#define CB_ACTLR_PRIVCFG_SHIFT 8
+#define CB_ACTLR_BPRCOSH_SHIFT 28
+#define CB_ACTLR_BPRCISH_SHIFT 29
+#define CB_ACTLR_BPRCNSH_SHIFT 30
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_SHIFT 12
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_SHIFT 0
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_SHIFT 0
+#define CB_CONTEXTIDR_PROCID_SHIFT 8
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_SHIFT 0
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_SHIFT 1
+#define CB_FSR_AFF_SHIFT 2
+#define CB_FSR_PF_SHIFT 3
+#define CB_FSR_EF_SHIFT 4
+#define CB_FSR_TLBMCF_SHIFT 5
+#define CB_FSR_TLBLKF_SHIFT 6
+#define CB_FSR_SS_SHIFT 30
+#define CB_FSR_MULTI_SHIFT 31
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_SHIFT 0
+#define CB_FSYNR0_S1PTWF_SHIFT 3
+#define CB_FSYNR0_WNR_SHIFT 4
+#define CB_FSYNR0_PNU_SHIFT 5
+#define CB_FSYNR0_IND_SHIFT 6
+#define CB_FSYNR0_NSSTATE_SHIFT 7
+#define CB_FSYNR0_NSATTR_SHIFT 8
+#define CB_FSYNR0_ATOF_SHIFT 9
+#define CB_FSYNR0_PTWF_SHIFT 10
+#define CB_FSYNR0_AFR_SHIFT 11
+#define CB_FSYNR0_S1CBNDX_SHIFT 16
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_SHIFT 0
+#define CB_NMRR_IR1_SHIFT 2
+#define CB_NMRR_IR2_SHIFT 4
+#define CB_NMRR_IR3_SHIFT 6
+#define CB_NMRR_IR4_SHIFT 8
+#define CB_NMRR_IR5_SHIFT 10
+#define CB_NMRR_IR6_SHIFT 12
+#define CB_NMRR_IR7_SHIFT 14
+#define CB_NMRR_OR0_SHIFT 16
+#define CB_NMRR_OR1_SHIFT 18
+#define CB_NMRR_OR2_SHIFT 20
+#define CB_NMRR_OR3_SHIFT 22
+#define CB_NMRR_OR4_SHIFT 24
+#define CB_NMRR_OR5_SHIFT 26
+#define CB_NMRR_OR6_SHIFT 28
+#define CB_NMRR_OR7_SHIFT 30
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_SHIFT 0
+#define CB_PAR_SS_SHIFT 1
+#define CB_PAR_OUTER_SHIFT 2
+#define CB_PAR_INNER_SHIFT 4
+#define CB_PAR_SH_SHIFT 7
+#define CB_PAR_NS_SHIFT 9
+#define CB_PAR_NOS_SHIFT 10
+#define CB_PAR_PA_SHIFT 12
+#define CB_PAR_TF_SHIFT 1
+#define CB_PAR_AFF_SHIFT 2
+#define CB_PAR_PF_SHIFT 3
+#define CB_PAR_EF_SHIFT 4
+#define CB_PAR_TLBMCF_SHIFT 5
+#define CB_PAR_TLBLKF_SHIFT 6
+#define CB_PAR_ATOT_SHIFT 31
+#define CB_PAR_PLVL_SHIFT 32
+#define CB_PAR_STAGE_SHIFT 35
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_SHIFT 0
+#define CB_PRRR_TR1_SHIFT 2
+#define CB_PRRR_TR2_SHIFT 4
+#define CB_PRRR_TR3_SHIFT 6
+#define CB_PRRR_TR4_SHIFT 8
+#define CB_PRRR_TR5_SHIFT 10
+#define CB_PRRR_TR6_SHIFT 12
+#define CB_PRRR_TR7_SHIFT 14
+#define CB_PRRR_DS0_SHIFT 16
+#define CB_PRRR_DS1_SHIFT 17
+#define CB_PRRR_NS0_SHIFT 18
+#define CB_PRRR_NS1_SHIFT 19
+#define CB_PRRR_NOS0_SHIFT 24
+#define CB_PRRR_NOS1_SHIFT 25
+#define CB_PRRR_NOS2_SHIFT 26
+#define CB_PRRR_NOS3_SHIFT 27
+#define CB_PRRR_NOS4_SHIFT 28
+#define CB_PRRR_NOS5_SHIFT 29
+#define CB_PRRR_NOS6_SHIFT 30
+#define CB_PRRR_NOS7_SHIFT 31
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_SHIFT 0
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_SHIFT 0
+#define CB_SCTLR_TRE_SHIFT 1
+#define CB_SCTLR_AFE_SHIFT 2
+#define CB_SCTLR_AFFD_SHIFT 3
+#define CB_SCTLR_E_SHIFT 4
+#define CB_SCTLR_CFRE_SHIFT 5
+#define CB_SCTLR_CFIE_SHIFT 6
+#define CB_SCTLR_CFCFG_SHIFT 7
+#define CB_SCTLR_HUPCF_SHIFT 8
+#define CB_SCTLR_WXN_SHIFT 9
+#define CB_SCTLR_UWXN_SHIFT 10
+#define CB_SCTLR_ASIDPNE_SHIFT 12
+#define CB_SCTLR_TRANSIENTCFG_SHIFT 14
+#define CB_SCTLR_MEMATTR_SHIFT 16
+#define CB_SCTLR_MTCFG_SHIFT 20
+#define CB_SCTLR_SHCFG_SHIFT 22
+#define CB_SCTLR_RACFG_SHIFT 24
+#define CB_SCTLR_WACFG_SHIFT 26
+#define CB_SCTLR_NSCFG_SHIFT 28
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_SHIFT 0
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_SHIFT 0
+#define CB_TLBIVA_VA_SHIFT 12
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_SHIFT 12
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_SHIFT 12
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_SHIFT 0
+#define CB_TLBIVAL_VA_SHIFT 12
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_SHIFT 0
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_SHIFT 0
+#define CB_TTBCR_T1SZ_SHIFT 16
+#define CB_TTBCR_EPD0_SHIFT 4
+#define CB_TTBCR_EPD1_SHIFT 5
+#define CB_TTBCR_NSCFG0_SHIFT 14
+#define CB_TTBCR_NSCFG1_SHIFT 30
+#define CB_TTBCR_EAE_SHIFT 31
+#define CB_TTBCR_IRGN0_SHIFT 8
+#define CB_TTBCR_IRGN1_SHIFT 24
+#define CB_TTBCR_ORGN0_SHIFT 10
+#define CB_TTBCR_ORGN1_SHIFT 26
+#define CB_TTBCR_A1_SHIFT 22
+#define CB_TTBCR_SH0_SHIFT 12
+#define CB_TTBCR_SH1_SHIFT 28
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#ifdef CONFIG_IOMMU_LPAE
+#define CB_TTBR0_ADDR_SHIFT 5
+#define CB_TTBR0_ASID_SHIFT 48
+#define CB_TTBR1_ASID_SHIFT 48
+#else
+#define CB_TTBR0_IRGN1_SHIFT 0
+#define CB_TTBR0_S_SHIFT 1
+#define CB_TTBR0_RGN_SHIFT 3
+#define CB_TTBR0_NOS_SHIFT 5
+#define CB_TTBR0_IRGN0_SHIFT 6
+#define CB_TTBR0_ADDR_SHIFT 14
+
+#define CB_TTBR1_IRGN1_SHIFT 0
+#define CB_TTBR1_S_SHIFT 1
+#define CB_TTBR1_RGN_SHIFT 3
+#define CB_TTBR1_NOS_SHIFT 5
+#define CB_TTBR1_IRGN0_SHIFT 6
+#define CB_TTBR1_ADDR_SHIFT 14
+#endif
+
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_pagetable.c b/drivers/iommu/qcom/msm_iommu_pagetable.c
new file mode 100644
index 000000000000..1f11abb9db7b
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_pagetable.c
@@ -0,0 +1,645 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+
+#include <linux/qcom_iommu.h>
+#include "msm_iommu_priv.h"
+#include <trace/events/kmem.h>
+#include "msm_iommu_pagetable.h"
+
+#define NUM_FL_PTE 4096
+#define NUM_SL_PTE 256
+#define GUARD_PTE 2
+#define NUM_TEX_CLASS 8
+
+/* First-level page table bits */
+#define FL_BASE_MASK 0xFFFFFC00
+#define FL_TYPE_TABLE (1 << 0)
+#define FL_TYPE_SECT (2 << 0)
+#define FL_SUPERSECTION (1 << 18)
+#define FL_AP0 (1 << 10)
+#define FL_AP1 (1 << 11)
+#define FL_AP2 (1 << 15)
+#define FL_SHARED (1 << 16)
+#define FL_BUFFERABLE (1 << 2)
+#define FL_CACHEABLE (1 << 3)
+#define FL_TEX0 (1 << 12)
+#define FL_OFFSET(va) (((va) & 0xFFF00000) >> 20)
+#define FL_NG (1 << 17)
+
+/* Second-level page table bits */
+#define SL_BASE_MASK_LARGE 0xFFFF0000
+#define SL_BASE_MASK_SMALL 0xFFFFF000
+#define SL_TYPE_LARGE (1 << 0)
+#define SL_TYPE_SMALL (2 << 0)
+#define SL_AP0 (1 << 4)
+#define SL_AP1 (2 << 4)
+#define SL_AP2 (1 << 9)
+#define SL_SHARED (1 << 10)
+#define SL_BUFFERABLE (1 << 2)
+#define SL_CACHEABLE (1 << 3)
+#define SL_TEX0 (1 << 6)
+#define SL_OFFSET(va) (((va) & 0xFF000) >> 12)
+#define SL_NG (1 << 11)
+
+/* Memory type and cache policy attributes */
+#define MT_SO 0
+#define MT_DEV 1
+#define MT_IOMMU_NORMAL 2
+#define CP_NONCACHED 0
+#define CP_WB_WA 1
+#define CP_WT 2
+#define CP_WB_NWA 3
+
+/* Sharability attributes of MSM IOMMU mappings */
+#define MSM_IOMMU_ATTR_NON_SH 0x0
+#define MSM_IOMMU_ATTR_SH 0x4
+
+/* Cacheability attributes of MSM IOMMU mappings */
+#define MSM_IOMMU_ATTR_NONCACHED 0x0
+#define MSM_IOMMU_ATTR_CACHED_WB_WA 0x1
+#define MSM_IOMMU_ATTR_CACHED_WB_NWA 0x2
+#define MSM_IOMMU_ATTR_CACHED_WT 0x3
+
+static int msm_iommu_tex_class[4];
+
+/* TEX Remap Registers */
+#define NMRR_ICP(nmrr, n) (((nmrr) & (3 << ((n) * 2))) >> ((n) * 2))
+#define NMRR_OCP(nmrr, n) (((nmrr) & (3 << ((n) * 2 + 16))) >> ((n) * 2 + 16))
+
+#define PRRR_NOS(prrr, n) ((prrr) & (1 << ((n) + 24)) ? 1 : 0)
+#define PRRR_MT(prrr, n) ((((prrr) & (3 << ((n) * 2))) >> ((n) * 2)))
+
+/*
+ * Push a range of page-table entries [start, end) out of the CPU data
+ * cache so the IOMMU's table walker observes them.  When "redirect" is
+ * set the flush is skipped — presumably the walker then fetches the
+ * tables coherently; confirm against the pt->redirect users.
+ */
+static inline void clean_pte(u32 *start, u32 *end, int redirect)
+{
+	if (!redirect)
+		dmac_flush_range(start, end);
+}
+
+/*
+ * Allocate the 16K first-level page table (NUM_FL_PTE = 4096 entries,
+ * one per 1M of VA) and a same-sized shadow table used purely for
+ * software bookkeeping (per-entry second-level map counts).
+ *
+ * Returns 0 on success, -ENOMEM if either allocation fails (the first
+ * table is released again if the shadow allocation fails).
+ */
+int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
+{
+	pt->fl_table = (u32 *)__get_free_pages(GFP_KERNEL, get_order(SZ_16K));
+	if (!pt->fl_table)
+		return -ENOMEM;
+
+	pt->fl_table_shadow = (u32 *)__get_free_pages(GFP_KERNEL,
+						      get_order(SZ_16K));
+	if (!pt->fl_table_shadow) {
+		free_pages((unsigned long)pt->fl_table, get_order(SZ_16K));
+		return -ENOMEM;
+	}
+
+	memset(pt->fl_table, 0, SZ_16K);
+	memset(pt->fl_table_shadow, 0, SZ_16K);
+	/* only the real table is read by hardware; the shadow needs no flush */
+	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);
+
+	return 0;
+}
+
+/*
+ * Free the page tables: every second-level page still referenced by a
+ * first-level table descriptor (descriptor type lives in the low two
+ * bits; only FL_TYPE_TABLE entries own an SL page), then the 16K
+ * first-level table itself, then the shadow table.
+ */
+void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
+{
+	u32 *fl_table;
+	u32 *fl_table_shadow;
+	int i;
+
+	fl_table = pt->fl_table;
+	fl_table_shadow = pt->fl_table_shadow;
+	for (i = 0; i < NUM_FL_PTE; i++)
+		if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
+			free_page((unsigned long) __va(((fl_table[i]) &
+							FL_BASE_MASK)));
+	free_pages((unsigned long)fl_table, get_order(SZ_16K));
+	pt->fl_table = 0;	/* NOTE(review): NULL would be the idiomatic pointer value */
+
+	free_pages((unsigned long)fl_table_shadow, get_order(SZ_16K));
+	pt->fl_table_shadow = 0;
+}
+
+/*
+ * Release second-level tables that became empty after unmapping
+ * [va, va + len).  Each shadow FL entry packs the SL table address
+ * (bits above 0x1FF) with a count of live mappings in the low 9 bits;
+ * an entry with a non-zero address and a zero count owns an SL page
+ * that can now be freed.
+ */
+void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va,
+				     size_t len)
+{
+	/*
+	 * Adding 2 for worst case. We could be spanning 3 second level pages
+	 * if we unmapped just over 1MB.
+	 */
+	u32 n_entries = len / SZ_1M + 2;
+	u32 fl_offset = FL_OFFSET(va);
+	u32 i;
+
+	for (i = 0; i < n_entries && fl_offset < NUM_FL_PTE; ++i) {
+		u32 *fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+		void *sl_table_va = __va(((*fl_pte_shadow) & ~0x1FF));
+		u32 sl_table = *fl_pte_shadow;
+
+		/* address bits set, map count zero: SL table is unused */
+		if (sl_table && !(sl_table & 0x1FF)) {
+			free_pages((unsigned long) sl_table_va,
+				   get_order(SZ_4K));
+			*fl_pte_shadow = 0;
+		}
+		++fl_offset;
+	}
+}
+
+/*
+ * Translate generic IOMMU_* prot flags into ARM short-descriptor
+ * permission and memory-type bits for a mapping of size "len".
+ * Section sizes (1M/16M) use first-level FL_* bits; 4K/64K pages use
+ * second-level SL_* bits.  Returns 0 (invalid) if the TEX class is out
+ * of range — callers treat a zero pgprot as an error.
+ */
+static int __get_pgprot(int prot, int len)
+{
+	unsigned int pgprot;
+	int tex;
+
+	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
+		prot |= IOMMU_READ | IOMMU_WRITE;
+		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
+	}
+
+	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
+		prot |= IOMMU_READ;
+		WARN_ONCE(1, "Write-only unsupported; falling back to RW\n");
+	}
+
+	if (prot & IOMMU_CACHE)
+		/* reuse the kernel's own TEX-remap class for normal cached memory */
+		tex = (pgprot_val(PAGE_KERNEL) >> 2) & 0x07;
+	else
+		tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];
+
+	if (tex < 0 || tex > NUM_TEX_CLASS - 1)
+		return 0;
+
+	if (len == SZ_16M || len == SZ_1M) {
+		pgprot = FL_SHARED;
+		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
+		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
+		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
+		/* IOMMU_PRIV: privileged-only access (AP0); otherwise user too */
+		pgprot |= prot & IOMMU_PRIV ? FL_AP0 :
+			(FL_AP0 | FL_AP1);
+		pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;	/* AP2 = read-only */
+	} else {
+		pgprot = SL_SHARED;
+		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
+		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
+		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
+		pgprot |= prot & IOMMU_PRIV ? SL_AP0 :
+			(SL_AP0 | SL_AP1);
+		pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;	/* AP2 = read-only */
+	}
+
+	return pgprot;
+}
+
+/*
+ * Allocate a second-level table and install its descriptor in *fl_pte.
+ * The shadow entry mirrors the descriptor with the low 9 bits cleared,
+ * leaving them free to count live mappings.  Returns the table's
+ * kernel VA, or NULL on allocation failure (in which case *fl_pte is
+ * left untouched).
+ *
+ * NOTE(review): the (int)__pa(sl) cast truncates physical addresses
+ * above 4G — harmless on 32-bit non-LPAE configurations, but verify
+ * for LPAE builds.
+ */
+static u32 *make_second_level(struct msm_iommu_pt *pt, u32 *fl_pte,
+			      u32 *fl_pte_shadow)
+{
+	u32 *sl;
+	sl = (u32 *) __get_free_pages(GFP_KERNEL,
+				      get_order(SZ_4K));
+
+	if (!sl) {
+		pr_debug("Could not allocate second level table\n");
+		goto fail;
+	}
+	memset(sl, 0, SZ_4K);
+	/* flush the SL PTEs plus the trailing guard entries */
+	clean_pte(sl, sl + NUM_SL_PTE + GUARD_PTE, pt->redirect);
+
+	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
+		   FL_TYPE_TABLE);
+	*fl_pte_shadow = *fl_pte & ~0x1FF;
+
+	clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+fail:
+	return sl;
+}
+
+/*
+ * Write one 4K small-page descriptor into *sl_pte.
+ * Returns 0 on success, -EBUSY if the slot is already populated.
+ */
+static int sl_4k(u32 *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+	int ret = 0;
+
+	if (*sl_pte) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
+		| SL_TYPE_SMALL | pgprot;
+fail:
+	return ret;
+}
+
+/*
+ * Write a 64K large-page mapping: the ARM short-descriptor format
+ * requires the descriptor to be replicated across all 16 consecutive
+ * slots.  Returns 0 on success, -EBUSY if any of the 16 entries is
+ * already populated (nothing is written in that case).
+ */
+static int sl_64k(u32 *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+	int ret = 0;
+
+	int i;
+
+	for (i = 0; i < 16; i++)
+		if (*(sl_pte+i)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+
+	for (i = 0; i < 16; i++)
+		*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
+			| SL_SHARED | SL_TYPE_LARGE | pgprot;
+
+fail:
+	return ret;
+}
+
+/*
+ * Write one 1M section descriptor into *fl_pte.
+ * Returns 0 on success, -EBUSY if the slot is already populated.
+ */
+static inline int fl_1m(u32 *fl_pte, phys_addr_t pa, int pgprot)
+{
+	if (*fl_pte)
+		return -EBUSY;
+
+	*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
+		| pgprot;
+
+	return 0;
+}
+
+/*
+ * Write a 16M supersection: as with 64K pages, the descriptor must be
+ * replicated across 16 consecutive first-level slots.  Returns 0 on
+ * success, -EBUSY if any of the 16 entries is already populated
+ * (nothing is written in that case).
+ */
+static inline int fl_16m(u32 *fl_pte, phys_addr_t pa, int pgprot)
+{
+	int i;
+	int ret = 0;
+	for (i = 0; i < 16; i++)
+		if (*(fl_pte+i)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+	for (i = 0; i < 16; i++)
+		*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
+			| FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
+fail:
+	return ret;
+}
+
+/*
+ * Map a single physically-contiguous region by wrapping it in a
+ * one-entry scatterlist and delegating to the range-mapping path.
+ * Only the four descriptor sizes (4K/64K/1M/16M) are accepted;
+ * anything else returns -EINVAL.
+ */
+int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
+			    phys_addr_t pa, size_t len, int prot)
+{
+	int ret;
+	struct scatterlist sg;
+
+	if (len != SZ_16M && len != SZ_1M &&
+	    len != SZ_64K && len != SZ_4K) {
+		pr_debug("Bad size: %zd\n", len);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	sg_init_table(&sg, 1);
+	/* dma_address carries the target PA; see get_phys_addr() */
+	sg_dma_address(&sg) = pa;
+	sg.length = len;
+
+	ret = msm_iommu_pagetable_map_range(pt, va, &sg, len, prot);
+
+fail:
+	return ret;
+}
+
+/*
+ * Unmap [va, va + len).  Always reports the full length as unmapped:
+ * msm_iommu_pagetable_unmap_range() has no failure path.
+ */
+size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
+				 size_t len)
+{
+	msm_iommu_pagetable_unmap_range(pt, va, len);
+	return len;
+}
+
+/*
+ * Resolve the physical address of a scatterlist entry.
+ */
+static phys_addr_t get_phys_addr(struct scatterlist *sg)
+{
+	/*
+	 * Try sg_dma_address first so that we can
+	 * map carveout regions that do not have a
+	 * struct page associated with them.
+	 */
+	phys_addr_t pa = sg_dma_address(sg);
+	if (pa == 0)
+		pa = sg_phys(sg);
+	return pa;
+}
+
+/*
+ * For debugging we may want to force mappings to be 4K only
+ */
+#ifdef CONFIG_IOMMU_FORCE_4K_MAPPINGS
+/*
+ * Debug variant: only ever report 4K alignment so every mapping is
+ * built from 4K pages, regardless of actual va/pa/len alignment.
+ */
+static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
+				   int align)
+{
+	if (align == SZ_4K) {
+		return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
+			&& (len >= align);
+	} else {
+		return 0;
+	}
+}
+#else
+/*
+ * True when va and pa are both aligned to "align" and at least that
+ * much length remains — i.e. a descriptor of size "align" may be used.
+ */
+static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
+				   int align)
+{
+	return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
+		&& (len >= align);
+}
+#endif
+
+/*
+ * Map the scatterlist "sg" (total length "len", which must be a
+ * multiple of 4K) starting at IOVA "va".  Each chunk is mapped with
+ * the largest descriptor the current va/pa alignment permits: 16M
+ * supersection, 1M section, 64K large page, else 4K small page.
+ * Second-level tables are allocated on demand and the shadow table's
+ * low 9 bits track how many SL entries are live per 1M region.
+ *
+ * On any failure, everything mapped so far is unmapped again before
+ * returning a negative errno; returns 0 on success.
+ */
+int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
+		struct scatterlist *sg, unsigned int len, int prot)
+{
+	phys_addr_t pa;
+	unsigned int start_va = va;	/* kept for rollback on failure */
+	unsigned int offset = 0;
+	u32 *fl_pte;
+	u32 *fl_pte_shadow;
+	u32 fl_offset;
+	u32 *sl_table = NULL;
+	u32 sl_offset, sl_start;
+	unsigned int chunk_size, chunk_offset = 0;
+	int ret = 0;
+	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;
+
+	BUG_ON(len & (SZ_4K - 1));	/* mapping granularity is 4K */
+
+	/* precompute descriptor bits for every size; 0 means invalid prot */
+	pgprot4k = __get_pgprot(prot, SZ_4K);
+	pgprot64k = __get_pgprot(prot, SZ_64K);
+	pgprot1m = __get_pgprot(prot, SZ_1M);
+	pgprot16m = __get_pgprot(prot, SZ_16M);
+	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
+	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
+	fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+	pa = get_phys_addr(sg);
+
+	while (offset < len) {
+		chunk_size = SZ_4K;
+
+		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+				     SZ_16M))
+			chunk_size = SZ_16M;
+		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+					  SZ_1M))
+			chunk_size = SZ_1M;
+		/* 64k or 4k determined later */
+
+//		trace_iommu_map_range(va, pa, sg->length, chunk_size);
+
+		/* for 1M and 16M, only first level entries are required */
+		if (chunk_size >= SZ_1M) {
+			if (chunk_size == SZ_16M) {
+				ret = fl_16m(fl_pte, pa, pgprot16m);
+				if (ret)
+					goto fail;
+				clean_pte(fl_pte, fl_pte + 16, pt->redirect);
+				fl_pte += 16;
+				fl_pte_shadow += 16;
+			} else if (chunk_size == SZ_1M) {
+				ret = fl_1m(fl_pte, pa, pgprot1m);
+				if (ret)
+					goto fail;
+				clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+				fl_pte++;
+				fl_pte_shadow++;
+			}
+
+			offset += chunk_size;
+			chunk_offset += chunk_size;
+			va += chunk_size;
+			pa += chunk_size;
+
+			/* current sg entry exhausted: advance to the next one */
+			if (chunk_offset >= sg->length && offset < len) {
+				chunk_offset = 0;
+				sg = sg_next(sg);
+				pa = get_phys_addr(sg);
+			}
+			continue;
+		}
+		/* for 4K or 64K, make sure there is a second level table */
+		if (*fl_pte == 0) {
+			if (!make_second_level(pt, fl_pte, fl_pte_shadow)) {
+				ret = -ENOMEM;
+				goto fail;
+			}
+		}
+		/* slot holding a section/supersection cannot take SL pages */
+		if (!(*fl_pte & FL_TYPE_TABLE)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+		sl_offset = SL_OFFSET(va);
+		/* Keep track of initial position so we
+		 * don't clean more than we have to
+		 */
+		sl_start = sl_offset;
+
+		/* Build the 2nd level page table */
+		while (offset < len && sl_offset < NUM_SL_PTE) {
+			/* Map a large 64K page if the chunk is large enough and
+			 * the pa and va are aligned
+			 */
+
+			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+					     SZ_64K))
+				chunk_size = SZ_64K;
+			else
+				chunk_size = SZ_4K;
+
+//			trace_iommu_map_range(va, pa, sg->length,
+//					      chunk_size);
+
+			if (chunk_size == SZ_4K) {
+				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
+				sl_offset++;
+				/* Increment map count */
+				(*fl_pte_shadow)++;
+			} else {
+				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
+				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
+				sl_offset += 16;
+				/* Increment map count */
+				*fl_pte_shadow += 16;
+			}
+
+			offset += chunk_size;
+			chunk_offset += chunk_size;
+			va += chunk_size;
+			pa += chunk_size;
+
+			if (chunk_offset >= sg->length && offset < len) {
+				chunk_offset = 0;
+				sg = sg_next(sg);
+				pa = get_phys_addr(sg);
+			}
+		}
+
+		/* flush only the SL entries written in this pass */
+		clean_pte(sl_table + sl_start, sl_table + sl_offset,
+			  pt->redirect);
+		fl_pte++;
+		fl_pte_shadow++;
+		sl_offset = 0;
+	}
+
+fail:
+	/* roll back partial mappings so the tables stay consistent */
+	if (ret && offset > 0)
+		msm_iommu_pagetable_unmap_range(pt, start_va, offset);
+
+	return ret;
+}
+
+void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
+ unsigned int len)
+{
+ unsigned int offset = 0;
+ u32 *fl_pte;
+ u32 *fl_pte_shadow;
+ u32 fl_offset;
+ u32 *sl_table;
+ u32 sl_start, sl_end;
+ int used;
+
+ BUG_ON(len & (SZ_4K - 1));
+
+ fl_offset = FL_OFFSET(va); /* Upper 12 bits */
+ fl_pte = pt->fl_table + fl_offset; /* int pointers, 4 bytes */
+ fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+
+ while (offset < len) {
+ if (*fl_pte & FL_TYPE_TABLE) {
+ unsigned int n_entries;
+
+ sl_start = SL_OFFSET(va);
+ sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+ sl_end = ((len - offset) / SZ_4K) + sl_start;
+
+ if (sl_end > NUM_SL_PTE)
+ sl_end = NUM_SL_PTE;
+ n_entries = sl_end - sl_start;
+
+ memset(sl_table + sl_start, 0, n_entries * 4);
+ clean_pte(sl_table + sl_start, sl_table + sl_end,
+ pt->redirect);
+
+ offset += n_entries * SZ_4K;
+ va += n_entries * SZ_4K;
+
+ BUG_ON((*fl_pte_shadow & 0x1FF) < n_entries);
+
+ /* Decrement map count */
+ *fl_pte_shadow -= n_entries;
+ used = *fl_pte_shadow & 0x1FF;
+
+ if (!used) {
+ *fl_pte = 0;
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+ }
+
+ sl_start = 0;
+ } else {
+ *fl_pte = 0;
+ *fl_pte_shadow = 0;
+
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+ va += SZ_1M;
+ offset += SZ_1M;
+ sl_start = 0;
+ }
+ fl_pte++;
+ fl_pte_shadow++;
+ }
+}
+
+phys_addr_t msm_iommu_iova_to_phys_soft(struct iommu_domain *domain,
+ phys_addr_t va)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_pt *pt = &priv->pt;
+ u32 *fl_pte;
+ u32 fl_offset;
+ u32 *sl_table = NULL;
+ u32 sl_offset;
+ u32 *sl_pte;
+
+ if (!pt->fl_table) {
+ pr_err("Page table doesn't exist\n");
+ return 0;
+ }
+
+ fl_offset = FL_OFFSET(va);
+ fl_pte = pt->fl_table + fl_offset;
+
+ if (*fl_pte & FL_TYPE_TABLE) {
+ sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+ sl_offset = SL_OFFSET(va);
+ sl_pte = sl_table + sl_offset;
+ /* 64 KB section */
+ if (*sl_pte & SL_TYPE_LARGE)
+ return (*sl_pte & 0xFFFF0000) | (va & ~0xFFFF0000);
+ /* 4 KB section */
+ if (*sl_pte & SL_TYPE_SMALL)
+ return (*sl_pte & 0xFFFFF000) | (va & ~0xFFFFF000);
+ } else {
+ /* 16 MB section */
+ if (*fl_pte & FL_SUPERSECTION)
+ return (*fl_pte & 0xFF000000) | (va & ~0xFF000000);
+ /* 1 MB section */
+ if (*fl_pte & FL_TYPE_SECT)
+ return (*fl_pte & 0xFFF00000) | (va & ~0xFFF00000);
+ }
+ return 0;
+}
+
+static int __init get_tex_class(int icp, int ocp, int mt, int nos)
+{
+ int i = 0;
+ unsigned int prrr;
+ unsigned int nmrr;
+ int c_icp, c_ocp, c_mt, c_nos;
+
+ prrr = msm_iommu_get_prrr();
+ nmrr = msm_iommu_get_nmrr();
+
+ for (i = 0; i < NUM_TEX_CLASS; i++) {
+ c_nos = PRRR_NOS(prrr, i);
+ c_mt = PRRR_MT(prrr, i);
+ c_icp = NMRR_ICP(nmrr, i);
+ c_ocp = NMRR_OCP(nmrr, i);
+
+ if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static void __init setup_iommu_tex_classes(void)
+{
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
+ get_tex_class(CP_NONCACHED, CP_NONCACHED,
+ MT_IOMMU_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
+ get_tex_class(CP_WB_WA, CP_WB_WA, MT_IOMMU_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
+ get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_IOMMU_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
+ get_tex_class(CP_WT, CP_WT, MT_IOMMU_NORMAL, 1);
+}
+
+void __init msm_iommu_pagetable_init(void)
+{
+ setup_iommu_tex_classes();
+}
diff --git a/drivers/iommu/qcom/msm_iommu_pagetable.h b/drivers/iommu/qcom/msm_iommu_pagetable.h
new file mode 100644
index 000000000000..12a8d274f95e
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_pagetable.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H
+
+struct msm_iommu_pt;
+
+void msm_iommu_pagetable_init(void);
+int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt);
+void msm_iommu_pagetable_free(struct msm_iommu_pt *pt);
+int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
+ phys_addr_t pa, size_t len, int prot);
+size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len);
+int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
+ struct scatterlist *sg, unsigned int len, int prot);
+void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
+ unsigned int len);
+phys_addr_t msm_iommu_iova_to_phys_soft(struct iommu_domain *domain,
+ phys_addr_t va);
+void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len);
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_perfmon.h b/drivers/iommu/qcom/msm_iommu_perfmon.h
new file mode 100644
index 000000000000..45683f4ebd88
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_perfmon.h
@@ -0,0 +1,233 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/irqreturn.h>
+
+#ifndef MSM_IOMMU_PERFMON_H
+#define MSM_IOMMU_PERFMON_H
+
+/**
+ * struct iommu_pmon_counter - container for a performance counter.
+ * @counter_no: counter number within the group
+ * @absolute_counter_no: counter number within IOMMU PMU
+ * @value: cached counter value
+ * @overflow_count: no of times counter has overflowed
+ * @enabled: indicates whether counter is enabled or not
+ * @current_event_class: current selected event class, -1 if none
+ * @counter_dir: debugfs directory for this counter
+ * @cnt_group: group this counter belongs to
+ */
+struct iommu_pmon_counter {
+ unsigned int counter_no;
+ unsigned int absolute_counter_no;
+ unsigned long value;
+ unsigned long overflow_count;
+ unsigned int enabled;
+ int current_event_class;
+ struct dentry *counter_dir;
+ struct iommu_pmon_cnt_group *cnt_group;
+};
+
+/**
+ * struct iommu_pmon_cnt_group - container for a perf mon counter group.
+ * @grp_no: group number
+ * @num_counters: number of counters in this group
+ * @counters: list of counter in this group
+ * @group_dir: debugfs directory for this group
+ * @pmon: pointer to the iommu_pmon object this group belongs to
+ */
+struct iommu_pmon_cnt_group {
+ unsigned int grp_no;
+ unsigned int num_counters;
+ struct iommu_pmon_counter *counters;
+ struct dentry *group_dir;
+ struct iommu_pmon *pmon;
+};
+
+/**
+ * struct iommu_info - container for a perf mon iommu info.
+ * @iommu_name: name of the iommu from device tree
+ * @base: virtual base address for this iommu
+ * @evt_irq: irq number for event overflow interrupt
+ * @iommu_dev: pointer to iommu device
+ * @ops: iommu access operations pointer.
+ * @hw_ops: iommu pm hw access operations pointer.
+ * @always_on: 1 if iommu is always on, 0 otherwise.
+ */
+struct iommu_info {
+ const char *iommu_name;
+ void *base;
+ int evt_irq;
+ struct device *iommu_dev;
+ struct iommu_access_ops *ops;
+ struct iommu_pm_hw_ops *hw_ops;
+ unsigned int always_on;
+};
+
+/**
+ * struct iommu_pmon - main container for a perf mon data.
+ * @iommu_dir: debugfs directory for this iommu
+ * @iommu: iommu_info instance
+ * @iommu_list: iommu_list head
+ * @cnt_grp: list of counter groups
+ * @num_groups: number of counter groups
+ * @num_counters: number of counters per group
+ * @event_cls_supported: an array of event classes supported for this PMU
+ * @nevent_cls_supported: number of event classes supported.
+ * @enabled: Indicates whether perf. mon is enabled or not
+ * @iommu_attached Indicates whether iommu is attached or not.
+ * @lock: mutex used to synchronize access to shared data
+ */
+struct iommu_pmon {
+ struct dentry *iommu_dir;
+ struct iommu_info iommu;
+ struct list_head iommu_list;
+ struct iommu_pmon_cnt_group *cnt_grp;
+ u32 num_groups;
+ u32 num_counters;
+ u32 *event_cls_supported;
+ u32 nevent_cls_supported;
+ unsigned int enabled;
+ unsigned int iommu_attach_count;
+ struct mutex lock;
+};
+
+/**
+ * struct iommu_hw_ops - Callbacks for accessing IOMMU HW
+ * @initialize_hw: Call to do any initialization before enabling ovf interrupts
+ * @is_hw_access_ok: Returns 1 if we can access HW, 0 otherwise
+ * @grp_enable: Call to enable a counter group
+ * @grp_disable: Call to disable a counter group
+ * @enable_pm: Call to enable PM
+ * @disable_pm: Call to disable PM
+ * @reset_counters: Call to reset counters
+ * @check_for_overflow: Call to check for overflow
+ * @evt_ovfl_int_handler: Overflow interrupt handler callback
+ * @counter_enable: Call to enable counters
+ * @counter_disable: Call to disable counters
+ * @ovfl_int_enable: Call to enable overflow interrupts
+ * @ovfl_int_disable: Call to disable overflow interrupts
+ * @set_event_class: Call to set event class
+ * @read_counter: Call to read a counter value
+ */
+struct iommu_pm_hw_ops {
+ void (*initialize_hw)(const struct iommu_pmon *);
+ unsigned int (*is_hw_access_OK)(const struct iommu_pmon *);
+ void (*grp_enable)(struct iommu_info *, unsigned int);
+ void (*grp_disable)(struct iommu_info *, unsigned int);
+ void (*enable_pm)(struct iommu_info *);
+ void (*disable_pm)(struct iommu_info *);
+ void (*reset_counters)(const struct iommu_info *);
+ void (*check_for_overflow)(struct iommu_pmon *);
+ irqreturn_t (*evt_ovfl_int_handler)(int, void *);
+ void (*counter_enable)(struct iommu_info *,
+ struct iommu_pmon_counter *);
+ void (*counter_disable)(struct iommu_info *,
+ struct iommu_pmon_counter *);
+ void (*ovfl_int_enable)(struct iommu_info *,
+ const struct iommu_pmon_counter *);
+ void (*ovfl_int_disable)(struct iommu_info *,
+ const struct iommu_pmon_counter *);
+ void (*set_event_class)(struct iommu_pmon *pmon, unsigned int,
+ unsigned int);
+ unsigned int (*read_counter)(struct iommu_pmon_counter *);
+};
+
+#define MSM_IOMMU_PMU_NO_EVENT_CLASS -1
+
+#ifdef CONFIG_MSM_IOMMU_PMON
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv0 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void);
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv1 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void);
+
+/**
+ * Allocate memory for performance monitor structure. Must
+ * be called before iommu_pm_iommu_register
+ */
+struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev);
+
+/**
+ * Free memory previously allocated with iommu_pm_alloc
+ */
+void msm_iommu_pm_free(struct device *iommu_dev);
+
+/**
+ * Register iommu with the performance monitor module.
+ */
+int msm_iommu_pm_iommu_register(struct iommu_pmon *info);
+
+/**
+ * Unregister iommu with the performance monitor module.
+ */
+void msm_iommu_pm_iommu_unregister(struct device *dev);
+
+/**
+ * Called by iommu driver when attaching is complete
+ * Must NOT be called with IOMMU mutexes held.
+ * @param iommu_dev IOMMU device that is attached
+ */
+void msm_iommu_attached(struct device *dev);
+
+/**
+ * Called by iommu driver before detaching.
+ * Must NOT be called with IOMMU mutexes held.
+ * @param iommu_dev IOMMU device that is going to be detached
+ */
+void msm_iommu_detached(struct device *dev);
+#else
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void)
+{
+ return NULL;
+}
+
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void)
+{
+ return NULL;
+}
+
+static inline struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev)
+{
+ return NULL;
+}
+
+static inline void msm_iommu_pm_free(struct device *iommu_dev)
+{
+ return;
+}
+
+static inline int msm_iommu_pm_iommu_register(struct iommu_pmon *info)
+{
+ return -EIO;
+}
+
+static inline void msm_iommu_pm_iommu_unregister(struct device *dev)
+{
+}
+
+static inline void msm_iommu_attached(struct device *dev)
+{
+}
+
+static inline void msm_iommu_detached(struct device *dev)
+{
+}
+#endif
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_priv.h b/drivers/iommu/qcom/msm_iommu_priv.h
new file mode 100644
index 000000000000..4de0d7ef19e6
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_priv.h
@@ -0,0 +1,71 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_IOMMU_PRIV_H
+#define MSM_IOMMU_PRIV_H
+
+/**
+ * struct msm_iommu_pt - Container for first level page table and its
+ * attributes.
+ * fl_table: Pointer to the first level page table.
+ * redirect: Set to 1 if L2 redirect for page tables are enabled, 0 otherwise.
+ * unaligned_fl_table: Original address of memory for the page table.
+ * fl_table is manually aligned (as per spec) but we need the original address
+ * to free the table.
+ * fl_table_shadow: This is "copy" of the fl_table with some differences.
+ * It stores the same information as fl_table except that instead of storing
+ * second level page table address + page table entry descriptor bits it
+ * stores the second level page table address and the number of used second
+ * level page tables entries. This is used to check whether we need to free
+ * the second level page table which allows us to also free the second level
+ * page table after doing a TLB invalidate which should catch bugs with
+ * clients trying to unmap an address that is being used.
+ * fl_table_shadow will use the lower 9 bits for the use count and the upper
+ * bits for the second level page table address.
+ * sl_table_shadow uses the same concept as fl_table_shadow but for LPAE 2nd
+ * level page tables.
+ */
+#ifdef CONFIG_IOMMU_LPAE
+struct msm_iommu_pt {
+ u64 *fl_table;
+ u64 **sl_table_shadow;
+ int redirect;
+ u64 *unaligned_fl_table;
+};
+#else
+struct msm_iommu_pt {
+ u32 *fl_table;
+ int redirect;
+ u32 *fl_table_shadow;
+};
+#endif
+/**
+ * struct msm_iommu_priv - Container for page table attributes and other
+ * private iommu domain information.
+ * attributes.
+ * pt: Page table attribute structure
+ * list_attached: List of devices (contexts) attached to this domain.
+ * client_name: Name of the domain client.
+ */
+struct msm_iommu_priv {
+ struct msm_iommu_pt pt;
+ struct list_head list_attached;
+ struct iommu_domain domain;
+ const char *client_name;
+};
+
+static inline struct msm_iommu_priv *to_msm_priv(struct iommu_domain *dom)
+{
+ return container_of(dom, struct msm_iommu_priv, domain);
+}
+
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_sec.c b/drivers/iommu/qcom/msm_iommu_sec.c
new file mode 100644
index 000000000000..33a1e988ae1e
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_sec.c
@@ -0,0 +1,876 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/kmemleak.h>
+#include <linux/dma-mapping.h>
+#include <linux/qcom_scm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/sizes.h>
+
+#include "msm_iommu_perfmon.h"
+#include "msm_iommu_hw-v1.h"
+#include "msm_iommu_priv.h"
+#include <linux/qcom_iommu.h>
+#include <trace/events/kmem.h>
+
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
+/* commands for SCM_SVC_MP */
+#define IOMMU_SECURE_CFG 2
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SET_CP_POOL_SIZE 5
+#define IOMMU_SECURE_MAP 6
+#define IOMMU_SECURE_UNMAP 7
+#define IOMMU_SECURE_MAP2 0x0B
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2 0x0C
+#define IOMMU_SECURE_UNMAP2_FLAT 0x13
+#define IOMMU_TLBINVAL_FLAG 0x00000001
+
+/* commands for SCM_SVC_UTIL */
+#define IOMMU_DUMP_SMMU_FAULT_REGS 0X0C
+#define MAXIMUM_VIRT_SIZE (300*SZ_1M)
+
+
+#define MAKE_VERSION(major, minor, patch) \
+ (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
+
+
+static struct iommu_access_ops *iommu_access_ops;
+static int is_secure;
+
+static const struct of_device_id msm_smmu_list[] = {
+ { .compatible = "qcom,msm-smmu-v1", },
+ { .compatible = "qcom,msm-smmu-v2", },
+ { }
+};
+
+struct msm_scm_paddr_list {
+ unsigned int list;
+ unsigned int list_size;
+ unsigned int size;
+};
+
+struct msm_scm_mapping_info {
+ unsigned int id;
+ unsigned int ctx_id;
+ unsigned int va;
+ unsigned int size;
+};
+
+struct msm_scm_map2_req {
+ struct msm_scm_paddr_list plist;
+ struct msm_scm_mapping_info info;
+ unsigned int flags;
+};
+
+struct msm_scm_unmap2_req {
+ struct msm_scm_mapping_info info;
+ unsigned int flags;
+};
+
+struct msm_cp_pool_size {
+ uint32_t size;
+ uint32_t spare;
+};
+
+#define NUM_DUMP_REGS 14
+/*
+ * some space to allow the number of registers returned by the secure
+ * environment to grow
+ */
+#define WIGGLE_ROOM (NUM_DUMP_REGS * 2)
+/* Each entry is a (reg_addr, reg_val) pair, hence the * 2 */
+#define SEC_DUMP_SIZE ((NUM_DUMP_REGS * 2) + WIGGLE_ROOM)
+
+struct msm_scm_fault_regs_dump {
+ uint32_t dump_size;
+ uint32_t dump_data[SEC_DUMP_SIZE];
+} __aligned(PAGE_SIZE);
+
+void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops)
+{
+ iommu_access_ops = access_ops;
+}
+
+static int msm_iommu_dump_fault_regs(int smmu_id, int cb_num,
+ struct msm_scm_fault_regs_dump *regs)
+{
+ int ret;
+
+ dmac_clean_range(regs, regs + 1);
+
+ ret = qcom_scm_iommu_dump_fault_regs(smmu_id, cb_num,
+ virt_to_phys(regs), sizeof(*regs));
+
+ dmac_inv_range(regs, regs + 1);
+
+ return ret;
+}
+
+static int msm_iommu_reg_dump_to_regs(
+ struct msm_iommu_context_reg ctx_regs[],
+ struct msm_scm_fault_regs_dump *dump, struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ int i, j, ret = 0;
+ const uint32_t nvals = (dump->dump_size / sizeof(uint32_t));
+ uint32_t *it = (uint32_t *) dump->dump_data;
+ const uint32_t * const end = ((uint32_t *) dump) + nvals;
+ phys_addr_t phys_base = drvdata->phys_base;
+ int ctx = ctx_drvdata->num;
+
+ if (!nvals)
+ return -EINVAL;
+
+ for (i = 1; it < end; it += 2, i += 2) {
+ unsigned int reg_offset;
+ uint32_t addr = *it;
+ uint32_t val = *(it + 1);
+ struct msm_iommu_context_reg *reg = NULL;
+ if (addr < phys_base) {
+ pr_err("Bogus-looking register (0x%x) for Iommu with base at %pa. Skipping.\n",
+ addr, &phys_base);
+ continue;
+ }
+ reg_offset = addr - phys_base;
+
+ for (j = 0; j < MAX_DUMP_REGS; ++j) {
+ struct dump_regs_tbl_entry dump_reg = dump_regs_tbl[j];
+ void *test_reg;
+ unsigned int test_offset;
+ switch (dump_reg.dump_reg_type) {
+ case DRT_CTX_REG:
+ test_reg = CTX_REG(dump_reg.reg_offset,
+ drvdata->cb_base, ctx);
+ break;
+ case DRT_GLOBAL_REG:
+ test_reg = GLB_REG(
+ dump_reg.reg_offset, drvdata->glb_base);
+ break;
+ case DRT_GLOBAL_REG_N:
+ test_reg = GLB_REG_N(
+ drvdata->glb_base, ctx,
+ dump_reg.reg_offset);
+ break;
+ default:
+ pr_err("Unknown dump_reg_type: 0x%x\n",
+ dump_reg.dump_reg_type);
+ BUG();
+ break;
+ }
+ test_offset = test_reg - drvdata->glb_base;
+ if (test_offset == reg_offset) {
+ reg = &ctx_regs[j];
+ break;
+ }
+ }
+
+ if (reg == NULL) {
+ pr_debug("Unknown register in secure CB dump: %x\n",
+ addr);
+ continue;
+ }
+
+ if (reg->valid) {
+ WARN(1, "Invalid (repeated?) register in CB dump: %x\n",
+ addr);
+ continue;
+ }
+
+ reg->val = val;
+ reg->valid = true;
+ }
+
+ if (i != nvals) {
+ pr_err("Invalid dump! %d != %d\n", i, nvals);
+ ret = 1;
+ }
+
+ for (i = 0; i < MAX_DUMP_REGS; ++i) {
+ if (!ctx_regs[i].valid) {
+ if (dump_regs_tbl[i].must_be_present) {
+ pr_err("Register missing from dump for ctx %d: %s, 0x%x\n",
+ ctx,
+ dump_regs_tbl[i].name,
+ dump_regs_tbl[i].reg_offset);
+ ret = 1;
+ }
+ ctx_regs[i].val = 0xd00dfeed;
+ }
+ }
+
+ return ret;
+}
+
+irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct msm_iommu_drvdata *drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_scm_fault_regs_dump *regs;
+ int tmp, ret = IRQ_HANDLED;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ BUG_ON(!pdev);
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+ BUG_ON(!drvdata);
+
+ ctx_drvdata = dev_get_drvdata(&pdev->dev);
+ BUG_ON(!ctx_drvdata);
+
+ regs = kzalloc(sizeof(*regs), GFP_KERNEL);
+ if (!regs) {
+ pr_err("%s: Couldn't allocate memory\n", __func__);
+ goto lock_release;
+ }
+
+ if (!drvdata->ctx_attach_count) {
+ pr_err("Unexpected IOMMU page fault from secure context bank!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("Power is OFF. Unable to read page fault information\n");
+ /*
+ * We cannot determine which context bank caused the issue so
+ * we just return handled here to ensure IRQ handler code is
+ * happy
+ */
+ goto free_regs;
+ }
+
+ iommu_access_ops->iommu_clk_on(drvdata);
+ tmp = msm_iommu_dump_fault_regs(drvdata->sec_id,
+ ctx_drvdata->num, regs);
+ iommu_access_ops->iommu_clk_off(drvdata);
+
+ if (tmp) {
+ pr_err("%s: Couldn't dump fault registers (%d) %s, ctx: %d\n",
+ __func__, tmp, drvdata->name, ctx_drvdata->num);
+ goto free_regs;
+ } else {
+ struct msm_iommu_context_reg ctx_regs[MAX_DUMP_REGS];
+ memset(ctx_regs, 0, sizeof(ctx_regs));
+ tmp = msm_iommu_reg_dump_to_regs(
+ ctx_regs, regs, drvdata, ctx_drvdata);
+ if (tmp < 0) {
+ ret = IRQ_NONE;
+ pr_err("Incorrect response from secure environment\n");
+ goto free_regs;
+ }
+
+ if (ctx_regs[DUMP_REG_FSR].val) {
+ if (tmp)
+ pr_err("Incomplete fault register dump. Printout will be incomplete.\n");
+ if (!ctx_drvdata->attached_domain) {
+ pr_err("Bad domain in interrupt handler\n");
+ tmp = -ENOSYS;
+ } else {
+ tmp = report_iommu_fault(
+ ctx_drvdata->attached_domain,
+ &ctx_drvdata->pdev->dev,
+ COMBINE_DUMP_REG(
+ ctx_regs[DUMP_REG_FAR1].val,
+ ctx_regs[DUMP_REG_FAR0].val),
+ 0);
+ }
+
+ /* if the fault wasn't handled by someone else: */
+ if (tmp == -ENOSYS) {
+ pr_err("Unexpected IOMMU page fault from secure context bank!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("context = %s (%d)\n", ctx_drvdata->name,
+ ctx_drvdata->num);
+ pr_err("Interesting registers:\n");
+ print_ctx_regs(ctx_regs);
+ }
+ } else {
+ ret = IRQ_NONE;
+ }
+ }
+free_regs:
+ kfree(regs);
+lock_release:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+#define SCM_SVC_MP 0xc
+
+static int msm_iommu_sec_ptbl_init(void)
+{
+ struct device_node *np;
+ int psize[2] = {0, 0};
+ unsigned int spare = 0;
+ int ret;
+ int version;
+ /* Use a dummy device for dma_alloc_attrs allocation */
+ struct device dev = { 0 };
+ void *cpu_addr;
+ dma_addr_t paddr;
+ DEFINE_DMA_ATTRS(attrs);
+
+ for_each_matching_node(np, msm_smmu_list)
+ if (of_find_property(np, "qcom,iommu-secure-id", NULL) &&
+ of_device_is_available(np))
+ break;
+
+ if (!np)
+ return 0;
+
+ of_node_put(np);
+
+ version = qcom_scm_get_feat_version(SCM_SVC_MP);
+
+ if (version >= MAKE_VERSION(1, 1, 1)) {
+ ret = qcom_scm_iommu_set_cp_pool_size(MAXIMUM_VIRT_SIZE, 0);
+ if (ret) {
+ pr_err("scm call IOMMU_SET_CP_POOL_SIZE failed\n");
+ goto fail;
+ }
+ }
+
+ ret = qcom_scm_iommu_secure_ptbl_size(spare, psize);
+ if (ret) {
+ pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
+ goto fail;
+ }
+
+ pr_err("iommu sec: psize[0]: %d, psize[1]: %d\n", psize[0], psize[1]);
+
+ if (psize[1]) {
+ pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
+ goto fail;
+ }
+
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+
+ cpu_addr = dma_alloc_attrs(&dev, psize[0], &paddr, GFP_KERNEL, &attrs);
+ if (!cpu_addr) {
+ pr_err("%s: Failed to allocate %d bytes for PTBL\n",
+ __func__, psize[0]);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize[0], spare);
+
+ if (ret) {
+ pr_err("scm call IOMMU_SECURE_PTBL_INIT failed (%d)\n", ret);
+ goto fail_mem;
+ }
+
+ return 0;
+
+fail_mem:
+ dma_free_attrs(&dev, psize[0], cpu_addr, paddr, &attrs);
+fail:
+ return ret;
+}
+
+int msm_iommu_sec_program_iommu(struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ if (drvdata->smmu_local_base) {
+ writel_relaxed(0xFFFFFFFF,
+ drvdata->smmu_local_base + SMMU_INTR_SEL_NS);
+ mb();
+ }
+
+ return qcom_scm_restore_sec_cfg(drvdata->sec_id, ctx_drvdata->num);
+}
+
+static int msm_iommu_sec_map2(struct msm_scm_map2_req *map)
+{
+ u32 flags;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+ flags = IOMMU_TLBINVAL_FLAG;
+#else
+ flags = 0;
+#endif
+
+ return qcom_scm_iommu_secure_map(map->plist.list,
+ map->plist.list_size,
+ map->plist.size,
+ map->info.id,
+ map->info.ctx_id,
+ map->info.va,
+ map->info.size,
+ flags);
+}
+
+static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, phys_addr_t pa, size_t len)
+{
+ struct msm_scm_map2_req map;
+ void *flush_va, *flush_va_end;
+ int ret = 0;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M) ||
+ !IS_ALIGNED(pa, SZ_1M))
+ return -EINVAL;
+ map.plist.list = virt_to_phys(&pa);
+ map.plist.list_size = 1;
+ map.plist.size = len;
+ map.info.id = iommu_drvdata->sec_id;
+ map.info.ctx_id = ctx_drvdata->num;
+ map.info.va = va;
+ map.info.size = len;
+
+ flush_va = &pa;
+ flush_va_end = (void *)
+ (((unsigned long) flush_va) + sizeof(phys_addr_t));
+
+ /*
+ * Ensure that the buffer is in RAM by the time it gets to TZ
+ */
+ dmac_clean_range(flush_va, flush_va_end);
+
+ ret = msm_iommu_sec_map2(&map);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static unsigned int get_phys_addr(struct scatterlist *sg)
+{
+ /*
+ * Try sg_dma_address first so that we can
+ * map carveout regions that do not have a
+ * struct page associated with them.
+ */
+ unsigned int pa = sg_dma_address(sg);
+ if (pa == 0)
+ pa = sg_phys(sg);
+ return pa;
+}
+
+static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, struct scatterlist *sg, size_t len)
+{
+ struct scatterlist *sgiter;
+ struct msm_scm_map2_req map;
+ unsigned int *pa_list = 0;
+ unsigned int pa, cnt;
+ void *flush_va, *flush_va_end;
+ unsigned int offset = 0, chunk_offset = 0;
+ int ret;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ map.info.id = iommu_drvdata->sec_id;
+ map.info.ctx_id = ctx_drvdata->num;
+ map.info.va = va;
+ map.info.size = len;
+
+ if (sg->length == len) {
+ /*
+ * physical address for secure mapping needs
+ * to be 1MB aligned
+ */
+ pa = get_phys_addr(sg);
+ if (!IS_ALIGNED(pa, SZ_1M))
+ return -EINVAL;
+ map.plist.list = virt_to_phys(&pa);
+ map.plist.list_size = 1;
+ map.plist.size = len;
+ flush_va = &pa;
+ } else {
+ sgiter = sg;
+ if (!IS_ALIGNED(sgiter->length, SZ_1M))
+ return -EINVAL;
+ cnt = sg->length / SZ_1M;
+ while ((sgiter = sg_next(sgiter))) {
+ if (!IS_ALIGNED(sgiter->length, SZ_1M))
+ return -EINVAL;
+ cnt += sgiter->length / SZ_1M;
+ }
+
+ pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL);
+ if (!pa_list)
+ return -ENOMEM;
+
+ sgiter = sg;
+ cnt = 0;
+ pa = get_phys_addr(sgiter);
+ if (!IS_ALIGNED(pa, SZ_1M)) {
+ kfree(pa_list);
+ return -EINVAL;
+ }
+ while (offset < len) {
+ pa += chunk_offset;
+ pa_list[cnt] = pa;
+ chunk_offset += SZ_1M;
+ offset += SZ_1M;
+ cnt++;
+
+ if (chunk_offset >= sgiter->length && offset < len) {
+ chunk_offset = 0;
+ sgiter = sg_next(sgiter);
+ pa = get_phys_addr(sgiter);
+ }
+ }
+
+ map.plist.list = virt_to_phys(pa_list);
+ map.plist.list_size = cnt;
+ map.plist.size = SZ_1M;
+ flush_va = pa_list;
+ }
+
+ /*
+ * Ensure that the buffer is in RAM by the time it gets to TZ
+ */
+ flush_va_end = (void *) (((unsigned long) flush_va) +
+ (map.plist.list_size * sizeof(*pa_list)));
+ dmac_clean_range(flush_va, flush_va_end);
+
+ ret = msm_iommu_sec_map2(&map);
+ kfree(pa_list);
+
+ return ret;
+}
+
+static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, size_t len)
+{
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ return qcom_scm_iommu_secure_unmap(iommu_drvdata->sec_id,
+ ctx_drvdata->num,
+ va,
+ len,
+ IOMMU_TLBINVAL_FLAG);
+}
+
+static struct iommu_domain * msm_iommu_domain_alloc(unsigned type)
+{
+ struct msm_iommu_priv *priv;
+ struct iommu_domain *domain;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ INIT_LIST_HEAD(&priv->list_attached);
+ domain = &priv->domain;
+ return domain;
+}
+
+static void msm_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+ priv = to_msm_priv(domain);
+
+ kfree(priv);
+ iommu_access_ops->iommu_lock_release(0);
+}
+
+/*
+ * msm_iommu_attach_dev() - attach a context-bank device to a domain.
+ *
+ * Under the global IOMMU lock this:
+ *  1. rejects a context that is already attached anywhere (-EBUSY),
+ *  2. powers on the IOMMU,
+ *  3. on the first attach for this IOMMU instance only
+ *     (ctx_attach_count == 0), turns clocks on, programs the secure
+ *     IOMMU through TZ and applies the HLOS-owned BFB settings,
+ *  4. links the context into the domain's attached list and bumps
+ *     the attach count.
+ *
+ * Returns 0 on success or a negative errno; on any failure the lock is
+ * dropped and power/clock state is rolled back for the steps taken.
+ */
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	struct msm_iommu_priv *priv;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	struct msm_iommu_ctx_drvdata *tmp_drvdata;
+	int ret = 0;
+
+	iommu_access_ops->iommu_lock_acquire(0);
+
+	priv = to_msm_priv(domain);
+	if (!priv || !dev) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* The IOMMU drvdata hangs off the context device's parent. */
+	iommu_drvdata = dev_get_drvdata(dev->parent);
+	ctx_drvdata = dev_get_drvdata(dev);
+	if (!iommu_drvdata || !ctx_drvdata) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Context already attached to some domain. */
+	if (!list_empty(&ctx_drvdata->attached_elm)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	/* Context already attached to *this* domain. */
+	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
+		if (tmp_drvdata == ctx_drvdata) {
+			ret = -EBUSY;
+			goto fail;
+		}
+
+	ret = iommu_access_ops->iommu_power_on(iommu_drvdata);
+	if (ret)
+		goto fail;
+
+	/* We can only do this once */
+	if (!iommu_drvdata->ctx_attach_count) {
+		ret = iommu_access_ops->iommu_clk_on(iommu_drvdata);
+		if (ret) {
+			iommu_access_ops->iommu_power_off(iommu_drvdata);
+			goto fail;
+		}
+
+		ret = msm_iommu_sec_program_iommu(iommu_drvdata,
+						  ctx_drvdata);
+
+		/* bfb settings are always programmed by HLOS */
+		program_iommu_bfb_settings(iommu_drvdata->base,
+					   iommu_drvdata->bfb_settings);
+
+		/* Clocks off before checking ret so they are not leaked. */
+		iommu_access_ops->iommu_clk_off(iommu_drvdata);
+		if (ret) {
+			iommu_access_ops->iommu_power_off(iommu_drvdata);
+			goto fail;
+		}
+	}
+
+	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+	ctx_drvdata->attached_domain = domain;
+	++iommu_drvdata->ctx_attach_count;
+
+	iommu_access_ops->iommu_lock_release(0);
+
+	/* Notify outside the lock that the IOMMU device is now in use. */
+	msm_iommu_attached(dev->parent);
+	return ret;
+fail:
+	iommu_access_ops->iommu_lock_release(0);
+	return ret;
+}
+
+/*
+ * msm_iommu_detach_dev() - detach a context-bank device from its domain.
+ *
+ * Mirror of msm_iommu_attach_dev(): notifies detachment (outside the
+ * lock), unlinks the context, clears its attached_domain, powers the
+ * IOMMU off and decrements ctx_attach_count.  Silently returns if the
+ * drvdata pointers are missing or the context was never attached.
+ */
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+				 struct device *dev)
+{
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+
+	if (!dev)
+		return;
+
+	/* Notification is deliberately issued before taking the lock. */
+	msm_iommu_detached(dev->parent);
+
+	iommu_access_ops->iommu_lock_acquire(0);
+
+	iommu_drvdata = dev_get_drvdata(dev->parent);
+	ctx_drvdata = dev_get_drvdata(dev);
+	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
+		goto fail;
+
+	list_del_init(&ctx_drvdata->attached_elm);
+	ctx_drvdata->attached_domain = NULL;
+
+	iommu_access_ops->iommu_power_off(iommu_drvdata);
+	/* An attach/detach imbalance is a driver bug — catch it loudly. */
+	BUG_ON(iommu_drvdata->ctx_attach_count == 0);
+	--iommu_drvdata->ctx_attach_count;
+fail:
+	iommu_access_ops->iommu_lock_release(0);
+}
+
+/*
+ * get_drvdata() - find the iommu/context drvdata attached to @domain.
+ *
+ * Walks the domain's attached-context list looking for a context whose
+ * attached_domain matches.  Fills @iommu_drvdata and @ctx_drvdata and
+ * returns 0, or returns -EINVAL when no match is found.
+ * Caller must hold the global IOMMU lock.
+ *
+ * NOTE(review): if priv->list_attached is empty, list_for_each_entry()
+ * leaves @ctx pointing at the container of the list head, so the
+ * post-loop ctx->attached_domain read is an out-of-object access.
+ * A list_empty() guard (or an iterator-found flag) would make the
+ * empty case safe — confirm no caller can reach here unattached.
+ */
+static int get_drvdata(struct iommu_domain *domain,
+			struct msm_iommu_drvdata **iommu_drvdata,
+			struct msm_iommu_ctx_drvdata **ctx_drvdata)
+{
+	struct msm_iommu_priv *priv = to_msm_priv(domain);
+	struct msm_iommu_ctx_drvdata *ctx;
+
+	list_for_each_entry(ctx, &priv->list_attached, attached_elm) {
+		if (ctx->attached_domain == domain)
+			break;
+	}
+
+	if (ctx->attached_domain != domain)
+		return -EINVAL;
+
+	*ctx_drvdata = ctx;
+	*iommu_drvdata = dev_get_drvdata(ctx->pdev->dev.parent);
+	return 0;
+}
+
+/*
+ * msm_iommu_map() - iommu_ops .map callback.
+ *
+ * Resolves the drvdata for @domain, then performs a secure page-table
+ * map through TZ with the clocks held on around the SCM call.
+ * @prot is accepted for API compatibility but not used by the secure
+ * mapping path.
+ *
+ * NOTE(review): the iommu_clk_on() return value is ignored here,
+ * unlike in msm_iommu_attach_dev() — confirm this is intentional.
+ */
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+			 phys_addr_t pa, size_t len, int prot)
+{
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret = 0;
+
+	iommu_access_ops->iommu_lock_acquire(0);
+
+	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+	if (ret)
+		goto fail;
+
+	iommu_access_ops->iommu_clk_on(iommu_drvdata);
+	ret = msm_iommu_sec_ptbl_map(iommu_drvdata, ctx_drvdata,
+				     va, pa, len);
+	iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+	iommu_access_ops->iommu_lock_release(0);
+	return ret;
+}
+
+/*
+ * msm_iommu_unmap() - iommu_ops .unmap callback.
+ *
+ * Performs a secure page-table unmap via TZ with the clocks on.
+ * Per the IOMMU API contract this returns the number of bytes
+ * unmapped: @len on success, 0 on any failure (lookup, alignment,
+ * or SCM error).  The -ENODEV initializer is overwritten by the
+ * get_drvdata() call before first use.
+ */
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+			      size_t len)
+{
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret = -ENODEV;
+
+	iommu_access_ops->iommu_lock_acquire(0);
+
+	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+	if (ret)
+		goto fail;
+
+	iommu_access_ops->iommu_clk_on(iommu_drvdata);
+	ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata,
+					va, len);
+	iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+	iommu_access_ops->iommu_lock_release(0);
+
+	/* the IOMMU API requires us to return how many bytes were unmapped */
+	len = ret ? 0 : len;
+	return len;
+}
+
+/*
+ * msm_iommu_map_range() - map a scatterlist into the secure page table.
+ *
+ * Same lock/clock bracketing as msm_iommu_map(), delegating the actual
+ * work to msm_iommu_sec_ptbl_map_range().  @prot is unused by the
+ * secure path.  Currently not wired into msm_iommu_ops (the .map_range
+ * entry below is commented out in favour of default_iommu_map_sg).
+ */
+static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
+			       struct scatterlist *sg, unsigned int len,
+			       int prot)
+{
+	int ret;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+
+	iommu_access_ops->iommu_lock_acquire(0);
+
+	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+	if (ret)
+		goto fail;
+	iommu_access_ops->iommu_clk_on(iommu_drvdata);
+	ret = msm_iommu_sec_ptbl_map_range(iommu_drvdata, ctx_drvdata,
+					   va, sg, len);
+	iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+	iommu_access_ops->iommu_lock_release(0);
+	return ret;
+}
+
+
+/*
+ * msm_iommu_unmap_range() - unmap a 1MB-aligned range from the secure
+ * page table.
+ *
+ * Alignment is pre-checked here even though msm_iommu_sec_ptbl_unmap()
+ * repeats the check, so misaligned requests fail before taking the
+ * lock.  Currently not wired into msm_iommu_ops (see the commented-out
+ * .unmap_range entry below).
+ * NOTE(review): "ret ? ret : 0" is equivalent to plain "ret".
+ */
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+				 unsigned int len)
+{
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret = -EINVAL;
+
+	if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+		return -EINVAL;
+
+	iommu_access_ops->iommu_lock_acquire(0);
+
+	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+	if (ret)
+		goto fail;
+
+	iommu_access_ops->iommu_clk_on(iommu_drvdata);
+	ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, va, len);
+	iommu_access_ops->iommu_clk_off(iommu_drvdata);
+
+fail:
+	iommu_access_ops->iommu_lock_release(0);
+	return ret ? ret : 0;
+}
+
+/*
+ * msm_iommu_iova_to_phys() - iommu_ops .iova_to_phys callback.
+ *
+ * Stub: the secure page table lives in TZ and cannot be walked by
+ * HLOS, so translation is not supported and 0 is always returned.
+ */
+static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
+					  phys_addr_t va)
+{
+	return 0;
+}
+
+/*
+ * msm_iommu_check_scm_call_avail() - probe once whether the TZ secure
+ * IOMMU-config SCM service exists, caching the result in @is_secure.
+ */
+void msm_iommu_check_scm_call_avail(void)
+{
+	is_secure = qcom_scm_is_call_available(SCM_SVC_MP, IOMMU_SECURE_CFG);
+}
+
+/*
+ * msm_iommu_get_scm_call_avail() - return the cached result of
+ * msm_iommu_check_scm_call_avail() (non-zero if the SCM call exists).
+ */
+int msm_iommu_get_scm_call_avail(void)
+{
+	return is_secure;
+}
+
+/*
+ * IOMMU-core operations for the secure MSM IOMMU.  Scatterlist mapping
+ * uses the generic default_iommu_map_sg helper; the driver-local
+ * map_range/unmap_range implementations are kept but not wired in
+ * (hence the commented-out entries).  Mapping granularity is limited
+ * to the sizes in MSM_IOMMU_PGSIZES.
+ */
+static struct iommu_ops msm_iommu_ops = {
+	.domain_alloc = msm_iommu_domain_alloc,
+	.domain_free = msm_iommu_domain_free,
+	.attach_dev = msm_iommu_attach_dev,
+	.detach_dev = msm_iommu_detach_dev,
+	.map = msm_iommu_map,
+	.unmap = msm_iommu_unmap,
+/*	.map_range = msm_iommu_map_range,*/
+	.map_sg = default_iommu_map_sg,
+/*	.unmap_range = msm_iommu_unmap_range,*/
+	.iova_to_phys = msm_iommu_iova_to_phys,
+	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
+};
+
+/*
+ * msm_iommu_sec_init() - subsystem init: register the secure IOMMU bus,
+ * install msm_iommu_ops on it, and set up the secure page table.
+ *
+ * bus_set_iommu() failure unregisters the bus again.
+ * NOTE(review): if msm_iommu_sec_ptbl_init() fails, the bus stays
+ * registered with the ops installed — confirm whether a rollback
+ * (bus_unregister) is wanted on that path.
+ */
+static int __init msm_iommu_sec_init(void)
+{
+	int ret;
+
+	ret = bus_register(&msm_iommu_sec_bus_type);
+	if (ret)
+		return ret;
+
+	ret = bus_set_iommu(&msm_iommu_sec_bus_type, &msm_iommu_ops);
+	if (ret) {
+		bus_unregister(&msm_iommu_sec_bus_type);
+		return ret;
+	}
+
+	ret = msm_iommu_sec_ptbl_init();
+
+	return ret;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU Secure Driver");
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 384574c3987c..dbfab80f4d36 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -924,8 +924,8 @@ EXPORT_SYMBOL(of_io_request_and_map);
* CPU addr (phys_addr_t) : pna cells
* size : nsize cells
*
- * It returns -ENODEV if "dma-ranges" property was not found
- * for this device in DT.
+ * Return 0 on success, -ENODEV if the "dma-ranges" property was not found for
+ * this device in DT, or -EINVAL if the CPU address or size is invalid.
*/
int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
{
@@ -986,6 +986,22 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
*dma_addr = dmaaddr;
*size = of_read_number(ranges + naddr + pna, nsize);
+ /*
+ * DT nodes sometimes incorrectly set the size as a mask. Work around
+ * those incorrect DT by computing the size as mask + 1.
+ */
+ if (*size & 1) {
+ pr_warn("%s: size 0x%llx for dma-range in node(%s) set as mask\n",
+ __func__, *size, np->full_name);
+ *size = *size + 1;
+ }
+
+ if (!*size) {
+ pr_err("%s: invalid size zero for dma-range in node(%s)\n",
+ __func__, np->full_name);
+ ret = -EINVAL;
+ goto out;
+ }
pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
*dma_addr, *paddr, *size);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 8b91ea241b10..c4a2869c2cb0 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -70,7 +70,7 @@ int of_device_add(struct platform_device *ofdev)
}
/**
- * of_dma_configure - Setup DMA configuration
+ * of_dma_configure - Setup DMA masks and offset
* @dev: Device to apply DMA configuration
* @np: Pointer to OF node having DMA configuration
*
@@ -81,13 +81,11 @@ int of_device_add(struct platform_device *ofdev)
* can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
* to fix up DMA configuration.
*/
-void of_dma_configure(struct device *dev, struct device_node *np)
+void of_dma_configure_masks(struct device *dev, struct device_node *np)
{
- u64 dma_addr, paddr, size;
- int ret;
- bool coherent;
+ u64 dma_addr, paddr, size, range_mask;
unsigned long offset;
- struct iommu_ops *iommu;
+ int ret;
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
@@ -105,25 +103,11 @@ void of_dma_configure(struct device *dev, struct device_node *np)
ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
if (ret < 0) {
- dma_addr = offset = 0;
- size = dev->coherent_dma_mask + 1;
+ range_mask = dev->coherent_dma_mask + 1;
+ offset = 0;
} else {
+ range_mask = DMA_BIT_MASK(ilog2(dma_addr + size));
offset = PFN_DOWN(paddr - dma_addr);
-
- /*
- * Add a work around to treat the size as mask + 1 in case
- * it is defined in DT as a mask.
- */
- if (size & 1) {
- dev_warn(dev, "Invalid size 0x%llx for dma-range\n",
- size);
- size = size + 1;
- }
-
- if (!size) {
- dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
- return;
- }
dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
}
@@ -133,22 +117,59 @@ void of_dma_configure(struct device *dev, struct device_node *np)
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
- dev->coherent_dma_mask = min(dev->coherent_dma_mask,
- DMA_BIT_MASK(ilog2(dma_addr + size)));
- *dev->dma_mask = min((*dev->dma_mask),
- DMA_BIT_MASK(ilog2(dma_addr + size)));
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, range_mask);
+ *dev->dma_mask = min((*dev->dma_mask), range_mask);
+}
+EXPORT_SYMBOL_GPL(of_dma_configure_masks);
+
+/**
+ * of_dma_configure_ops - Setup DMA operations
+ * @dev: Device to apply DMA configuration
+ * @np: Pointer to OF node having DMA configuration
+ *
+ * Try to get devices's DMA configuration from DT and update it
+ * accordingly.
+ */
+int of_dma_configure_ops(struct device *dev, struct device_node *np)
+{
+ u64 dma_addr, paddr, size;
+ struct iommu_ops *iommu;
+ bool coherent;
+ int ret;
+
+ ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
+ if (ret < 0) {
+ dma_addr = 0;
+ size = dev->coherent_dma_mask + 1;
+ }
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
coherent ? " " : " not ");
iommu = of_iommu_configure(dev, np);
+ if (IS_ERR(iommu))
+ return PTR_ERR(iommu);
dev_dbg(dev, "device is%sbehind an iommu\n",
iommu ? " " : " not ");
arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_dma_configure_ops);
+
+/**
+ * of_dma_deconfigure - Clean up DMA configuration
+ * @dev: Device for which to clean up DMA configuration
+ *
+ * Clean up all configuration performed by of_dma_configure_ops() and free all
+ * resources that have been allocated.
+ */
+void of_dma_deconfigure(struct device *dev)
+{
+ arch_teardown_dma_ops(dev);
}
-EXPORT_SYMBOL_GPL(of_dma_configure);
int of_device_register(struct platform_device *pdev)
{
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 5751dc5b6494..c65803da2067 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -132,7 +132,8 @@ void of_pci_dma_configure(struct pci_dev *pci_dev)
if (!bridge->parent)
return;
- of_dma_configure(dev, bridge->parent->of_node);
+ of_dma_configure_masks(dev, bridge->parent->of_node);
+ of_dma_configure_ops(dev, bridge->parent->of_node);
pci_put_host_bridge_device(bridge);
}
EXPORT_SYMBOL_GPL(of_pci_dma_configure);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 1001efaedcb8..338b6744ff1e 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -150,11 +150,6 @@ struct platform_device *of_device_alloc(struct device_node *np,
}
EXPORT_SYMBOL(of_device_alloc);
-static void of_dma_deconfigure(struct device *dev)
-{
- arch_teardown_dma_ops(dev);
-}
-
/**
* of_platform_device_create_pdata - Alloc, initialize and register an of_device
* @np: pointer to node to create device for
@@ -183,11 +178,10 @@ static struct platform_device *of_platform_device_create_pdata(
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
- of_dma_configure(&dev->dev, dev->dev.of_node);
+ of_dma_configure_masks(&dev->dev, dev->dev.of_node);
of_msi_configure(&dev->dev, dev->dev.of_node);
if (of_device_add(dev) != 0) {
- of_dma_deconfigure(&dev->dev);
platform_device_put(dev);
goto err_clear_flag;
}
@@ -247,7 +241,8 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(&dev->dev);
- of_dma_configure(&dev->dev, dev->dev.of_node);
+ of_dma_configure_masks(&dev->dev, dev->dev.of_node);
+ of_dma_configure_ops(&dev->dev, dev->dev.of_node);
/* Allow the HW Peripheral ID to be overridden */
prop = of_get_property(node, "arm,primecell-periphid", NULL);
@@ -479,11 +474,12 @@ static int of_platform_device_destroy(struct device *dev, void *data)
if (dev->bus == &platform_bus_type)
platform_device_unregister(to_platform_device(dev));
#ifdef CONFIG_ARM_AMBA
- else if (dev->bus == &amba_bustype)
+ else if (dev->bus == &amba_bustype) {
amba_device_unregister(to_amba_device(dev));
+ of_dma_deconfigure(dev);
+ }
#endif
- of_dma_deconfigure(dev);
of_node_clear_flag(dev->of_node, OF_POPULATED);
of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
return 0;
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
new file mode 100644
index 000000000000..1815fa0ab136
--- /dev/null
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -0,0 +1,629 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_IDS_H
+#define __MSM_BUS_IDS_H
+
+/* Topology related enums */
+#define MSM_BUS_FAB_DEFAULT 0
+#define MSM_BUS_FAB_APPSS 0
+#define MSM_BUS_FAB_SYSTEM 1024
+#define MSM_BUS_FAB_MMSS 2048
+#define MSM_BUS_FAB_SYSTEM_FPB 3072
+#define MSM_BUS_FAB_CPSS_FPB 4096
+
+#define MSM_BUS_FAB_BIMC 0
+#define MSM_BUS_FAB_SYS_NOC 1024
+#define MSM_BUS_FAB_MMSS_NOC 2048
+#define MSM_BUS_FAB_OCMEM_NOC 3072
+#define MSM_BUS_FAB_PERIPH_NOC 4096
+#define MSM_BUS_FAB_CONFIG_NOC 5120
+#define MSM_BUS_FAB_OCMEM_VNOC 6144
+
+#define MSM_BUS_MASTER_FIRST 1
+#define MSM_BUS_MASTER_AMPSS_M0 1
+#define MSM_BUS_MASTER_AMPSS_M1 2
+#define MSM_BUS_APPSS_MASTER_FAB_MMSS 3
+#define MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
+#define MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
+#define MSM_BUS_MASTER_SPS 6
+#define MSM_BUS_MASTER_ADM_PORT0 7
+#define MSM_BUS_MASTER_ADM_PORT1 8
+#define MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
+#define MSM_BUS_MASTER_ADM1_PORT1 10
+#define MSM_BUS_MASTER_LPASS_PROC 11
+#define MSM_BUS_MASTER_MSS_PROCI 12
+#define MSM_BUS_MASTER_MSS_PROCD 13
+#define MSM_BUS_MASTER_MSS_MDM_PORT0 14
+#define MSM_BUS_MASTER_LPASS 15
+#define MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
+#define MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
+#define MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
+#define MSM_BUS_MASTER_ADM1_CI 19
+#define MSM_BUS_MASTER_ADM0_CI 20
+#define MSM_BUS_MASTER_MSS_MDM_PORT1 21
+#define MSM_BUS_MASTER_MDP_PORT0 22
+#define MSM_BUS_MASTER_MDP_PORT1 23
+#define MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
+#define MSM_BUS_MASTER_ROTATOR 25
+#define MSM_BUS_MASTER_GRAPHICS_3D 26
+#define MSM_BUS_MASTER_JPEG_DEC 27
+#define MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
+#define MSM_BUS_MASTER_VFE 29
+#define MSM_BUS_MASTER_VPE 30
+#define MSM_BUS_MASTER_JPEG_ENC 31
+#define MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
+#define MSM_BUS_MMSS_MASTER_APPS_FAB 33
+#define MSM_BUS_MASTER_HD_CODEC_PORT0 34
+#define MSM_BUS_MASTER_HD_CODEC_PORT1 35
+#define MSM_BUS_MASTER_SPDM 36
+#define MSM_BUS_MASTER_RPM 37
+#define MSM_BUS_MASTER_MSS 38
+#define MSM_BUS_MASTER_RIVA 39
+#define MSM_BUS_SYSTEM_MASTER_UNUSED_6 40
+#define MSM_BUS_MASTER_MSS_SW_PROC 41
+#define MSM_BUS_MASTER_MSS_FW_PROC 42
+#define MSM_BUS_MMSS_MASTER_UNUSED_2 43
+#define MSM_BUS_MASTER_GSS_NAV 44
+#define MSM_BUS_MASTER_PCIE 45
+#define MSM_BUS_MASTER_SATA 46
+#define MSM_BUS_MASTER_CRYPTO 47
+#define MSM_BUS_MASTER_VIDEO_CAP 48
+#define MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
+#define MSM_BUS_MASTER_VIDEO_ENC 50
+#define MSM_BUS_MASTER_VIDEO_DEC 51
+#define MSM_BUS_MASTER_LPASS_AHB 52
+#define MSM_BUS_MASTER_QDSS_BAM 53
+#define MSM_BUS_MASTER_SNOC_CFG 54
+#define MSM_BUS_MASTER_CRYPTO_CORE0 55
+#define MSM_BUS_MASTER_CRYPTO_CORE1 56
+#define MSM_BUS_MASTER_MSS_NAV 57
+#define MSM_BUS_MASTER_OCMEM_DMA 58
+#define MSM_BUS_MASTER_WCSS 59
+#define MSM_BUS_MASTER_QDSS_ETR 60
+#define MSM_BUS_MASTER_USB3 61
+#define MSM_BUS_MASTER_JPEG 62
+#define MSM_BUS_MASTER_VIDEO_P0 63
+#define MSM_BUS_MASTER_VIDEO_P1 64
+#define MSM_BUS_MASTER_MSS_PROC 65
+#define MSM_BUS_MASTER_JPEG_OCMEM 66
+#define MSM_BUS_MASTER_MDP_OCMEM 67
+#define MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
+#define MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
+#define MSM_BUS_MASTER_VFE_OCMEM 70
+#define MSM_BUS_MASTER_CNOC_ONOC_CFG 71
+#define MSM_BUS_MASTER_RPM_INST 72
+#define MSM_BUS_MASTER_RPM_DATA 73
+#define MSM_BUS_MASTER_RPM_SYS 74
+#define MSM_BUS_MASTER_DEHR 75
+#define MSM_BUS_MASTER_QDSS_DAP 76
+#define MSM_BUS_MASTER_TIC 77
+#define MSM_BUS_MASTER_SDCC_1 78
+#define MSM_BUS_MASTER_SDCC_3 79
+#define MSM_BUS_MASTER_SDCC_4 80
+#define MSM_BUS_MASTER_SDCC_2 81
+#define MSM_BUS_MASTER_TSIF 82
+#define MSM_BUS_MASTER_BAM_DMA 83
+#define MSM_BUS_MASTER_BLSP_2 84
+#define MSM_BUS_MASTER_USB_HSIC 85
+#define MSM_BUS_MASTER_BLSP_1 86
+#define MSM_BUS_MASTER_USB_HS 87
+#define MSM_BUS_MASTER_PNOC_CFG 88
+#define MSM_BUS_MASTER_V_OCMEM_GFX3D 89
+#define MSM_BUS_MASTER_IPA 90
+#define MSM_BUS_MASTER_QPIC 91
+#define MSM_BUS_MASTER_MDPE 92
+#define MSM_BUS_MASTER_USB_HS2 93
+#define MSM_BUS_MASTER_VPU 94
+#define MSM_BUS_MASTER_UFS 95
+#define MSM_BUS_MASTER_BCAST 96
+#define MSM_BUS_MASTER_CRYPTO_CORE2 97
+#define MSM_BUS_MASTER_EMAC 98
+#define MSM_BUS_MASTER_VPU_1 99
+#define MSM_BUS_MASTER_PCIE_1 100
+#define MSM_BUS_MASTER_USB3_1 101
+#define MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
+#define MSM_BUS_MASTER_CNOC_MNOC_CFG 103
+#define MSM_BUS_MASTER_TCU_0 104
+#define MSM_BUS_MASTER_TCU_1 105
+#define MSM_BUS_MASTER_CPP 106
+#define MSM_BUS_MASTER_LAST 107
+
+#define MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
+#define MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
+
+#define MSM_BUS_SNOC_MM_INT_0 10000
+#define MSM_BUS_SNOC_MM_INT_1 10001
+#define MSM_BUS_SNOC_MM_INT_2 10002
+#define MSM_BUS_SNOC_MM_INT_BIMC 10003
+#define MSM_BUS_SNOC_INT_0 10004
+#define MSM_BUS_SNOC_INT_1 10005
+#define MSM_BUS_SNOC_INT_BIMC 10006
+#define MSM_BUS_SNOC_BIMC_0_MAS 10007
+#define MSM_BUS_SNOC_BIMC_1_MAS 10008
+#define MSM_BUS_SNOC_QDSS_INT 10009
+#define MSM_BUS_PNOC_SNOC_MAS 10010
+#define MSM_BUS_PNOC_SNOC_SLV 10011
+#define MSM_BUS_PNOC_INT_0 10012
+#define MSM_BUS_PNOC_INT_1 10013
+#define MSM_BUS_PNOC_M_0 10014
+#define MSM_BUS_PNOC_M_1 10015
+#define MSM_BUS_BIMC_SNOC_MAS 10016
+#define MSM_BUS_BIMC_SNOC_SLV 10017
+#define MSM_BUS_PNOC_SLV_0 10018
+#define MSM_BUS_PNOC_SLV_1 10019
+#define MSM_BUS_PNOC_SLV_2 10020
+#define MSM_BUS_PNOC_SLV_3 10021
+#define MSM_BUS_PNOC_SLV_4 10022
+#define MSM_BUS_PNOC_SLV_8 10023
+#define MSM_BUS_PNOC_SLV_9 10024
+#define MSM_BUS_SNOC_BIMC_0_SLV 10025
+#define MSM_BUS_SNOC_BIMC_1_SLV 10026
+#define MSM_BUS_MNOC_BIMC_MAS 10027
+#define MSM_BUS_MNOC_BIMC_SLV 10028
+#define MSM_BUS_BIMC_MNOC_MAS 10029
+#define MSM_BUS_BIMC_MNOC_SLV 10030
+#define MSM_BUS_SNOC_BIMC_MAS 10031
+#define MSM_BUS_SNOC_BIMC_SLV 10032
+#define MSM_BUS_CNOC_SNOC_MAS 10033
+#define MSM_BUS_CNOC_SNOC_SLV 10034
+#define MSM_BUS_SNOC_CNOC_MAS 10035
+#define MSM_BUS_SNOC_CNOC_SLV 10036
+#define MSM_BUS_OVNOC_SNOC_MAS 10037
+#define MSM_BUS_OVNOC_SNOC_SLV 10038
+#define MSM_BUS_SNOC_OVNOC_MAS 10039
+#define MSM_BUS_SNOC_OVNOC_SLV 10040
+#define MSM_BUS_SNOC_PNOC_MAS 10041
+#define MSM_BUS_SNOC_PNOC_SLV 10042
+#define MSM_BUS_BIMC_INT_APPS_EBI 10043
+#define MSM_BUS_BIMC_INT_APPS_SNOC 10044
+#define MSM_BUS_SNOC_BIMC_2_MAS 10045
+#define MSM_BUS_SNOC_BIMC_2_SLV 10046
+#define MSM_BUS_INT_LAST 10047
+
+#define MSM_BUS_SLAVE_FIRST 512
+#define MSM_BUS_SLAVE_EBI_CH0 512
+#define MSM_BUS_SLAVE_EBI_CH1 513
+#define MSM_BUS_SLAVE_AMPSS_L2 514
+#define MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
+#define MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
+#define MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
+#define MSM_BUS_SLAVE_SPS 518
+#define MSM_BUS_SLAVE_SYSTEM_IMEM 519
+#define MSM_BUS_SLAVE_AMPSS 520
+#define MSM_BUS_SLAVE_MSS 521
+#define MSM_BUS_SLAVE_LPASS 522
+#define MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
+#define MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
+#define MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
+#define MSM_BUS_SLAVE_CORESIGHT 526
+#define MSM_BUS_SLAVE_RIVA 527
+#define MSM_BUS_SLAVE_SMI 528
+#define MSM_BUS_MMSS_SLAVE_FAB_APPS 529
+#define MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
+#define MSM_BUS_SLAVE_MM_IMEM 531
+#define MSM_BUS_SLAVE_CRYPTO 532
+#define MSM_BUS_SLAVE_SPDM 533
+#define MSM_BUS_SLAVE_RPM 534
+#define MSM_BUS_SLAVE_RPM_MSG_RAM 535
+#define MSM_BUS_SLAVE_MPM 536
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
+#define MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
+#define MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
+#define MSM_BUS_SLAVE_GSBI1_UART 542
+#define MSM_BUS_SLAVE_GSBI2_UART 543
+#define MSM_BUS_SLAVE_GSBI3_UART 544
+#define MSM_BUS_SLAVE_GSBI4_UART 545
+#define MSM_BUS_SLAVE_GSBI5_UART 546
+#define MSM_BUS_SLAVE_GSBI6_UART 547
+#define MSM_BUS_SLAVE_GSBI7_UART 548
+#define MSM_BUS_SLAVE_GSBI8_UART 549
+#define MSM_BUS_SLAVE_GSBI9_UART 550
+#define MSM_BUS_SLAVE_GSBI10_UART 551
+#define MSM_BUS_SLAVE_GSBI11_UART 552
+#define MSM_BUS_SLAVE_GSBI12_UART 553
+#define MSM_BUS_SLAVE_GSBI1_QUP 554
+#define MSM_BUS_SLAVE_GSBI2_QUP 555
+#define MSM_BUS_SLAVE_GSBI3_QUP 556
+#define MSM_BUS_SLAVE_GSBI4_QUP 557
+#define MSM_BUS_SLAVE_GSBI5_QUP 558
+#define MSM_BUS_SLAVE_GSBI6_QUP 559
+#define MSM_BUS_SLAVE_GSBI7_QUP 560
+#define MSM_BUS_SLAVE_GSBI8_QUP 561
+#define MSM_BUS_SLAVE_GSBI9_QUP 562
+#define MSM_BUS_SLAVE_GSBI10_QUP 563
+#define MSM_BUS_SLAVE_GSBI11_QUP 564
+#define MSM_BUS_SLAVE_GSBI12_QUP 565
+#define MSM_BUS_SLAVE_EBI2_NAND 566
+#define MSM_BUS_SLAVE_EBI2_CS0 567
+#define MSM_BUS_SLAVE_EBI2_CS1 568
+#define MSM_BUS_SLAVE_EBI2_CS2 569
+#define MSM_BUS_SLAVE_EBI2_CS3 570
+#define MSM_BUS_SLAVE_EBI2_CS4 571
+#define MSM_BUS_SLAVE_EBI2_CS5 572
+#define MSM_BUS_SLAVE_USB_FS1 573
+#define MSM_BUS_SLAVE_USB_FS2 574
+#define MSM_BUS_SLAVE_TSIF 575
+#define MSM_BUS_SLAVE_MSM_TSSC 576
+#define MSM_BUS_SLAVE_MSM_PDM 577
+#define MSM_BUS_SLAVE_MSM_DIMEM 578
+#define MSM_BUS_SLAVE_MSM_TCSR 579
+#define MSM_BUS_SLAVE_MSM_PRNG 580
+#define MSM_BUS_SLAVE_GSS 581
+#define MSM_BUS_SLAVE_SATA 582
+#define MSM_BUS_SLAVE_USB3 583
+#define MSM_BUS_SLAVE_WCSS 584
+#define MSM_BUS_SLAVE_OCIMEM 585
+#define MSM_BUS_SLAVE_SNOC_OCMEM 586
+#define MSM_BUS_SLAVE_SERVICE_SNOC 587
+#define MSM_BUS_SLAVE_QDSS_STM 588
+#define MSM_BUS_SLAVE_CAMERA_CFG 589
+#define MSM_BUS_SLAVE_DISPLAY_CFG 590
+#define MSM_BUS_SLAVE_OCMEM_CFG 591
+#define MSM_BUS_SLAVE_CPR_CFG 592
+#define MSM_BUS_SLAVE_CPR_XPU_CFG 593
+#define MSM_BUS_SLAVE_MISC_CFG 594
+#define MSM_BUS_SLAVE_MISC_XPU_CFG 595
+#define MSM_BUS_SLAVE_VENUS_CFG 596
+#define MSM_BUS_SLAVE_MISC_VENUS_CFG 597
+#define MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
+#define MSM_BUS_SLAVE_MMSS_CLK_CFG 599
+#define MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
+#define MSM_BUS_SLAVE_MNOC_MPU_CFG 601
+#define MSM_BUS_SLAVE_ONOC_MPU_CFG 602
+#define MSM_BUS_SLAVE_SERVICE_MNOC 603
+#define MSM_BUS_SLAVE_OCMEM 604
+#define MSM_BUS_SLAVE_SERVICE_ONOC 605
+#define MSM_BUS_SLAVE_SDCC_1 606
+#define MSM_BUS_SLAVE_SDCC_3 607
+#define MSM_BUS_SLAVE_SDCC_2 608
+#define MSM_BUS_SLAVE_SDCC_4 609
+#define MSM_BUS_SLAVE_BAM_DMA 610
+#define MSM_BUS_SLAVE_BLSP_2 611
+#define MSM_BUS_SLAVE_USB_HSIC 612
+#define MSM_BUS_SLAVE_BLSP_1 613
+#define MSM_BUS_SLAVE_USB_HS 614
+#define MSM_BUS_SLAVE_PDM 615
+#define MSM_BUS_SLAVE_PERIPH_APU_CFG 616
+#define MSM_BUS_SLAVE_PNOC_MPU_CFG 617
+#define MSM_BUS_SLAVE_PRNG 618
+#define MSM_BUS_SLAVE_SERVICE_PNOC 619
+#define MSM_BUS_SLAVE_CLK_CTL 620
+#define MSM_BUS_SLAVE_CNOC_MSS 621
+#define MSM_BUS_SLAVE_SECURITY 622
+#define MSM_BUS_SLAVE_TCSR 623
+#define MSM_BUS_SLAVE_TLMM 624
+#define MSM_BUS_SLAVE_CRYPTO_0_CFG 625
+#define MSM_BUS_SLAVE_CRYPTO_1_CFG 626
+#define MSM_BUS_SLAVE_IMEM_CFG 627
+#define MSM_BUS_SLAVE_MESSAGE_RAM 628
+#define MSM_BUS_SLAVE_BIMC_CFG 629
+#define MSM_BUS_SLAVE_BOOT_ROM 630
+#define MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
+#define MSM_BUS_SLAVE_PMIC_ARB 632
+#define MSM_BUS_SLAVE_SPDM_WRAPPER 633
+#define MSM_BUS_SLAVE_DEHR_CFG 634
+#define MSM_BUS_SLAVE_QDSS_CFG 635
+#define MSM_BUS_SLAVE_RBCPR_CFG 636
+#define MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
+#define MSM_BUS_SLAVE_SNOC_MPU_CFG 638
+#define MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
+#define MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
+#define MSM_BUS_SLAVE_PNOC_CFG 641
+#define MSM_BUS_SLAVE_SNOC_CFG 642
+#define MSM_BUS_SLAVE_EBI1_DLL_CFG 643
+#define MSM_BUS_SLAVE_PHY_APU_CFG 644
+#define MSM_BUS_SLAVE_EBI1_PHY_CFG 645
+#define MSM_BUS_SLAVE_SERVICE_CNOC 646
+#define MSM_BUS_SLAVE_IPS_CFG 647
+#define MSM_BUS_SLAVE_QPIC 648
+#define MSM_BUS_SLAVE_DSI_CFG 649
+#define MSM_BUS_SLAVE_UFS_CFG 650
+#define MSM_BUS_SLAVE_RBCPR_CX_CFG 651
+#define MSM_BUS_SLAVE_RBCPR_MX_CFG 652
+#define MSM_BUS_SLAVE_PCIE_CFG 653
+#define MSM_BUS_SLAVE_USB_PHYS_CFG 654
+#define MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
+#define MSM_BUS_SLAVE_AVSYNC_CFG 656
+#define MSM_BUS_SLAVE_CRYPTO_2_CFG 657
+#define MSM_BUS_SLAVE_VPU_CFG 658
+#define MSM_BUS_SLAVE_BCAST_CFG 659
+#define MSM_BUS_SLAVE_KLM_CFG 660
+#define MSM_BUS_SLAVE_GENI_IR_CFG 661
+#define MSM_BUS_SLAVE_OCMEM_GFX 662
+#define MSM_BUS_SLAVE_CATS_128 663
+#define MSM_BUS_SLAVE_OCMEM_64 664
+#define MSM_BUS_SLAVE_PCIE_0 665
+#define MSM_BUS_SLAVE_PCIE_1 666
+#define MSM_BUS_SLAVE_PCIE_0_CFG 667
+#define MSM_BUS_SLAVE_PCIE_1_CFG 668
+#define MSM_BUS_SLAVE_SRVC_MNOC 669
+#define MSM_BUS_SLAVE_USB_HS2 670
+#define MSM_BUS_SLAVE_LAST 671
+
+#define MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
+#define MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
+
+/*
+ * ID's used in RPM messages
+ */
+#define ICBID_MASTER_APPSS_PROC 0
+#define ICBID_MASTER_MSS_PROC 1
+#define ICBID_MASTER_MNOC_BIMC 2
+#define ICBID_MASTER_SNOC_BIMC 3
+#define ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
+#define ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
+#define ICBID_MASTER_CNOC_MNOC_CFG 5
+#define ICBID_MASTER_GFX3D 6
+#define ICBID_MASTER_JPEG 7
+#define ICBID_MASTER_MDP 8
+#define ICBID_MASTER_MDP0 ICBID_MASTER_MDP
+#define ICBID_MASTER_MDPS ICBID_MASTER_MDP
+#define ICBID_MASTER_VIDEO 9
+#define ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
+#define ICBID_MASTER_VIDEO_P1 10
+#define ICBID_MASTER_VFE 11
+#define ICBID_MASTER_CNOC_ONOC_CFG 12
+#define ICBID_MASTER_JPEG_OCMEM 13
+#define ICBID_MASTER_MDP_OCMEM 14
+#define ICBID_MASTER_VIDEO_P0_OCMEM 15
+#define ICBID_MASTER_VIDEO_P1_OCMEM 16
+#define ICBID_MASTER_VFE_OCMEM 17
+#define ICBID_MASTER_LPASS_AHB 18
+#define ICBID_MASTER_QDSS_BAM 19
+#define ICBID_MASTER_SNOC_CFG 20
+#define ICBID_MASTER_BIMC_SNOC 21
+#define ICBID_MASTER_CNOC_SNOC 22
+#define ICBID_MASTER_CRYPTO 23
+#define ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
+#define ICBID_MASTER_CRYPTO_CORE1 24
+#define ICBID_MASTER_LPASS_PROC 25
+#define ICBID_MASTER_MSS 26
+#define ICBID_MASTER_MSS_NAV 27
+#define ICBID_MASTER_OCMEM_DMA 28
+#define ICBID_MASTER_PNOC_SNOC 29
+#define ICBID_MASTER_WCSS 30
+#define ICBID_MASTER_QDSS_ETR 31
+#define ICBID_MASTER_USB3 32
+#define ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
+#define ICBID_MASTER_SDCC_1 33
+#define ICBID_MASTER_SDCC_3 34
+#define ICBID_MASTER_SDCC_2 35
+#define ICBID_MASTER_SDCC_4 36
+#define ICBID_MASTER_TSIF 37
+#define ICBID_MASTER_BAM_DMA 38
+#define ICBID_MASTER_BLSP_2 39
+#define ICBID_MASTER_USB_HSIC 40
+#define ICBID_MASTER_BLSP_1 41
+#define ICBID_MASTER_USB_HS 42
+#define ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
+#define ICBID_MASTER_PNOC_CFG 43
+#define ICBID_MASTER_SNOC_PNOC 44
+#define ICBID_MASTER_RPM_INST 45
+#define ICBID_MASTER_RPM_DATA 46
+#define ICBID_MASTER_RPM_SYS 47
+#define ICBID_MASTER_DEHR 48
+#define ICBID_MASTER_QDSS_DAP 49
+#define ICBID_MASTER_SPDM 50
+#define ICBID_MASTER_TIC 51
+#define ICBID_MASTER_SNOC_CNOC 52
+#define ICBID_MASTER_GFX3D_OCMEM 53
+#define ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
+#define ICBID_MASTER_OVIRT_SNOC 54
+#define ICBID_MASTER_SNOC_OVIRT 55
+#define ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
+#define ICBID_MASTER_ONOC_OVIRT 56
+#define ICBID_MASTER_USB_HS2 57
+#define ICBID_MASTER_QPIC 58
+#define ICBID_MASTER_IPA 59
+#define ICBID_MASTER_DSI 60
+#define ICBID_MASTER_MDP1 61
+#define ICBID_MASTER_MDPE ICBID_MASTER_MDP1
+#define ICBID_MASTER_VPU_PROC 62
+#define ICBID_MASTER_VPU 63
+#define ICBID_MASTER_VPU0 ICBID_MASTER_VPU
+#define ICBID_MASTER_CRYPTO_CORE2 64
+#define ICBID_MASTER_PCIE_0 65
+#define ICBID_MASTER_PCIE_1 66
+#define ICBID_MASTER_SATA 67
+#define ICBID_MASTER_UFS 68
+#define ICBID_MASTER_USB3_1 69
+#define ICBID_MASTER_VIDEO_OCMEM 70
+#define ICBID_MASTER_VPU1 71
+#define ICBID_MASTER_VCAP 72
+#define ICBID_MASTER_EMAC 73
+#define ICBID_MASTER_BCAST 74
+#define ICBID_MASTER_MMSS_PROC 75
+#define ICBID_MASTER_SNOC_BIMC_1 76
+#define ICBID_MASTER_SNOC_PCNOC 77
+#define ICBID_MASTER_AUDIO 78
+#define ICBID_MASTER_MM_INT_0 79
+#define ICBID_MASTER_MM_INT_1 80
+#define ICBID_MASTER_MM_INT_2 81
+#define ICBID_MASTER_MM_INT_BIMC 82
+#define ICBID_MASTER_MSS_INT 83
+#define ICBID_MASTER_PCNOC_CFG 84
+#define ICBID_MASTER_PCNOC_INT_0 85
+#define ICBID_MASTER_PCNOC_INT_1 86
+#define ICBID_MASTER_PCNOC_M_0 87
+#define ICBID_MASTER_PCNOC_M_1 88
+#define ICBID_MASTER_PCNOC_S_0 89
+#define ICBID_MASTER_PCNOC_S_1 90
+#define ICBID_MASTER_PCNOC_S_2 91
+#define ICBID_MASTER_PCNOC_S_3 92
+#define ICBID_MASTER_PCNOC_S_4 93
+#define ICBID_MASTER_PCNOC_S_6 94
+#define ICBID_MASTER_PCNOC_S_7 95
+#define ICBID_MASTER_PCNOC_S_8 96
+#define ICBID_MASTER_PCNOC_S_9 97
+#define ICBID_MASTER_QDSS_INT 98
+#define ICBID_MASTER_SNOC_INT_0 99
+#define ICBID_MASTER_SNOC_INT_1 100
+#define ICBID_MASTER_SNOC_INT_BIMC 101
+#define ICBID_MASTER_TCU_0 102
+#define ICBID_MASTER_TCU_1 103
+#define ICBID_MASTER_BIMC_INT_0 104
+#define ICBID_MASTER_BIMC_INT_1 105
+#define ICBID_MASTER_CAMERA 106
+#define ICBID_MASTER_RICA 107
+
+#define ICBID_SLAVE_EBI1 0
+#define ICBID_SLAVE_APPSS_L2 1
+#define ICBID_SLAVE_BIMC_SNOC 2
+#define ICBID_SLAVE_CAMERA_CFG 3
+#define ICBID_SLAVE_DISPLAY_CFG 4
+#define ICBID_SLAVE_OCMEM_CFG 5
+#define ICBID_SLAVE_CPR_CFG 6
+#define ICBID_SLAVE_CPR_XPU_CFG 7
+#define ICBID_SLAVE_MISC_CFG 8
+#define ICBID_SLAVE_MISC_XPU_CFG 9
+#define ICBID_SLAVE_VENUS_CFG 10
+#define ICBID_SLAVE_GFX3D_CFG 11
+#define ICBID_SLAVE_MMSS_CLK_CFG 12
+#define ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
+#define ICBID_SLAVE_MNOC_MPU_CFG 14
+#define ICBID_SLAVE_ONOC_MPU_CFG 15
+#define ICBID_SLAVE_MNOC_BIMC 16
+#define ICBID_SLAVE_SERVICE_MNOC 17
+#define ICBID_SLAVE_OCMEM 18
+#define ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
+#define ICBID_SLAVE_SERVICE_ONOC 19
+#define ICBID_SLAVE_APPSS 20
+#define ICBID_SLAVE_LPASS 21
+#define ICBID_SLAVE_USB3 22
+#define ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
+#define ICBID_SLAVE_WCSS 23
+#define ICBID_SLAVE_SNOC_BIMC 24
+#define ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
+#define ICBID_SLAVE_SNOC_CNOC 25
+#define ICBID_SLAVE_IMEM 26
+#define ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
+#define ICBID_SLAVE_SNOC_OVIRT 27
+#define ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
+#define ICBID_SLAVE_SNOC_PNOC 28
+#define ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
+#define ICBID_SLAVE_SERVICE_SNOC 29
+#define ICBID_SLAVE_QDSS_STM 30
+#define ICBID_SLAVE_SDCC_1 31
+#define ICBID_SLAVE_SDCC_3 32
+#define ICBID_SLAVE_SDCC_2 33
+#define ICBID_SLAVE_SDCC_4 34
+#define ICBID_SLAVE_TSIF 35
+#define ICBID_SLAVE_BAM_DMA 36
+#define ICBID_SLAVE_BLSP_2 37
+#define ICBID_SLAVE_USB_HSIC 38
+#define ICBID_SLAVE_BLSP_1 39
+#define ICBID_SLAVE_USB_HS 40
+#define ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
+#define ICBID_SLAVE_PDM 41
+#define ICBID_SLAVE_PERIPH_APU_CFG 42
+#define ICBID_SLAVE_PNOC_MPU_CFG 43
+#define ICBID_SLAVE_PRNG 44
+#define ICBID_SLAVE_PNOC_SNOC 45
+#define ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
+#define ICBID_SLAVE_SERVICE_PNOC 46
+#define ICBID_SLAVE_CLK_CTL 47
+#define ICBID_SLAVE_CNOC_MSS 48
+#define ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
+#define ICBID_SLAVE_SECURITY 49
+#define ICBID_SLAVE_TCSR 50
+#define ICBID_SLAVE_TLMM 51
+#define ICBID_SLAVE_CRYPTO_0_CFG 52
+#define ICBID_SLAVE_CRYPTO_1_CFG 53
+#define ICBID_SLAVE_IMEM_CFG 54
+#define ICBID_SLAVE_MESSAGE_RAM 55
+#define ICBID_SLAVE_BIMC_CFG 56
+#define ICBID_SLAVE_BOOT_ROM 57
+#define ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
+#define ICBID_SLAVE_PMIC_ARB 59
+#define ICBID_SLAVE_SPDM_WRAPPER 60
+#define ICBID_SLAVE_DEHR_CFG 61
+#define ICBID_SLAVE_MPM 62
+#define ICBID_SLAVE_QDSS_CFG 63
+#define ICBID_SLAVE_RBCPR_CFG 64
+#define ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
+#define ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
+#define ICBID_SLAVE_CNOC_MNOC_CFG 66
+#define ICBID_SLAVE_SNOC_MPU_CFG 67
+#define ICBID_SLAVE_CNOC_ONOC_CFG 68
+#define ICBID_SLAVE_PNOC_CFG 69
+#define ICBID_SLAVE_SNOC_CFG 70
+#define ICBID_SLAVE_EBI1_DLL_CFG 71
+#define ICBID_SLAVE_PHY_APU_CFG 72
+#define ICBID_SLAVE_EBI1_PHY_CFG 73
+#define ICBID_SLAVE_RPM 74
+#define ICBID_SLAVE_CNOC_SNOC 75
+#define ICBID_SLAVE_SERVICE_CNOC 76
+#define ICBID_SLAVE_OVIRT_SNOC 77
+#define ICBID_SLAVE_OVIRT_OCMEM 78
+#define ICBID_SLAVE_USB_HS2 79
+#define ICBID_SLAVE_QPIC 80
+#define ICBID_SLAVE_IPS_CFG 81
+#define ICBID_SLAVE_DSI_CFG 82
+#define ICBID_SLAVE_USB3_1 83
+#define ICBID_SLAVE_PCIE_0 84
+#define ICBID_SLAVE_PCIE_1 85
+#define ICBID_SLAVE_PSS_SMMU_CFG 86
+#define ICBID_SLAVE_CRYPTO_2_CFG 87
+#define ICBID_SLAVE_PCIE_0_CFG 88
+#define ICBID_SLAVE_PCIE_1_CFG 89
+#define ICBID_SLAVE_SATA_CFG 90
+#define ICBID_SLAVE_SPSS_GENI_IR 91
+#define ICBID_SLAVE_UFS_CFG 92
+#define ICBID_SLAVE_AVSYNC_CFG 93
+#define ICBID_SLAVE_VPU_CFG 94
+#define ICBID_SLAVE_USB_PHY_CFG 95
+#define ICBID_SLAVE_RBCPR_MX_CFG 96
+#define ICBID_SLAVE_PCIE_PARF 97
+#define ICBID_SLAVE_VCAP_CFG 98
+#define ICBID_SLAVE_EMAC_CFG 99
+#define ICBID_SLAVE_BCAST_CFG 100
+#define ICBID_SLAVE_KLM_CFG 101
+#define ICBID_SLAVE_DISPLAY_PWM 102
+#define ICBID_SLAVE_GENI 103
+#define ICBID_SLAVE_SNOC_BIMC_1 104
+#define ICBID_SLAVE_AUDIO 105
+#define ICBID_SLAVE_CATS_0 106
+#define ICBID_SLAVE_CATS_1 107
+#define ICBID_SLAVE_MM_INT_0 108
+#define ICBID_SLAVE_MM_INT_1 109
+#define ICBID_SLAVE_MM_INT_2 110
+#define ICBID_SLAVE_MM_INT_BIMC 111
+#define ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
+#define ICBID_SLAVE_MSS_INT 113
+#define ICBID_SLAVE_PCNOC_INT_0 114
+#define ICBID_SLAVE_PCNOC_INT_1 115
+#define ICBID_SLAVE_PCNOC_M_0 116
+#define ICBID_SLAVE_PCNOC_M_1 117
+#define ICBID_SLAVE_PCNOC_S_0 118
+#define ICBID_SLAVE_PCNOC_S_1 119
+#define ICBID_SLAVE_PCNOC_S_2 120
+#define ICBID_SLAVE_PCNOC_S_3 121
+#define ICBID_SLAVE_PCNOC_S_4 122
+#define ICBID_SLAVE_PCNOC_S_6 123
+#define ICBID_SLAVE_PCNOC_S_7 124
+#define ICBID_SLAVE_PCNOC_S_8 125
+#define ICBID_SLAVE_PCNOC_S_9 126
+#define ICBID_SLAVE_PRNG_XPU_CFG 127
+#define ICBID_SLAVE_QDSS_INT 128
+#define ICBID_SLAVE_RPM_XPU_CFG 129
+#define ICBID_SLAVE_SNOC_INT_0 130
+#define ICBID_SLAVE_SNOC_INT_1 131
+#define ICBID_SLAVE_SNOC_INT_BIMC 132
+#define ICBID_SLAVE_TCU 133
+#define ICBID_SLAVE_BIMC_INT_0 134
+#define ICBID_SLAVE_BIMC_INT_1 135
+#define ICBID_SLAVE_RICA_CFG 136
+
+#endif
diff --git a/include/linux/msm-bus-board.h b/include/linux/msm-bus-board.h
new file mode 100644
index 000000000000..c9d648d38ec4
--- /dev/null
+++ b/include/linux/msm-bus-board.h
@@ -0,0 +1,198 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_BUS_BOARD_H
+#define __ASM_ARCH_MSM_BUS_BOARD_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+
+enum context {
+ DUAL_CTX,
+ ACTIVE_CTX,
+ NUM_CTX
+};
+
+struct msm_bus_fabric_registration {
+ unsigned int id;
+ const char *name;
+ struct msm_bus_node_info *info;
+ unsigned int len;
+ int ahb;
+ const char *fabclk[NUM_CTX];
+ const char *iface_clk;
+ unsigned int offset;
+ unsigned int haltid;
+ unsigned int rpm_enabled;
+ unsigned int nmasters;
+ unsigned int nslaves;
+ unsigned int ntieredslaves;
+ bool il_flag;
+ const struct msm_bus_board_algorithm *board_algo;
+ int hw_sel;
+ void *hw_data;
+ uint32_t qos_freq;
+ uint32_t qos_baseoffset;
+ u64 nr_lim_thresh;
+ uint32_t eff_fact;
+ uint32_t qos_delta;
+ bool virt;
+};
+
+struct msm_bus_device_node_registration {
+ struct msm_bus_node_device_type *info;
+ unsigned int num_devices;
+ bool virt;
+};
+
+enum msm_bus_bw_tier_type {
+ MSM_BUS_BW_TIER1 = 1,
+ MSM_BUS_BW_TIER2,
+ MSM_BUS_BW_COUNT,
+ MSM_BUS_BW_SIZE = 0x7FFFFFFF,
+};
+
+struct msm_bus_halt_vector {
+ uint32_t haltval;
+ uint32_t haltmask;
+};
+
+extern struct msm_bus_fabric_registration msm_bus_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_cpss_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_def_fab_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8960_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8064_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_9615_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9615_def_fab_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8930_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_9625_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_config_noc_pdata;
+
+extern int msm_bus_device_match_adhoc(struct device *dev, void *id);
+
+void msm_bus_rpm_set_mt_mask(void);
+int msm_bus_board_rpm_get_il_ids(uint16_t *id);
+int msm_bus_board_get_iid(int id);
+
+#define NFAB_MSM8226 6
+#define NFAB_MSM8610 5
+
+/*
+ * These macros specify the convention followed for allocating
+ * ids to fabrics, masters and slaves for 8x60.
+ *
+ * A node can be identified as a master/slave/fabric by using
+ * these ids.
+ */
+#define FABRIC_ID_KEY 1024
+#define SLAVE_ID_KEY ((FABRIC_ID_KEY) >> 1)
+#define MAX_FAB_KEY 7168 /* OR(All fabric ids) */
+#define INT_NODE_START 10000
+
+#define GET_FABID(id) ((id) & MAX_FAB_KEY)
+
+#define NODE_ID(id) ((id) & (FABRIC_ID_KEY - 1))
+#define IS_SLAVE(id) ((NODE_ID(id)) >= SLAVE_ID_KEY ? 1 : 0)
+#define CHECK_ID(iid, id) ((((iid) & (id)) != (id)) ? -ENXIO : (iid))
+
+/*
+ * The following macros are used to format the data for port halt
+ * and unhalt requests.
+ */
+#define MSM_BUS_CLK_HALT 0x1
+#define MSM_BUS_CLK_HALT_MASK 0x1
+#define MSM_BUS_CLK_HALT_FIELDSIZE 0x1
+#define MSM_BUS_CLK_UNHALT 0x0
+
+#define MSM_BUS_MASTER_SHIFT(master, fieldsize) \
+ ((master) * (fieldsize))
+
+#define MSM_BUS_SET_BITFIELD(word, fieldmask, fieldvalue) \
+ { \
+ (word) &= ~(fieldmask); \
+ (word) |= (fieldvalue); \
+ }
+
+
+#define MSM_BUS_MASTER_HALT(u32haltmask, u32haltval, master) \
+ MSM_BUS_SET_BITFIELD(u32haltmask, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+ MSM_BUS_SET_BITFIELD(u32haltval, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_HALT<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+
+#define MSM_BUS_MASTER_UNHALT(u32haltmask, u32haltval, master) \
+ MSM_BUS_SET_BITFIELD(u32haltmask, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+ MSM_BUS_SET_BITFIELD(u32haltval, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_UNHALT<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+#define RPM_BUS_MASTER_REQ 0x73616d62
+
+enum msm_bus_rpm_slave_field_type {
+ RPM_SLAVE_FIELD_BW = 0x00007762,
+};
+
+enum msm_bus_rpm_mas_field_type {
+ RPM_MASTER_FIELD_BW = 0x00007762,
+ RPM_MASTER_FIELD_BW_T0 = 0x30747762,
+ RPM_MASTER_FIELD_BW_T1 = 0x31747762,
+ RPM_MASTER_FIELD_BW_T2 = 0x32747762,
+};
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+
+#endif /*__ASM_ARCH_MSM_BUS_BOARD_H */
diff --git a/include/linux/msm-bus.h b/include/linux/msm-bus.h
new file mode 100644
index 000000000000..1f5edc964c49
--- /dev/null
+++ b/include/linux/msm-bus.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_H
+#define _ARCH_ARM_MACH_MSM_BUS_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+
+/*
+ * Macros for clients to convert their data to ib and ab
+ * Ws : Time window over which to transfer the data in SECONDS
+ * Bs : Size of the data block in bytes
+ * Per : Recurrence period
+ * Tb : Throughput bandwidth to prevent stalling
+ * R : Ratio of actual bandwidth used to Tb
+ * Ib : Instantaneous bandwidth
+ * Ab : Arbitrated bandwidth
+ *
+ * IB_RECURRBLOCK and AB_RECURRBLOCK:
+ * These are used if the requirement is to transfer a
+ * recurring block of data over a known time window.
+ *
+ * IB_THROUGHPUTBW and AB_THROUGHPUTBW:
+ * These are used for CPU style masters. Here the requirement
+ * is to have minimum throughput bandwidth available to avoid
+ * stalling.
+ */
+#define IB_RECURRBLOCK(Ws, Bs) ((Ws) == 0 ? 0 : ((Bs)/(Ws)))
+#define AB_RECURRBLOCK(Bs, Per) ((Per) == 0 ? 0 : ((Bs)/(Per)))
+#define IB_THROUGHPUTBW(Tb) (Tb)
+#define AB_THROUGHPUTBW(Tb, R) ((Tb) * (R))
+
+struct msm_bus_vectors {
+ int src; /* Master */
+ int dst; /* Slave */
+ uint64_t ab; /* Arbitrated bandwidth */
+ uint64_t ib; /* Instantaneous bandwidth */
+};
+
+struct msm_bus_paths {
+ int num_paths;
+ struct msm_bus_vectors *vectors;
+};
+
+struct msm_bus_scale_pdata {
+ struct msm_bus_paths *usecase;
+ int num_usecases;
+ const char *name;
+ /*
+ * If the active_only flag is set to 1, the BW request is applied
+ * only when at least one CPU is active (powered on). If the flag
+ * is set to 0, then the BW request is always applied irrespective
+ * of the CPU state.
+ */
+ unsigned int active_only;
+};
+
+/* Scaling APIs */
+
+/*
+ * This function returns a handle to the client. This should be used to
+ * call msm_bus_scale_client_update_request.
+ * The function returns 0 if bus driver is unable to register a client
+ */
+
+#if (defined(CONFIG_MSM_BUS_SCALING) || defined(CONFIG_BUS_TOPOLOGY_ADHOC))
+int __init msm_bus_fabric_init_driver(void);
+uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata);
+int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index);
+void msm_bus_scale_unregister_client(uint32_t cl);
+/* AXI Port configuration APIs */
+int msm_bus_axi_porthalt(int master_port);
+int msm_bus_axi_portunhalt(int master_port);
+
+#else
+static inline int __init msm_bus_fabric_init_driver(void) { return 0; }
+
+static inline uint32_t
+msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
+{
+ return 1;
+}
+
+static inline int
+msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
+{
+ return 0;
+}
+
+static inline void
+msm_bus_scale_unregister_client(uint32_t cl)
+{
+}
+
+static inline int msm_bus_axi_porthalt(int master_port)
+{
+ return 0;
+}
+
+static inline int msm_bus_axi_portunhalt(int master_port)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_OF) && defined(CONFIG_MSM_BUS_SCALING)
+struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+ struct platform_device *pdev, struct device_node *of_node);
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev);
+void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata);
+#else
+static inline struct msm_bus_scale_pdata
+*msm_bus_cl_get_pdata(struct platform_device *pdev)
+{
+ return NULL;
+}
+
+static inline struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+ struct platform_device *pdev, struct device_node *of_node)
+{
+ return NULL;
+}
+
+static inline void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
+{
+}
+#endif
+#endif /*_ARCH_ARM_MACH_MSM_BUS_H*/
diff --git a/include/linux/msm_iommu_domains.h b/include/linux/msm_iommu_domains.h
new file mode 100644
index 000000000000..811d7737ddb5
--- /dev/null
+++ b/include/linux/msm_iommu_domains.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_MSM_IOMMU_DOMAINS_H
+#define _LINUX_MSM_IOMMU_DOMAINS_H
+
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/genalloc.h>
+#include <linux/rbtree.h>
+#include <linux/dma-buf.h>
+
+#define MSM_IOMMU_DOMAIN_SECURE 0x1
+
+enum {
+ VIDEO_DOMAIN,
+ CAMERA_DOMAIN,
+ DISPLAY_READ_DOMAIN,
+ DISPLAY_WRITE_DOMAIN,
+ ROTATOR_SRC_DOMAIN,
+ ROTATOR_DST_DOMAIN,
+ MAX_DOMAINS
+};
+
+enum {
+ VIDEO_FIRMWARE_POOL,
+ VIDEO_MAIN_POOL,
+ GEN_POOL,
+};
+
+struct mem_pool {
+ struct mutex pool_mutex;
+ struct gen_pool *gpool;
+ phys_addr_t paddr;
+ unsigned long size;
+ unsigned long free;
+ unsigned int id;
+};
+
+struct msm_iommu_domain {
+ /* iommu domain to map in */
+ struct iommu_domain *domain;
+ /* total number of allocations from this domain */
+ atomic_t allocation_cnt;
+ /* number of iova pools */
+ int npools;
+ /*
+ * array of gen_pools for allocating iovas.
+ * behavior is undefined if these overlap
+ */
+ struct mem_pool *iova_pools;
+};
+
+struct msm_iommu_domain_name {
+ char *name;
+ int domain;
+};
+
+struct iommu_domains_pdata {
+ struct msm_iommu_domain *domains;
+ int ndomains;
+ struct msm_iommu_domain_name *domain_names;
+ int nnames;
+ unsigned int domain_alloc_flags;
+};
+
+struct msm_iova_partition {
+ unsigned long start;
+ unsigned long size;
+};
+
+struct msm_iova_layout {
+ struct msm_iova_partition *partitions;
+ int npartitions;
+ const char *client_name;
+ unsigned int domain_flags;
+ unsigned int is_secure;
+};
+
+#if defined(CONFIG_QCOM_IOMMU)
+extern void msm_iommu_set_client_name(struct iommu_domain *domain,
+ char const *name);
+extern struct iommu_domain *msm_get_iommu_domain(int domain_num);
+extern int msm_allocate_iova_address(unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long *iova);
+
+extern void msm_free_iova_address(unsigned long iova,
+ unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size);
+
+extern int msm_use_iommu(void);
+
+extern int msm_iommu_map_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ phys_addr_t phys_addr,
+ unsigned long size,
+ unsigned long page_size,
+ int cached);
+
+extern void msm_iommu_unmap_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ unsigned long size,
+ unsigned long page_size);
+
+extern int msm_iommu_map_contig_buffer(phys_addr_t phys,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long cached,
+ dma_addr_t *iova_val);
+
+extern void msm_iommu_unmap_contig_buffer(dma_addr_t iova,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size);
+
+extern int msm_register_domain(struct msm_iova_layout *layout);
+extern int msm_unregister_domain(struct iommu_domain *domain);
+
+int msm_map_dma_buf(struct dma_buf *dma_buf, struct sg_table *table,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags, unsigned long iommu_flags);
+
+void msm_unmap_dma_buf(struct sg_table *table, int domain_num,
+ int partition_num);
+#else
+static inline void msm_iommu_set_client_name(struct iommu_domain *domain,
+ char const *name)
+{
+}
+
+static inline struct iommu_domain *msm_get_iommu_domain(int subsys_id)
+{
+ return NULL;
+}
+
+static inline int msm_allocate_iova_address(unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long *iova)
+{
+ return -ENOMEM;
+}
+
+static inline void msm_free_iova_address(unsigned long iova,
+ unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size) { }
+
+static inline int msm_use_iommu(void)
+{
+ return 0;
+}
+
+static inline int msm_iommu_map_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ phys_addr_t phys_addr,
+ unsigned long size,
+ unsigned long page_size,
+ int cached)
+{
+ return -ENODEV;
+}
+
+static inline void msm_iommu_unmap_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ unsigned long size,
+ unsigned long page_size)
+{
+}
+
+static inline int msm_iommu_map_contig_buffer(phys_addr_t phys,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long cached,
+ dma_addr_t *iova_val)
+{
+ *iova_val = phys;
+ return 0;
+}
+
+static inline void msm_iommu_unmap_contig_buffer(dma_addr_t iova,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size)
+{
+}
+
+static inline int msm_register_domain(struct msm_iova_layout *layout)
+{
+ return -ENODEV;
+}
+
+static inline int msm_unregister_domain(struct iommu_domain *domain)
+{
+ return -ENODEV;
+}
+
+static inline int msm_map_dma_buf(struct dma_buf *dma_buf,
+ struct sg_table *table,
+ int domain_num, int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags,
+ unsigned long iommu_flags)
+{
+ return -ENODEV;
+}
+
+static inline void msm_unmap_dma_buf(struct sg_table *table, int domain_num,
+ int partition_num)
+{
+
+}
+
+#endif /* CONFIG_QCOM_IOMMU */
+#endif /* _LINUX_MSM_IOMMU_DOMAINS_H */
\ No newline at end of file
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index cc7dd687a89d..4b8cb7da6b4a 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -55,7 +55,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return of_node_get(cpu_dev->of_node);
}
-void of_dma_configure(struct device *dev, struct device_node *np);
+void of_dma_configure_masks(struct device *dev, struct device_node *np);
+int of_dma_configure_ops(struct device *dev, struct device_node *np);
+void of_dma_deconfigure(struct device *dev);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -98,7 +100,15 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
-static inline void of_dma_configure(struct device *dev, struct device_node *np)
+static inline void of_dma_configure_masks(struct device *dev,
+ struct device_node *np)
+{}
+static inline int of_dma_configure_ops(struct device *dev,
+ struct device_node *np)
+{
+ return 0;
+}
+static inline void of_dma_deconfigure(struct device *dev)
{}
#endif /* CONFIG_OF */
diff --git a/include/linux/qcom_iommu.h b/include/linux/qcom_iommu.h
new file mode 100644
index 000000000000..54f47b378ed6
--- /dev/null
+++ b/include/linux/qcom_iommu.h
@@ -0,0 +1,388 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_IOMMU_H
+#define MSM_IOMMU_H
+
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <soc/qcom/socinfo.h>
+
+/* Private pgprot flag */
+#define IOMMU_PRIV 16
+
+extern pgprot_t pgprot_kernel;
+extern struct bus_type msm_iommu_sec_bus_type;
+extern struct iommu_access_ops iommu_access_ops_v0;
+extern struct iommu_access_ops iommu_access_ops_v1;
+
+/* Domain attributes */
+#define MSM_IOMMU_DOMAIN_PT_CACHEABLE 0x1
+#define MSM_IOMMU_DOMAIN_PT_SECURE 0x2
+
+/* Mask for the cache policy attribute */
+#define MSM_IOMMU_CP_MASK 0x03
+
+#define DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE DOMAIN_ATTR_MAX
+
+/* Maximum number of Machine IDs that we are allowing to be mapped to the same
+ * context bank. The number of MIDs mapped to the same CB does not affect
+ * performance, but there is a practical limit on how many distinct MIDs may
+ * be present. These mappings are typically determined at design time and are
+ * not expected to change at run time.
+ */
+#define MAX_NUM_MIDS 32
+
+/* Maximum number of SMT entries allowed by the system */
+#define MAX_NUM_SMR 128
+
+#define MAX_NUM_BFB_REGS 32
+
+/**
+ * struct msm_iommu_dev - a single IOMMU hardware instance
+ * name Human-readable name given to this IOMMU HW instance
+ * ncb Number of context banks present on this IOMMU HW instance
+ */
+struct msm_iommu_dev {
+ const char *name;
+ int ncb;
+ int ttbr_split;
+};
+
+/**
+ * struct msm_iommu_ctx_dev - an IOMMU context bank instance
+ * name Human-readable name given to this context bank
+ * num Index of this context bank within the hardware
+ * mids List of Machine IDs that are to be mapped into this context
+ * bank, terminated by -1. The MID is a set of signals on the
+ * AXI bus that identifies the function associated with a specific
+ * memory request. (See ARM spec).
+ */
+struct msm_iommu_ctx_dev {
+ const char *name;
+ int num;
+ int mids[MAX_NUM_MIDS];
+};
+
+/**
+ * struct msm_iommu_bfb_settings - a set of IOMMU BFB tuning parameters
+ * regs An array of register offsets to configure
+ * data Values to write to corresponding registers
+ * length Number of valid entries in the offset/val arrays
+ */
+struct msm_iommu_bfb_settings {
+ unsigned int regs[MAX_NUM_BFB_REGS];
+ unsigned int data[MAX_NUM_BFB_REGS];
+ int length;
+};
+
+/**
+ * struct msm_iommu_drvdata - A single IOMMU hardware instance
+ * @base: IOMMU config port base address (VA)
+ * @glb_base: IOMMU config port base address for global register space (VA)
+ * @phys_base: IOMMU physical base address.
+ * @ncb The number of contexts on this IOMMU
+ * @irq: Interrupt number
+ * @core: The bus clock for this IOMMU hardware instance
+ * @iface: The clock for the IOMMU bus interconnect
+ * @name: Human-readable name of this IOMMU device
+ * @bfb_settings: Optional BFB performance tuning parameters
+ * @dev: Struct device this hardware instance is tied to
+ * @list: List head to link all iommus together
+ * @halt_enabled: Set to 1 if IOMMU halt is supported in the IOMMU, 0 otherwise.
+ * @ctx_attach_count: Count of how many context are attached.
+ * @bus_client : Bus client needed to vote for bus bandwidth.
+ * @needs_rem_spinlock : 1 if remote spinlock is needed, 0 otherwise
+ * @powered_on: Powered status of the IOMMU. 0 means powered off.
+ *
+ * A msm_iommu_drvdata holds the global driver data about a single piece
+ * of an IOMMU hardware instance.
+ */
+struct msm_iommu_drvdata {
+ void __iomem *base;
+ phys_addr_t phys_base;
+ void __iomem *glb_base;
+ void __iomem *cb_base;
+ void __iomem *smmu_local_base;
+ int ncb;
+ int ttbr_split;
+ struct clk *core;
+ struct clk *iface;
+ const char *name;
+ struct msm_iommu_bfb_settings *bfb_settings;
+ int sec_id;
+ struct device *dev;
+ struct list_head list;
+ int halt_enabled;
+ unsigned int ctx_attach_count;
+ unsigned int bus_client;
+ int needs_rem_spinlock;
+ int powered_on;
+ unsigned int model;
+};
+
+/**
+ * struct iommu_access_ops - Callbacks for accessing IOMMU
+ * @iommu_power_on: Turn on power to unit
+ * @iommu_power_off: Turn off power to unit
+ * @iommu_bus_vote: Vote for bus bandwidth
+ * @iommu_clk_on: Turn on clks to unit
+ * @iommu_clk_off: Turn off clks to unit
+ * @iommu_lock_initialize: Initialize the remote lock
+ * @iommu_lock_acquire: Acquire any locks needed
+ * @iommu_lock_release: Release locks needed
+ */
+struct iommu_access_ops {
+ int (*iommu_power_on)(struct msm_iommu_drvdata *);
+ void (*iommu_power_off)(struct msm_iommu_drvdata *);
+ int (*iommu_bus_vote)(struct msm_iommu_drvdata *drvdata,
+ unsigned int vote);
+ int (*iommu_clk_on)(struct msm_iommu_drvdata *);
+ void (*iommu_clk_off)(struct msm_iommu_drvdata *);
+ void * (*iommu_lock_initialize)(void);
+ void (*iommu_lock_acquire)(unsigned int need_extra_lock);
+ void (*iommu_lock_release)(unsigned int need_extra_lock);
+};
+
+void msm_iommu_add_drv(struct msm_iommu_drvdata *drv);
+void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv);
+void program_iommu_bfb_settings(void __iomem *base,
+ const struct msm_iommu_bfb_settings *bfb_settings);
+void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata);
+void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata);
+
+/**
+ * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance
+ * @num: Hardware context number of this context
+ * @pdev: Platform device associated wit this HW instance
+ * @attached_elm: List element for domains to track which devices are
+ * attached to them
+ * @attached_domain Domain currently attached to this context (if any)
+ * @name Human-readable name of this context device
+ * @sids List of Stream IDs mapped to this context
+ * @nsid Number of Stream IDs mapped to this context
+ * @secure_context true if this is a secure context programmed by
+ the secure environment, false otherwise
+ * @asid ASID used with this context.
+ * @attach_count Number of time this context has been attached.
+ *
+ * A msm_iommu_ctx_drvdata holds the driver data for a single context bank
+ * within each IOMMU hardware instance
+ */
+struct msm_iommu_ctx_drvdata {
+ int num;
+ struct platform_device *pdev;
+ struct list_head attached_elm;
+ struct iommu_domain *attached_domain;
+ const char *name;
+ u32 sids[MAX_NUM_SMR];
+ unsigned int nsid;
+ unsigned int secure_context;
+ int asid;
+ int attach_count;
+ u32 sid_mask[MAX_NUM_SMR];
+ unsigned int n_sid_mask;
+};
+
+enum dump_reg {
+ DUMP_REG_FIRST,
+ DUMP_REG_FAR0 = DUMP_REG_FIRST,
+ DUMP_REG_FAR1,
+ DUMP_REG_PAR0,
+ DUMP_REG_PAR1,
+ DUMP_REG_FSR,
+ DUMP_REG_FSYNR0,
+ DUMP_REG_FSYNR1,
+ DUMP_REG_TTBR0_0,
+ DUMP_REG_TTBR0_1,
+ DUMP_REG_TTBR1_0,
+ DUMP_REG_TTBR1_1,
+ DUMP_REG_SCTLR,
+ DUMP_REG_ACTLR,
+ DUMP_REG_PRRR,
+ DUMP_REG_MAIR0 = DUMP_REG_PRRR,
+ DUMP_REG_NMRR,
+ DUMP_REG_MAIR1 = DUMP_REG_NMRR,
+ DUMP_REG_CBAR_N,
+ DUMP_REG_CBFRSYNRA_N,
+ MAX_DUMP_REGS,
+};
+
+enum dump_reg_type {
+ DRT_CTX_REG,
+ DRT_GLOBAL_REG,
+ DRT_GLOBAL_REG_N,
+};
+
+enum model_id {
+ QSMMUv1 = 1,
+ QSMMUv2,
+ MMU_500 = 500,
+ MAX_MODEL,
+};
+
+struct dump_regs_tbl_entry {
+ /*
+ * To keep things context-bank-agnostic, we only store the
+ * register offset in `reg_offset'
+ */
+ unsigned int reg_offset;
+ const char *name;
+ int must_be_present;
+ enum dump_reg_type dump_reg_type;
+};
+extern struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS];
+
+#define COMBINE_DUMP_REG(upper, lower) ((((u64)(upper)) << 32) | (lower))
+
+struct msm_iommu_context_reg {
+ uint32_t val;
+ bool valid;
+};
+
+void print_ctx_regs(struct msm_iommu_context_reg regs[]);
+
+/*
+ * Interrupt handler for the IOMMU context fault interrupt. Hooking the
+ * interrupt is not supported in the API yet, but this will print an error
+ * message and dump useful IOMMU registers.
+ */
+irqreturn_t msm_iommu_global_fault_handler(int irq, void *dev_id);
+irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id);
+irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id);
+irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id);
+
+enum {
+ PROC_APPS,
+ PROC_GPU,
+ PROC_MAX
+};
+
+/* Expose structure to allow kgsl iommu driver to use the same structure to
+ * communicate to GPU the addresses of the flag and turn variables.
+ */
+struct remote_iommu_petersons_spinlock {
+ uint32_t flag[PROC_MAX];
+ uint32_t turn;
+};
+
+#ifdef CONFIG_QCOM_IOMMU_V1
+void *msm_iommu_lock_initialize(void);
+void msm_iommu_mutex_lock(void);
+void msm_iommu_mutex_unlock(void);
+void msm_set_iommu_access_ops(struct iommu_access_ops *ops);
+struct iommu_access_ops *msm_get_iommu_access_ops(void);
+#else
+static inline void *msm_iommu_lock_initialize(void)
+{
+ return NULL;
+}
+static inline void msm_iommu_mutex_lock(void) { }
+static inline void msm_iommu_mutex_unlock(void) { }
+static inline void msm_set_iommu_access_ops(struct iommu_access_ops *ops)
+{
+
+}
+static inline struct iommu_access_ops *msm_get_iommu_access_ops(void)
+{
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_QCOM_IOMMU_V1
+/*
+ * Look up an IOMMU context device by its context name. NULL if none found.
+ * Useful for testing and drivers that do not yet fully have IOMMU stuff in
+ * their platform devices.
+ */
+struct device *msm_iommu_get_ctx(const char *ctx_name);
+#else
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ return NULL;
+}
+#endif
+
+/*
+ * Function to program the global registers of an IOMMU securely.
+ * This should only be called on IOMMUs for which kernel programming
+ * of global registers is not possible
+ */
+void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops);
+int msm_iommu_sec_program_iommu(struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata);
+int is_vfe_secure(void);
+
+#ifdef CONFIG_MSM_IOMMU_V0
+static inline int msm_soc_version_supports_iommu_v0(void)
+{
+ static int soc_supports_v0 = -1;
+#ifdef CONFIG_OF
+ struct device_node *node;
+#endif
+
+ if (soc_supports_v0 != -1)
+ return soc_supports_v0;
+
+#ifdef CONFIG_OF
+ node = of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v0");
+ if (node) {
+ soc_supports_v0 = 1;
+ of_node_put(node);
+ return 1;
+ }
+#endif
+ if (cpu_is_msm8960() &&
+ SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 2) {
+ soc_supports_v0 = 0;
+ return 0;
+ }
+
+ if (cpu_is_msm8x60() &&
+ (SOCINFO_VERSION_MAJOR(socinfo_get_version()) != 2 ||
+ SOCINFO_VERSION_MINOR(socinfo_get_version()) < 1)) {
+ soc_supports_v0 = 0;
+ return 0;
+ }
+
+ soc_supports_v0 = 1;
+ return 1;
+}
+#else
+static inline int msm_soc_version_supports_iommu_v0(void)
+{
+ return 0;
+}
+#endif
+
+int msm_iommu_get_scm_call_avail(void);
+void msm_iommu_check_scm_call_avail(void);
+
+u32 msm_iommu_get_mair0(void);
+u32 msm_iommu_get_mair1(void);
+u32 msm_iommu_get_prrr(void);
+u32 msm_iommu_get_nmrr(void);
+
+/* events for notifiers passed to msm_iommu_register_notify */
+#define TLB_SYNC_TIMEOUT 1
+
+#ifdef CONFIG_QCOM_IOMMU_V1
+void msm_iommu_register_notify(struct notifier_block *nb);
+#else
+static inline void msm_iommu_register_notify(struct notifier_block *nb)
+{
+}
+#endif
+
+#endif
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
new file mode 100644
index 000000000000..29715506ed46
--- /dev/null
+++ b/include/soc/qcom/socinfo.h
@@ -0,0 +1,610 @@
+/* Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SOCINFO_H_
+#define _ARCH_ARM_MACH_MSM_SOCINFO_H_
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/of_fdt.h>
+#include <linux/of.h>
+
+#include <asm/cputype.h>
+/*
+ * SOC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits. For example:
+ * 1.0 -> 0x00010000
+ * 2.3 -> 0x00020003
+ */
+#define SOCINFO_VERSION_MAJOR(ver) (((ver) & 0xffff0000) >> 16)
+#define SOCINFO_VERSION_MINOR(ver) ((ver) & 0x0000ffff)
+
+#ifdef CONFIG_OF
+/* NOTE: of_board_is_cdp() was defined twice; the single definition below is kept. */
+#define of_board_is_sim() of_machine_is_compatible("qcom,sim")
+#define of_board_is_rumi() of_machine_is_compatible("qcom,rumi")
+#define of_board_is_fluid() of_machine_is_compatible("qcom,fluid")
+#define of_board_is_liquid() of_machine_is_compatible("qcom,liquid")
+#define of_board_is_dragonboard() \
+ of_machine_is_compatible("qcom,dragonboard")
+#define of_board_is_cdp() of_machine_is_compatible("qcom,cdp")
+#define of_board_is_mtp() of_machine_is_compatible("qcom,mtp")
+#define of_board_is_qrd() of_machine_is_compatible("qcom,qrd")
+#define of_board_is_xpm() of_machine_is_compatible("qcom,xpm")
+#define of_board_is_skuf() of_machine_is_compatible("qcom,skuf")
+#define of_board_is_sbc() of_machine_is_compatible("qcom,sbc")
+
+#define machine_is_msm8974() of_machine_is_compatible("qcom,msm8974")
+#define machine_is_msm9625() of_machine_is_compatible("qcom,msm9625")
+#define machine_is_msm8610() of_machine_is_compatible("qcom,msm8610")
+#define machine_is_msm8226() of_machine_is_compatible("qcom,msm8226")
+#define machine_is_apq8074() of_machine_is_compatible("qcom,apq8074")
+#define machine_is_msm8926() of_machine_is_compatible("qcom,msm8926")
+
+#define early_machine_is_msm8610() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8610")
+#define early_machine_is_msmferrum() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmferrum")
+#define early_machine_is_msm8916() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8916")
+#define early_machine_is_msm8936() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8936")
+#define early_machine_is_msm8939() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8939")
+#define early_machine_is_apq8084() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,apq8084")
+#define early_machine_is_mdm9630() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mdm9630")
+#define early_machine_is_msmzirc() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmzirc")
+#define early_machine_is_fsm9900() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,fsm9900")
+#define early_machine_is_msm8994() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8994")
+#define early_machine_is_msm8992() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8992")
+#define early_machine_is_fsm9010() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,fsm9010")
+#else
+#define of_board_is_sim() 0
+#define of_board_is_rumi() 0
+#define of_board_is_fluid() 0
+#define of_board_is_liquid() 0
+#define of_board_is_dragonboard() 0
+#define of_board_is_cdp() 0
+#define of_board_is_mtp() 0
+#define of_board_is_qrd() 0
+#define of_board_is_xpm() 0
+#define of_board_is_skuf() 0
+#define of_board_is_sbc() 0
+
+#define machine_is_msm8974() 0
+#define machine_is_msm9625() 0
+#define machine_is_msm8610() 0
+#define machine_is_msm8226() 0
+#define machine_is_apq8074() 0
+#define machine_is_msm8926() 0
+
+#define early_machine_is_msm8610() 0
+#define early_machine_is_msmferrum() 0
+#define early_machine_is_msm8916() 0
+#define early_machine_is_msm8936() 0
+#define early_machine_is_msm8939() 0
+#define early_machine_is_apq8084() 0
+#define early_machine_is_mdm9630() 0
+#define early_machine_is_fsm9900() 0
+#define early_machine_is_fsm9010() 0 /* NOTE(review): early_machine_is_msmzirc/msm8994/msm8992 stubs are missing from this !CONFIG_OF branch — add them to avoid build breaks */
+#endif
+
+#define PLATFORM_SUBTYPE_MDM 1
+#define PLATFORM_SUBTYPE_INTERPOSERV3 2
+#define PLATFORM_SUBTYPE_SGLTE 6
+
+enum msm_cpu {
+ MSM_CPU_UNKNOWN = 0,
+ MSM_CPU_7X01,
+ MSM_CPU_7X25,
+ MSM_CPU_7X27,
+ MSM_CPU_8X50,
+ MSM_CPU_8X50A,
+ MSM_CPU_7X30,
+ MSM_CPU_8X55,
+ MSM_CPU_8X60,
+ MSM_CPU_8960,
+ MSM_CPU_8960AB,
+ MSM_CPU_7X27A,
+ FSM_CPU_9XXX,
+ MSM_CPU_7X25A,
+ MSM_CPU_7X25AA,
+ MSM_CPU_7X25AB,
+ MSM_CPU_8064,
+ MSM_CPU_8064AB,
+ MSM_CPU_8064AA,
+ MSM_CPU_8930,
+ MSM_CPU_8930AA,
+ MSM_CPU_8930AB,
+ MSM_CPU_7X27AA,
+ MSM_CPU_9615,
+ MSM_CPU_8974,
+ MSM_CPU_8974PRO_AA,
+ MSM_CPU_8974PRO_AB,
+ MSM_CPU_8974PRO_AC,
+ MSM_CPU_8627,
+ MSM_CPU_8625,
+ MSM_CPU_9625,
+ MSM_CPU_FERRUM,
+ MSM_CPU_8916,
+ MSM_CPU_8936,
+ MSM_CPU_8939,
+ MSM_CPU_8226,
+ MSM_CPU_8610,
+ MSM_CPU_8625Q,
+ MSM_CPU_8084,
+ MSM_CPU_9630,
+ FSM_CPU_9900,
+ MSM_CPU_ZIRC,
+ MSM_CPU_8994,
+ MSM_CPU_8992,
+ FSM_CPU_9010,
+};
+
+struct msm_soc_info {
+ enum msm_cpu generic_soc_type;
+ char *soc_id_string;
+};
+
+enum pmic_model {
+ PMIC_MODEL_PM8058 = 13,
+ PMIC_MODEL_PM8028 = 14,
+ PMIC_MODEL_PM8901 = 15,
+ PMIC_MODEL_PM8027 = 16,
+ PMIC_MODEL_ISL_9519 = 17,
+ PMIC_MODEL_PM8921 = 18,
+ PMIC_MODEL_PM8018 = 19,
+ PMIC_MODEL_PM8015 = 20,
+ PMIC_MODEL_PM8014 = 21,
+ PMIC_MODEL_PM8821 = 22,
+ PMIC_MODEL_PM8038 = 23,
+ PMIC_MODEL_PM8922 = 24,
+ PMIC_MODEL_PM8917 = 25,
+ PMIC_MODEL_UNKNOWN = 0xFFFFFFFF
+};
+
+enum msm_cpu socinfo_get_msm_cpu(void);
+uint32_t socinfo_get_id(void);
+uint32_t socinfo_get_version(void);
+uint32_t socinfo_get_raw_id(void);
+char *socinfo_get_build_id(void);
+uint32_t socinfo_get_platform_type(void);
+uint32_t socinfo_get_platform_subtype(void);
+uint32_t socinfo_get_platform_version(void);
+enum pmic_model socinfo_get_pmic_model(void);
+uint32_t socinfo_get_pmic_die_revision(void);
+int __init socinfo_init(void) __must_check;
+const int read_msm_cpu_type(void);
+const int get_core_count(void);
+const int cpu_is_krait(void);
+const int cpu_is_krait_v1(void);
+const int cpu_is_krait_v2(void);
+const int cpu_is_krait_v3(void);
+
+static inline int cpu_is_msm7x01(void)
+{
+#ifdef CONFIG_ARCH_MSM7X01A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X01;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25(void)
+{
+#ifdef CONFIG_ARCH_MSM7X25
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27(void)
+{
+#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27a(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27A;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27aa(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25a(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25A;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25aa(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25ab(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x30(void)
+{
+#ifdef CONFIG_ARCH_MSM7X30
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X30;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_qsd8x50(void)
+{
+#ifdef CONFIG_ARCH_QSD8X50
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8X50;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8x55(void)
+{
+#ifdef CONFIG_ARCH_MSM7X30
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8X55;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8x60(void)
+{
+#ifdef CONFIG_ARCH_MSM8X60
+ return read_msm_cpu_type() == MSM_CPU_8X60;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8960(void)
+{
+ return 0;
+}
+
+static inline int cpu_is_msm8960ab(void)
+{
+ return 0;
+}
+
+static inline int cpu_is_apq8064(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_apq8064ab(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_apq8064aa(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930aa(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930ab(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8627(void)
+{
+/* 8930 and 8627 will share the same CONFIG_ARCH type unless otherwise needed */
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8627;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_fsm9xxx(void)
+{
+#ifdef CONFIG_ARCH_FSM9XXX
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == FSM_CPU_9XXX;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm9615(void)
+{
+#ifdef CONFIG_ARCH_MSM9615
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_9615;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8625(void)
+{
+#ifdef CONFIG_ARCH_MSM8625
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8625;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_aa(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_ab(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_ac(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AC;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msmferrum(void)
+{
+#ifdef CONFIG_ARCH_MSMFERRUM
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_FERRUM;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8916(void)
+{
+#ifdef CONFIG_ARCH_MSM8916
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8916;
+#else
+ return 0;
+#endif
+
+}
+
+static inline int cpu_is_msm8936(void)
+{
+#ifdef CONFIG_ARCH_MSM8916
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8936;
+#else
+ return 0;
+#endif
+
+}
+
+static inline int cpu_is_msm8939(void)
+{
+#ifdef CONFIG_ARCH_MSM8916
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8939;
+#else
+ return 0;
+#endif
+
+}
+
+static inline int cpu_is_msm8226(void)
+{
+#ifdef CONFIG_ARCH_MSM8226
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8226;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8610(void)
+{
+#ifdef CONFIG_ARCH_MSM8610
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8610;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8625q(void)
+{
+#ifdef CONFIG_ARCH_MSM8625
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8625Q;
+#else
+ return 0;
+#endif
+}
+
+static inline int soc_class_is_msm8960(void)
+{
+ return cpu_is_msm8960() || cpu_is_msm8960ab();
+}
+
+static inline int soc_class_is_apq8064(void)
+{
+ return cpu_is_apq8064() || cpu_is_apq8064ab() || cpu_is_apq8064aa();
+}
+
+static inline int soc_class_is_msm8930(void)
+{
+ return cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8930ab() ||
+ cpu_is_msm8627();
+}
+
+static inline int soc_class_is_msm8974(void)
+{
+ return cpu_is_msm8974() || cpu_is_msm8974pro_aa() ||
+ cpu_is_msm8974pro_ab() || cpu_is_msm8974pro_ac();
+}
+
+#endif