author     Jon Medhurst <tixy@linaro.org>  2015-07-21 14:42:50 +0100
committer  Jon Medhurst <tixy@linaro.org>  2015-07-21 14:42:50 +0100
commit     6e4d756b63b552136c6462fdd7c1c34fa9f22937 (patch)
tree       3d7b25f2e2117fae0e6bc76514b368e9fc5b5ec0
parent     55ee6e40f9c15b91676d0036bb927bdbea1e37bb (diff)
parent     1c466fb884bdb8d8384f6dea0be2bb655dd674bc (diff)
Merge branch 'lsk-3.10-armlt-smmu' into integration-lsk-3.10-juno-android  (lsk-3.10-armlt-juno-20150721)
-rw-r--r--  Documentation/devicetree/bindings/iommu/arm,smmu.txt    70
-rw-r--r--  MAINTAINERS                                               7
-rw-r--r--  drivers/iommu/Kconfig                                    39
-rw-r--r--  drivers/iommu/Makefile                                    3
-rw-r--r--  drivers/iommu/arm-smmu.c                                1777
-rw-r--r--  drivers/iommu/io-pgtable-arm.c                           986
-rw-r--r--  drivers/iommu/io-pgtable.c                                82
-rw-r--r--  drivers/iommu/io-pgtable.h                               143
-rw-r--r--  include/linux/iommu.h                                      8
9 files changed, 3112 insertions(+), 3 deletions(-)
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
new file mode 100644
index 000000000000..e34c6cdd8ba8
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -0,0 +1,70 @@
+* ARM System MMU Architecture Implementation
+
+ARM SoCs may contain an implementation of the ARM System Memory
+Management Unit Architecture, which can be used to provide 1 or 2 stages
+of address translation to bus masters external to the CPU.
+
+The SMMU may also raise interrupts in response to various fault
+conditions.
+
+** System MMU required properties:
+
+- compatible : Should be one of:
+
+ "arm,smmu-v1"
+ "arm,smmu-v2"
+ "arm,mmu-400"
+ "arm,mmu-500"
+
+ depending on the particular implementation and/or the
+ version of the architecture implemented.
+
+- reg : Base address and size of the SMMU.
+
+- #global-interrupts : The number of global interrupts exposed by the
+ device.
+
+- interrupts : Interrupt list, with the first #global-interrupts entries
+ corresponding to the global interrupts and any
+ following entries corresponding to context interrupts,
+ specified in order of their indexing by the SMMU.
+
+ For SMMUv2 implementations, there must be exactly one
+ interrupt per context bank. In the case of a single,
+ combined interrupt, it must be listed multiple times.
+
+- mmu-masters : A list of phandles to device nodes representing bus
+ masters for which the SMMU can provide a translation
+ and their corresponding StreamIDs (see example below).
+ Each device node linked from this list must have a
+ "#stream-id-cells" property, indicating the number of
+ StreamIDs associated with it.
+
+** System MMU optional properties:
+
+- smmu-parent : When multiple SMMUs are chained together, this
+ property can be used to provide a phandle to this
+ SMMU's parent SMMU, that is, the next SMMU on the
+ path from the mmu-masters towards memory (see the
+ supplementary example below).
+
+Example:
+
+ smmu {
+ compatible = "arm,smmu-v1";
+ reg = <0xba5e0000 0x10000>;
+ #global-interrupts = <2>;
+ interrupts = <0 32 4>,
+ <0 33 4>,
+ <0 34 4>, /* This is the first context interrupt */
+ <0 35 4>,
+ <0 36 4>,
+ <0 37 4>;
+
+ /*
+ * Two DMA controllers, the first with two StreamIDs (0xd01d
+ * and 0xd01e) and the second with only one (0xd11c).
+ */
+ mmu-masters = <&dma0 0xd01d 0xd01e>,
+ <&dma1 0xd11c>;
+ };
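
Supplementary example (editorial sketch, not part of the patch): the main
example above does not show the optional smmu-parent property, the
"#stream-id-cells" property required on master nodes, or the SMMUv2 rule
that a single combined context interrupt be listed once per context bank.
All labels, unit addresses, StreamIDs and interrupt specifiers below are
invented purely for illustration:

	dma2: dma-controller@7ff00000 {
		/* other properties omitted */
		/* Required because this node is referenced from mmu-masters */
		#stream-id-cells = <1>;
	};

	smmu1: smmu@2b400000 {
		compatible = "arm,smmu-v2";
		reg = <0x2b400000 0x10000>;
		#global-interrupts = <1>;
		/*
		 * One global interrupt, then a single combined context
		 * interrupt listed once per context bank (two banks
		 * assumed here), as required for SMMUv2.
		 */
		interrupts = <0 100 4>,
			     <0 101 4>,
			     <0 101 4>;
		mmu-masters = <&dma2 0xd11d>;
		/* Next SMMU on the path from the masters towards memory */
		smmu-parent = <&smmu0>;
	};

	smmu0: smmu@2b500000 {
		compatible = "arm,smmu-v2";
		reg = <0x2b500000 0x10000>;
		#global-interrupts = <1>;
		interrupts = <0 120 4>, <0 121 4>;
	};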
diff --git a/MAINTAINERS b/MAINTAINERS
index 8a8a48b39cd8..60813bf73c64 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1318,6 +1318,13 @@ T: git git://git.xilinx.com/linux-xlnx.git
S: Supported
F: arch/arm/mach-zynq/
+ARM SMMU DRIVER
+M: Will Deacon <will.deacon@arm.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/iommu/arm-smmu.c
+F: drivers/iommu/io-pgtable-arm.c
+
ARM64 PORT (AARCH64 ARCHITECTURE)
M: Catalin Marinas <catalin.marinas@arm.com>
M: Will Deacon <will.deacon@arm.com>
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c332fb98480d..a6b7c6227adf 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -13,6 +13,32 @@ menuconfig IOMMU_SUPPORT
if IOMMU_SUPPORT
+menu "Generic IOMMU Pagetable Support"
+
+# Selected by the actual pagetable implementations
+config IOMMU_IO_PGTABLE
+ bool
+
+config IOMMU_IO_PGTABLE_LPAE
+ bool "ARMv7/v8 Long Descriptor Format"
+ select IOMMU_IO_PGTABLE
+ help
+ Enable support for the ARM long descriptor pagetable format.
+ This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
+ sizes at both stage-1 and stage-2, as well as address spaces
+ up to 48-bits in size.
+
+config IOMMU_IO_PGTABLE_LPAE_SELFTEST
+ bool "LPAE selftests"
+ depends on IOMMU_IO_PGTABLE_LPAE
+ help
+ Enable self-tests for the LPAE page table allocator. This performs
+ a series of page-table consistency checks during boot.
+
+ If unsure, say N here.
+
+endmenu
+
config OF_IOMMU
def_bool y
depends on OF
@@ -261,4 +287,17 @@ config SHMOBILE_IOMMU_L1SIZE
default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB
default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB
+config ARM_SMMU
+ bool "ARM Ltd. System MMU (SMMU) Support"
+ depends on ARM64 || ARM
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select ARM_DMA_USE_IOMMU if ARM
+ help
+ Support for implementations of the ARM System MMU architecture
+ versions 1 and 2.
+
+ Say Y here if your SoC includes an IOMMU device implementing
+ the ARM SMMU architecture.
+
endif # IOMMU_SUPPORT
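
A minimal configuration sketch (editorial note, not part of the patch): on
an ARM or ARM64 build, only the top-level options need enabling, since
ARM_SMMU selects the pagetable support it requires:

	CONFIG_IOMMU_SUPPORT=y
	CONFIG_ARM_SMMU=y
	# CONFIG_IOMMU_IO_PGTABLE and CONFIG_IOMMU_IO_PGTABLE_LPAE are
	# selected automatically by CONFIG_ARM_SMMU
	# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set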
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index ef0e5207ad69..f6b95af4c80c 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,8 +1,11 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
new file mode 100644
index 000000000000..61eb249315ad
--- /dev/null
+++ b/drivers/iommu/arm-smmu.c
@@ -0,0 +1,1777 @@
+/*
+ * IOMMU API for ARM architected SMMU implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2013 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This driver currently supports:
+ * - SMMUv1 and v2 implementations
+ * - Stream-matching and stream-indexing
+ * - v7/v8 long-descriptor format
+ * - Non-secure access to the SMMU
+ * - Context fault reporting
+ */
+
+#define pr_fmt(fmt) "arm-smmu: " fmt
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/amba/bus.h>
+
+#include "io-pgtable.h"
+
+/* Maximum number of stream IDs assigned to a single device */
+#define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS
+
+/* Maximum number of context banks per SMMU */
+#define ARM_SMMU_MAX_CBS 128
+
+/* Maximum number of mapping groups per SMMU */
+#define ARM_SMMU_MAX_SMRS 128
+
+/* SMMU global address space */
+#define ARM_SMMU_GR0(smmu) ((smmu)->base)
+#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
+
+/*
+ * SMMU global address space with conditional offset to access secure
+ * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
+ * nsGFSYNR0: 0x450)
+ */
+#define ARM_SMMU_GR0_NS(smmu) \
+ ((smmu)->base + \
+ ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
+ ? 0x400 : 0))
+
+/* Configuration registers */
+#define ARM_SMMU_GR0_sCR0 0x0
+#define sCR0_CLIENTPD (1 << 0)
+#define sCR0_GFRE (1 << 1)
+#define sCR0_GFIE (1 << 2)
+#define sCR0_GCFGFRE (1 << 4)
+#define sCR0_GCFGFIE (1 << 5)
+#define sCR0_USFCFG (1 << 10)
+#define sCR0_VMIDPNE (1 << 11)
+#define sCR0_PTM (1 << 12)
+#define sCR0_FB (1 << 13)
+#define sCR0_BSU_SHIFT 14
+#define sCR0_BSU_MASK 0x3
+
+/* Identification registers */
+#define ARM_SMMU_GR0_ID0 0x20
+#define ARM_SMMU_GR0_ID1 0x24
+#define ARM_SMMU_GR0_ID2 0x28
+#define ARM_SMMU_GR0_ID3 0x2c
+#define ARM_SMMU_GR0_ID4 0x30
+#define ARM_SMMU_GR0_ID5 0x34
+#define ARM_SMMU_GR0_ID6 0x38
+#define ARM_SMMU_GR0_ID7 0x3c
+#define ARM_SMMU_GR0_sGFSR 0x48
+#define ARM_SMMU_GR0_sGFSYNR0 0x50
+#define ARM_SMMU_GR0_sGFSYNR1 0x54
+#define ARM_SMMU_GR0_sGFSYNR2 0x58
+
+#define ID0_S1TS (1 << 30)
+#define ID0_S2TS (1 << 29)
+#define ID0_NTS (1 << 28)
+#define ID0_SMS (1 << 27)
+#define ID0_CTTW (1 << 14)
+#define ID0_NUMIRPT_SHIFT 16
+#define ID0_NUMIRPT_MASK 0xff
+#define ID0_NUMSIDB_SHIFT 9
+#define ID0_NUMSIDB_MASK 0xf
+#define ID0_NUMSMRG_SHIFT 0
+#define ID0_NUMSMRG_MASK 0xff
+
+#define ID1_PAGESIZE (1 << 31)
+#define ID1_NUMPAGENDXB_SHIFT 28
+#define ID1_NUMPAGENDXB_MASK 7
+#define ID1_NUMS2CB_SHIFT 16
+#define ID1_NUMS2CB_MASK 0xff
+#define ID1_NUMCB_SHIFT 0
+#define ID1_NUMCB_MASK 0xff
+
+#define ID2_OAS_SHIFT 4
+#define ID2_OAS_MASK 0xf
+#define ID2_IAS_SHIFT 0
+#define ID2_IAS_MASK 0xf
+#define ID2_UBS_SHIFT 8
+#define ID2_UBS_MASK 0xf
+#define ID2_PTFS_4K (1 << 12)
+#define ID2_PTFS_16K (1 << 13)
+#define ID2_PTFS_64K (1 << 14)
+
+/* Global TLB invalidation */
+#define ARM_SMMU_GR0_TLBIVMID 0x64
+#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
+#define ARM_SMMU_GR0_TLBIALLH 0x6c
+#define ARM_SMMU_GR0_sTLBGSYNC 0x70
+#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
+#define sTLBGSTATUS_GSACTIVE (1 << 0)
+#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
+
+/* Stream mapping registers */
+#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
+#define SMR_VALID (1 << 31)
+#define SMR_MASK_SHIFT 16
+#define SMR_MASK_MASK 0x7fff
+#define SMR_ID_SHIFT 0
+#define SMR_ID_MASK 0x7fff
+
+#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
+#define S2CR_CBNDX_SHIFT 0
+#define S2CR_CBNDX_MASK 0xff
+#define S2CR_TYPE_SHIFT 16
+#define S2CR_TYPE_MASK 0x3
+#define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT)
+#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
+#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)
+
+/* Context bank attribute registers */
+#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
+#define CBAR_VMID_SHIFT 0
+#define CBAR_VMID_MASK 0xff
+#define CBAR_S1_BPSHCFG_SHIFT 8
+#define CBAR_S1_BPSHCFG_MASK 3
+#define CBAR_S1_BPSHCFG_NSH 3
+#define CBAR_S1_MEMATTR_SHIFT 12
+#define CBAR_S1_MEMATTR_MASK 0xf
+#define CBAR_S1_MEMATTR_WB 0xf
+#define CBAR_TYPE_SHIFT 16
+#define CBAR_TYPE_MASK 0x3
+#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
+#define CBAR_IRPTNDX_SHIFT 24
+#define CBAR_IRPTNDX_MASK 0xff
+
+#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
+#define CBA2R_RW64_32BIT (0 << 0)
+#define CBA2R_RW64_64BIT (1 << 0)
+
+/* Translation context bank */
+#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
+#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
+
+#define ARM_SMMU_CB_SCTLR 0x0
+#define ARM_SMMU_CB_RESUME 0x8
+#define ARM_SMMU_CB_TTBCR2 0x10
+#define ARM_SMMU_CB_TTBR0_LO 0x20
+#define ARM_SMMU_CB_TTBR0_HI 0x24
+#define ARM_SMMU_CB_TTBR1_LO 0x28
+#define ARM_SMMU_CB_TTBR1_HI 0x2c
+#define ARM_SMMU_CB_TTBCR 0x30
+#define ARM_SMMU_CB_S1_MAIR0 0x38
+#define ARM_SMMU_CB_S1_MAIR1 0x3c
+#define ARM_SMMU_CB_FSR 0x58
+#define ARM_SMMU_CB_FAR_LO 0x60
+#define ARM_SMMU_CB_FAR_HI 0x64
+#define ARM_SMMU_CB_FSYNR0 0x68
+#define ARM_SMMU_CB_S1_TLBIVA 0x600
+#define ARM_SMMU_CB_S1_TLBIASID 0x610
+#define ARM_SMMU_CB_S1_TLBIVAL 0x620
+#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
+#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
+
+#define SCTLR_S1_ASIDPNE (1 << 12)
+#define SCTLR_CFCFG (1 << 7)
+#define SCTLR_CFIE (1 << 6)
+#define SCTLR_CFRE (1 << 5)
+#define SCTLR_E (1 << 4)
+#define SCTLR_AFE (1 << 2)
+#define SCTLR_TRE (1 << 1)
+#define SCTLR_M (1 << 0)
+#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
+
+#define RESUME_RETRY (0 << 0)
+#define RESUME_TERMINATE (1 << 0)
+
+#define TTBCR2_SEP_SHIFT 15
+#define TTBCR2_SEP_MASK 0x7
+
+#define TTBCR2_ADDR_32 0
+#define TTBCR2_ADDR_36 1
+#define TTBCR2_ADDR_40 2
+#define TTBCR2_ADDR_42 3
+#define TTBCR2_ADDR_44 4
+#define TTBCR2_ADDR_48 5
+
+#define TTBRn_HI_ASID_SHIFT 16
+
+#define FSR_MULTI (1 << 31)
+#define FSR_SS (1 << 30)
+#define FSR_UUT (1 << 8)
+#define FSR_ASF (1 << 7)
+#define FSR_TLBLKF (1 << 6)
+#define FSR_TLBMCF (1 << 5)
+#define FSR_EF (1 << 4)
+#define FSR_PF (1 << 3)
+#define FSR_AFF (1 << 2)
+#define FSR_TF (1 << 1)
+
+#define FSR_IGN (FSR_AFF | FSR_ASF | \
+ FSR_TLBMCF | FSR_TLBLKF)
+#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
+ FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
+
+#define FSYNR0_WNR (1 << 4)
+
+static int force_stage;
+module_param_named(force_stage, force_stage, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(force_stage,
+ "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
+
+enum arm_smmu_arch_version {
+ ARM_SMMU_V1 = 1,
+ ARM_SMMU_V2,
+};
+
+struct arm_smmu_smr {
+ u8 idx;
+ u16 mask;
+ u16 id;
+};
+
+struct arm_smmu_master_cfg {
+ int num_streamids;
+ u16 streamids[MAX_MASTER_STREAMIDS];
+ struct arm_smmu_smr *smrs;
+};
+
+struct arm_smmu_master {
+ struct device_node *of_node;
+ struct rb_node node;
+ struct arm_smmu_master_cfg cfg;
+};
+
+struct arm_smmu_device {
+ struct device *dev;
+
+ void __iomem *base;
+ unsigned long size;
+ unsigned long pgshift;
+
+#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
+#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
+#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
+#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
+#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
+ u32 features;
+
+#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+ u32 options;
+ enum arm_smmu_arch_version version;
+
+ u32 num_context_banks;
+ u32 num_s2_context_banks;
+ DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
+ atomic_t irptndx;
+
+ u32 num_mapping_groups;
+ DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
+
+ unsigned long va_size;
+ unsigned long ipa_size;
+ unsigned long pa_size;
+
+ u32 num_global_irqs;
+ u32 num_context_irqs;
+ unsigned int *irqs;
+
+ struct list_head list;
+ struct rb_root masters;
+};
+
+struct arm_smmu_cfg {
+ u8 cbndx;
+ u8 irptndx;
+ u32 cbar;
+};
+#define INVALID_IRPTNDX 0xff
+
+#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
+#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
+
+enum arm_smmu_domain_stage {
+ ARM_SMMU_DOMAIN_S1 = 0,
+ ARM_SMMU_DOMAIN_S2,
+ ARM_SMMU_DOMAIN_NESTED,
+};
+
+struct arm_smmu_domain {
+ struct arm_smmu_device *smmu;
+ struct io_pgtable_ops *pgtbl_ops;
+ spinlock_t pgtbl_lock;
+ struct arm_smmu_cfg cfg;
+ enum arm_smmu_domain_stage stage;
+ struct mutex init_mutex; /* Protects smmu pointer */
+};
+
+static struct iommu_ops arm_smmu_ops;
+
+static DEFINE_SPINLOCK(arm_smmu_devices_lock);
+static LIST_HEAD(arm_smmu_devices);
+
+struct arm_smmu_option_prop {
+ u32 opt;
+ const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+ { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
+ { 0, NULL},
+};
+
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+ int i = 0;
+
+ do {
+ if (of_property_read_bool(smmu->dev->of_node,
+ arm_smmu_options[i].prop)) {
+ smmu->options |= arm_smmu_options[i].opt;
+ dev_notice(smmu->dev, "option %s\n",
+ arm_smmu_options[i].prop);
+ }
+ } while (arm_smmu_options[++i].opt);
+}
+
+static struct device_node *dev_get_dev_node(struct device *dev)
+{
+ return dev->of_node;
+}
+
+static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
+ struct device_node *dev_node)
+{
+ struct rb_node *node = smmu->masters.rb_node;
+
+ while (node) {
+ struct arm_smmu_master *master;
+
+ master = container_of(node, struct arm_smmu_master, node);
+
+ if (dev_node < master->of_node)
+ node = node->rb_left;
+ else if (dev_node > master->of_node)
+ node = node->rb_right;
+ else
+ return master;
+ }
+
+ return NULL;
+}
+
+static struct arm_smmu_master_cfg *
+find_smmu_master_cfg(struct device *dev)
+{
+ struct arm_smmu_master_cfg *cfg = NULL;
+ struct iommu_group *group = iommu_group_get(dev);
+
+ if (group) {
+ cfg = iommu_group_get_iommudata(group);
+ iommu_group_put(group);
+ }
+
+ return cfg;
+}
+
+static int insert_smmu_master(struct arm_smmu_device *smmu,
+ struct arm_smmu_master *master)
+{
+ struct rb_node **new, *parent;
+
+ new = &smmu->masters.rb_node;
+ parent = NULL;
+ while (*new) {
+ struct arm_smmu_master *this
+ = container_of(*new, struct arm_smmu_master, node);
+
+ parent = *new;
+ if (master->of_node < this->of_node)
+ new = &((*new)->rb_left);
+ else if (master->of_node > this->of_node)
+ new = &((*new)->rb_right);
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node(&master->node, parent, new);
+ rb_insert_color(&master->node, &smmu->masters);
+ return 0;
+}
+
+static int register_smmu_master(struct arm_smmu_device *smmu,
+ struct device *dev,
+ struct of_phandle_args *masterspec)
+{
+ int i;
+ struct arm_smmu_master *master;
+
+ master = find_smmu_master(smmu, masterspec->np);
+ if (master) {
+ dev_err(dev,
+ "rejecting multiple registrations for master device %s\n",
+ masterspec->np->name);
+ return -EBUSY;
+ }
+
+ if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
+ dev_err(dev,
+ "reached maximum number (%d) of stream IDs for master device %s\n",
+ MAX_MASTER_STREAMIDS, masterspec->np->name);
+ return -ENOSPC;
+ }
+
+ master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->of_node = masterspec->np;
+ master->cfg.num_streamids = masterspec->args_count;
+
+ for (i = 0; i < master->cfg.num_streamids; ++i) {
+ u16 streamid = masterspec->args[i];
+
+ if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
+ (streamid >= smmu->num_mapping_groups)) {
+ dev_err(dev,
+ "stream ID for master device %s greater than maximum allowed (%d)\n",
+ masterspec->np->name, smmu->num_mapping_groups);
+ return -ERANGE;
+ }
+ master->cfg.streamids[i] = streamid;
+ }
+ return insert_smmu_master(smmu, master);
+}
+
+static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
+{
+ struct arm_smmu_device *smmu;
+ struct arm_smmu_master *master = NULL;
+ struct device_node *dev_node = dev_get_dev_node(dev);
+
+ spin_lock(&arm_smmu_devices_lock);
+ list_for_each_entry(smmu, &arm_smmu_devices, list) {
+ master = find_smmu_master(smmu, dev_node);
+ if (master)
+ break;
+ }
+ spin_unlock(&arm_smmu_devices_lock);
+
+ return master ? smmu : NULL;
+}
+
+static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
+{
+ int idx;
+
+ do {
+ idx = find_next_zero_bit(map, end, start);
+ if (idx == end)
+ return -ENOSPC;
+ } while (test_and_set_bit(idx, map));
+
+ return idx;
+}
+
+static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
+{
+ clear_bit(idx, map);
+}
+
+/* Wait for any pending TLB invalidations to complete */
+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+{
+ int count = 0;
+ void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+ writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
+ while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
+ & sTLBGSTATUS_GSACTIVE) {
+ cpu_relax();
+ if (++count == TLB_LOOP_TIMEOUT) {
+ dev_err_ratelimited(smmu->dev,
+ "TLB sync timed out -- SMMU may be deadlocked\n");
+ return;
+ }
+ udelay(1);
+ }
+}
+
+static void arm_smmu_tlb_sync(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ __arm_smmu_tlb_sync(smmu_domain->smmu);
+}
+
+static void arm_smmu_tlb_inv_context(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+ void __iomem *base;
+
+ if (stage1) {
+ base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ writel_relaxed(ARM_SMMU_CB_ASID(cfg),
+ base + ARM_SMMU_CB_S1_TLBIASID);
+ } else {
+ base = ARM_SMMU_GR0(smmu);
+ writel_relaxed(ARM_SMMU_CB_VMID(cfg),
+ base + ARM_SMMU_GR0_TLBIVMID);
+ }
+
+ __arm_smmu_tlb_sync(smmu);
+}
+
+static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
+ bool leaf, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+ void __iomem *reg;
+
+ if (stage1) {
+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+
+ if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
+ iova &= ~12UL;
+ iova |= ARM_SMMU_CB_ASID(cfg);
+ writel_relaxed(iova, reg);
+#ifdef CONFIG_64BIT
+ } else {
+ iova >>= 12;
+ iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
+ writeq_relaxed(iova, reg);
+#endif
+ }
+#ifdef CONFIG_64BIT
+ } else if (smmu->version == ARM_SMMU_V2) {
+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
+ ARM_SMMU_CB_S2_TLBIIPAS2;
+ writeq_relaxed(iova >> 12, reg);
+#endif
+ } else {
+ reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
+ writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
+ }
+}
+
+static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+
+ /* Ensure new page tables are visible to the hardware walker */
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+ dsb(ishst);
+ } else {
+ /*
+ * If the SMMU can't walk tables in the CPU caches, treat them
+ * like non-coherent DMA since we need to flush the new entries
+ * all the way out to memory. There's no possibility of
+ * recursion here as the SMMU table walker will not be wired
+ * through another SMMU.
+ */
+ dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+ DMA_TO_DEVICE);
+ }
+}
+
+static struct iommu_gather_ops arm_smmu_gather_ops = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context,
+ .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
+ .tlb_sync = arm_smmu_tlb_sync,
+ .flush_pgtable = arm_smmu_flush_pgtable,
+};
+
+static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
+{
+ int flags, ret;
+ u32 fsr, far, fsynr, resume;
+ unsigned long iova;
+ struct iommu_domain *domain = dev;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *cb_base;
+
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+
+ if (!(fsr & FSR_FAULT))
+ return IRQ_NONE;
+
+ if (fsr & FSR_IGN)
+ dev_err_ratelimited(smmu->dev,
+ "Unexpected context fault (fsr 0x%x)\n",
+ fsr);
+
+ fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+ flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+
+ far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
+ iova = far;
+#ifdef CONFIG_64BIT
+ far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
+ iova |= ((unsigned long)far << 32);
+#endif
+
+ if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
+ ret = IRQ_HANDLED;
+ resume = RESUME_RETRY;
+ } else {
+ dev_err_ratelimited(smmu->dev,
+ "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+ iova, fsynr, cfg->cbndx);
+ ret = IRQ_NONE;
+ resume = RESUME_TERMINATE;
+ }
+
+ /* Clear the faulting FSR */
+ writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+
+ /* Retry or terminate any stalled transactions */
+ if (fsr & FSR_SS)
+ writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+
+ return ret;
+}
+
+static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
+{
+ u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
+ struct arm_smmu_device *smmu = dev;
+ void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
+
+ gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+ gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
+ gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
+ gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+
+ if (!gfsr)
+ return IRQ_NONE;
+
+ dev_err_ratelimited(smmu->dev,
+ "Unexpected global fault, this could be serious\n");
+ dev_err_ratelimited(smmu->dev,
+ "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+ gfsr, gfsynr0, gfsynr1, gfsynr2);
+
+ writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+ return IRQ_HANDLED;
+}
+
+static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg)
+{
+ u32 reg;
+ bool stage1;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *cb_base, *gr0_base, *gr1_base;
+
+ gr0_base = ARM_SMMU_GR0(smmu);
+ gr1_base = ARM_SMMU_GR1(smmu);
+ stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+
+ /* CBAR */
+ reg = cfg->cbar;
+ if (smmu->version == ARM_SMMU_V1)
+ reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
+
+ /*
+ * Use the weakest shareability/memory types, so they are
+ * overridden by the ttbcr/pte.
+ */
+ if (stage1) {
+ reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
+ (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+ } else {
+ reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
+ }
+ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
+
+ if (smmu->version > ARM_SMMU_V1) {
+ /* CBA2R */
+#ifdef CONFIG_64BIT
+ reg = CBA2R_RW64_64BIT;
+#else
+ reg = CBA2R_RW64_32BIT;
+#endif
+ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
+ }
+
+ /* TTBRs */
+ if (stage1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32;
+ reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32;
+ reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI);
+ } else {
+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+ }
+
+ /* TTBCR */
+ if (stage1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+ if (smmu->version > ARM_SMMU_V1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
+ switch (smmu->va_size) {
+ case 32:
+ reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
+ break;
+ case 36:
+ reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
+ break;
+ case 40:
+ reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
+ break;
+ case 42:
+ reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
+ break;
+ case 44:
+ reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
+ break;
+ case 48:
+ reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
+ break;
+ }
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+ }
+ } else {
+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+ }
+
+ /* MAIRs (stage-1 only) */
+ if (stage1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
+ }
+
+ /* SCTLR */
+ reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+ if (stage1)
+ reg |= SCTLR_S1_ASIDPNE;
+#ifdef __BIG_ENDIAN
+ reg |= SCTLR_E;
+#endif
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
+}
+
+static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+ struct arm_smmu_device *smmu)
+{
+ int irq, start, ret = 0;
+ unsigned long ias, oas;
+ struct io_pgtable_ops *pgtbl_ops;
+ struct io_pgtable_cfg pgtbl_cfg;
+ enum io_pgtable_fmt fmt;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+
+ mutex_lock(&smmu_domain->init_mutex);
+ if (smmu_domain->smmu)
+ goto out_unlock;
+
+ /*
+ * Mapping the requested stage onto what we support is surprisingly
+ * complicated, mainly because the spec allows S1+S2 SMMUs without
+ * support for nested translation. That means we end up with the
+ * following table:
+ *
+ * Requested Supported Actual
+ * S1 N S1
+ * S1 S1+S2 S1
+ * S1 S2 S2
+ * S1 S1 S1
+ * N N N
+ * N S1+S2 S2
+ * N S2 S2
+ * N S1 S1
+ *
+ * Note that you can't actually request stage-2 mappings.
+ */
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
+ cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+ start = smmu->num_s2_context_banks;
+ ias = smmu->va_size;
+ oas = smmu->ipa_size;
+ if (IS_ENABLED(CONFIG_64BIT))
+ fmt = ARM_64_LPAE_S1;
+ else
+ fmt = ARM_32_LPAE_S1;
+ break;
+ case ARM_SMMU_DOMAIN_NESTED:
+ /*
+ * We will likely want to change this if/when KVM gets
+ * involved.
+ */
+ case ARM_SMMU_DOMAIN_S2:
+ cfg->cbar = CBAR_TYPE_S2_TRANS;
+ start = 0;
+ ias = smmu->ipa_size;
+ oas = smmu->pa_size;
+ if (IS_ENABLED(CONFIG_64BIT))
+ fmt = ARM_64_LPAE_S2;
+ else
+ fmt = ARM_32_LPAE_S2;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
+ smmu->num_context_banks);
+ if (IS_ERR_VALUE(ret))
+ goto out_unlock;
+
+ cfg->cbndx = ret;
+ if (smmu->version == ARM_SMMU_V1) {
+ cfg->irptndx = atomic_inc_return(&smmu->irptndx);
+ cfg->irptndx %= smmu->num_context_irqs;
+ } else {
+ cfg->irptndx = cfg->cbndx;
+ }
+
+ pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
+ .ias = ias,
+ .oas = oas,
+ .tlb = &arm_smmu_gather_ops,
+ };
+
+ smmu_domain->smmu = smmu;
+ pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
+ if (!pgtbl_ops) {
+ ret = -ENOMEM;
+ goto out_clear_smmu;
+ }
+
+ /* Update our support page sizes to reflect the page table format */
+ arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+
+ /* Initialise the context bank with our page table cfg */
+ arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
+
+ /*
+ * Request context fault interrupt. Do this last to avoid the
+ * handler seeing a half-initialised domain state.
+ */
+ irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
+ ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
+ "arm-smmu-context-fault", domain);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
+ cfg->irptndx, irq);
+ cfg->irptndx = INVALID_IRPTNDX;
+ }
+
+ mutex_unlock(&smmu_domain->init_mutex);
+
+ /* Publish page table ops for map/unmap */
+ smmu_domain->pgtbl_ops = pgtbl_ops;
+ return 0;
+
+out_clear_smmu:
+ smmu_domain->smmu = NULL;
+out_unlock:
+ mutex_unlock(&smmu_domain->init_mutex);
+ return ret;
+}
+
+static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ void __iomem *cb_base;
+ int irq;
+
+ if (!smmu)
+ return;
+
+ /*
+ * Disable the context bank and free the page tables before
+ * releasing the context bank back to the allocator.
+ */
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+
+ if (cfg->irptndx != INVALID_IRPTNDX) {
+ irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
+ free_irq(irq, domain);
+ }
+
+ if (smmu_domain->pgtbl_ops)
+ free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+
+ __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+}
+
+static int arm_smmu_domain_init(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain;
+
+ /*
+ * Allocate the domain and initialise some of its data structures.
+ * We can't really do anything meaningful until we've added a
+ * master.
+ */
+ smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
+ if (!smmu_domain)
+ return -ENOMEM;
+
+ mutex_init(&smmu_domain->init_mutex);
+ spin_lock_init(&smmu_domain->pgtbl_lock);
+ domain->priv = smmu_domain;
+ return 0;
+}
+
+static void arm_smmu_domain_destroy(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+
+ /*
+ * Free the domain resources. We assume that all devices have
+ * already been detached.
+ */
+ arm_smmu_destroy_domain_context(domain);
+ kfree(smmu_domain);
+}
+
+static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
+ struct arm_smmu_master_cfg *cfg)
+{
+ int i;
+ struct arm_smmu_smr *smrs;
+ void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+ if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
+ return 0;
+
+ if (cfg->smrs)
+ return -EEXIST;
+
+ smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
+ if (!smrs) {
+ dev_err(smmu->dev, "failed to allocate %d SMRs\n",
+ cfg->num_streamids);
+ return -ENOMEM;
+ }
+
+ /* Allocate the SMRs on the SMMU */
+ for (i = 0; i < cfg->num_streamids; ++i) {
+ int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
+ smmu->num_mapping_groups);
+ if (IS_ERR_VALUE(idx)) {
+ dev_err(smmu->dev, "failed to allocate free SMR\n");
+ goto err_free_smrs;
+ }
+
+ smrs[i] = (struct arm_smmu_smr) {
+ .idx = idx,
+ .mask = 0, /* We don't currently share SMRs */
+ .id = cfg->streamids[i],
+ };
+ }
+
+ /* It worked! Now, poke the actual hardware */
+ for (i = 0; i < cfg->num_streamids; ++i) {
+ u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
+ smrs[i].mask << SMR_MASK_SHIFT;
+ writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
+ }
+
+ cfg->smrs = smrs;
+ return 0;
+
+err_free_smrs:
+ while (--i >= 0)
+ __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
+ kfree(smrs);
+ return -ENOSPC;
+}
+
+static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
+ struct arm_smmu_master_cfg *cfg)
+{
+ int i;
+ void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ struct arm_smmu_smr *smrs = cfg->smrs;
+
+ if (!smrs)
+ return;
+
+ /* Invalidate the SMRs before freeing back to the allocator */
+ for (i = 0; i < cfg->num_streamids; ++i) {
+ u8 idx = smrs[i].idx;
+
+ writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
+ __arm_smmu_free_bitmap(smmu->smr_map, idx);
+ }
+
+ cfg->smrs = NULL;
+ kfree(smrs);
+}
+
+static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_master_cfg *cfg)
+{
+ int i, ret;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+ /* Devices in an IOMMU group may already be configured */
+ ret = arm_smmu_master_configure_smrs(smmu, cfg);
+ if (ret)
+ return ret == -EEXIST ? 0 : ret;
+
+ for (i = 0; i < cfg->num_streamids; ++i) {
+ u32 idx, s2cr;
+
+ idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
+ s2cr = S2CR_TYPE_TRANS |
+ (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
+ writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
+ }
+
+ return 0;
+}
+
+static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_master_cfg *cfg)
+{
+ int i;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+ /* An IOMMU group is torn down by the first device to be removed */
+ if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
+ return;
+
+ /*
+ * We *must* clear the S2CR first, because freeing the SMR means
+ * that it can be re-allocated immediately.
+ */
+ for (i = 0; i < cfg->num_streamids; ++i) {
+ u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
+
+ writel_relaxed(S2CR_TYPE_BYPASS,
+ gr0_base + ARM_SMMU_GR0_S2CR(idx));
+ }
+
+ arm_smmu_master_free_smrs(smmu, cfg);
+}
+
+static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ int ret;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_device *smmu;
+ struct arm_smmu_master_cfg *cfg;
+
+ smmu = find_smmu_for_device(dev);
+ if (!smmu) {
+ dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
+ return -ENXIO;
+ }
+
+ if (dev->archdata.iommu) {
+ dev_err(dev, "already attached to IOMMU domain\n");
+ return -EEXIST;
+ }
+
+ /* Ensure that the domain is finalised */
+ ret = arm_smmu_init_domain_context(domain, smmu);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ /*
+ * Sanity check the domain. We don't support domains across
+ * different SMMUs.
+ */
+ if (smmu_domain->smmu != smmu) {
+ dev_err(dev,
+ "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
+ dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
+ return -EINVAL;
+ }
+
+ /* Looks ok, so add the device to the domain */
+ cfg = find_smmu_master_cfg(dev);
+ if (!cfg)
+ return -ENODEV;
+
+ ret = arm_smmu_domain_add_master(smmu_domain, cfg);
+ if (!ret)
+ dev->archdata.iommu = domain;
+ return ret;
+}
+
+static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_master_cfg *cfg;
+
+ cfg = find_smmu_master_cfg(dev);
+ if (!cfg)
+ return;
+
+ dev->archdata.iommu = NULL;
+ arm_smmu_domain_remove_master(smmu_domain, cfg);
+}
+
+static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ int ret;
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+ if (!ops)
+ return -ENODEV;
+
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ ret = ops->map(ops, iova, paddr, size, prot);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ return ret;
+}
+
+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size)
+{
+ size_t ret;
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+ if (!ops)
+ return 0;
+
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ ret = ops->unmap(ops, iova, size);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ return ret;
+}
+
+static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ phys_addr_t ret;
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+ if (!ops)
+ return 0;
+
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ ret = ops->iova_to_phys(ops, iova);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ return ret;
+}
+
+static int arm_smmu_add_device(struct device *dev)
+{
+ struct arm_smmu_device *smmu;
+ struct arm_smmu_master_cfg *cfg;
+ struct iommu_group *group;
+ void (*releasefn)(void *) = NULL;
+ int ret;
+
+ smmu = find_smmu_for_device(dev);
+ if (!smmu)
+ return -ENODEV;
+
+ group = iommu_group_alloc();
+ if (IS_ERR(group)) {
+ dev_err(dev, "Failed to allocate IOMMU group\n");
+ return PTR_ERR(group);
+ }
+
+ {
+ struct arm_smmu_master *master;
+
+ master = find_smmu_master(smmu, dev->of_node);
+ if (!master) {
+ ret = -ENODEV;
+ goto out_put_group;
+ }
+
+ cfg = &master->cfg;
+ }
+
+ iommu_group_set_iommudata(group, cfg, releasefn);
+ ret = iommu_group_add_device(group, dev);
+
+out_put_group:
+ iommu_group_put(group);
+ return ret;
+}
+
+static void arm_smmu_remove_device(struct device *dev)
+{
+ iommu_group_remove_device(dev);
+}
+
+static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ int ret = 0;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+
+ mutex_lock(&smmu_domain->init_mutex);
+
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ if (smmu_domain->smmu) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ if (*(int *)data)
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+ break;
+ default:
+ ret = -ENODEV;
+ }
+
+out_unlock:
+ mutex_unlock(&smmu_domain->init_mutex);
+ return ret;
+}
+
+static struct iommu_ops arm_smmu_ops = {
+ .domain_init = arm_smmu_domain_init,
+ .domain_destroy = arm_smmu_domain_destroy,
+ .attach_dev = arm_smmu_attach_dev,
+ .detach_dev = arm_smmu_detach_dev,
+ .map = arm_smmu_map,
+ .unmap = arm_smmu_unmap,
+ .iova_to_phys = arm_smmu_iova_to_phys,
+ .add_device = arm_smmu_add_device,
+ .remove_device = arm_smmu_remove_device,
+ .domain_get_attr = arm_smmu_domain_get_attr,
+ .domain_set_attr = arm_smmu_domain_set_attr,
+ .pgsize_bitmap = -1UL, /* Restricted during device attach */
+};
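+
+/*
+ * Editorial sketch (not in the original source): once arm_smmu_ops is
+ * registered with bus_set_iommu() in arm_smmu_init() below, a device
+ * sitting behind the SMMU is normally driven through the generic IOMMU
+ * API, roughly:
+ *
+ *   struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
+ *
+ *   if (domain && !iommu_attach_device(domain, dev))
+ *           iommu_map(domain, iova, phys, SZ_4K,
+ *                     IOMMU_READ | IOMMU_WRITE);
+ *
+ * which lands in arm_smmu_domain_init(), arm_smmu_attach_dev() and
+ * arm_smmu_map() above, respectively.
+ */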
+
+static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
+{
+ void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ void __iomem *cb_base;
+ int i = 0;
+ u32 reg;
+
+ /* clear global FSR */
+ reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
+ writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
+
+ /* Mark all SMRn as invalid and all S2CRn as bypass */
+ for (i = 0; i < smmu->num_mapping_groups; ++i) {
+ writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
+ writel_relaxed(S2CR_TYPE_BYPASS,
+ gr0_base + ARM_SMMU_GR0_S2CR(i));
+ }
+
+ /* Make sure all context banks are disabled and clear CB_FSR */
+ for (i = 0; i < smmu->num_context_banks; ++i) {
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
+ writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+ writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
+ }
+
+ /* Invalidate the TLB, just in case */
+ writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
+ writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+
+ reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+
+ /* Enable fault reporting */
+ reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+
+ /* Disable TLB broadcasting. */
+ reg |= (sCR0_VMIDPNE | sCR0_PTM);
+
+ /* Enable client access, but bypass when no mapping is found */
+ reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
+
+ /* Disable forced broadcasting */
+ reg &= ~sCR0_FB;
+
+ /* Don't upgrade barriers */
+ reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+
+ /* Push the button */
+ __arm_smmu_tlb_sync(smmu);
+ writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+}
+
+static int arm_smmu_id_size_to_bits(int size)
+{
+ switch (size) {
+ case 0:
+ return 32;
+ case 1:
+ return 36;
+ case 2:
+ return 40;
+ case 3:
+ return 42;
+ case 4:
+ return 44;
+ case 5:
+ default:
+ return 48;
+ }
+}
+
+static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
+{
+ unsigned long size;
+ void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ u32 id;
+
+ dev_notice(smmu->dev, "probing hardware configuration...\n");
+ dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
+
+ /* ID0 */
+ id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
+
+ /* Restrict available stages based on module parameter */
+ if (force_stage == 1)
+ id &= ~(ID0_S2TS | ID0_NTS);
+ else if (force_stage == 2)
+ id &= ~(ID0_S1TS | ID0_NTS);
+
+ if (id & ID0_S1TS) {
+ smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
+ dev_notice(smmu->dev, "\tstage 1 translation\n");
+ }
+
+ if (id & ID0_S2TS) {
+ smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
+ dev_notice(smmu->dev, "\tstage 2 translation\n");
+ }
+
+ if (id & ID0_NTS) {
+ smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
+ dev_notice(smmu->dev, "\tnested translation\n");
+ }
+
+ if (!(smmu->features &
+ (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
+ dev_err(smmu->dev, "\tno translation support!\n");
+ return -ENODEV;
+ }
+
+ if (id & ID0_CTTW) {
+ smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+ dev_notice(smmu->dev, "\tcoherent table walk\n");
+ }
+
+ if (id & ID0_SMS) {
+ u32 smr, sid, mask;
+
+ smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
+ smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
+ ID0_NUMSMRG_MASK;
+ if (smmu->num_mapping_groups == 0) {
+ dev_err(smmu->dev,
+ "stream-matching supported, but no SMRs present!\n");
+ return -ENODEV;
+ }
+
+ smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
+ smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+
+ mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
+ sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
+ if ((mask & sid) != sid) {
+ dev_err(smmu->dev,
+ "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
+ mask, sid);
+ return -ENODEV;
+ }
+
+ dev_notice(smmu->dev,
+ "\tstream matching with %u register groups, mask 0x%x",
+ smmu->num_mapping_groups, mask);
+ } else {
+ smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
+ ID0_NUMSIDB_MASK;
+ }
+
+ /* ID1 */
+ id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
+ smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
+
+ /* Check for size mismatch of SMMU address space from mapped region */
+ size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
+ size *= 2 << smmu->pgshift;
+ if (smmu->size != size)
+ dev_warn(smmu->dev,
+ "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
+ size, smmu->size);
+
+ smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
+ smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
+ if (smmu->num_s2_context_banks > smmu->num_context_banks) {
+ dev_err(smmu->dev, "impossible number of S2 context banks!\n");
+ return -ENODEV;
+ }
+ dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
+ smmu->num_context_banks, smmu->num_s2_context_banks);
+
+ /* ID2 */
+ id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
+ size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
+ smmu->ipa_size = size;
+
+ /* The output mask is also applied for bypass */
+ size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
+ smmu->pa_size = size;
+
+ /*
+ * What the page table walker can address actually depends on which
+ * descriptor format is in use, but since a) we don't know that yet,
+ * and b) it can vary per context bank, this will have to do...
+ */
+ dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size));
+
+ if (smmu->version == ARM_SMMU_V1) {
+ smmu->va_size = smmu->ipa_size;
+ size = SZ_4K | SZ_2M | SZ_1G;
+ } else {
+ size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
+ smmu->va_size = arm_smmu_id_size_to_bits(size);
+#ifndef CONFIG_64BIT
+ smmu->va_size = min(32UL, smmu->va_size);
+#endif
+ size = 0;
+ if (id & ID2_PTFS_4K)
+ size |= SZ_4K | SZ_2M | SZ_1G;
+ if (id & ID2_PTFS_16K)
+ size |= SZ_16K | SZ_32M;
+ if (id & ID2_PTFS_64K)
+ size |= SZ_64K | SZ_512M;
+ }
+
+ arm_smmu_ops.pgsize_bitmap &= size;
+ dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
+
+ if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
+ dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
+ smmu->va_size, smmu->ipa_size);
+
+ if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
+ dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
+ smmu->ipa_size, smmu->pa_size);
+
+ return 0;
+}
+
+static const struct of_device_id arm_smmu_of_match[] = {
+ { .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
+ { .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
+ { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
+ { .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
+ { .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
+ { },
+};
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+
+static int arm_smmu_device_dt_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ struct resource *res;
+ struct arm_smmu_device *smmu;
+ struct device *dev = &pdev->dev;
+ struct rb_node *node;
+ struct of_phandle_args masterspec;
+ int num_irqs, i, err;
+
+ smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
+ if (!smmu) {
+ dev_err(dev, "failed to allocate arm_smmu_device\n");
+ return -ENOMEM;
+ }
+ smmu->dev = dev;
+
+ of_id = of_match_node(arm_smmu_of_match, dev->of_node);
+ smmu->version = (enum arm_smmu_arch_version)of_id->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ smmu->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(smmu->base))
+ return PTR_ERR(smmu->base);
+ smmu->size = resource_size(res);
+
+ if (of_property_read_u32(dev->of_node, "#global-interrupts",
+ &smmu->num_global_irqs)) {
+ dev_err(dev, "missing #global-interrupts property\n");
+ return -ENODEV;
+ }
+
+ num_irqs = 0;
+ while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
+ num_irqs++;
+ if (num_irqs > smmu->num_global_irqs)
+ smmu->num_context_irqs++;
+ }
+
+ if (!smmu->num_context_irqs) {
+ dev_err(dev, "found %d interrupts but expected at least %d\n",
+ num_irqs, smmu->num_global_irqs + 1);
+ return -ENODEV;
+ }
+
+ smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
+ GFP_KERNEL);
+ if (!smmu->irqs) {
+ dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_irqs; ++i) {
+ int irq = platform_get_irq(pdev, i);
+
+ if (irq < 0) {
+ dev_err(dev, "failed to get irq index %d\n", i);
+ return -ENODEV;
+ }
+ smmu->irqs[i] = irq;
+ }
+
+ err = arm_smmu_device_cfg_probe(smmu);
+ if (err)
+ return err;
+
+ i = 0;
+ smmu->masters = RB_ROOT;
+ while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
+ "#stream-id-cells", i,
+ &masterspec)) {
+ err = register_smmu_master(smmu, dev, &masterspec);
+ if (err) {
+ dev_err(dev, "failed to add master %s\n",
+ masterspec.np->name);
+ goto out_put_masters;
+ }
+
+ i++;
+ }
+ dev_notice(dev, "registered %d master devices\n", i);
+
+ parse_driver_options(smmu);
+
+ if (smmu->version > ARM_SMMU_V1 &&
+ smmu->num_context_banks != smmu->num_context_irqs) {
+ dev_err(dev,
+ "found only %d context interrupt(s) but %d required\n",
+ smmu->num_context_irqs, smmu->num_context_banks);
+ err = -ENODEV;
+ goto out_put_masters;
+ }
+
+ for (i = 0; i < smmu->num_global_irqs; ++i) {
+ err = request_irq(smmu->irqs[i],
+ arm_smmu_global_fault,
+ IRQF_SHARED,
+ "arm-smmu global fault",
+ smmu);
+ if (err) {
+ dev_err(dev, "failed to request global IRQ %d (%u)\n",
+ i, smmu->irqs[i]);
+ goto out_free_irqs;
+ }
+ }
+
+ INIT_LIST_HEAD(&smmu->list);
+ spin_lock(&arm_smmu_devices_lock);
+ list_add(&smmu->list, &arm_smmu_devices);
+ spin_unlock(&arm_smmu_devices_lock);
+
+ arm_smmu_device_reset(smmu);
+ return 0;
+
+out_free_irqs:
+ while (i--)
+ free_irq(smmu->irqs[i], smmu);
+
+out_put_masters:
+ for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
+ struct arm_smmu_master *master
+ = container_of(node, struct arm_smmu_master, node);
+ of_node_put(master->of_node);
+ }
+
+ return err;
+}
+
+static int arm_smmu_device_remove(struct platform_device *pdev)
+{
+ int i;
+ struct device *dev = &pdev->dev;
+ struct arm_smmu_device *curr, *smmu = NULL;
+ struct rb_node *node;
+
+ spin_lock(&arm_smmu_devices_lock);
+ list_for_each_entry(curr, &arm_smmu_devices, list) {
+ if (curr->dev == dev) {
+ smmu = curr;
+ list_del(&smmu->list);
+ break;
+ }
+ }
+ spin_unlock(&arm_smmu_devices_lock);
+
+ if (!smmu)
+ return -ENODEV;
+
+ for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
+ struct arm_smmu_master *master
+ = container_of(node, struct arm_smmu_master, node);
+ of_node_put(master->of_node);
+ }
+
+ if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
+ dev_err(dev, "removing device with active domains!\n");
+
+ for (i = 0; i < smmu->num_global_irqs; ++i)
+ free_irq(smmu->irqs[i], smmu);
+
+ /* Turn the thing off */
+ writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+ return 0;
+}
+
+static struct platform_driver arm_smmu_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "arm-smmu",
+ .of_match_table = of_match_ptr(arm_smmu_of_match),
+ },
+ .probe = arm_smmu_device_dt_probe,
+ .remove = arm_smmu_device_remove,
+};
+
+static int __init arm_smmu_init(void)
+{
+ struct device_node *np;
+ int ret;
+
+ /*
+ * Play nice with systems that don't have an ARM SMMU by checking that
+ * an ARM SMMU exists in the system before proceeding with the driver
+ * and IOMMU bus operation registration.
+ */
+ np = of_find_matching_node(NULL, arm_smmu_of_match);
+ if (!np)
+ return 0;
+
+ of_node_put(np);
+
+ ret = platform_driver_register(&arm_smmu_driver);
+ if (ret)
+ return ret;
+
+ /* Oh, for a proper bus abstraction */
+ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+
+#ifdef CONFIG_ARM_AMBA
+ if (!iommu_present(&amba_bustype))
+ bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+#endif
+
+ return 0;
+}
+
+static void __exit arm_smmu_exit(void)
+{
+ return platform_driver_unregister(&arm_smmu_driver);
+}
+
+subsys_initcall(arm_smmu_init);
+module_exit(arm_smmu_exit);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
+MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
new file mode 100644
index 000000000000..5a500edf00cc
--- /dev/null
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -0,0 +1,986 @@
+/*
+ * CPU-agnostic ARM page table allocator.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
+
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "io-pgtable.h"
+
+#define ARM_LPAE_MAX_ADDR_BITS 48
+#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
+#define ARM_LPAE_MAX_LEVELS 4
+
+/* Struct accessors */
+#define io_pgtable_to_data(x) \
+ container_of((x), struct arm_lpae_io_pgtable, iop)
+
+#define io_pgtable_ops_to_pgtable(x) \
+ container_of((x), struct io_pgtable, ops)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+/*
+ * For consistency with the architecture, we always consider
+ * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
+ */
+#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
+
+/*
+ * Calculate the right shift amount to get to the portion describing level l
+ * in a virtual address mapped by the pagetable in d.
+ */
+#define ARM_LPAE_LVL_SHIFT(l,d) \
+ ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
+ * (d)->bits_per_level) + (d)->pg_shift)
+
+#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift)
+
+/*
+ * Calculate the index at level l used to map virtual address a using the
+ * pagetable in d.
+ */
+#define ARM_LPAE_PGD_IDX(l,d) \
+ ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
+
+#define ARM_LPAE_LVL_IDX(a,l,d) \
+ (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
+ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
+
+/* Calculate the block/page mapping size at level l for pagetable in d. */
+#define ARM_LPAE_BLOCK_SIZE(l,d) \
+ (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
+ ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
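+
+/*
+ * Worked example (editorial note, not in the original source): with a
+ * 4K granule pg_shift is 12 and, for 8-byte PTEs, bits_per_level is
+ * 12 - ilog2(8) = 9. A 4-level table then has ARM_LPAE_START_LVL() == 0,
+ * ARM_LPAE_LVL_SHIFT() is 39/30/21/12 for levels 0/1/2/3,
+ * ARM_LPAE_LVL_IDX() masks 9 bits of the IOVA at each level, and
+ * ARM_LPAE_BLOCK_SIZE() gives 1G, 2M and 4K mappings at levels 1, 2
+ * and 3, matching the LPAE descriptor formats.
+ */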
+
+/* Page table bits */
+#define ARM_LPAE_PTE_TYPE_SHIFT 0
+#define ARM_LPAE_PTE_TYPE_MASK 0x3
+
+#define ARM_LPAE_PTE_TYPE_BLOCK 1
+#define ARM_LPAE_PTE_TYPE_TABLE 3
+#define ARM_LPAE_PTE_TYPE_PAGE 3
+
+#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
+#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
+#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
+#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
+#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
+#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8)
+#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
+#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
+
+#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
+/* Ignore the contiguous bit for block splitting */
+#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
+#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
+ ARM_LPAE_PTE_ATTR_HI_MASK)
+
+/* Stage-1 PTE */
+#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
+#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
+#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
+#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
+
+/* Stage-2 PTE */
+#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
+#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
+#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
+#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
+#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
+#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
+
+/* Register bits */
+#define ARM_32_LPAE_TCR_EAE (1 << 31)
+#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
+
+#define ARM_LPAE_TCR_TG0_4K (0 << 14)
+#define ARM_LPAE_TCR_TG0_64K (1 << 14)
+#define ARM_LPAE_TCR_TG0_16K (2 << 14)
+
+#define ARM_LPAE_TCR_SH0_SHIFT 12
+#define ARM_LPAE_TCR_SH0_MASK 0x3
+#define ARM_LPAE_TCR_SH_NS 0
+#define ARM_LPAE_TCR_SH_OS 2
+#define ARM_LPAE_TCR_SH_IS 3
+
+#define ARM_LPAE_TCR_ORGN0_SHIFT 10
+#define ARM_LPAE_TCR_IRGN0_SHIFT 8
+#define ARM_LPAE_TCR_RGN_MASK 0x3
+#define ARM_LPAE_TCR_RGN_NC 0
+#define ARM_LPAE_TCR_RGN_WBWA 1
+#define ARM_LPAE_TCR_RGN_WT 2
+#define ARM_LPAE_TCR_RGN_WB 3
+
+#define ARM_LPAE_TCR_SL0_SHIFT 6
+#define ARM_LPAE_TCR_SL0_MASK 0x3
+
+#define ARM_LPAE_TCR_T0SZ_SHIFT 0
+#define ARM_LPAE_TCR_SZ_MASK 0xf
+
+#define ARM_LPAE_TCR_PS_SHIFT 16
+#define ARM_LPAE_TCR_PS_MASK 0x7
+
+#define ARM_LPAE_TCR_IPS_SHIFT 32
+#define ARM_LPAE_TCR_IPS_MASK 0x7
+
+#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
+#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
+#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
+#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
+#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
+#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
+
+#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
+#define ARM_LPAE_MAIR_ATTR_MASK 0xff
+#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
+#define ARM_LPAE_MAIR_ATTR_NC 0x44
+#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
+#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
+#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
+#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
+
+/* IOPTE accessors */
+#define iopte_deref(pte,d) \
+ (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \
+ & ~((1ULL << (d)->pg_shift) - 1)))
+
+#define iopte_type(pte,l) \
+ (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
+
+#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
+
+#define iopte_leaf(pte,l) \
+ (l == (ARM_LPAE_MAX_LEVELS - 1) ? \
+ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
+ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
+
+#define iopte_to_pfn(pte,d) \
+ (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)
+
+#define pfn_to_iopte(pfn,d) \
+ (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
+
+struct arm_lpae_io_pgtable {
+ struct io_pgtable iop;
+
+ int levels;
+ size_t pgd_size;
+ unsigned long pg_shift;
+ unsigned long bits_per_level;
+
+ void *pgd;
+};
+
+typedef u64 arm_lpae_iopte;
+
+static bool selftest_running = false;
+
+static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, phys_addr_t paddr,
+ arm_lpae_iopte prot, int lvl,
+ arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte pte = prot;
+
+ /* We require an unmap first */
+ if (iopte_leaf(*ptep, lvl)) {
+ WARN_ON(!selftest_running);
+ return -EEXIST;
+ }
+
+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ pte |= ARM_LPAE_PTE_NS;
+
+ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+ pte |= ARM_LPAE_PTE_TYPE_PAGE;
+ else
+ pte |= ARM_LPAE_PTE_TYPE_BLOCK;
+
+ pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
+ pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
+
+ *ptep = pte;
+ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
+ return 0;
+}
+
+static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
+ phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
+ int lvl, arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte *cptep, pte;
+ void *cookie = data->iop.cookie;
+ size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ /* Find our entry at the current level */
+ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
+
+ /* If we can install a leaf entry at this level, then do so */
+ if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
+ return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
+
+ /* We can't allocate tables at the final level */
+ if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
+ return -EINVAL;
+
+ /* Grab a pointer to the next level */
+ pte = *ptep;
+ if (!pte) {
+ cptep = alloc_pages_exact(1UL << data->pg_shift,
+ GFP_ATOMIC | __GFP_ZERO);
+ if (!cptep)
+ return -ENOMEM;
+
+ data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
+ cookie);
+ pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ pte |= ARM_LPAE_PTE_NSTABLE;
+ *ptep = pte;
+ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ } else {
+ cptep = iopte_deref(pte, data);
+ }
+
+ /* Rinse, repeat */
+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+}
+
+static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
+ int prot)
+{
+ arm_lpae_iopte pte;
+
+ if (data->iop.fmt == ARM_64_LPAE_S1 ||
+ data->iop.fmt == ARM_32_LPAE_S1) {
+ pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
+
+ if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
+ pte |= ARM_LPAE_PTE_AP_RDONLY;
+
+ if (prot & IOMMU_CACHE)
+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
+ << ARM_LPAE_PTE_ATTRINDX_SHIFT);
+ } else {
+ pte = ARM_LPAE_PTE_HAP_FAULT;
+ if (prot & IOMMU_READ)
+ pte |= ARM_LPAE_PTE_HAP_READ;
+ if (prot & IOMMU_WRITE)
+ pte |= ARM_LPAE_PTE_HAP_WRITE;
+ if (prot & IOMMU_CACHE)
+ pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
+ else
+ pte |= ARM_LPAE_PTE_MEMATTR_NC;
+ }
+
+ if (prot & IOMMU_NOEXEC)
+ pte |= ARM_LPAE_PTE_XN;
+
+ return pte;
+}
+
+static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int iommu_prot)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_lpae_iopte *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+ arm_lpae_iopte prot;
+
+ /* If no access, then nothing to do */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+ return 0;
+
+ prot = arm_lpae_prot_to_pte(data, iommu_prot);
+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+}
+
+static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
+ arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte *start, *end;
+ unsigned long table_size;
+
+ /* Only leaf entries at the last level */
+ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+ return;
+
+ if (lvl == ARM_LPAE_START_LVL(data))
+ table_size = data->pgd_size;
+ else
+ table_size = 1UL << data->pg_shift;
+
+ start = ptep;
+ end = (void *)ptep + table_size;
+
+ while (ptep != end) {
+ arm_lpae_iopte pte = *ptep++;
+
+ if (!pte || iopte_leaf(pte, lvl))
+ continue;
+
+ __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
+ }
+
+ free_pages_exact(start, table_size);
+}
+
+static void arm_lpae_free_pgtable(struct io_pgtable *iop)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
+
+ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+ kfree(data);
+}
+
+static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size,
+ arm_lpae_iopte prot, int lvl,
+ arm_lpae_iopte *ptep, size_t blk_size)
+{
+ unsigned long blk_start, blk_end;
+ phys_addr_t blk_paddr;
+ arm_lpae_iopte table = 0;
+ void *cookie = data->iop.cookie;
+ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+
+ blk_start = iova & ~(blk_size - 1);
+ blk_end = blk_start + blk_size;
+ blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
+
+ for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
+ arm_lpae_iopte *tablep;
+
+ /* Unmap! */
+ if (blk_start == iova)
+ continue;
+
+ /* __arm_lpae_map expects a pointer to the start of the table */
+ tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
+ if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
+ tablep) < 0) {
+ if (table) {
+ /* Free the table we allocated */
+ tablep = iopte_deref(table, data);
+ __arm_lpae_free_pgtable(data, lvl + 1, tablep);
+ }
+ return 0; /* Bytes unmapped */
+ }
+ }
+
+ *ptep = table;
+ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ iova &= ~(blk_size - 1);
+ tlb->tlb_add_flush(iova, blk_size, true, cookie);
+ return size;
+}
+
+static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size, int lvl,
+ arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte pte;
+ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+ void *cookie = data->iop.cookie;
+ size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
+ pte = *ptep;
+
+ /* Something went horribly wrong and we ran out of page table */
+ if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
+ return 0;
+
+ /* If the size matches this level, we're in the right place */
+ if (size == blk_size) {
+ *ptep = 0;
+ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+
+ if (!iopte_leaf(pte, lvl)) {
+ /* Also flush any partial walks */
+ tlb->tlb_add_flush(iova, size, false, cookie);
+ tlb->tlb_sync(data->iop.cookie);
+ ptep = iopte_deref(pte, data);
+ __arm_lpae_free_pgtable(data, lvl + 1, ptep);
+ } else {
+ tlb->tlb_add_flush(iova, size, true, cookie);
+ }
+
+ return size;
+ } else if (iopte_leaf(pte, lvl)) {
+ /*
+ * Insert a table at the next level to map the old region,
+ * minus the part we want to unmap
+ */
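+		/*
+		 * For example (illustrative): unmapping a single 4K page out
+		 * of a 2M block replaces the block entry with a next-level
+		 * table covering the remaining 511 pages.
+		 */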
+ return arm_lpae_split_blk_unmap(data, iova, size,
+ iopte_prot(pte), lvl, ptep,
+ blk_size);
+ }
+
+ /* Keep on walkin' */
+ ptep = iopte_deref(pte, data);
+ return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
+}
+
+static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size)
+{
+ size_t unmapped;
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable *iop = &data->iop;
+ arm_lpae_iopte *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+
+ unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
+ if (unmapped)
+ iop->cfg.tlb->tlb_sync(iop->cookie);
+
+ return unmapped;
+}
+
+static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
+ unsigned long iova)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_lpae_iopte pte, *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+
+ do {
+ /* Valid IOPTE pointer? */
+ if (!ptep)
+ return 0;
+
+ /* Grab the IOPTE we're interested in */
+ pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));
+
+ /* Valid entry? */
+ if (!pte)
+ return 0;
+
+ /* Leaf entry? */
+ if (iopte_leaf(pte,lvl))
+ goto found_translation;
+
+ /* Take it to the next level */
+ ptep = iopte_deref(pte, data);
+ } while (++lvl < ARM_LPAE_MAX_LEVELS);
+
+ /* Ran out of page tables to walk */
+ return 0;
+
+found_translation:
+ iova &= ((1 << data->pg_shift) - 1);
+ return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
+}
+
+static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
+{
+ unsigned long granule;
+
+ /*
+ * We need to restrict the supported page sizes to match the
+ * translation regime for a particular granule. Aim to match
+ * the CPU page size if possible, otherwise prefer smaller sizes.
+ * While we're at it, restrict the block sizes to match the
+ * chosen granule.
+ */
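+	/*
+	 * For example (illustrative, assuming a 4K CPU PAGE_SIZE): a caller
+	 * passing pgsize_bitmap = SZ_4K | SZ_64K | SZ_2M | SZ_1G selects the
+	 * 4K granule below, and the bitmap is reduced to SZ_4K | SZ_2M | SZ_1G.
+	 */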
+ if (cfg->pgsize_bitmap & PAGE_SIZE)
+ granule = PAGE_SIZE;
+ else if (cfg->pgsize_bitmap & ~PAGE_MASK)
+ granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
+ else if (cfg->pgsize_bitmap & PAGE_MASK)
+ granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
+ else
+ granule = 0;
+
+ switch (granule) {
+ case SZ_4K:
+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ break;
+ case SZ_16K:
+ cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
+ break;
+ case SZ_64K:
+ cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
+ break;
+ default:
+ cfg->pgsize_bitmap = 0;
+ }
+}
+
+static struct arm_lpae_io_pgtable *
+arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
+{
+ unsigned long va_bits, pgd_bits;
+ struct arm_lpae_io_pgtable *data;
+
+ arm_lpae_restrict_pgsizes(cfg);
+
+ if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
+ return NULL;
+
+ if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
+ return NULL;
+
+ if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
+ return NULL;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->pg_shift = __ffs(cfg->pgsize_bitmap);
+ data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
+
+ va_bits = cfg->ias - data->pg_shift;
+ data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+
+ /* Calculate the actual size of our pgd (without concatenation) */
+ pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
+ data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
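+	/*
+	 * Illustrative numbers (not from the original source): with a 4K
+	 * granule (pg_shift = 12, bits_per_level = 9) and ias = 48,
+	 * va_bits = 36, levels = 4, pgd_bits = 36 - 27 = 9 and
+	 * pgd_size = 1 << (9 + 3) = 4K, i.e. a single page of 512 entries.
+	 */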
+
+ data->iop.ops = (struct io_pgtable_ops) {
+ .map = arm_lpae_map,
+ .unmap = arm_lpae_unmap,
+ .iova_to_phys = arm_lpae_iova_to_phys,
+ };
+
+ return data;
+}
+
+static struct io_pgtable *
+arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ u64 reg;
+ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+
+ if (!data)
+ return NULL;
+
+ /* TCR */
+ reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+
+ switch (1 << data->pg_shift) {
+ case SZ_4K:
+ reg |= ARM_LPAE_TCR_TG0_4K;
+ break;
+ case SZ_16K:
+ reg |= ARM_LPAE_TCR_TG0_16K;
+ break;
+ case SZ_64K:
+ reg |= ARM_LPAE_TCR_TG0_64K;
+ break;
+ }
+
+ switch (cfg->oas) {
+ case 32:
+ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 36:
+ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 40:
+ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 42:
+ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 44:
+ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 48:
+ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ default:
+ goto out_free_data;
+ }
+
+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
+ cfg->arm_lpae_s1_cfg.tcr = reg;
+
+ /* MAIRs */
+ reg = (ARM_LPAE_MAIR_ATTR_NC
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
+ (ARM_LPAE_MAIR_ATTR_WBRWA
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
+ (ARM_LPAE_MAIR_ATTR_DEVICE
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
+
+ cfg->arm_lpae_s1_cfg.mair[0] = reg;
+ cfg->arm_lpae_s1_cfg.mair[1] = 0;
+
+ /* Looking good; allocate a pgd */
+ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+ if (!data->pgd)
+ goto out_free_data;
+
+ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+
+ /* TTBRs */
+ cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
+ cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+ return &data->iop;
+
+out_free_data:
+ kfree(data);
+ return NULL;
+}
+
+static struct io_pgtable *
+arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ u64 reg, sl;
+ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+
+ if (!data)
+ return NULL;
+
+ /*
+ * Concatenate PGDs at level 1 if possible in order to reduce
+ * the depth of the stage-2 walk.
+ */
+ if (data->levels == ARM_LPAE_MAX_LEVELS) {
+ unsigned long pgd_pages;
+
+ pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
+ if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
+ data->pgd_size = pgd_pages << data->pg_shift;
+ data->levels--;
+ }
+ }
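+	/*
+	 * Example (illustrative): with a 4K granule and ias = 40, the 4-level
+	 * walk computed above would need only a 2-entry level-0 table;
+	 * instead the two level-1 tables are concatenated into an 8K pgd,
+	 * levels drops to 3 and the walk starts at level 1.
+	 */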
+
+ /* VTCR */
+ reg = ARM_64_LPAE_S2_TCR_RES1 |
+ (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+
+ sl = ARM_LPAE_START_LVL(data);
+
+ switch (1 << data->pg_shift) {
+ case SZ_4K:
+ reg |= ARM_LPAE_TCR_TG0_4K;
+ sl++; /* SL0 format is different for 4K granule size */
+ break;
+ case SZ_16K:
+ reg |= ARM_LPAE_TCR_TG0_16K;
+ break;
+ case SZ_64K:
+ reg |= ARM_LPAE_TCR_TG0_64K;
+ break;
+ }
+
+ switch (cfg->oas) {
+ case 32:
+ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 36:
+ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 40:
+ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 42:
+ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 44:
+ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 48:
+ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ default:
+ goto out_free_data;
+ }
+
+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
+ reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
+ cfg->arm_lpae_s2_cfg.vtcr = reg;
+
+ /* Allocate pgd pages */
+ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+ if (!data->pgd)
+ goto out_free_data;
+
+ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+
+ /* VTTBR */
+ cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
+ return &data->iop;
+
+out_free_data:
+ kfree(data);
+ return NULL;
+}
+
+static struct io_pgtable *
+arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct io_pgtable *iop;
+
+ if (cfg->ias > 32 || cfg->oas > 40)
+ return NULL;
+
+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
+ if (iop) {
+ cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
+ cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
+ }
+
+ return iop;
+}
+
+static struct io_pgtable *
+arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct io_pgtable *iop;
+
+ if (cfg->ias > 40 || cfg->oas > 40)
+ return NULL;
+
+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
+ if (iop)
+ cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
+
+ return iop;
+}
+
+struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
+ .alloc = arm_64_lpae_alloc_pgtable_s1,
+ .free = arm_lpae_free_pgtable,
+};
+
+struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
+ .alloc = arm_64_lpae_alloc_pgtable_s2,
+ .free = arm_lpae_free_pgtable,
+};
+
+struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
+ .alloc = arm_32_lpae_alloc_pgtable_s1,
+ .free = arm_lpae_free_pgtable,
+};
+
+struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
+ .alloc = arm_32_lpae_alloc_pgtable_s2,
+ .free = arm_lpae_free_pgtable,
+};
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
+
+static struct io_pgtable_cfg *cfg_cookie;
+
+static void dummy_tlb_flush_all(void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
+ void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+ WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
+}
+
+static void dummy_tlb_sync(void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static struct iommu_gather_ops dummy_tlb_ops __initdata = {
+ .tlb_flush_all = dummy_tlb_flush_all,
+ .tlb_add_flush = dummy_tlb_add_flush,
+ .tlb_sync = dummy_tlb_sync,
+ .flush_pgtable = dummy_flush_pgtable,
+};
+
+static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+
+ pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
+ cfg->pgsize_bitmap, cfg->ias);
+ pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
+ data->levels, data->pgd_size, data->pg_shift,
+ data->bits_per_level, data->pgd);
+}
+
+#define __FAIL(ops, i) ({ \
+ WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
+ arm_lpae_dump_ops(ops); \
+ selftest_running = false; \
+ -EFAULT; \
+})
+
+static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
+{
+ static const enum io_pgtable_fmt fmts[] = {
+ ARM_64_LPAE_S1,
+ ARM_64_LPAE_S2,
+ };
+
+ int i, j;
+ unsigned long iova;
+ size_t size;
+ struct io_pgtable_ops *ops;
+
+ selftest_running = true;
+
+ for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
+ cfg_cookie = cfg;
+ ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
+ if (!ops) {
+ pr_err("selftest: failed to allocate io pgtable ops\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Initial sanity checks.
+ * Empty page tables shouldn't provide any translations.
+ */
+ if (ops->iova_to_phys(ops, 42))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, SZ_1G + 42))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, SZ_2G + 42))
+ return __FAIL(ops, i);
+
+ /*
+ * Distinct mappings of different granule sizes.
+ */
+ iova = 0;
+ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
+ while (j != BITS_PER_LONG) {
+ size = 1UL << j;
+
+ if (ops->map(ops, iova, iova, size, IOMMU_READ |
+ IOMMU_WRITE |
+ IOMMU_NOEXEC |
+ IOMMU_CACHE))
+ return __FAIL(ops, i);
+
+ /* Overlapping mappings */
+ if (!ops->map(ops, iova, iova + size, size,
+ IOMMU_READ | IOMMU_NOEXEC))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(ops, i);
+
+ iova += SZ_1G;
+ j++;
+ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
+ }
+
+ /* Partial unmap */
+ size = 1UL << __ffs(cfg->pgsize_bitmap);
+ if (ops->unmap(ops, SZ_1G + size, size) != size)
+ return __FAIL(ops, i);
+
+ /* Remap of partial unmap */
+ if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
+ return __FAIL(ops, i);
+
+ /* Full unmap */
+ iova = 0;
+ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
+ while (j != BITS_PER_LONG) {
+ size = 1UL << j;
+
+ if (ops->unmap(ops, iova, size) != size)
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, iova + 42))
+ return __FAIL(ops, i);
+
+ /* Remap full block */
+ if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(ops, i);
+
+ iova += SZ_1G;
+ j++;
+ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
+ }
+
+ free_io_pgtable_ops(ops);
+ }
+
+ selftest_running = false;
+ return 0;
+}
+
+static int __init arm_lpae_do_selftests(void)
+{
+ static const unsigned long pgsize[] = {
+ SZ_4K | SZ_2M | SZ_1G,
+ SZ_16K | SZ_32M,
+ SZ_64K | SZ_512M,
+ };
+
+ static const unsigned int ias[] = {
+ 32, 36, 40, 42, 44, 48,
+ };
+
+ int i, j, pass = 0, fail = 0;
+ struct io_pgtable_cfg cfg = {
+ .tlb = &dummy_tlb_ops,
+ .oas = 48,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
+ for (j = 0; j < ARRAY_SIZE(ias); ++j) {
+ cfg.pgsize_bitmap = pgsize[i];
+ cfg.ias = ias[j];
+ pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
+ pgsize[i], ias[j]);
+ if (arm_lpae_run_tests(&cfg))
+ fail++;
+ else
+ pass++;
+ }
+ }
+
+ pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
+ return fail ? -EFAULT : 0;
+}
+subsys_initcall(arm_lpae_do_selftests);
+#endif
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
new file mode 100644
index 000000000000..6436fe24bc2f
--- /dev/null
+++ b/drivers/iommu/io-pgtable.c
@@ -0,0 +1,82 @@
+/*
+ * Generic page table allocator for IOMMUs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "io-pgtable.h"
+
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+
+static const struct io_pgtable_init_fns *
+io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
+{
+#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
+ [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
+ [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
+ [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
+ [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
+#endif
+};
+
+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+ struct io_pgtable_cfg *cfg,
+ void *cookie)
+{
+ struct io_pgtable *iop;
+ const struct io_pgtable_init_fns *fns;
+
+ if (fmt >= IO_PGTABLE_NUM_FMTS)
+ return NULL;
+
+ fns = io_pgtable_init_table[fmt];
+ if (!fns)
+ return NULL;
+
+ iop = fns->alloc(cfg, cookie);
+ if (!iop)
+ return NULL;
+
+ iop->fmt = fmt;
+ iop->cookie = cookie;
+ iop->cfg = *cfg;
+
+ return &iop->ops;
+}
+
+/*
+ * It is the IOMMU driver's responsibility to ensure that the page table
+ * is no longer accessible to the walker by this point.
+ */
+void free_io_pgtable_ops(struct io_pgtable_ops *ops)
+{
+ struct io_pgtable *iop;
+
+ if (!ops)
+ return;
+
+ iop = container_of(ops, struct io_pgtable, ops);
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
+ io_pgtable_init_table[iop->fmt]->free(iop);
+}
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
new file mode 100644
index 000000000000..10e32f69c668
--- /dev/null
+++ b/drivers/iommu/io-pgtable.h
@@ -0,0 +1,143 @@
+#ifndef __IO_PGTABLE_H
+#define __IO_PGTABLE_H
+
+/*
+ * Public API for use by IOMMU drivers
+ */
+enum io_pgtable_fmt {
+ ARM_32_LPAE_S1,
+ ARM_32_LPAE_S2,
+ ARM_64_LPAE_S1,
+ ARM_64_LPAE_S2,
+ IO_PGTABLE_NUM_FMTS,
+};
+
+/**
+ * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
+ *
+ * @tlb_flush_all: Synchronously invalidate the entire TLB context.
+ * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
+ * @tlb_sync: Ensure any queued TLB invalidation has taken effect.
+ * @flush_pgtable: Ensure page table updates are visible to the IOMMU.
+ *
+ * Note that these can all be called in atomic context and must therefore
+ * not block.
+ */
+struct iommu_gather_ops {
+ void (*tlb_flush_all)(void *cookie);
+ void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
+ void *cookie);
+ void (*tlb_sync)(void *cookie);
+ void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
+};
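+
+/*
+ * Minimal sketch of an implementation (the "my_*" names are hypothetical;
+ * the LPAE selftest's dummy_tlb_ops in io-pgtable-arm.c follows the same
+ * pattern):
+ *
+ *	static void my_tlb_flush_all(void *cookie) { }
+ *	static void my_tlb_add_flush(unsigned long iova, size_t size,
+ *				     bool leaf, void *cookie) { }
+ *	static void my_tlb_sync(void *cookie) { }
+ *	static void my_flush_pgtable(void *ptr, size_t size, void *cookie) { }
+ *
+ *	static const struct iommu_gather_ops my_tlb_ops = {
+ *		.tlb_flush_all	= my_tlb_flush_all,
+ *		.tlb_add_flush	= my_tlb_add_flush,
+ *		.tlb_sync	= my_tlb_sync,
+ *		.flush_pgtable	= my_flush_pgtable,
+ *	};
+ */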
+
+/**
+ * struct io_pgtable_cfg - Configuration data for a set of page tables.
+ *
+ * @quirks: A bitmap of hardware quirks that require some special
+ * action by the low-level page table allocator.
+ * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
+ * tables.
+ * @ias: Input address (iova) size, in bits.
+ * @oas: Output address (paddr) size, in bits.
+ * @tlb: TLB management callbacks for this set of tables.
+ */
+struct io_pgtable_cfg {
+ #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
+ int quirks;
+ unsigned long pgsize_bitmap;
+ unsigned int ias;
+ unsigned int oas;
+ const struct iommu_gather_ops *tlb;
+
+ /* Low-level data specific to the table format */
+ union {
+ struct {
+ u64 ttbr[2];
+ u64 tcr;
+ u64 mair[2];
+ } arm_lpae_s1_cfg;
+
+ struct {
+ u64 vttbr;
+ u64 vtcr;
+ } arm_lpae_s2_cfg;
+ };
+};
+
+/**
+ * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
+ *
+ * @map: Map a physically contiguous memory region.
+ * @unmap: Unmap a physically contiguous memory region.
+ * @iova_to_phys: Translate iova to physical address.
+ *
+ * These functions map directly onto the iommu_ops member functions with
+ * the same names.
+ */
+struct io_pgtable_ops {
+ int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+ int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size);
+ phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
+ unsigned long iova);
+};
+
+/**
+ * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
+ *
+ * @fmt: The page table format.
+ * @cfg: The page table configuration. This will be modified to represent
+ * the configuration actually provided by the allocator (e.g. the
+ * pgsize_bitmap may be restricted).
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * the callback routines in cfg->tlb.
+ */
+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+ struct io_pgtable_cfg *cfg,
+ void *cookie);
+
+/**
+ * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
+ * *must* ensure that the page table is no longer
+ * live, but the TLB can be dirty.
+ *
+ * @ops: The ops returned from alloc_io_pgtable_ops.
+ */
+void free_io_pgtable_ops(struct io_pgtable_ops *ops);
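+
+/*
+ * Illustrative usage (a sketch, not lifted from an in-tree driver; my_tlb_ops,
+ * my_cookie, iova and paddr are hypothetical driver-side names): an IOMMU
+ * driver fills in a config, allocates a set of tables for one of the formats
+ * above and then drives the returned ops from its iommu_ops callbacks:
+ *
+ *	struct io_pgtable_cfg cfg = {
+ *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
+ *		.ias		= 48,
+ *		.oas		= 48,
+ *		.tlb		= &my_tlb_ops,
+ *	};
+ *	struct io_pgtable_ops *ops;
+ *
+ *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, my_cookie);
+ *	if (ops) {
+ *		ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
+ *		ops->unmap(ops, iova, SZ_4K);
+ *		free_io_pgtable_ops(ops);
+ *	}
+ */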
+
+
+/*
+ * Internal structures for page table allocator implementations.
+ */
+
+/**
+ * struct io_pgtable - Internal structure describing a set of page tables.
+ *
+ * @fmt: The page table format.
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * any callback routines.
+ * @cfg: A copy of the page table configuration.
+ * @ops: The page table operations in use for this set of page tables.
+ */
+struct io_pgtable {
+ enum io_pgtable_fmt fmt;
+ void *cookie;
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops ops;
+};
+
+/**
+ * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
+ * particular format.
+ *
+ * @alloc: Allocate a set of page tables described by cfg.
+ * @free: Free the page tables associated with iop.
+ */
+struct io_pgtable_init_fns {
+ struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
+ void (*free)(struct io_pgtable *iop);
+};
+
+#endif /* __IO_PGTABLE_H */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3aeb7305e2f5..7c49e10edad5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/types.h>
-#define IOMMU_READ (1)
-#define IOMMU_WRITE (2)
-#define IOMMU_CACHE (4) /* DMA cache coherency */
+#define IOMMU_READ (1 << 0)
+#define IOMMU_WRITE (1 << 1)
+#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
+#define IOMMU_NOEXEC (1 << 3)
struct iommu_ops;
struct iommu_group;
@@ -62,6 +63,7 @@ enum iommu_attr {
DOMAIN_ATTR_GEOMETRY,
DOMAIN_ATTR_PAGING,
DOMAIN_ATTR_WINDOWS,
+ DOMAIN_ATTR_NESTING, /* two stages of translation */
DOMAIN_ATTR_MAX,
};