Diffstat (limited to 'drivers/iommu')
 -rw-r--r--  drivers/iommu/amd_iommu.c        | 22
 -rw-r--r--  drivers/iommu/amd_iommu_types.h  |  1
 -rw-r--r--  drivers/iommu/arm-smmu.c         |  7
 -rw-r--r--  drivers/iommu/intel-iommu.c      | 26
 4 files changed, 41 insertions(+), 15 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index faf0da4bb3a2..b0cb66208c8b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1922,9 +1922,15 @@ static void free_pt_##LVL (unsigned long __pt) \
pt = (u64 *)__pt; \
\
for (i = 0; i < 512; ++i) { \
+ /* PTE present? */ \
if (!IOMMU_PTE_PRESENT(pt[i])) \
continue; \
\
+ /* Large PTE? */ \
+ if (PM_PTE_LEVEL(pt[i]) == 0 || \
+ PM_PTE_LEVEL(pt[i]) == 7) \
+ continue; \
+ \
p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
FN(p); \
} \
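Note on the free_pt_##LVL hunk above: the freeing helpers must only recurse into PTEs that reference a lower-level page table. A level value of 0 means the entry maps a page directly and 7 marks a large page, so neither owns a page-table page that may be reclaimed. A minimal standalone sketch of that check, assuming the usual AMD IOMMU PTE layout with the next-level field in bits 9-11 (as PM_PTE_LEVEL is defined in amd_iommu_types.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IOMMU_PTE_PRESENT(pte)  ((pte) & 1ULL)          /* bit 0: present */
#define PM_PTE_LEVEL(pte)       (((pte) >> 9) & 0x7ULL) /* bits 9-11: next level */

/* Return true when a present PTE references a lower-level page table that
 * the free_pt_* helpers may reclaim. Level 0 maps a page directly and
 * level 7 encodes a large page, so both are leaves. */
static bool pte_points_to_page_table(uint64_t pte)
{
	if (!IOMMU_PTE_PRESENT(pte))
		return false;
	return PM_PTE_LEVEL(pte) != 0 && PM_PTE_LEVEL(pte) != 7;
}

int main(void)
{
	uint64_t dir_pte  = (0x1234ULL << 12) | (2ULL << 9) | 1ULL; /* level-2 table, present */
	uint64_t leaf_pte = (0x5678ULL << 12) | (0ULL << 9) | 1ULL; /* maps a page directly */

	printf("dir:  %d\n", pte_points_to_page_table(dir_pte));   /* 1 */
	printf("leaf: %d\n", pte_points_to_page_table(leaf_pte));  /* 0 */
	return 0;
}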
@@ -2146,8 +2152,8 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
static void clear_dte_entry(u16 devid)
{
/* remove entry from the device table seen by the hardware */
- amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
- amd_iommu_dev_table[devid].data[1] = 0;
+ amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
+ amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
amd_iommu_apply_erratum_63(devid);
}
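Note on clear_dte_entry() above: rather than zeroing data[1], the entry now keeps the per-device flag bits selected by DTE_FLAG_MASK (data[1] bits 32-41, i.e. DTE bits 96-105), so the amd_iommu_apply_erratum_63() call that follows can still see flags such as the SysMgt setting it depends on. A small standalone check of that assumption; the SysMgt bit positions below are assumptions based on the driver's DEV_ENTRY_SYSMGT1/2 definitions:

#include <assert.h>

#define DTE_FLAG_MASK   (0x3ffULL << 32)   /* data[1] bits 32-41 = DTE bits 96-105 */
#define DTE_FLAG_IOTLB  (0x01ULL << 32)    /* IOTLB enable, value as in amd_iommu_types.h */
/* Assumed positions of the SysMgt bits read by amd_iommu_apply_erratum_63():
 * DTE bits 97 and 98, i.e. data[1] bits 33 and 34. */
#define DTE_SYSMGT1     (1ULL << 33)
#define DTE_SYSMGT2     (1ULL << 34)

int main(void)
{
	unsigned long long data1 = ~0ULL;	/* pretend every bit was set */

	data1 &= DTE_FLAG_MASK;			/* what clear_dte_entry() now does */

	/* The per-device flags survive, everything else is cleared. */
	assert(data1 & DTE_FLAG_IOTLB);
	assert(data1 & DTE_SYSMGT1);
	assert(data1 & DTE_SYSMGT2);
	assert(!(data1 & ~DTE_FLAG_MASK));
	return 0;
}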
@@ -3227,14 +3233,16 @@ free_domains:
static void cleanup_domain(struct protection_domain *domain)
{
- struct iommu_dev_data *dev_data, *next;
+ struct iommu_dev_data *entry;
unsigned long flags;
write_lock_irqsave(&amd_iommu_devtable_lock, flags);
- list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
- __detach_device(dev_data);
- atomic_set(&dev_data->bind, 0);
+ while (!list_empty(&domain->dev_list)) {
+ entry = list_first_entry(&domain->dev_list,
+ struct iommu_dev_data, list);
+ __detach_device(entry);
+ atomic_set(&entry->bind, 0);
}
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
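Note on cleanup_domain() above: list_for_each_entry_safe() only guards against removal of the current entry, but __detach_device() may unlink additional entries (e.g. alias devices) from dev_list, invalidating the cached next pointer. Draining the list head-first avoids relying on any cached pointer. A self-contained sketch of the pattern with a toy list and a stand-in detach callback (names are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

/* Minimal doubly linked list, just enough to show the pattern. */
struct node {
	struct node *prev, *next;
	int id;
};

static struct node head = { &head, &head, -1 };

static void unlink_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	free(n);
}

/* Stand-in for __detach_device(): detaching one device may also unlink
 * its alias, i.e. a *different* node than the one the caller holds. */
static void detach(struct node *n)
{
	if (n->next != &head && n->next->id == n->id + 1)
		unlink_node(n->next);	/* alias goes away too */
	unlink_node(n);
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->prev = head.prev;
		n->next = &head;
		head.prev->next = n;
		head.prev = n;
	}

	/* Safe pattern: always take the current first entry, never cache a
	 * "next" pointer that detach() might free behind our back. */
	while (head.next != &head)
		detach(head.next);

	printf("list drained\n");
	return 0;
}

The same drain loop stays correct no matter how many extra entries the callback removes, as long as it always removes at least the entry it was handed.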
@@ -3999,7 +4007,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
iommu_flush_dte(iommu, devid);
if (devid != alias) {
irq_lookup_table[alias] = table;
- set_dte_irq_entry(devid, table);
+ set_dte_irq_entry(alias, table);
iommu_flush_dte(iommu, alias);
}
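Note on the get_irq_table() hunk above: when a device has a PCI alias, the interrupt-remapping table must be installed for both request IDs; previously the DTE of devid was written twice and the alias DTE was left unprogrammed. A toy model of the intended invariant (install_irq_table() and the arrays below are illustrative stand-ins, not the driver's data structures):

#include <assert.h>
#include <stddef.h>

#define MAX_DEVID 8

static void *irq_lookup_table[MAX_DEVID];
static void *dte_irq_table[MAX_DEVID];

static void set_dte_irq_entry(unsigned devid, void *table)
{
	dte_irq_table[devid] = table;
}

/* Mirrors the fixed flow: when a device has an alias, both request IDs
 * must end up with the same remapping table, in the lookup array and in
 * their own DTE. */
static void install_irq_table(unsigned devid, unsigned alias, void *table)
{
	irq_lookup_table[devid] = table;
	set_dte_irq_entry(devid, table);

	if (devid != alias) {
		irq_lookup_table[alias] = table;
		set_dte_irq_entry(alias, table);	/* the fix: program the alias DTE */
	}
}

int main(void)
{
	int table;	/* stands in for a struct irq_remap_table */

	install_irq_table(1, 4, &table);
	assert(dte_irq_table[1] == &table);
	assert(dte_irq_table[4] == &table);	/* failed before the fix */
	return 0;
}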
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index e400fbe411de..e0c39940f659 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -283,6 +283,7 @@
#define IOMMU_PTE_IR (1ULL << 61)
#define IOMMU_PTE_IW (1ULL << 62)
+#define DTE_FLAG_MASK (0x3ffULL << 32)
#define DTE_FLAG_IOTLB (0x01UL << 32)
#define DTE_FLAG_GV (0x01ULL << 55)
#define DTE_GLX_SHIFT (56)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1d9ab39af29f..2ecac467f78f 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -794,8 +794,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
reg |= TTBCR_EAE |
(TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
(TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
- (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
- (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+ (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
+
+ if (!stage1)
+ reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
/* MAIR0 (stage-1 only) */
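Note on the arm-smmu TTBCR hunk above: the SL0 (starting lookup level) field belongs to the stage-2/VTCR-style layout of the register; in the stage-1 long-descriptor layout those bits mean something else, so they are now only programmed for stage-2 context banks. A standalone sketch of the resulting register assembly; the constant values below are assumptions following the LPAE/VTCR layout, the authoritative ones are the TTBCR_* defines in arm-smmu.c:

#include <stdint.h>
#include <stdio.h>

#define TTBCR_EAE         (1u << 31)
#define TTBCR_SH0_SHIFT   12
#define TTBCR_ORGN0_SHIFT 10
#define TTBCR_IRGN0_SHIFT 8
#define TTBCR_SL0_SHIFT   6	/* stage-2 only: starting lookup level */
#define TTBCR_SH_IS       3
#define TTBCR_RGN_WBWA    1
#define TTBCR_SL0_LVL_1   1

static uint32_t build_ttbcr(int stage1)
{
	uint32_t reg = TTBCR_EAE |
		       (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
		       (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
		       (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);

	/* SL0 only exists in the stage-2 (VTCR-style) register layout. */
	if (!stage1)
		reg |= TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT;

	return reg;
}

int main(void)
{
	printf("stage-1 TTBCR: %#x\n", build_ttbcr(1));
	printf("stage-2 TTBCR: %#x\n", build_ttbcr(0));
	return 0;
}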
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a22c86c867fa..8855ecbc36be 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1768,7 +1768,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct dma_pte *first_pte = NULL, *pte = NULL;
phys_addr_t uninitialized_var(pteval);
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
- unsigned long sg_res;
+ unsigned long sg_res = 0;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
@@ -1779,10 +1779,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
- if (sg)
- sg_res = 0;
- else {
- sg_res = nr_pages + 1;
+ if (!sg) {
+ sg_res = nr_pages;
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
}
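Note on the __domain_mapping() hunks above: sg_res counts the pages still to be mapped from the current segment, and in the non-scatterlist path the whole request is a single segment of nr_pages pages, so seeding it with nr_pages + 1 overstated the remaining size by one page. Since that count feeds the superpage decision, the off-by-one could let a superpage cover one page more than the caller asked for. A toy model of that decision (use_2m_superpage() is a hypothetical stand-in for the driver's level selection, not its actual code):

#include <stdio.h>

/* Toy model of the superpage choice: a 2MiB superpage (512 x 4KiB) may be
 * used when the IOVA/phys pfns are 512-page aligned and at least 512 pages
 * remain in the current segment. */
static int use_2m_superpage(unsigned long iov_pfn, unsigned long phys_pfn,
			    unsigned long pages_left)
{
	return !(iov_pfn & 511) && !(phys_pfn & 511) && pages_left >= 512;
}

int main(void)
{
	unsigned long nr_pages = 511;	/* caller asked for 511 pages */

	/* Old seed: nr_pages + 1 pretends 512 pages remain, so a superpage
	 * would cover one page beyond the request. */
	printf("seed nr_pages + 1: superpage=%d\n",
	       use_2m_superpage(0, 0, nr_pages + 1));

	/* Fixed seed: nr_pages -> no superpage, nothing beyond the request. */
	printf("seed nr_pages:     superpage=%d\n",
	       use_2m_superpage(0, 0, nr_pages));
	return 0;
}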
@@ -4075,7 +4073,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
struct dmar_domain *dmar_domain = domain->priv;
- int order;
+ int order, iommu_id;
order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
(iova + size - 1) >> VTD_PAGE_SHIFT);
@@ -4083,6 +4081,22 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
+ for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
+ struct intel_iommu *iommu = g_iommus[iommu_id];
+ int num, ndomains;
+
+ /*
+ * find bit position of dmar_domain
+ */
+ ndomains = cap_ndoms(iommu->cap);
+ for_each_set_bit(num, iommu->domain_ids, ndomains) {
+ if (iommu->domains[num] == dmar_domain)
+ iommu_flush_iotlb_psi(iommu, num,
+ iova >> VTD_PAGE_SHIFT,
+ 1 << order, 0);
+ }
+ }
+
return PAGE_SIZE << order;
}
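Note on the intel_iommu_unmap() hunks above: after clearing the PTEs, the unmap path now issues a page-selective IOTLB flush on every IOMMU unit the domain is attached to. The inner loop has to search iommu->domains for the matching pointer because the same dmar_domain may be known under a different domain ID on each unit; without the flush, stale translations for the unmapped IOVA range could survive in the IOTLB. A standalone sketch of the per-unit bitmap walk the hunk performs with for_each_set_bit (the plain loop below mimics it for a single 64-bit word; the flush itself is only represented by a printf):

#include <stdio.h>

int main(void)
{
	/* Domain attached to IOMMU units 0, 2 and 4 (bits set in the bitmap). */
	unsigned long long iommu_bmp = 0x15;
	int iommu_id;

	for (iommu_id = 0; iommu_id < 64; iommu_id++) {
		if (!(iommu_bmp >> iommu_id & 1ULL))
			continue;	/* domain not attached to this unit */

		/* Here the driver looks up the per-unit domain ID and calls
		 * iommu_flush_iotlb_psi() for the unmapped pfn range. */
		printf("flush IOTLB on iommu %d\n", iommu_id);
	}
	return 0;
}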