author    Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>    2013-04-22 16:32:34 -0500
committer Joerg Roedel <joro@8bytes.org>    2013-04-23 14:34:26 +0200
commit    3f398bc7762adcd860bd2acce18465a106f47325 (patch)
tree      120d9bde4c592f3561d1f218fbea00b11b631393 /drivers/iommu
parent    d3263bc29706e42f74d8800807c2dedf320d77f1 (diff)
iommu/AMD: Per-thread IOMMU Interrupt Handling
In the current interrupt handling scheme, there are as many threads as there are IOMMUs. Each thread is created and assigned to an IOMMU when the interrupt handlers are registered (request_threaded_irq). When an IOMMU generates an interrupt, the irq handler (top half) wakes up the corresponding thread, which then processes the event and PPR logs of all IOMMUs, starting from the first one.

On systems with multiple IOMMUs, this scheme complicates the synchronization of the IOMMU data structures and status registers: several threads may end up competing for the same IOMMU while another IOMMU is left unhandled.

To simplify this, change the interrupt handling scheme so that each thread manages only the interrupts of its own IOMMU. This is achieved by passing the struct amd_iommu when registering the interrupt handlers. This structure is unique per IOMMU and lets the bottom-half thread identify the IOMMU it has to handle, instead of iterating with for_each_iommu. It also eliminates the need to lock the IOMMU while processing the event and PPR logs.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Joerg Roedel <joro@8bytes.org>
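The resulting pattern, reduced to a minimal sketch (a simplified, hypothetical reduction of the patch, not a drop-in replacement: it assumes the driver-internal struct amd_iommu, the MMIO_* definitions and amd_iommu_int_handler from the AMD IOMMU driver, and it leaves out the ERBT1312 status read-back loop and error handling that the real functions carry):

#include <linux/interrupt.h>
#include <linux/io.h>

/* Bottom half: 'data' is the per-IOMMU cookie handed to request_threaded_irq(). */
static irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	struct amd_iommu *iommu = data;
	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	if (status & MMIO_STATUS_EVT_INT_MASK)
		iommu_poll_events(iommu);	/* only this IOMMU's event log */

	if (status & MMIO_STATUS_PPR_INT_MASK)
		iommu_poll_ppr_log(iommu);	/* only this IOMMU's PPR log */

	return IRQ_HANDLED;
}

/*
 * Registration: pass the iommu itself as dev_id so the thread knows which
 * hardware instance woke it, instead of walking for_each_iommu().
 * The irq number is assumed to be the MSI vector set up earlier.
 */
static int iommu_setup_msi(struct amd_iommu *iommu)
{
	return request_threaded_irq(iommu->dev->irq,
				    amd_iommu_int_handler,	/* top half */
				    amd_iommu_int_thread,	/* bottom half */
				    0, "AMD-Vi", iommu);
}

With one thread per IOMMU, the dev_id cookie is all the bottom half needs to find its hardware instance, and the per-IOMMU locking that the old for_each_iommu() walk required goes away.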
Diffstat (limited to 'drivers/iommu')
-rw-r--r--   drivers/iommu/amd_iommu.c        82
-rw-r--r--   drivers/iommu/amd_iommu_init.c    2
2 files changed, 34 insertions, 50 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 27792f8c429..c6f3c7e0468 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -700,22 +700,7 @@ retry:
static void iommu_poll_events(struct amd_iommu *iommu)
{
- u32 head, tail, status;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- /* enable event interrupts again */
- do {
- /*
- * Workaround for Erratum ERBT1312
- * Clearing the EVT_INT bit may race in the hardware, so read
- * it again and make sure it was really cleared
- */
- status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
- writel(MMIO_STATUS_EVT_INT_MASK,
- iommu->mmio_base + MMIO_STATUS_OFFSET);
- } while (status & MMIO_STATUS_EVT_INT_MASK);
+ u32 head, tail;
head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
@@ -726,8 +711,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
}
writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
}
static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
@@ -752,26 +735,11 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
- unsigned long flags;
- u32 head, tail, status;
+ u32 head, tail;
if (iommu->ppr_log == NULL)
return;
- spin_lock_irqsave(&iommu->lock, flags);
-
- /* enable ppr interrupts again */
- do {
- /*
- * Workaround for Erratum ERBT1312
- * Clearing the PPR_INT bit may race in the hardware, so read
- * it again and make sure it was really cleared
- */
- status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
- writel(MMIO_STATUS_PPR_INT_MASK,
- iommu->mmio_base + MMIO_STATUS_OFFSET);
- } while (status & MMIO_STATUS_PPR_INT_MASK);
-
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
@@ -807,34 +775,50 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
- /*
- * Release iommu->lock because ppr-handling might need to
- * re-acquire it
- */
- spin_unlock_irqrestore(&iommu->lock, flags);
-
/* Handle PPR entry */
iommu_handle_ppr_entry(iommu, entry);
- spin_lock_irqsave(&iommu->lock, flags);
-
/* Refresh ring-buffer information */
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
}
-
- spin_unlock_irqrestore(&iommu->lock, flags);
}
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = (struct amd_iommu *) data;
+ u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
- for_each_iommu(iommu) {
- iommu_poll_events(iommu);
- iommu_poll_ppr_log(iommu);
- }
+ while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
+ /* Enable EVT and PPR interrupts again */
+ writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
+ iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (status & MMIO_STATUS_EVT_INT_MASK) {
+ pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
+ iommu_poll_events(iommu);
+ }
+
+ if (status & MMIO_STATUS_PPR_INT_MASK) {
+ pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
+ iommu_poll_ppr_log(iommu);
+ }
+
+ /*
+ * Hardware bug: ERBT1312
+ * When re-enabling interrupt (by writing 1
+ * to clear the bit), the hardware might also try to set
+ * the interrupt bit in the event status register.
+ * In this scenario, the bit will be set, and disable
+ * subsequent interrupts.
+ *
+ * Workaround: The IOMMU driver should read back the
+ * status register and check if the interrupt bits are cleared.
+ * If not, driver will need to go through the interrupt handler
+ * again and re-clear the bits
+ */
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ }
return IRQ_HANDLED;
}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 97679416648..3d3d6cd52d4 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1324,7 +1324,7 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
amd_iommu_int_handler,
amd_iommu_int_thread,
0, "AMD-Vi",
- iommu->dev);
+ iommu);
if (r) {
pci_disable_msi(iommu->dev);