-rw-r--r--  dma-helpers.c   172
-rw-r--r--  dma.h           108
-rw-r--r--  hw/qdev-dma.h     4
3 files changed, 259 insertions, 25 deletions
diff --git a/dma-helpers.c b/dma-helpers.c
index 4c9e529620..2e09ceb912 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -9,8 +9,12 @@
#include "dma.h"
#include "trace.h"
+#include "range.h"
+#include "qemu-thread.h"
-int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
+/* #define DEBUG_IOMMU */
+
+static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
{
#define FILLBUF_SIZE 512
     uint8_t fillbuf[FILLBUF_SIZE];
@@ -23,6 +27,15 @@ int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
         len -= l;
         addr += l;
     }
+}
+
+int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
+{
+    if (dma_has_iommu(dma)) {
+        return iommu_dma_memory_set(dma, addr, c, len);
+    }
+    do_dma_memory_set(addr, c, len);
+
     return 0;
 }
@@ -260,3 +273,160 @@ void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
 {
     bdrv_acct_start(bs, cookie, sg->size, type);
 }
+
+bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
+                            DMADirection dir)
+{
+    target_phys_addr_t paddr, plen;
+
+#ifdef DEBUG_IOMMU
+    fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
+            " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
+#endif
+
+    while (len) {
+        if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) {
+            return false;
+        }
+
+        /* The translation might be valid for larger regions. */
+        if (plen > len) {
+            plen = len;
+        }
+
+        len -= plen;
+        addr += plen;
+    }
+
+    return true;
+}
+
+int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+                        void *buf, dma_addr_t len, DMADirection dir)
+{
+    target_phys_addr_t paddr, plen;
+    int err;
+
+#ifdef DEBUG_IOMMU
+    fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
+            DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
+#endif
+
+    while (len) {
+        err = dma->translate(dma, addr, &paddr, &plen, dir);
+        if (err) {
+            /*
+             * In case of failure on reads from the guest, we clean the
+             * destination buffer so that a device that doesn't test
+             * for errors will not expose qemu internal memory.
+             */
+            memset(buf, 0, len);
+            return -1;
+        }
+
+        /* The translation might be valid for larger regions. */
+        if (plen > len) {
+            plen = len;
+        }
+
+        cpu_physical_memory_rw(paddr, buf, plen,
+                               dir == DMA_DIRECTION_FROM_DEVICE);
+
+        len -= plen;
+        addr += plen;
+        buf += plen;
+    }
+
+    return 0;
+}
+
+int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
+                         dma_addr_t len)
+{
+    target_phys_addr_t paddr, plen;
+    int err;
+
+#ifdef DEBUG_IOMMU
+    fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
+            " len=0x" DMA_ADDR_FMT "\n", dma, addr, len);
+#endif
+
+    while (len) {
+        err = dma->translate(dma, addr, &paddr, &plen,
+                             DMA_DIRECTION_FROM_DEVICE);
+        if (err) {
+            return err;
+        }
+
+        /* The translation might be valid for larger regions. */
+        if (plen > len) {
+            plen = len;
+        }
+
+        do_dma_memory_set(paddr, c, plen);
+
+        len -= plen;
+        addr += plen;
+    }
+
+    return 0;
+}
+
+void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
+                      DMAMapFunc map, DMAUnmapFunc unmap)
+{
+#ifdef DEBUG_IOMMU
+    fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
+            dma, translate, map, unmap);
+#endif
+    dma->translate = translate;
+    dma->map = map;
+    dma->unmap = unmap;
+}
+
+void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
+                           DMADirection dir)
+{
+    int err;
+    target_phys_addr_t paddr, plen;
+    void *buf;
+
+    if (dma->map) {
+        return dma->map(dma, addr, len, dir);
+    }
+
+    plen = *len;
+    err = dma->translate(dma, addr, &paddr, &plen, dir);
+    if (err) {
+        return NULL;
+    }
+
+    /*
+     * If this is true, the virtual region is contiguous,
+     * but the translated physical region isn't. We just
+     * clamp *len, much like cpu_physical_memory_map() does.
+     */
+    if (plen < *len) {
+        *len = plen;
+    }
+
+    buf = cpu_physical_memory_map(paddr, &plen,
+                                  dir == DMA_DIRECTION_FROM_DEVICE);
+    *len = plen;
+
+    return buf;
+}
+
+void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
+                            DMADirection dir, dma_addr_t access_len)
+{
+    if (dma->unmap) {
+        dma->unmap(dma, buffer, len, dir, access_len);
+        return;
+    }
+
+    cpu_physical_memory_unmap(buffer, len,
+                              dir == DMA_DIRECTION_FROM_DEVICE,
+                              access_len);
+
+}
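
The loops above rely only on the DMATranslateFunc contract: for a bus address and transfer direction, the callback either fails or returns a physical address plus the length of the window the translation covers, which the helpers then clamp to the caller's length. As a rough sketch only (MyIOMMUState, its fixed-offset window and the error value are invented for illustration and are not part of this patch), a minimal translate callback could look like this:

/* Hypothetical example, not part of this patch: a single fixed-offset
 * window, using the DMAContext callbacks introduced above and QEMU's
 * container_of() helper. */
typedef struct MyIOMMUState {
    DMAContext dma;               /* handed to the dma_memory_*() helpers */
    dma_addr_t iova_base;         /* start of the valid bus-address window */
    dma_addr_t iova_size;         /* size of that window */
    target_phys_addr_t phys_off;  /* fixed IOVA -> physical offset */
} MyIOMMUState;

static int my_iommu_translate(DMAContext *dma, dma_addr_t addr,
                              target_phys_addr_t *paddr,
                              target_phys_addr_t *plen,
                              DMADirection dir)
{
    MyIOMMUState *s = container_of(dma, MyIOMMUState, dma);

    if (addr < s->iova_base || addr - s->iova_base >= s->iova_size) {
        return -1;                /* outside the window: fail the access */
    }

    *paddr = s->phys_off + (addr - s->iova_base);
    /* May be larger than the caller asked for; the helpers clamp it. */
    *plen = s->iova_size - (addr - s->iova_base);
    return 0;
}

Such a context would be registered once with dma_context_init(&s->dma, my_iommu_translate, NULL, NULL); leaving map/unmap NULL makes iommu_dma_memory_map() fall back to translate plus cpu_physical_memory_map(), as shown above.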
diff --git a/dma.h b/dma.h
index e93076abe0..f52a656fd5 100644
--- a/dma.h
+++ b/dma.h
@@ -31,28 +31,74 @@ struct QEMUSGList {
};
#if defined(TARGET_PHYS_ADDR_BITS)
-typedef target_phys_addr_t dma_addr_t;
-#define DMA_ADDR_BITS TARGET_PHYS_ADDR_BITS
-#define DMA_ADDR_FMT TARGET_FMT_plx
+/*
+ * When an IOMMU is present, bus addresses become distinct from
+ * CPU/memory physical addresses and may be a different size. Because
+ * the IOVA size depends more on the bus than on the platform, we more
+ * or less have to treat these as 64-bit always to cover all (or at
+ * least most) cases.
+ */
+typedef uint64_t dma_addr_t;
+
+#define DMA_ADDR_BITS 64
+#define DMA_ADDR_FMT "%" PRIx64
+
+typedef int DMATranslateFunc(DMAContext *dma,
+                             dma_addr_t addr,
+                             target_phys_addr_t *paddr,
+                             target_phys_addr_t *len,
+                             DMADirection dir);
+typedef void* DMAMapFunc(DMAContext *dma,
+                         dma_addr_t addr,
+                         dma_addr_t *len,
+                         DMADirection dir);
+typedef void DMAUnmapFunc(DMAContext *dma,
+                          void *buffer,
+                          dma_addr_t len,
+                          DMADirection dir,
+                          dma_addr_t access_len);
+
+struct DMAContext {
+    DMATranslateFunc *translate;
+    DMAMapFunc *map;
+    DMAUnmapFunc *unmap;
+};
+
+static inline bool dma_has_iommu(DMAContext *dma)
+{
+    return !!dma;
+}
 /* Checks that the given range of addresses is valid for DMA. This is
  * useful for certain cases, but usually you should just use
  * dma_memory_{read,write}() and check for errors */
-static inline bool dma_memory_valid(DMAContext *dma, dma_addr_t addr,
-                                    dma_addr_t len, DMADirection dir)
+bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
+                            DMADirection dir);
+static inline bool dma_memory_valid(DMAContext *dma,
+                                    dma_addr_t addr, dma_addr_t len,
+                                    DMADirection dir)
 {
-    /* Stub version, with no iommu we assume all bus addresses are valid */
-    return true;
+    if (!dma_has_iommu(dma)) {
+        return true;
+    } else {
+        return iommu_dma_memory_valid(dma, addr, len, dir);
+    }
 }
+int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+                        void *buf, dma_addr_t len, DMADirection dir);
 static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                                 void *buf, dma_addr_t len, DMADirection dir)
 {
-    /* Stub version when we have no iommu support */
-    cpu_physical_memory_rw(addr, buf, (target_phys_addr_t)len,
-                           dir == DMA_DIRECTION_FROM_DEVICE);
-    return 0;
+    if (!dma_has_iommu(dma)) {
+        /* Fast-path for no IOMMU */
+        cpu_physical_memory_rw(addr, buf, len,
+                               dir == DMA_DIRECTION_FROM_DEVICE);
+        return 0;
+    } else {
+        return iommu_dma_memory_rw(dma, addr, buf, len, dir);
+    }
 }
static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
@@ -68,28 +114,45 @@ static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
                          DMA_DIRECTION_FROM_DEVICE);
}
+int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
+                         dma_addr_t len);
+
 int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);
+void *iommu_dma_memory_map(DMAContext *dma,
+                           dma_addr_t addr, dma_addr_t *len,
+                           DMADirection dir);
 static inline void *dma_memory_map(DMAContext *dma,
                                    dma_addr_t addr, dma_addr_t *len,
                                    DMADirection dir)
 {
-    target_phys_addr_t xlen = *len;
-    void *p;
-
-    p = cpu_physical_memory_map(addr, &xlen,
-                                dir == DMA_DIRECTION_FROM_DEVICE);
-    *len = xlen;
-    return p;
+    if (!dma_has_iommu(dma)) {
+        target_phys_addr_t xlen = *len;
+        void *p;
+
+        p = cpu_physical_memory_map(addr, &xlen,
+                                    dir == DMA_DIRECTION_FROM_DEVICE);
+        *len = xlen;
+        return p;
+    } else {
+        return iommu_dma_memory_map(dma, addr, len, dir);
+    }
 }
+void iommu_dma_memory_unmap(DMAContext *dma,
+                            void *buffer, dma_addr_t len,
+                            DMADirection dir, dma_addr_t access_len);
 static inline void dma_memory_unmap(DMAContext *dma,
                                     void *buffer, dma_addr_t len,
                                     DMADirection dir, dma_addr_t access_len)
 {
-    return cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
-                                     dir == DMA_DIRECTION_FROM_DEVICE,
-                                     access_len);
+    if (!dma_has_iommu(dma)) {
+        return cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
+                                         dir == DMA_DIRECTION_FROM_DEVICE,
+                                         access_len);
+    } else {
+        iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
+    }
 }
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
@@ -130,6 +193,9 @@ DEFINE_LDST_DMA(q, q, 64, be);
#undef DEFINE_LDST_DMA
+void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
+                      DMAMapFunc map, DMAUnmapFunc unmap);
+
 struct ScatterGatherEntry {
     dma_addr_t base;
     dma_addr_t len;
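
From a device's point of view the only change is the extra DMAContext * argument: a NULL context keeps the old direct-physical path, while a non-NULL one routes the access through the iommu_* functions declared above. A hedged sketch of a caller (MyDeviceState, MyDescriptor and the dma field are invented names for illustration, not part of this patch):

/* Hypothetical device code, for illustration only. */
static int mydev_fetch_descriptor(MyDeviceState *s, dma_addr_t desc_addr,
                                  MyDescriptor *desc)
{
    /* s->dma may be NULL (no IOMMU); the inline wrapper then falls back
     * to cpu_physical_memory_rw() exactly as before this patch. */
    if (dma_memory_read(s->dma, desc_addr, desc, sizeof(*desc))) {
        return -1;    /* translation failed: signal a DMA error */
    }
    return 0;
}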
diff --git a/hw/qdev-dma.h b/hw/qdev-dma.h
index f0ff558414..6812735e3d 100644
--- a/hw/qdev-dma.h
+++ b/hw/qdev-dma.h
@@ -6,7 +6,5 @@
  * This work is licensed under the terms of the GNU GPL, version 2 or later.
  * See the COPYING file in the top-level directory.
  */
-#include "qdev-addr.h"
-
#define DEFINE_PROP_DMAADDR(_n, _s, _f, _d) \
-    DEFINE_PROP_TADDR(_n, _s, _f, _d)
+    DEFINE_PROP_HEX64(_n, _s, _f, _d)
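
Since DEFINE_PROP_DMAADDR now expands to DEFINE_PROP_HEX64, a DMA address property is an ordinary 64-bit qdev property regardless of the width of target_phys_addr_t. A hypothetical use (device and field names invented for illustration), where dma_base would be a dma_addr_t field in the device state:

/* Hypothetical qdev property list, for illustration only. */
static Property mydev_properties[] = {
    DEFINE_PROP_DMAADDR("dma-base", MyDeviceState, dma_base, 0),
    DEFINE_PROP_END_OF_LIST(),
};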