-rw-r--r--  xen/include/asm-arm/arm32/page.h    3
-rw-r--r--  xen/include/asm-arm/arm64/page.h    3
-rw-r--r--  xen/include/asm-arm/page.h         32
3 files changed, 38 insertions(+), 0 deletions(-)
diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index 20a6a7f94f..a07e2170d9 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -19,6 +19,9 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
        : : "r" (pte.bits), "r" (p) : "memory");
}

+/* Inline ASM to invalidate dcache on register R (may be an inline asm operand) */
+#define __invalidate_dcache_one(R) STORE_CP32(R, DCIMVAC)
+
/* Inline ASM to flush dcache on register R (may be an inline asm operand) */
#define __clean_dcache_one(R) STORE_CP32(R, DCCMVAC)
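As a sketch of how the new arm32 macro is intended to be used (the wrapper name below is hypothetical, not part of this patch): STORE_CP32(R, DCIMVAC) emits the mcr write for DCIMVAC, the ARMv7 "data cache invalidate by MVA to PoC" operation, with the address to invalidate supplied as inline asm operand %R.

    /* Hypothetical wrapper, for illustration only: invalidate the single
     * data cache line containing va.  Operand %0 carries the address. */
    static inline void invalidate_dcache_line(const void *va)
    {
        asm volatile (__invalidate_dcache_one(0) : : "r" (va));
    }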
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index 101d7a8c47..1fd416d4b2 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -14,6 +14,9 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
        : : "r" (pte.bits), "r" (p) : "memory");
}

+/* Inline ASM to invalidate dcache on register R (may be an inline asm operand) */
+#define __invalidate_dcache_one(R) "dc ivac, %" #R ";"
+
/* Inline ASM to flush dcache on register R (may be an inline asm operand) */
#define __clean_dcache_one(R) "dc cvac, %" #R ";"
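On arm64 the same interface expands to a plain dc ivac instruction ("invalidate data cache line by VA to PoC"); with R = 0 the stringized macro becomes the literal "dc ivac, %0;". A minimal sketch of the equivalence (the wrapper is hypothetical):

    /* Hypothetical wrapper, for illustration only. */
    static inline void invalidate_dcache_line(const void *p)
    {
        asm volatile (__invalidate_dcache_one(0) : : "r" (p));
        /* ...expands to: asm volatile ("dc ivac, %0;" : : "r" (p)); */
    }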
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 69e9a61ab7..53d4b6391f 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -268,6 +268,38 @@ extern size_t cacheline_bytes;
/* Functions for flushing medium-sized areas.
 * If 'range' is large enough we might want to use model-specific
 * full-cache flushes. */
+
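+/*
+ * Discard the data cache lines covering [p, p+size).  A misaligned
+ * head or tail shares its cache line with data outside the range, so
+ * those lines are cleaned+invalidated rather than just invalidated,
+ * to avoid throwing away the neighbouring bytes.
+ */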
+static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
+{
+    size_t off;
+    const void *end = p + size;
+
+    dsb(sy);           /* So the CPU issues all writes to the range */
+
+    off = (unsigned long)p % cacheline_bytes;
+    if ( off )
+    {
+        p -= off;
+        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
+        p += cacheline_bytes;
+        size -= cacheline_bytes - off;
+    }
+    off = (unsigned long)end % cacheline_bytes;
+    if ( off )
+    {
+        end -= off;
+        size -= off;
+        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (end));
+    }
+
+    for ( ; p < end; p += cacheline_bytes )
+        asm volatile (__invalidate_dcache_one(0) : : "r" (p));
+
+    dsb(sy);           /* So we know the flushes happen before continuing */
+
+    return 0;
+}
+
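A usage sketch (the caller below is hypothetical, not part of this patch): invalidation is the operation needed when memory has been written behind the CPU's back, e.g. by a DMA-capable device, so that stale cache lines do not mask the new data. Thanks to the head/tail handling above, the bounds need not be cache-line aligned; data sharing a partial line is cleaned back to memory first.

    /* Hypothetical example: make the CPU see what the device wrote.
     * dma_rx_done() and its signature are made up for illustration. */
    static void dma_rx_done(void *buf, unsigned long len)
    {
        invalidate_dcache_va_range(buf, len);  /* drop stale lines */
        /* loads from buf now fetch the device's data from memory */
    }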
static inline int clean_dcache_va_range(const void *p, unsigned long size)
{
    const void *end;