author:    Mark Brown <broonie@kernel.org>  2015-01-24 11:54:15 +0000
committer: Mark Brown <broonie@kernel.org>  2015-01-24 11:54:15 +0000
commit:    93510b46df6de7db4ca3f116ac1386b24d120566
tree:      8fb61f128c11c3208721db79204fb3e170c4beb6
parent:    eb72e852c75d21ed6781027f0e3976bb7bd29f08
parent:    15f82fce54b86e159fe5a1d41dcdc89e12b26459
Merge branch 'linux-linaro-lsk' into linux-linaro-lsk-rt (lsk-v3.10-rt-15.01)
Diffstat (limited to 'arch/arm64/include/asm/tlbflush.h')
-rw-r--r--  arch/arm64/include/asm/tlbflush.h  29
1 file changed, 26 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 3796ea6bb734..73f0ce570fb3 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -98,8 +98,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-					unsigned long start, unsigned long end)
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+					unsigned long start, unsigned long end)
 {
 	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
 	unsigned long addr;
@@ -112,7 +112,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
 	start >>= 12;
@@ -126,6 +126,29 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 }
 
 /*
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
+ * necessarily a performance improvement.
+ */
+#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	if ((end - start) <= MAX_TLB_RANGE)
+		__flush_tlb_range(vma, start, end);
+	else
+		flush_tlb_mm(vma->vm_mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	if ((end - start) <= MAX_TLB_RANGE)
+		__flush_tlb_kernel_range(start, end);
+	else
+		flush_tlb_all();
+}
+
+/*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
 static inline void update_mmu_cache(struct vm_area_struct *vma,
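
Note: the guard added by this patch caps how much work the per-page flush loop can
do. Ranges of at most MAX_TLB_RANGE (1024 pages, i.e. 4 MiB with 4 KiB pages) are
flushed entry by entry; anything larger falls back to a single whole-context flush
(flush_tlb_mm() or flush_tlb_all()), so the loop cannot run long enough to trip the
soft-lockup watchdog. Below is a minimal user-space sketch of that decision logic;
flush_range(), flush_all() and flush_tlb_range_sketch() are illustrative stand-ins
for the kernel helpers, and PAGE_SHIFT = 12 is an assumption, not kernel code.

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)	/* same cutoff as the patch */

static void flush_range(unsigned long start, unsigned long end)
{
	/* Stand-in for __flush_tlb_range(): one operation per page. */
	printf("per-page flush of %lu pages\n", (end - start) >> PAGE_SHIFT);
}

static void flush_all(void)
{
	/* Stand-in for flush_tlb_mm()/flush_tlb_all(): one global flush. */
	printf("full TLB flush\n");
}

static void flush_tlb_range_sketch(unsigned long start, unsigned long end)
{
	if ((end - start) <= MAX_TLB_RANGE)
		flush_range(start, end);	/* bounded loop */
	else
		flush_all();			/* single global operation */
}

int main(void)
{
	flush_tlb_range_sketch(0, 64UL << PAGE_SHIFT);	 /* 64 pages: per-page */
	flush_tlb_range_sketch(0, 8192UL << PAGE_SHIFT); /* 8192 pages: full */
	return 0;
}

With 4 KiB pages the cutoff works out to 1024 << 12 = 4 MiB; the same 1024-page cap
would cover 64 MiB with 64 KiB pages (PAGE_SHIFT = 16).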
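Note: the `asid << 48` and `start >>= 12` visible in the hunks above reflect how the
ARMv8 TLBI by-VA operand is packed: the 16-bit ASID occupies bits 63:48 and the
virtual page number sits in the low bits. The sketch below shows that packing with
made-up ASID and VA values; the OR of the two halves happens in loop code not shown
in these hunks, so treat the combination as an assumption rather than a quote of the
kernel source.

#include <stdio.h>

int main(void)
{
	/* Hypothetical values, purely illustrative. */
	unsigned long asid = 0x42UL << 48;	     /* ASID in bits 63:48 */
	unsigned long va   = 0x0000007fb3d45000UL;  /* 4 KiB-aligned user VA */

	/* Page number in the low bits, ASID on top: assumed to mirror the
	 * addr | asid packing performed by the flush loop. */
	unsigned long op = (va >> 12) | asid;

	printf("TLBI operand: %#018lx\n", op);	/* 0x0042000007fb3d45 */
	return 0;
}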