author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2007-10-31 16:42:19 +1100
committer Josh Boyer <jwboyer@linux.vnet.ibm.com>            2007-11-01 07:15:30 -0500
commit    b98ac05d5e460301fbea24cceed0f2a601c82e22 (patch)
tree      2e556ad28a007d13339300fbbd4942d0ec9f023c
parent    e701d269aa28996f3502780951fe1b12d5d66b49 (diff)
[POWERPC] 4xx: Deal with 44x virtually tagged icache
The 44x family has an interesting "feature" which is a virtually tagged instruction cache (yuck !). So far, we haven't dealt with it properly, which means we've been mostly lucky or people didn't report the problems, unless people have been running custom patches in their distro...

This is an attempt at fixing it properly. I chose to do it by setting a global flag whenever we change a PTE that was previously marked executable, and flushing the entire instruction cache upon return to user space when that happens.

This is a bit heavy handed, but it's hard to do more fine-grained flushes as the icbi instruction, on those processors, for some very strange reasons (since the cache is virtually mapped) still requires a valid TLB entry for reading in the target address space, which isn't something I want to deal with.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
-rw-r--r--  arch/powerpc/kernel/entry_32.S        23
-rw-r--r--  arch/powerpc/kernel/misc_32.S          9
-rw-r--r--  arch/powerpc/mm/44x_mmu.c              1
-rw-r--r--  arch/ppc/kernel/entry.S               23
-rw-r--r--  arch/ppc/kernel/misc.S                 9
-rw-r--r--  arch/ppc/mm/44x_mmu.c                  1
-rw-r--r--  include/asm-powerpc/pgtable-ppc32.h   13
7 files changed, 79 insertions(+), 0 deletions(-)
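
The heart of the change is a single global flag, icache_44x_need_flush, set from pte_update() whenever a user-executable PTE is modified and consumed on the way back to user space. The following C sketch shows where that hook sits; it is illustrative only (pte_update_sketch is a made-up name, and the real pte_update() in pgtable-ppc32.h performs the update with a lwarx/stwcx. inline-assembly loop rather than a plain store):

    /* Sketch only: shows where the 44x hook lives in pte_update().
     * The real kernel function updates the PTE atomically.
     */
    extern int icache_44x_need_flush;

    static inline unsigned long pte_update_sketch(pte_t *p, unsigned long clr,
                                                  unsigned long set)
    {
            unsigned long old = pte_val(*p);

            *p = __pte((old & ~clr) | set); /* non-atomic stand-in */
    #ifdef CONFIG_44x
            /* The PTE we just changed mapped user-executable memory, so the
             * virtually tagged icache may still hold stale lines for it.
             * Ask the exit-to-user path to invalidate the whole icache.
             */
            if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
                    icache_44x_need_flush = 1;
    #endif
            return old;
    }
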
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 21d889e63e8..a7572cf464b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -244,6 +244,13 @@ syscall_exit_cont:
andis. r10,r0,DBCR0_IC@h
bnel- load_dbcr0
#endif
+#ifdef CONFIG_44x
+ lis r4,icache_44x_need_flush@ha
+ lwz r5,icache_44x_need_flush@l(r4)
+ cmplwi cr0,r5,0
+ bne- 2f
+1:
+#endif /* CONFIG_44x */
stwcx. r0,0,r1 /* to clear the reservation */
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
@@ -258,6 +265,12 @@ syscall_exit_cont:
mtspr SPRN_SRR1,r8
SYNC
RFI
+#ifdef CONFIG_44x
+2: li r7,0
+ iccci r0,r0
+ stw r7,icache_44x_need_flush@l(r4)
+ b 1b
+#endif /* CONFIG_44x */
66: li r3,-ENOSYS
b ret_from_syscall
@@ -683,6 +696,16 @@ resume_kernel:
/* interrupts are hard-disabled at this point */
restore:
+#ifdef CONFIG_44x
+ lis r4,icache_44x_need_flush@ha
+ lwz r5,icache_44x_need_flush@l(r4)
+ cmplwi cr0,r5,0
+ beq+ 1f
+ li r6,0
+ iccci r0,r0
+ stw r6,icache_44x_need_flush@l(r4)
+1:
+#endif /* CONFIG_44x */
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
REST_4GPRS(3, r1)
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 0ed2c7eddc9..8b642ab26d3 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -543,12 +543,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
addi r3,r3,L1_CACHE_BYTES
bdnz 0b
sync
+#ifndef CONFIG_44x
+ /* We don't flush the icache on 44x. Those have a virtual icache
+ * and we don't have access to the virtual address here (it's
+ * not the page vaddr but where it's mapped in user space). The
+ * flushing of the icache on these is handled elsewhere, when
+ * a change in the address space occurs, before returning to
+ * user space
+ */
mtctr r4
1: icbi 0,r6
addi r6,r6,L1_CACHE_BYTES
bdnz 1b
sync
isync
+#endif /* CONFIG_44x */
blr
/*
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index c3df5047653..04dc08798d3 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -35,6 +35,7 @@
*/
unsigned int tlb_44x_index; /* = 0 */
unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
+int icache_44x_need_flush;
/*
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index fba7ca17a67..b19bfef2034 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -244,6 +244,13 @@ syscall_exit_cont:
andis. r10,r0,DBCR0_IC@h
bnel- load_dbcr0
#endif
+#ifdef CONFIG_44x
+ lis r4,icache_44x_need_flush@ha
+ lwz r5,icache_44x_need_flush@l(r4)
+ cmplwi cr0,r5,0
+ bne- 2f
+1:
+#endif /* CONFIG_44x */
stwcx. r0,0,r1 /* to clear the reservation */
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
@@ -258,6 +265,12 @@ syscall_exit_cont:
mtspr SPRN_SRR1,r8
SYNC
RFI
+#ifdef CONFIG_44x
+2: li r7,0
+ iccci r0,r0
+ stw r7,icache_44x_need_flush@l(r4)
+ b 1b
+#endif /* CONFIG_44x */
66: li r3,-ENOSYS
b ret_from_syscall
@@ -679,6 +692,16 @@ resume_kernel:
/* interrupts are hard-disabled at this point */
restore:
+#ifdef CONFIG_44x
+ lis r4,icache_44x_need_flush@ha
+ lwz r5,icache_44x_need_flush@l(r4)
+ cmplwi cr0,r5,0
+ beq+ 1f
+ li r6,0
+ iccci r0,r0
+ stw r6,icache_44x_need_flush@l(r4)
+1:
+#endif /* CONFIG_44x */
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
REST_4GPRS(3, r1)
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 2b81e71d6b2..e0c850d85c5 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -499,12 +499,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
addi r3,r3,L1_CACHE_BYTES
bdnz 0b
sync
+#ifndef CONFIG_44x
+ /* We don't flush the icache on 44x. Those have a virtual icache
+ * and we don't have access to the virtual address here (it's
+ * not the page vaddr but where it's mapped in user space). The
+ * flushing of the icache on these is handled elsewhere, when
+ * a change in the address space occurs, before returning to
+ * user space
+ */
mtctr r4
1: icbi 0,r6
addi r6,r6,L1_CACHE_BYTES
bdnz 1b
sync
isync
+#endif /* CONFIG_44x */
blr
/*
diff --git a/arch/ppc/mm/44x_mmu.c b/arch/ppc/mm/44x_mmu.c
index 0a0a0487b33..6536a25cfcb 100644
--- a/arch/ppc/mm/44x_mmu.c
+++ b/arch/ppc/mm/44x_mmu.c
@@ -61,6 +61,7 @@ extern char etext[], _stext[];
*/
unsigned int tlb_44x_index = 0;
unsigned int tlb_44x_hwater = 62;
+int icache_44x_need_flush;
/*
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 86a54a4a8a2..fea2d8ff1e7 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -11,6 +11,11 @@
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;
+
+#ifdef CONFIG_44x
+extern int icache_44x_need_flush;
+#endif
+
#endif /* __ASSEMBLY__ */
/*
@@ -562,6 +567,10 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
: "=&r" (old), "=&r" (tmp), "=m" (*p)
: "r" (p), "r" (clr), "r" (set), "m" (*p)
: "cc" );
+#ifdef CONFIG_44x
+ if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
+ icache_44x_need_flush = 1;
+#endif
return old;
}
#else
@@ -582,6 +591,10 @@ static inline unsigned long long pte_update(pte_t *p, unsigned long clr,
: "=&r" (old), "=&r" (tmp), "=m" (*p)
: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
: "cc" );
+#ifdef CONFIG_44x
+ if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
+ icache_44x_need_flush = 1;
+#endif
return old;
}
#endif
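
For reference, the assembly added to syscall_exit_cont and restore above amounts to the following C sketch. check_icache_flush_44x is a hypothetical name; the kernel does this directly in entry_32.S / entry.S with interrupts hard-disabled, and on 440 cores iccci invalidates the entire instruction cache regardless of its operands:

    /* Hypothetical C restatement of the exit-path check; the real code is
     * the assembly added to the syscall exit and interrupt return paths.
     */
    static inline void check_icache_flush_44x(void)
    {
    #ifdef CONFIG_44x
            if (icache_44x_need_flush) {
                    /* iccci r0,r0: invalidate the whole L1 instruction cache */
                    asm volatile("iccci 0, 0" : : : "memory");
                    icache_44x_need_flush = 0;
            }
    #endif
    }
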