Diffstat (limited to 'arch/powerpc/mm/slb.c'):
 arch/powerpc/mm/slb.c | 36 ++++++++++++++++++++++++++----------
 1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a73d2d70097..ff1811ac6c8 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -74,6 +74,22 @@ static inline void slb_shadow_clear(unsigned long entry)
 	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
+static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
+					unsigned long entry)
+{
+	/*
+	 * Updating the shadow buffer before writing the SLB ensures
+	 * we don't get a stale entry here if we get preempted by PHYP
+	 * between these two statements.
+	 */
+	slb_shadow_update(ea, flags, entry);
+
+	asm volatile("slbmte %0,%1" :
+		     : "r" (mk_vsid_data(ea, flags)),
+		       "r" (mk_esid_data(ea, entry))
+		     : "memory" );
+}
+
 void slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
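
For readers following the slbmte operands above: mk_vsid_data() and mk_esid_data() are small helpers defined earlier in slb.c, not part of this diff. A sketch of their shape, roughly as they appeared in the file of this era (the exact masks and shift come from the hash MMU headers; shown for context only):

/*
 * Context-only sketch of helpers defined elsewhere in
 * arch/powerpc/mm/slb.c at the time of this commit.
 */
#include <asm/mmu.h>	/* ESID_MASK, SLB_ESID_V, SLB_VSID_SHIFT, get_kernel_vsid() */

static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
	/* Effective segment ID of 'ea', the valid bit, and the SLB slot index. */
	return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
	/* Kernel virtual segment ID shifted into place, plus protection/LLP flags. */
	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

Because create_shadowed_slbe() writes the same values into the shadow buffer before issuing slbmte, the hypervisor can always rebuild a consistent SLB from the shadow if it preempts the partition between the two steps, which is the point of the comment in the function body.
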
@@ -226,12 +242,16 @@ void slb_initialize(void)
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
-	slb_shadow_update(PAGE_OFFSET, lflags, 0);
-	asm volatile("isync; slbia; sync; slbmte %0,%1; isync" ::
-		     "r" (get_slb_shadow()->save_area[0].vsid),
-		     "r" (get_slb_shadow()->save_area[0].esid) : "memory");
-
-	slb_shadow_update(VMALLOC_START, vflags, 1);
-
-	slb_flush_and_rebolt();
+	asm volatile("isync":::"memory");
+	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
+	asm volatile("isync; slbia; isync":::"memory");
+	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
+
+	create_shadowed_slbe(VMALLOC_START, vflags, 1);
+
+	/* We don't bolt the stack for the time being - we're in boot,
+	 * so the stack is in the bolted segment. By the time it goes
+	 * elsewhere, we'll call _switch() which will bolt in the new
+	 * one. */
+	asm volatile("isync":::"memory");
 }
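
The new boot-time invalidation sequence is worth unpacking. One reading (editorial interpretation, not text from the commit): slbia traditionally does not invalidate SLB entry 0 on these processors, so slot 0 is cleared explicitly first by writing an all-zero entry (valid bit clear, slot index 0) with slbmte, and the surrounding isyncs provide the context synchronization the SLB instructions require. A minimal annotated restatement, under those assumptions (the helper name is hypothetical):

/*
 * Annotated restatement of the invalidation steps added to
 * slb_initialize(); comments are editorial, not from the commit.
 */
static void slb_invalidate_all(void)	/* hypothetical helper name */
{
	/* Context-synchronize before touching the SLB. */
	asm volatile("isync" : : : "memory");

	/* slbia is assumed to leave SLB entry 0 alone, so invalidate
	 * slot 0 explicitly by writing an all-zero (invalid) entry.
	 */
	asm volatile("slbmte %0,%0" : : "r" (0) : "memory");

	/* Invalidate every remaining SLB entry (and the ERATs), with
	 * context synchronization on both sides.
	 */
	asm volatile("isync; slbia; isync" : : : "memory");
}

Compared with the removed code, which folded invalidation and the first bolted entry into one asm string and then called slb_flush_and_rebolt(), this version makes the whole-SLB invalidation explicit before any bolted entries are recreated; the trailing isync at the end of slb_initialize() ensures the new bolted entries take effect before the function returns.
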