author	David Mosberger-Tang <David.Mosberger@acm.org>	2005-07-25 22:23:00 -0700
committer	Tony Luck <tony.luck@intel.com>	2005-08-12 15:05:21 -0700
commit	badea125d7cbd93f1678a95cf009b3bdfe6065cd (patch)
tree	c9cd47cfc5f7474fdf60735548734e647a4f7a9d /include/asm-ia64
parent	7d69fa6266770eeb6317eddd46b64456e8a515bf (diff)
[IA64] Fix race in mm-context wrap-around logic.
The patch below fixes a race which could cause stale TLB entries. Specifically, when two CPUs raced for entrance to wrap_mmu_context(), the losing CPU would find that, by the time it acquired ctx.lock, mm->context already had a valid value; however, it then failed to (re-)check the delayed TLB flushing logic and hence could end up using a context number while stale entries were still present in its TLB. The fix is to check for delayed TLB flushes only after mm->context is valid (non-zero).

The patch also makes GCC v4.x happier by defining a non-volatile variant of mm_context_t called nv_mm_context_t.

Signed-off-by: David Mosberger-Tang <David.Mosberger@acm.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
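For reference, here is a condensed restatement of the patched get_mmu_context() flow, simplified from the diff below (not a drop-in replacement for the kernel header): the context is allocated with a double-check under ia64_ctx.lock, and the deferred TLB flush is honoured only once mm->context is known to be valid.

	/*
	 * Condensed sketch of the fixed ordering (see the full diff below).
	 * Re-checking mm->context under ia64_ctx.lock avoids allocating a
	 * second context for the same mm; calling delayed_tlb_flush() only
	 * after mm->context is valid closes the stale-TLB window.
	 */
	static inline nv_mm_context_t
	get_mmu_context (struct mm_struct *mm)
	{
		unsigned long flags;
		nv_mm_context_t context = mm->context;

		if (unlikely(!context)) {
			spin_lock_irqsave(&ia64_ctx.lock, flags);
			context = mm->context;	/* re-check, now that we hold the lock */
			if (context == 0) {
				cpus_clear(mm->cpu_vm_mask);
				if (ia64_ctx.next >= ia64_ctx.limit)
					wrap_mmu_context(mm);	/* may defer a TLB flush to this CPU */
				mm->context = context = ia64_ctx.next++;
			}
			spin_unlock_irqrestore(&ia64_ctx.lock, flags);
		}
		/* Ensure any deferred flush happens before "context" is used. */
		delayed_tlb_flush();
		return context;
	}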
Diffstat (limited to 'include/asm-ia64')
-rw-r--r--	include/asm-ia64/mmu.h	8
-rw-r--r--	include/asm-ia64/mmu_context.h	54
2 files changed, 37 insertions(+), 25 deletions(-)
diff --git a/include/asm-ia64/mmu.h b/include/asm-ia64/mmu.h
index ae1525352a2..611432ba579 100644
--- a/include/asm-ia64/mmu.h
+++ b/include/asm-ia64/mmu.h
@@ -2,10 +2,12 @@
#define __MMU_H
/*
- * Type for a context number. We declare it volatile to ensure proper ordering when it's
- * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
- * init_new_context()).
+ * Type for a context number. We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
*/
typedef volatile unsigned long mm_context_t;
+typedef unsigned long nv_mm_context_t;
+
#endif
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index e3e5fededb0..0680d163be9 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -55,34 +55,46 @@ static inline void
delayed_tlb_flush (void)
{
extern void local_flush_tlb_all (void);
+ unsigned long flags;
if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
- local_flush_tlb_all();
- __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+ spin_lock_irqsave(&ia64_ctx.lock, flags);
+ {
+ if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+ local_flush_tlb_all();
+ __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+ }
+ }
+ spin_unlock_irqrestore(&ia64_ctx.lock, flags);
}
}
-static inline mm_context_t
+static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
unsigned long flags;
- mm_context_t context = mm->context;
-
- if (context)
- return context;
-
- spin_lock_irqsave(&ia64_ctx.lock, flags);
- {
- /* re-check, now that we've got the lock: */
- context = mm->context;
- if (context == 0) {
- cpus_clear(mm->cpu_vm_mask);
- if (ia64_ctx.next >= ia64_ctx.limit)
- wrap_mmu_context(mm);
- mm->context = context = ia64_ctx.next++;
+ nv_mm_context_t context = mm->context;
+
+ if (unlikely(!context)) {
+ spin_lock_irqsave(&ia64_ctx.lock, flags);
+ {
+ /* re-check, now that we've got the lock: */
+ context = mm->context;
+ if (context == 0) {
+ cpus_clear(mm->cpu_vm_mask);
+ if (ia64_ctx.next >= ia64_ctx.limit)
+ wrap_mmu_context(mm);
+ mm->context = context = ia64_ctx.next++;
+ }
}
+ spin_unlock_irqrestore(&ia64_ctx.lock, flags);
}
- spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+ /*
+ * Ensure we're not starting to use "context" before any old
+ * uses of it are gone from our TLB.
+ */
+ delayed_tlb_flush();
+
return context;
}
@@ -104,7 +116,7 @@ destroy_context (struct mm_struct *mm)
}
static inline void
-reload_context (mm_context_t context)
+reload_context (nv_mm_context_t context)
{
unsigned long rid;
unsigned long rid_incr = 0;
@@ -138,7 +150,7 @@ reload_context (mm_context_t context)
static inline void
activate_context (struct mm_struct *mm)
{
- mm_context_t context;
+ nv_mm_context_t context;
do {
context = get_mmu_context(mm);
@@ -157,8 +169,6 @@ activate_context (struct mm_struct *mm)
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
- delayed_tlb_flush();
-
/*
* We may get interrupts here, but that's OK because interrupt handlers cannot
* touch user-space.