diff options
author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2013-03-11 17:08:49 +0100
committer  Steven Rostedt <rostedt@rostedt.homelinux.com>      2013-06-06 21:28:32 -0400
commit     97f2278eb517838422a0a57a48e5c244a05c0d69 (patch)
tree       b8ea52b0ef12efb70d8f4f43f6569396232750d7
parent     e7850f464e656cf3dc6fd3b8bd30069a4ba7c279 (diff)
x86/highmem: close race between clear/set ptes
If the task is interrupted during a kmap_atomic() / kunmap_atomic() (or
the same code in kmap_atomic_prot_pfn() and its counterpart) it may
race against switch_kmaps() and trigger a false positive warning.
In kmap_atomic_prot() we first grab a new index via
kmap_atomic_idx_push() and then check if the slot is already in use.
If we get interrupted after taking the index then switch_kmaps() will
assume that the index is in use and write the old entry from the
kmap_pte member. Since __kunmap_atomic() never invalidates this member
it might write an old entry and now it looks like this entry is already
in use and a WARN_ON() is seen.
This patch sets the shadow pte entry to 0 so pte_none() doesn't trigger
a warning.
While here, I add a BUG_ON() to kmap_atomic_idx_push() which is also in
the non-RT case.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 arch/x86/kernel/process_32.c | 3 ++-
 arch/x86/mm/highmem_32.c     | 3 +++
 arch/x86/mm/iomap_32.c       | 3 +++
 include/linux/highmem.h      | 4 +++-
 4 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 33e5d1477b8b..ebcee602aae9 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -218,7 +218,8 @@ static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)

 	for (i = 0; i < next_p->kmap_idx; i++) {
 		int idx = i + KM_TYPE_NR * smp_processor_id();
-		set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
+		if (!pte_none(next_p->kmap_pte[i]))
+			set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
 	}
 }
 #else
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 09357898881c..6e5ac8bdd847 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -91,6 +91,9 @@ void __kunmap_atomic(void *kvaddr)
 		 * is a bad idea also, in case the page changes cacheability
 		 * attributes or becomes a protected page in a hypervisor.
 		 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+		current->kmap_pte[type] = __pte(0);
+#endif
 		kpte_clear_flush(kmap_pte-idx, vaddr);
 		kmap_atomic_idx_pop();
 		arch_flush_lazy_mmu_mode();
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 4e1d4d585948..0c953e399b96 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -114,6 +114,9 @@ iounmap_atomic(void __iomem *kvaddr)
 		 * is a bad idea also, in case the page changes cacheability
 		 * attributes or becomes a protected page in a hypervisor.
 		 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+		current->kmap_pte[type] = __pte(0);
+#endif
 		kpte_clear_flush(kmap_pte-idx, vaddr);
 		kmap_atomic_idx_pop();
 	}
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index cac451548fc1..4ec3d97fdfda 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -100,7 +100,9 @@ static inline int kmap_atomic_idx_push(void)
 # endif
 	return idx;
 #else
-	return current->kmap_idx++;
+	current->kmap_idx++;
+	BUG_ON(current->kmap_idx > KM_TYPE_NR);
+	return current->kmap_idx - 1;
 #endif
 }