From 4a40d59021b6b895e3f19e53a5dfee9fefca7855 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 19 Oct 2012 17:53:01 +0100
Subject: ARM: 7559/1: smp: switch away from the idmap before updating init_mm.mm_count

commit 5f40b909728ad784eb43aa309d3c4e9bdf050781 upstream.

When booting a secondary CPU, the primary CPU hands two sets of page
tables via the secondary_data struct:

	(1) swapper_pg_dir: a normal, cacheable, shared (if SMP) mapping
	    of the kernel image (i.e. the tables used by init_mm).

	(2) idmap_pgd: an uncached mapping of the .idmap.text ELF
	    section.

The idmap is generally used when enabling and disabling the MMU, which
includes early CPU boot. In this case, the secondary CPU switches to
swapper as soon as it enters C code:

	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);

This causes a problem on ARMv7, where the identity mapping is treated
as strongly-ordered leading to architecturally UNPREDICTABLE behaviour
of exclusive accesses, such as those used by atomic_inc.

This patch re-orders the secondary_start_kernel function so that we
switch to swapper before performing any exclusive accesses.

Cc: David McKay
Reported-by: Gilles Chanteperdrix
Signed-off-by: Will Deacon
Signed-off-by: Russell King
Signed-off-by: Ben Hutchings
---
 arch/arm/kernel/smp.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1d1710e715c9..bfa0eeb5e7f4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -295,18 +295,24 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
 asmlinkage void __cpuinit secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
+
+	/*
+	 * The identity mapping is uncached (strongly ordered), so
+	 * switch away from it before attempting any exclusive accesses.
+	 */
+	cpu_switch_mm(mm->pgd, mm);
+	enter_lazy_tlb(mm, current);
+	local_flush_tlb_all();
 
 	/*
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
+	cpu = smp_processor_id();
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
-	cpu_switch_mm(mm->pgd, mm);
-	enter_lazy_tlb(mm, current);
-	local_flush_tlb_all();
 
 	printk("CPU%u: Booted secondary processor\n", cpu);
--
cgit v1.2.3
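
For context on why the atomic_inc() matters: on ARMv6 and later, the kernel's
atomic operations are built from LDREX/STREX exclusive-access retry loops, and
it is exactly these exclusive accesses whose behaviour the architecture leaves
UNPREDICTABLE when they target strongly-ordered memory such as the ARMv7 idmap.
The snippet below is a minimal, illustrative sketch of such a loop, not the
kernel's actual atomic_inc implementation; the name atomic_inc_sketch and the
exact operand constraints are assumptions, and it presumes a GCC-style ARM
toolchain targeting ARMv6 or later.

	/*
	 * Illustrative only: a simplified LDREX/STREX increment loop of the
	 * kind atomic_inc() expands to on ARMv6+.  If "counter" is mapped
	 * strongly-ordered (as the idmap is on ARMv7), the exclusive monitor
	 * behaviour is UNPREDICTABLE and the loop may never make progress.
	 */
	static inline void atomic_inc_sketch(int *counter)
	{
		int newval, failed;

		__asm__ __volatile__(
		"1:	ldrex	%0, [%2]\n"	/* load-exclusive the counter    */
		"	add	%0, %0, #1\n"	/* increment                     */
		"	strex	%1, %0, [%2]\n"	/* store-exclusive; %1 = status  */
		"	teq	%1, #0\n"	/* 0 means the store succeeded   */
		"	bne	1b"		/* otherwise retry from the load */
		: "=&r" (newval), "=&r" (failed)
		: "r" (counter)
		: "cc", "memory");
	}

This is why the patch performs cpu_switch_mm() onto swapper_pg_dir before any
such loop runs, so the exclusive accesses hit a normal, cacheable mapping.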