From 5a34ac3cf18e9a88e080f0b3cfa4b20c51aa7db1 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 31 May 2011 15:38:43 +0100
Subject: ARM: Use TTBR1 instead of reserved context ID

On ARMv7 CPUs that cache first level page table entries (like the
Cortex-A15), using a reserved ASID while changing the TTBR or flushing
the TLB is unsafe.

This is because the CPU may cache the first level entry as the result
of a speculative memory access while the reserved ASID is assigned.
After the process owning the page tables dies, the memory will be
reallocated and may be written with junk values which can be
interpreted as global, valid PTEs by the processor. This will result
in the TLB being populated with bogus global entries.

This patch avoids the use of a reserved context ID in the v7 switch_mm
and ASID rollover code by temporarily using the swapper_pg_dir pointed
at by TTBR1, which contains only global entries that are not tagged
with ASIDs.

Reviewed-by: Frank Rowand
Tested-by: Marc Zyngier
Cc: Russell King
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm/mm/context.c        | 45 ++++++++++++++++++++++++++------------------
 arch/arm/mm/proc-v7-2level.S | 10 ++++------
 2 files changed, 31 insertions(+), 24 deletions(-)

diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index ee9bb363d606..97842a09a6eb 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -23,25 +23,37 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 #endif
 
 #ifdef CONFIG_ARM_LPAE
-#define cpu_set_asid(asid) {						\
-	unsigned long ttbl, ttbh;					\
-	asm volatile(							\
-	"	mrrc	p15, 0, %0, %1, c2	@ read TTBR0\n"		\
-	"	mov	%1, %2, lsl #(48 - 32)	@ set ASID\n"		\
-	"	mcrr	p15, 0, %0, %1, c2	@ set TTBR0\n"		\
-	: "=&r" (ttbl), "=&r" (ttbh)					\
-	: "r" (asid & ~ASID_MASK));					\
+static void cpu_set_reserved_ttbr0(void)
+{
+	unsigned long ttbl = __pa(swapper_pg_dir);
+	unsigned long ttbh = 0;
+
+	/*
+	 * Set TTBR0 to swapper_pg_dir. Note that swapper_pg_dir only contains
+	 * global entries so the ASID value is not relevant.
+	 */
+	asm volatile(
+	"	mcrr	p15, 0, %0, %1, c2	@ set TTBR0\n"
+	:
+	: "r" (ttbl), "r" (ttbh));
+	isb();
 }
 #else
-#define cpu_set_asid(asid) \
-	asm("	mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+static void cpu_set_reserved_ttbr0(void)
+{
+	u32 ttb;
+	/* Copy TTBR1 into TTBR0 */
+	asm volatile(
+	"	mrc	p15, 0, %0, c2, c0, 1	@ read TTBR1\n"
+	"	mcr	p15, 0, %0, c2, c0, 0	@ set TTBR0\n"
+	: "=r" (ttb));
+	isb();
+}
 #endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
- * to run in.  We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -51,9 +63,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 static void flush_context(void)
 {
-	/* set the reserved ASID before flushing the TLB */
-	cpu_set_asid(0);
-	isb();
+	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
 		__flush_icache_all();
@@ -114,8 +124,7 @@ static void reset_context(void *info)
 	set_mm_context(mm, asid);
 
 	/* set the new ASID */
-	cpu_set_asid(mm->context.id);
-	isb();
+	cpu_switch_mm(mm->pgd, mm);
 }
 
 #else
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 3a4b3e7b888c..72270482a922 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -46,18 +46,16 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_ARM_ERRATA_430973
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 #endif
-#ifdef CONFIG_ARM_ERRATA_754322
-	dsb
-#endif
-	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
-	isb
-1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	mrc	p15, 0, r2, c2, c0, 1		@ load TTB 1
+	mcr	p15, 0, r2, c2, c0, 0		@ into TTB 0
 	isb
 #ifdef CONFIG_ARM_ERRATA_754322
 	dsb
 #endif
 	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
 	isb
+	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	isb
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_switch_mm)
-- 
cgit v1.2.3
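To make the ordering in the new cpu_v7_switch_mm explicit, here is a minimal C
sketch of the same sequence (illustration only, not part of the patch; the
helper name is invented and the BTB/errata workarounds are omitted). The point
is that the ASID in CONTEXTIDR only ever changes while TTBR0 points at the
global-only swapper_pg_dir, so a speculative table walk can never pair the old
ASID with the new page tables, or the new ASID with the old ones.

/*
 * Illustrative sketch only -- not part of the patch. Mirrors the new
 * classic (2-level) cpu_v7_switch_mm sequence above, minus the
 * ARM_ERRATA_430973/754322 workarounds. The helper name is hypothetical.
 */
static inline void v7_switch_mm_sketch(unsigned long pgd_phys, unsigned int asid)
{
	unsigned long ttb1;

	/* Read TTBR1, which always points at the global-only swapper_pg_dir. */
	asm volatile("mrc p15, 0, %0, c2, c0, 1" : "=r" (ttb1));

	/* Park TTBR0 on the global tables; no ASID-tagged first level walks now. */
	asm volatile("mcr p15, 0, %0, c2, c0, 0" : : "r" (ttb1));
	asm volatile("isb" : : : "memory");

	/* Install the new ASID while only global entries can be speculated. */
	asm volatile("mcr p15, 0, %0, c13, c0, 1" : : "r" (asid));
	asm volatile("isb" : : : "memory");

	/* Finally point TTBR0 at the new process page tables. */
	asm volatile("mcr p15, 0, %0, c2, c0, 0" : : "r" (pgd_phys));
	asm volatile("isb" : : : "memory");
}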
From 35d5dcd371217884374a2be9af99b659f9366479 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 26 May 2011 11:24:25 +0100
Subject: ARM: Allow ASID 0 to be allocated to tasks

Now that ASID 0 is no longer used as a reserved value, allow it to be
allocated to tasks.

Reviewed-by: Frank Rowand
Tested-by: Marc Zyngier
Cc: Russell King
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm/mm/context.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 97842a09a6eb..efa413ad3bc1 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -118,7 +118,7 @@ static void reset_context(void *info)
 		return;
 
 	smp_rmb();
-	asid = cpu_last_asid + cpu + 1;
+	asid = cpu_last_asid + cpu;
 
 	flush_context();
 	set_mm_context(mm, asid);
@@ -167,13 +167,13 @@ void __new_context(struct mm_struct *mm)
 	 * to start a new version and flush the TLB.
 	 */
 	if (unlikely((asid & ~ASID_MASK) == 0)) {
-		asid = cpu_last_asid + smp_processor_id() + 1;
+		asid = cpu_last_asid + smp_processor_id();
 		flush_context();
 #ifdef CONFIG_SMP
 		smp_wmb();
 		smp_call_function(reset_context, NULL, 1);
 #endif
-		cpu_last_asid += NR_CPUS;
+		cpu_last_asid += NR_CPUS - 1;
 	}
 
 	set_mm_context(mm, asid);
-- 
cgit v1.2.3
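The rollover test in the second hunk, (asid & ~ASID_MASK) == 0, operates on the
packed context ID. For reference, here is a small sketch of that layout,
assuming the usual ARMv7 value of ASID_BITS == 8 (the helper names are
invented): after this patch, a hardware ASID of 0 in the low byte is an
ordinary allocation rather than a reserved marker.

/*
 * Sketch only, assuming ASID_BITS == 8 as on ARMv7. mm->context.id packs
 * a generation count above the 8-bit hardware ASID; the low byte is what
 * ends up in CONTEXTIDR and, after this patch, 0 is a valid allocation.
 */
#define ASID_BITS	8
#define ASID_MASK	((~0UL) << ASID_BITS)

static inline unsigned int hw_asid(unsigned long context_id)
{
	return context_id & ~ASID_MASK;		/* low byte -> CONTEXTIDR */
}

static inline unsigned long asid_generation(unsigned long context_id)
{
	return context_id & ASID_MASK;		/* wraps of the low byte bump this */
}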
From ffb66d370de41ad36b249cd9621f58d3a27ecf21 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Tue, 14 Feb 2012 12:09:08 +0000
Subject: ARM: Set bit 22 in the PL310 (cache controller) AuxCtlr register

Clearing bit 22 in the PL310 Auxiliary Control register (shared
attribute override enable) has the side effect of transforming Normal
Shared Non-cacheable reads into Cacheable no-allocate reads.

Coherent DMA buffers in Linux always have a Cacheable alias via the
kernel linear mapping, and the processor can speculatively load cache
lines into the PL310 controller. With bit 22 cleared, Non-cacheable
reads would unexpectedly hit such cache lines, leading to buffer
corruption.

This patch ensures that bit 22 is set in the l2x0_init() function
whenever a PL310 is detected, rather than relying on the platform code
to specify it. It also modifies the 'aux' variable only if the register
is actually written, so that the final printk displays the real
hardware value.

Signed-off-by: Catalin Marinas
Tested-by: Kyungmin Park
---
 arch/arm/mm/cache-l2x0.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 2a8e380501e8..81f1dd6280c1 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -319,9 +319,6 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
 	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
-	aux &= aux_mask;
-	aux |= aux_val;
-
 	/* Determine the number of ways */
 	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 	case L2X0_CACHE_ID_PART_L310:
@@ -335,6 +332,13 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 		sync_reg_offset = L2X0_DUMMY_REG;
 #endif
 		outer_cache.set_debug = pl310_set_debug;
+
+		/*
+		 * Set bit 22 in the auxiliary control register. If this bit
+		 * is cleared, PL310 treats Normal Shared Non-cacheable
+		 * accesses as Cacheable no-allocate.
+		 */
+		aux_val |= 1 << 22;
 		break;
 	case L2X0_CACHE_ID_PART_L210:
 		ways = (aux >> 13) & 0xf;
@@ -365,6 +369,9 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 		/* Make sure that I&D is not locked down when starting */
 		l2x0_unlock(cache_id);
 
+		aux &= aux_mask;
+		aux |= aux_val;
+
 		/* l2x0 controller is disabled */
 		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
 
-- 
cgit v1.2.3
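Condensing the three l2x0_init() hunks, the value that finally reaches the
Auxiliary Control register is derived as in the sketch below (illustration
only; the function and macro names are invented, and only the 1 << 22 bit
position comes from the patch). Because the mask/value pair is now applied
just before the write, an already-enabled controller keeps its register
untouched and the final printk shows the real hardware value.

/*
 * Illustrative sketch, not kernel code: how the PL310 Auxiliary Control
 * value is computed after this patch. The macro name is hypothetical;
 * only the bit position (22, shared attribute override enable) is taken
 * from the patch.
 */
#include <stdint.h>

#define L310_SHARED_ATT_OVERRIDE_EN	(1u << 22)

static uint32_t pl310_effective_aux(uint32_t hw_aux, uint32_t aux_val, uint32_t aux_mask)
{
	aux_val |= L310_SHARED_ATT_OVERRIDE_EN;	/* forced on for PL310, independent of platform code */
	return (hw_aux & aux_mask) | aux_val;	/* applied only just before the AUX_CTRL write */
}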