From b849a60e0903b1c5430c3859864554662e127a8d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?=
Date: Mon, 16 Jan 2012 10:34:31 +0100
Subject: ARM: make cr_alignment read-only #ifndef CONFIG_CPU_CP15
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This makes cr_alignment a constant 0 to break code that tries to modify
the value, since such code is likely built on a wrong assumption when
CONFIG_CPU_CP15 isn't defined. For code that only reads the value, 0 is
more or less a fine value to report.

Signed-off-by: Uwe Kleine-König
Message-Id: 1358413196-5609-2-git-send-email-u.kleine-koenig@pengutronix.de (v8)
---
 arch/arm/mm/alignment.c |  2 ++
 arch/arm/mm/mmu.c       | 17 +++++++++++++++++
 2 files changed, 19 insertions(+)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b820edaf318..feeb3eaccb1 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -964,12 +964,14 @@ static int __init alignment_init(void)
 		return -ENOMEM;
 #endif

+#ifdef CONFIG_CPU_CP15
 	if (cpu_is_v6_unaligned()) {
 		cr_alignment &= ~CR_A;
 		cr_no_alignment &= ~CR_A;
 		set_cr(cr_alignment);
 		ai_usermode = safe_usermode(ai_usermode, false);
 	}
+#endif

 	hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
 			"alignment exception");
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ce328c7f5c9..7c347bcc942 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -97,6 +97,7 @@ static struct cachepolicy cache_policies[] __initdata = {
 	}
 };

+#ifdef CONFIG_CPU_CP15
 /*
  * These are useful for identifying cache coherency
  * problems by allowing the cache or the cache and
@@ -195,6 +196,22 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif

+#else /* ifdef CONFIG_CPU_CP15 */
+
+static int __init early_cachepolicy(char *p)
+{
+	pr_warning("cachepolicy kernel parameter not supported without cp15\n");
+}
+early_param("cachepolicy", early_cachepolicy);
+
+static int __init noalign_setup(char *__unused)
+{
+	pr_warning("noalign kernel parameter not supported without cp15\n");
+}
+__setup("noalign", noalign_setup);
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

--
cgit v1.2.3
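Since this listing is limited to arch/arm/mm, the hunk that actually turns
cr_alignment into a read-only constant (in the cp15 header) does not appear
above. A minimal sketch of the idiom the commit message describes, with
illustrative declarations rather than the exact kernel hunk:

#ifdef CONFIG_CPU_CP15
extern unsigned long cr_alignment;	/* kept in sync with the control reg via set_cr() */
#else
/*
 * No cp15, hence no control register: reads of cr_alignment still get a
 * more or less sane value (0), but a leftover write such as
 *
 *	cr_alignment &= ~CR_A;
 *
 * now fails to compile instead of papering over a wrong assumption.
 */
#define cr_alignment	0UL
#endif

This is why the alignment.c hunk above has to guard the writers with
#ifdef CONFIG_CPU_CP15: only the reads would still build.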
From bc7dea00a9a0ea385bbc4aed67dfdcf7d8879953 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?=
Date: Fri, 9 Dec 2011 20:52:10 +0100
Subject: ARM: let CPUs not being able to run in ARM mode enter in THUMB mode
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Some ARM cores (e.g. Cortex-M3) are not capable of running in ARM mode,
so they obviously cannot enter the kernel in ARM mode. Make an exception
for them and let them enter in THUMB mode.

Signed-off-by: Uwe Kleine-König
Message-Id: 1358162123-30113-1-git-send-email-u.kleine-koenig@pengutronix.de
Acked-by: Nicolas Pitre
---
 arch/arm/mm/Kconfig | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 3fd629d5a51..8defd638c79 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -397,6 +397,13 @@ config CPU_V7
 	select CPU_PABRT_V7
 	select CPU_TLB_V7 if MMU

+config CPU_THUMBONLY
+	bool
+	# There are no CPUs available with MMU that don't implement an ARM ISA:
+	depends on !MMU
+	help
+	  Select this if your CPU doesn't support the 32 bit ARM instructions.
+
 # Figure out what processor architecture version we should be using.
 # This defines the compiler instruction set which depends on the machine type.
 config CPU_32v3
@@ -608,7 +615,7 @@ config ARCH_DMA_ADDR_T_64BIT
 	bool

 config ARM_THUMB
-	bool "Support Thumb user binaries"
+	bool "Support Thumb user binaries" if !CPU_THUMBONLY
 	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
 	default y
 	help
--
cgit v1.2.3

From ae8a8b9553bd3906af74ff4e8d763904d20ab4e5 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 3 Apr 2013 17:16:57 +0100
Subject: ARM: 7691/1: mm: kill unused TLB_CAN_READ_FROM_L1_CACHE and use ALT_SMP instead

Many ARMv7 cores have hardware page table walkers that can read the L1
cache. This is discoverable from the ID_MMFR3 register, although this
can be expensive to access from the low-level set_pte functions and is
a pain to cache, particularly with multi-cluster systems.

A useful observation is that the multi-processing extensions for ARMv7
require coherent table walks, meaning that we can make use of ALT_SMP
patching in proc-v7-* to patch away the cache flush safely for these
cores.

Reported-by: Albin Tonnerre
Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/proc-v6.S        | 2 --
 arch/arm/mm/proc-v7-2level.S | 3 ++-
 arch/arm/mm/proc-v7-3level.S | 3 ++-
 arch/arm/mm/proc-v7.S        | 4 ++--
 4 files changed, 6 insertions(+), 6 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index bcaaa8de932..a286d4712b5 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -80,12 +80,10 @@ ENTRY(cpu_v6_do_idle)
 	mov	pc, lr

 ENTRY(cpu_v6_dcache_clean_area)
-#ifndef TLB_CAN_READ_FROM_L1_CACHE
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, #D_CACHE_LINE_SIZE
 	subs	r1, r1, #D_CACHE_LINE_SIZE
 	bhi	1b
-#endif
 	mov	pc, lr

 /*
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 78f520bc0e9..9704097c450 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,8 @@ ENTRY(cpu_v7_set_pte_ext)
 ARM(	str	r3, [r0, #2048]!	)
 THUMB(	add	r0, r0, #2048		)
 THUMB(	str	r3, [r0]		)
-	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
+	ALT_SMP(mov	pc,lr)
+	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)	@ flush_pte
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_set_pte_ext)
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 6ffd78c0f9a..363027e811d 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -73,7 +73,8 @@ ENTRY(cpu_v7_set_pte_ext)
 	tst	r3, #1 << (55 - 32)		@ L_PTE_DIRTY
 	orreq	r2, #L_PTE_RDONLY
 1:	strd	r2, r3, [r0]
-	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
+	ALT_SMP(mov	pc, lr)
+	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)	@ flush_pte
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_set_pte_ext)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3a3c015f8d5..37716b0508e 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,14 +75,14 @@ ENTRY(cpu_v7_do_idle)
 ENDPROC(cpu_v7_do_idle)

 ENTRY(cpu_v7_dcache_clean_area)
-#ifndef TLB_CAN_READ_FROM_L1_CACHE
+	ALT_SMP(mov	pc, lr)			@ MP extensions imply L1 PTW
+	ALT_UP(W(nop))
 	dcache_line_size r2, r3
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, r2
 	subs	r1, r1, r2
 	bhi	1b
 	dsb
-#endif
 	mov	pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)

--
cgit v1.2.3
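The ALT_SMP()/ALT_UP() pair relies on the kernel's SMP-on-UP patching: the
SMP-friendly instruction is emitted into .text, and its address together
with the 4-byte uniprocessor replacement is recorded in a separate init
section that is walked once at boot. A rough C sketch of that fixup pass
(the real mechanism is assembler macros in asm/assembler.h plus fixup code
in head.S; the type and function names below are illustrative, not the
kernel's):

/*
 * Illustrative only: one record per ALT_SMP()/ALT_UP() site.  When the
 * kernel finds itself on a uniprocessor system, every recorded .text
 * instruction is overwritten with its UP replacement.
 */
struct alt_up_site {
	u32 *insn;	/* address of the ALT_SMP() instruction in .text */
	u32 up_insn;	/* the ALT_UP() replacement, exactly 4 bytes */
};

static void __init fixup_smp_on_up(struct alt_up_site *site,
				   struct alt_up_site *end)
{
	for (; site < end; site++)
		*site->insn = site->up_insn;
	/* the real code also performs cache maintenance so the patched
	   instructions are visible to instruction fetch */
}

For cpu_v7_set_pte_ext() the net effect is that an SMP kernel simply
returns early (the MP extensions guarantee coherent table walks), while a
UP kernel gets the original mcr flush patched back in at boot.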
From dd0f67f4747797f36f0c6bab7fed6a1f2448476d Mon Sep 17 00:00:00 2001
From: Joonsoo Kim
Date: Fri, 5 Apr 2013 03:16:14 +0100
Subject: ARM: 7693/1: mm: clean-up in order to reduce to call kmap_high_get()

In kmap_atomic(), kmap_high_get() is invoked to check for an already
mapped area. In __flush_dcache_page() and dma_cache_maint_page(), we
explicitly call kmap_high_get() before kmap_atomic() when
cache_is_vipt(), so kmap_high_get() can end up being invoked twice.
This is a useless operation, so remove one call.

v2: change cache_is_vipt() to cache_is_vipt_nonaliasing() in order to be
self-documenting

Acked-by: Nicolas Pitre
Signed-off-by: Joonsoo Kim
Signed-off-by: Russell King
---
 arch/arm/mm/dma-mapping.c | 15 ++++++++-------
 arch/arm/mm/flush.c       | 15 +++++++++------
 2 files changed, 17 insertions(+), 13 deletions(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c7e3759f16d..b47dd48d863 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -822,16 +822,17 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 		if (PageHighMem(page)) {
 			if (len + offset > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
-			vaddr = kmap_high_get(page);
-			if (vaddr) {
-				vaddr += offset;
-				op(vaddr, len, dir);
-				kunmap_high(page);
-			} else if (cache_is_vipt()) {
-				/* unmapped pages might still be cached */
+
+			if (cache_is_vipt_nonaliasing()) {
 				vaddr = kmap_atomic(page);
 				op(vaddr + offset, len, dir);
 				kunmap_atomic(vaddr);
+			} else {
+				vaddr = kmap_high_get(page);
+				if (vaddr) {
+					op(vaddr + offset, len, dir);
+					kunmap_high(page);
+				}
 			}
 		} else {
 			vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 1c8f7f56417..0d473cce501 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -170,15 +170,18 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	if (!PageHighMem(page)) {
 		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 	} else {
-		void *addr = kmap_high_get(page);
-		if (addr) {
-			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-			kunmap_high(page);
-		} else if (cache_is_vipt()) {
-			/* unmapped pages might still be cached */
+		void *addr;
+
+		if (cache_is_vipt_nonaliasing()) {
 			addr = kmap_atomic(page);
 			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 			kunmap_atomic(addr);
+		} else {
+			addr = kmap_high_get(page);
+			if (addr) {
+				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+				kunmap_high(page);
+			}
 		}
 	}

--
cgit v1.2.3
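Both hunks converge on the same shape, which is worth spelling out:
kmap_atomic() always creates a temporary mapping, which is only safe by
construction on a non-aliasing VIPT cache, while kmap_high_get() merely
returns an existing permanent mapping (or NULL), the only alias that may be
touched on an aliasing cache. A sketch of the shared pattern as a
standalone helper (the helper name and the op callback are illustrative,
not kernel API; the kernel open-codes this in both call sites):

/* Apply a cache-maintenance callback to one highmem page. */
static void highmem_page_maint(struct page *page,
			       void (*op)(void *addr, size_t size))
{
	void *addr;

	if (cache_is_vipt_nonaliasing()) {
		/* any kernel alias is coherent: mapping on the fly is fine */
		addr = kmap_atomic(page);
		op(addr, PAGE_SIZE);
		kunmap_atomic(addr);
	} else {
		/* only an already existing mapping may be touched */
		addr = kmap_high_get(page);	/* NULL if unmapped */
		if (addr) {
			op(addr, PAGE_SIZE);
			kunmap_high(page);
		}
	}
}

On the non-aliasing path the explicit kmap_high_get() is gone, so only the
one inside kmap_atomic() remains, which is exactly the duplicate call the
commit message removes.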
From b361d61dc1aaa9bdac0a0995e443c12146d916fd Mon Sep 17 00:00:00 2001
From: Gregory CLEMENT
Date: Tue, 9 Apr 2013 13:37:20 +0100
Subject: ARM: 7695/1: mvebu: Enable pj4b on LPAE compilations

pj4b CPUs are LPAE capable, so enable them on LPAE compilations.

Signed-off-by: Lior Amsalem
Tested-by: Franklin
Signed-off-by: Gregory CLEMENT
Signed-off-by: Russell King
---
 arch/arm/mm/proc-v7.S | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 37716b0508e..4fa28acaf7f 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -402,6 +402,8 @@ __v7_ca9mp_proc_info:
 		__v7_proc __v7_ca9mp_setup
 	.size	__v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info

+#endif	/* CONFIG_ARM_LPAE */
+
 	/*
 	 * Marvell PJ4B processor.
 	 */
@@ -411,7 +413,6 @@ __v7_pj4b_proc_info:
 	.long	0xfffffff0
 		__v7_proc __v7_pj4b_setup
 	.size	__v7_pj4b_proc_info, . - __v7_pj4b_proc_info
-#endif	/* CONFIG_ARM_LPAE */

 	/*
 	 * ARM Ltd. Cortex A7 processor.
--
cgit v1.2.3