From 1baa82f48030f38d1895301f1ec93acbcb3d15db Mon Sep 17 00:00:00 2001 From: Peter Crosthwaite Date: Mon, 2 Mar 2015 19:19:14 +0000 Subject: arm64: Implement cpu_relax as yield ARM64 has the yield nop hint which has the intended semantics of cpu_relax. Implement. The immediate application is ARM CPU emulators. An emulator can take advantage of the yield hint to de-prioritise an emulated CPU in favor of other emulation tasks. QEMU A64 SMP emulation has yield awareness, and sees a significant boot time performance increase with this change. Signed-off-by: Peter Crosthwaite Acked-by: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/processor.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 20e9591a60cf..d2c37a1df0eb 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -127,7 +127,11 @@ extern void release_thread(struct task_struct *); unsigned long get_wchan(struct task_struct *p); -#define cpu_relax() barrier() +static inline void cpu_relax(void) +{ + asm volatile("yield" ::: "memory"); +} + #define cpu_relax_lowlatency() cpu_relax() /* Thread switching */ -- cgit v1.2.3 From 137650aad96c9594683445e41afa8ac5a2097520 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 Mar 2015 16:14:34 +0000 Subject: arm64: apply alternatives for !SMP kernels Currently we only perform alternative patching for kernels built with CONFIG_SMP, as we call apply_alternatives_all() in smp.c, which is only built for CONFIG_SMP. Thus !SMP kernels may not have necessary alternatives patched in. This patch ensures that we call apply_alternatives_all() once all CPUs are booted, even for !SMP kernels, by having the smp_init_cpus() stub call this for !SMP kernels via up_late_init. A new wrapper, do_post_cpus_up_work, is added so we can hook other calls here later (e.g. boot mode logging). Cc: Andre Przywara Cc: Catalin Marinas Fixes: e039ee4ee3fcf174 ("arm64: add alternative runtime patching") Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/smp_plat.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h index 59e282311b58..8dcd61e32176 100644 --- a/arch/arm64/include/asm/smp_plat.h +++ b/arch/arm64/include/asm/smp_plat.h @@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void) extern u64 __cpu_logical_map[NR_CPUS]; #define cpu_logical_map(cpu) __cpu_logical_map[cpu] +void __init do_post_cpus_up_work(void); + #endif /* __ASM_SMP_PLAT_H */ -- cgit v1.2.3 From 18ccb0cab49ef7868eaf9504f257e1a84683dbbd Mon Sep 17 00:00:00 2001 From: Andreas Schwab Date: Mon, 16 Mar 2015 16:32:22 +0000 Subject: arm64: fix implementation of mmap2 compat syscall The arm mmap2 syscall takes the offset in units of 4K, thus with 64K pages the offset needs to be scaled to units of pages. 
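For reference, the conversion the new wrapper has to perform is just a right shift of the AArch32 caller's 4K-unit offset into units of the kernel's PAGE_SIZE before handing it to sys_mmap_pgoff(). The wrapper added by this series lives in the 32-bit entry assembly, so the C function below is an illustrative sketch only (its name is hypothetical):

#include <linux/syscalls.h>
#include <asm/page.h>

/* Illustrative only: rescale the 4K-unit mmap2 offset passed by AArch32
 * userspace into units of the kernel's PAGE_SIZE. */
asmlinkage long example_compat_mmap2(unsigned long addr, unsigned long len,
                                     unsigned long prot, unsigned long flags,
                                     unsigned long fd, unsigned long pgoff)
{
#if PAGE_SHIFT > 12
        /* e.g. with 64K pages, 16 mmap2 units make up one page */
        pgoff >>= PAGE_SHIFT - 12;
#endif
        return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

With 4K pages no rescaling is needed at all, which is why the PAGE_SHIFT check can stay local to the wrapper.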
Signed-off-by: Andreas Schwab Signed-off-by: Alexander Graf [will: removed redundant lr parameter, localised PAGE_SHIFT #if check] Signed-off-by: Will Deacon --- arch/arm64/include/asm/unistd32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 27224426e0bf..cef934a90f17 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -406,7 +406,7 @@ __SYSCALL(__NR_vfork, sys_vfork) #define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ __SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */ #define __NR_mmap2 192 -__SYSCALL(__NR_mmap2, sys_mmap_pgoff) +__SYSCALL(__NR_mmap2, compat_sys_mmap2_wrapper) #define __NR_truncate64 193 __SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper) #define __NR_ftruncate64 194 -- cgit v1.2.3 From 19fc577579c86fec6e2523baeb457b02e939796f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 4 Mar 2015 13:27:34 +0000 Subject: arm64: fixmap: make FIX_TEXT_POKE0 permanent The FIX_TEXT_POKE0 is currently at the end of the temporary fixmap slots, despite the fact that it can be used at any point during runtime (e.g. for poking the text of loaded modules), and thus should be a permanent fixmap slot (as is the case on arm and x86). This patch moves FIX_TEXT_POKE0 into the set of permanent fixmap slots. Cc: Catalin Marinas Cc: Kees Cook Acked-by: Ard Biesheuvel Acked-by: Laura Abbott Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/fixmap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index defa0ff98250..926495686554 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -33,6 +33,7 @@ enum fixed_addresses { FIX_HOLE, FIX_EARLYCON_MEM_BASE, + FIX_TEXT_POKE0, __end_of_permanent_fixed_addresses, /* @@ -49,7 +50,6 @@ enum fixed_addresses { FIX_BTMAP_END = __end_of_permanent_fixed_addresses, FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, - FIX_TEXT_POKE0, __end_of_fixed_addresses }; -- cgit v1.2.3 From a591ede4cd1cac02d3398a9ad332bd0bba460efe Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 18 Mar 2015 14:55:20 +0000 Subject: arm64: Get rid of struct cpu_table struct cpu_table is an artifact left from the (very) early days of the arm64 port, and its only real use is to allow the most beautiful "AArch64 Processor" string to be displayed at boot time. Really? Yes, really. Let's get rid of it. In order to avoid another BogoMips-gate, the aforementioned string is preserved. Acked-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/include/asm/cputable.h | 30 ------------------------------ 1 file changed, 30 deletions(-) delete mode 100644 arch/arm64/include/asm/cputable.h (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/cputable.h b/arch/arm64/include/asm/cputable.h deleted file mode 100644 index e3bd983d3661..000000000000 --- a/arch/arm64/include/asm/cputable.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * arch/arm64/include/asm/cputable.h - * - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ -#ifndef __ASM_CPUTABLE_H -#define __ASM_CPUTABLE_H - -struct cpu_info { - unsigned int cpu_id_val; - unsigned int cpu_id_mask; - const char *cpu_name; - unsigned long (*cpu_setup)(void); -}; - -extern struct cpu_info *lookup_processor_type(unsigned int); - -#endif -- cgit v1.2.3 From b784a5d97d0af4835dd0125a3e0e5d0fd48128d6 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 4 Mar 2015 19:45:38 +0100 Subject: arm64: add macros for common adrp usages The adrp instruction is mostly used in combination with either an add, a ldr or a str instruction with the low bits of the referenced symbol in the 12-bit immediate of the followup instruction. Introduce the macros adr_l, ldr_l and str_l that encapsulate these common patterns. Tested-by: Mark Rutland Reviewed-by: Mark Rutland Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 48 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 750bac4e637e..144b64ad96c3 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -159,4 +159,52 @@ lr .req x30 // link register orr \rd, \lbits, \hbits, lsl #32 .endm +/* + * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where + * <symbol> is within the range +/- 4 GB of the PC. + */ + /* + * @dst: destination register (64 bit wide) + * @sym: name of the symbol + * @tmp: optional scratch register to be used if <dst> == sp, which + * is not allowed in an adrp instruction + */ + .macro adr_l, dst, sym, tmp= + .ifb \tmp + adrp \dst, \sym + add \dst, \dst, :lo12:\sym + .else + adrp \tmp, \sym + add \dst, \tmp, :lo12:\sym + .endif + .endm + + /* + * @dst: destination register (32 or 64 bit wide) + * @sym: name of the symbol + * @tmp: optional 64-bit scratch register to be used if <dst> is a + * 32-bit wide register, in which case it cannot be used to hold + * the address + */ + .macro ldr_l, dst, sym, tmp= + .ifb \tmp + adrp \dst, \sym + ldr \dst, [\dst, :lo12:\sym] + .else + adrp \tmp, \sym + ldr \dst, [\tmp, :lo12:\sym] + .endif + .endm + + /* + * @src: source register (32 or 64 bit wide) + * @sym: name of the symbol + * @tmp: mandatory 64-bit scratch register to calculate the address + * while <src> needs to be preserved. + */ + .macro str_l, src, sym, tmp + adrp \tmp, \sym + str \src, [\tmp, :lo12:\sym] + .endm + #endif /* __ASM_ASSEMBLER_H */ -- cgit v1.2.3 From ce47fbb7c8956742a6de06b9706b1c6236339f51 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 19 Mar 2015 19:19:40 +0000 Subject: arm64: proc: remove unused cpu_get_pgd macro cpu_get_pgd isn't used anywhere and is Probably Not What You Want. Remove it before anybody decides to use it.
Signed-off-by: Will Deacon --- arch/arm64/include/asm/proc-fns.h | 9 --------- 1 file changed, 9 deletions(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 9a8fd84f8fb2..4d9ede7b6361 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -41,15 +41,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) -#define cpu_get_pgd() \ -({ \ - unsigned long pg; \ - asm("mrs %0, ttbr0_el1\n" \ - : "=r" (pg)); \ - pg &= ~0xffff000000003ffful; \ - (pgd_t *)phys_to_virt(pg); \ -}) - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* __ASM_PROCFNS_H */ -- cgit v1.2.3 From dd006da21646f1c86f0242eb8f527d093303127a Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 19 Mar 2015 16:42:27 +0000 Subject: arm64: mm: increase VA range of identity map The page size and the number of translation levels, and hence the supported virtual address range, are build-time configurables on arm64 whose optimal values are use case dependent. However, in the current implementation, if the system's RAM is located at a very high offset, the virtual address range needs to reflect that merely because the identity mapping, which is only used to enable or disable the MMU, requires the extended virtual range to map the physical memory at an equal virtual offset. This patch relaxes that requirement, by increasing the number of translation levels for the identity mapping only, and only when actually needed, i.e., when system RAM's offset is found to be out of reach at runtime. Tested-by: Laura Abbott Reviewed-by: Catalin Marinas Tested-by: Marc Zyngier Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/include/asm/mmu_context.h | 43 ++++++++++++++++++++++++++++++++++ arch/arm64/include/asm/page.h | 6 +++-- arch/arm64/include/asm/pgtable-hwdef.h | 7 +++++- 3 files changed, 53 insertions(+), 3 deletions(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index a9eee33dfa62..ecf2d060036b 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -64,6 +64,49 @@ static inline void cpu_set_reserved_ttbr0(void) : "r" (ttbr)); } +/* + * TCR.T0SZ value to use when the ID map is active. Usually equals + * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in + * physical memory, in which case it will be smaller. + */ +extern u64 idmap_t0sz; + +static inline bool __cpu_uses_extended_idmap(void) +{ + return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) && + unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS))); +} + +static inline void __cpu_set_tcr_t0sz(u64 t0sz) +{ + unsigned long tcr; + + if (__cpu_uses_extended_idmap()) + asm volatile ( + " mrs %0, tcr_el1 ;" + " bfi %0, %1, %2, %3 ;" + " msr tcr_el1, %0 ;" + " isb" + : "=&r" (tcr) + : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH)); +} + +/* + * Set TCR.T0SZ to the value appropriate for activating the identity map. 
+ */ +static inline void cpu_set_idmap_tcr_t0sz(void) +{ + __cpu_set_tcr_t0sz(idmap_t0sz); +} + +/* + * Set TCR.T0SZ to its default value (based on VA_BITS) + */ +static inline void cpu_set_default_tcr_t0sz(void) +{ + __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS)); +} + static inline void switch_new_context(struct mm_struct *mm) { unsigned long flags; diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 22b16232bd60..3d02b1869eb8 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -33,7 +33,9 @@ * image. Both require pgd, pud (4 levels only) and pmd tables to (section) * map the kernel. With the 64K page configuration, swapper and idmap need to * map to pte level. The swapper also maps the FDT (see __create_page_tables - * for more information). + * for more information). Note that the number of ID map translation levels + * could be increased on the fly if system RAM is out of reach for the default + * VA range, so 3 pages are reserved in all cases. */ #ifdef CONFIG_ARM64_64K_PAGES #define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS) @@ -42,7 +44,7 @@ #endif #define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE) -#define IDMAP_DIR_SIZE (SWAPPER_DIR_SIZE) +#define IDMAP_DIR_SIZE (3 * PAGE_SIZE) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 5f930cc9ea83..847e864202cc 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -143,7 +143,12 @@ /* * TCR flags. */ -#define TCR_TxSZ(x) (((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0)) +#define TCR_T0SZ_OFFSET 0 +#define TCR_T1SZ_OFFSET 16 +#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET) +#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET) +#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x)) +#define TCR_TxSZ_WIDTH 6 #define TCR_IRGN_NC ((UL(0) << 8) | (UL(0) << 24)) #define TCR_IRGN_WBWA ((UL(1) << 8) | (UL(1) << 24)) #define TCR_IRGN_WT ((UL(2) << 8) | (UL(2) << 24)) -- cgit v1.2.3 From e4c5a6851058386c9e109ad529717a23173918bc Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 19 Mar 2015 16:42:28 +0000 Subject: arm64: KVM: use ID map with increased VA range if required This patch modifies the HYP init code so it can deal with system RAM residing at an offset which exceeds the reach of VA_BITS. Like for EL1, this involves configuring an additional level of translation for the ID map. However, in case of EL2, this implies that all translations use the extra level, as we cannot seamlessly switch between translation tables with different numbers of translation levels. So add an extra translation table at the root level. Since the ID map and the runtime HYP map are guaranteed not to overlap, they can share this root level, and we can essentially merge these two tables into one. 
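For context, the consumer of the two helpers added below lives outside arch/arm64/include, so it does not show up in this filtered view. Roughly, the KVM init code is expected to allocate the shared root table and call __kvm_extend_hypmap() only when the extended ID map is in use. The sketch below is illustrative; the variable names are assumed to mirror those kept by the KVM MMU code:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/kvm_mmu.h>

/* Assumed to mirror the page-table pointers kept by the KVM MMU code. */
static pgd_t *boot_hyp_pgd, *hyp_pgd, *merged_hyp_pgd;
static unsigned long hyp_idmap_start;

static int example_setup_hyp_root(void)
{
        if (!__kvm_cpu_uses_extended_idmap())
                return 0;

        /*
         * One zeroed page acts as the shared root level: entry 0 points at
         * the runtime HYP map, and the entry indexed by the idmap address
         * points at the boot HYP map containing the init code.
         */
        merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        if (!merged_hyp_pgd)
                return -ENOMEM;

        __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
                            hyp_idmap_start);
        return 0;
}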
Tested-by: Marc Zyngier Reviewed-by: Marc Zyngier Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/include/asm/kvm_mmu.h | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 6458b5373142..edfe6864bc28 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -68,6 +68,8 @@ #include #include #include +#include +#include #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) @@ -305,5 +307,36 @@ static inline void __kvm_flush_dcache_pud(pud_t pud) void kvm_set_way_flush(struct kvm_vcpu *vcpu); void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); +static inline bool __kvm_cpu_uses_extended_idmap(void) +{ + return __cpu_uses_extended_idmap(); +} + +static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd, + pgd_t *hyp_pgd, + pgd_t *merged_hyp_pgd, + unsigned long hyp_idmap_start) +{ + int idmap_idx; + + /* + * Use the first entry to access the HYP mappings. It is + * guaranteed to be free, otherwise we wouldn't use an + * extended idmap. + */ + VM_BUG_ON(pgd_val(merged_hyp_pgd[0])); + merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE); + + /* + * Create another extended level entry that points to the boot HYP map, + * which contains an ID mapping of the HYP init code. We essentially + * merge the boot and runtime HYP maps by doing so, but they don't + * overlap anyway, so this is fine. + */ + idmap_idx = hyp_idmap_start >> VA_BITS; + VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx])); + merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE); +} + #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ -- cgit v1.2.3 From d5efd9cc9cf2e422d064c912c7d5d985f52c1b2c Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 6 Mar 2015 11:54:10 +0000 Subject: arm64: pmu: add support for interrupt-affinity property Historically, the PMU devicetree bindings have expected SPIs to be listed in order of *logical* CPU number. This is problematic for bootloaders, especially when the boot CPU (logical ID 0) isn't listed first in the devicetree. This patch adds a new optional property, interrupt-affinity, to the PMU node which allows the interrupt affinity to be described using a list of phandles to CPU nodes, with each entry in the list corresponding to the SPI at the same index in the interrupts property. Cc: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/pmu.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h index e6f087806aaf..b7710a59672c 100644 --- a/arch/arm64/include/asm/pmu.h +++ b/arch/arm64/include/asm/pmu.h @@ -44,6 +44,7 @@ struct pmu_hw_events { struct arm_pmu { struct pmu pmu; cpumask_t active_irqs; + int *irq_affinity; const char *name; irqreturn_t (*handle_irq)(int irq_num, void *dev); void (*enable)(struct hw_perf_event *evt, int idx); -- cgit v1.2.3 From 0978fb25f86b7595821cee6955679250d47c6438 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 27 Mar 2015 13:09:21 +0000 Subject: arm64: insn: Add aarch64_insn_decode_immediate Patching an instruction sometimes requires extracting the immediate field from this instruction. To facilitate this, and avoid potential duplication of code, add aarch64_insn_decode_immediate as the reciprocal to aarch64_insn_encode_immediate.
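In practice the new decode helper pairs with the existing encode helper whenever an immediate field has to be read, adjusted and written back. A minimal illustrative sketch (the wrapper function is hypothetical; the two prototypes match the declarations in the diff below, and AARCH64_INSN_IMM_ADR is one of the existing aarch64_insn_imm_type values):

#include <asm/insn.h>

/* Illustrative only: pull the ADR-form immediate out of an instruction,
 * adjust it, and re-encode it into the same field. */
static u32 example_bump_adr_imm(u32 insn, u64 delta)
{
        u64 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn);

        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
                                             imm + delta);
}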
Acked-by: Will Deacon Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm64/include/asm/insn.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index d2f49423c5dc..f81b328d9cf4 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -285,6 +285,7 @@ bool aarch64_insn_is_nop(u32 insn); int aarch64_insn_read(void *addr, u32 *insnp); int aarch64_insn_write(void *addr, u32 insn); enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); +u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn); u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, u32 insn, u64 imm); u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, -- cgit v1.2.3 From 359b706473b47da3c93bd99fd10d798fe411ab67 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 27 Mar 2015 13:09:23 +0000 Subject: arm64: Extract feature parsing code from cpu_errata.c As we detect more architectural features at runtime, it makes sense to reuse the existing framework whilst avoiding calling a feature an erratum... This patch extracts the core capability parsing, moves it into a new file (cpufeature.c), and lets the CPU errata detection code use it. Reviewed-by: Andre Przywara Acked-by: Will Deacon Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpufeature.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index b6c16d5f622f..6ae35d160464 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -28,6 +28,18 @@ #ifndef __ASSEMBLY__ +struct arm64_cpu_capabilities { + const char *desc; + u16 capability; + bool (*matches)(const struct arm64_cpu_capabilities *); + union { + struct { /* To be used for erratum handling only */ + u32 midr_model; + u32 midr_range_min, midr_range_max; + }; + }; +}; + extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); static inline bool cpu_have_feature(unsigned int num) @@ -51,7 +63,10 @@ static inline void cpus_set_cap(unsigned int num) __set_bit(num, cpu_hwcaps); } +void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps, + const char *info); void check_local_cpu_errata(void); +void check_local_cpu_features(void); bool cpu_supports_mixed_endian_el0(void); bool system_supports_mixed_endian_el0(void); -- cgit v1.2.3 From cc3979b54d5f1d5b5059b404892888c304d28080 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 31 Mar 2015 00:46:00 +0100 Subject: arm64: Use bool function return values of true/false not 1/0 Use the normal return values for bool functions Signed-off-by: Joe Perches Signed-off-by: Will Deacon --- arch/arm64/include/asm/dma-mapping.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 6932bb57dba0..9437e3dc5833 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -97,7 +97,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask) static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) - return 0; + return false; return addr + size - 1 <= *dev->dma_mask; } -- cgit v1.2.3 From 905e8c5dcaa147163672b06fe9dcb5abaacbc711 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 23 Mar 2015
19:07:02 +0000 Subject: arm64: errata: add workaround for cortex-a53 erratum #845719 When running a compat (AArch32) userspace on Cortex-A53, a load at EL0 from a virtual address that matches the bottom 32 bits of the virtual address used by a recent load at (AArch64) EL1 might return incorrect data. This patch works around the issue by writing to the contextidr_el1 register on the exception return path when returning to a 32-bit task. This workaround is patched in at runtime based on the MIDR value of the processor. Reviewed-by: Marc Zyngier Tested-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpufeature.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/arm64/include') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 6ae35d160464..82cb9f98ba1a 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -23,8 +23,9 @@ #define ARM64_WORKAROUND_CLEAN_CACHE 0 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1 +#define ARM64_WORKAROUND_845719 2 -#define ARM64_NCAPS 2 +#define ARM64_NCAPS 3 #ifndef __ASSEMBLY__ -- cgit v1.2.3
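Tying the new capability bit back to the framework introduced by the cpufeature patch above: the workaround is enabled by a MIDR-matched entry in the errata table, which check_local_cpu_errata() feeds into check_cpu_capabilities(). The entry below is only a sketch; the is_affected_midr_range() helper, the MIDR_CORTEX_A53 macro and the exact revision range follow the style of the existing cpu_errata.c code and are assumptions here, not part of these diffs:

#include <asm/cpufeature.h>
#include <asm/cputype.h>

/* Assumed helper in the style of cpu_errata.c: match a MIDR range. */
extern bool is_affected_midr_range(const struct arm64_cpu_capabilities *entry);

static const struct arm64_cpu_capabilities example_errata[] = {
        {
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                .matches = is_affected_midr_range,
                /* Cortex-A53, r0p0..r0p4 (revision range assumed) */
                .midr_model = MIDR_CORTEX_A53,
                .midr_range_min = 0x00,
                .midr_range_max = 0x04,
        },
        { /* sentinel */ }
};

Once the capability bit is set, the runtime alternatives patching added elsewhere in the series keys the contextidr_el1 write in the compat exception return path off ARM64_WORKAROUND_845719.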