From c3545236e8740ab556022f87685d18503c86e187 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Mon, 1 Oct 2012 10:59:29 +0100 Subject: ARM: 7546/1: cache-l2x0: add an optional register to save/restore Tested-and-Reviewed-by: Yehuda Yitschak Tested-and-Reviewed-by: Lior Amsalem Signed-off-by: Gregory CLEMENT Signed-off-by: Russell King --- arch/arm/include/asm/hardware/cache-l2x0.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index c4c87bc1223..5f2c7b44fda 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -126,6 +126,7 @@ struct l2x0_regs { unsigned long filter_end; unsigned long prefetch_ctrl; unsigned long pwr_ctrl; + unsigned long ctrl; }; extern struct l2x0_regs l2x0_saved_regs; -- cgit v1.2.3 From 07c9249f1fa90cc8189bed44c0bcece664596a72 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 16 Oct 2012 18:50:00 +0100 Subject: ARM: 7554/1: VIC: use irq_domain_add_simple() Instead of allocating descriptors on-the-fly for the device tree initialization case, use irq_domain_add_simple() which will take care of this if you pass negative as the first_irq. Alter the signature of __vic_init() to pass the first_irq as signed so this works as expected. Switching the VIC to use irq_domain_add_simple() also has the upside of displaying the same WARNING when you boot with pre-allocated descriptors on systems using SPARSE_IRQ but yet not using device tree. Cc: Grant Likely Acked-by: Rob Herring Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/include/asm/hardware/vic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h index e14af1a1a32..2bebad36fc8 100644 --- a/arch/arm/include/asm/hardware/vic.h +++ b/arch/arm/include/asm/hardware/vic.h @@ -47,7 +47,7 @@ struct device_node; struct pt_regs; -void __vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, +void __vic_init(void __iomem *base, int irq_start, u32 vic_sources, u32 resume_sources, struct device_node *node); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); int vic_of_init(struct device_node *node, struct device_node *parent); -- cgit v1.2.3 From b5466f8728527a05a493cc4abe9e6f034a1bbaab Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 15 Jun 2012 14:47:31 +0100 Subject: ARM: mm: remove IPI broadcasting on ASID rollover ASIDs are allocated to MMU contexts based on a rolling counter. This means that after 255 allocations we must invalidate all existing ASIDs via an expensive IPI mechanism to synchronise all of the online CPUs and ensure that all tasks execute with an ASID from the new generation. This patch changes the rollover behaviour so that we rely instead on the hardware broadcasting of the TLB invalidation to avoid the IPI calls. This works by keeping track of the active ASID on each core, which is then reserved in the case of a rollover so that currently scheduled tasks can continue to run. For cores without hardware TLB broadcasting, we keep track of pending flushes in a cpumask, so cores can flush their local TLB before scheduling a new mm. 
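[Illustration, not part of the patch: the new 64-bit context.id packs a rollover generation in its upper bits and the 8-bit hardware ASID in its low bits, matching the ASID_BITS/ASID_MASK definitions in the hunk below. A task whose generation bits differ from the current global generation gets a fresh ASID at switch time; tasks from the current generation switch directly.]

#include <linux/types.h>

#define EX_ASID_BITS	8
#define EX_ASID_MASK	((~0ULL) << EX_ASID_BITS)

/* Upper bits: allocation generation, bumped on every rollover */
static inline u64 ex_asid_generation(u64 context_id)
{
	return context_id & EX_ASID_MASK;
}

/* Low 8 bits: the value actually programmed into CONTEXTIDR */
static inline u64 ex_hw_asid(u64 context_id)
{
	return context_id & ~EX_ASID_MASK;
}
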
Reviewed-by: Catalin Marinas Tested-by: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm/include/asm/mmu.h | 11 ++--- arch/arm/include/asm/mmu_context.h | 82 ++------------------------------------ 2 files changed, 7 insertions(+), 86 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index 14965658a92..5b53b53ab5c 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -5,18 +5,15 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID - unsigned int id; - raw_spinlock_t id_lock; + u64 id; #endif unsigned int kvm_seq; } mm_context_t; #ifdef CONFIG_CPU_HAS_ASID -#define ASID(mm) ((mm)->context.id & 255) - -/* init_mm.context.id_lock should be initialized. */ -#define INIT_MM_CONTEXT(name) \ - .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock), +#define ASID_BITS 8 +#define ASID_MASK ((~0ULL) << ASID_BITS) +#define ASID(mm) ((mm)->context.id & ~ASID_MASK) #else #define ASID(mm) (0) #endif diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 0306bc642c0..a64f61cb23d 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -24,84 +24,8 @@ void __check_kvm_seq(struct mm_struct *mm); #ifdef CONFIG_CPU_HAS_ASID -/* - * On ARMv6, we have the following structure in the Context ID: - * - * 31 7 0 - * +-------------------------+-----------+ - * | process ID | ASID | - * +-------------------------+-----------+ - * | context ID | - * +-------------------------------------+ - * - * The ASID is used to tag entries in the CPU caches and TLBs. - * The context ID is used by debuggers and trace logic, and - * should be unique within all running processes. - */ -#define ASID_BITS 8 -#define ASID_MASK ((~0) << ASID_BITS) -#define ASID_FIRST_VERSION (1 << ASID_BITS) - -extern unsigned int cpu_last_asid; - -void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); -void __new_context(struct mm_struct *mm); -void cpu_set_reserved_ttbr0(void); - -static inline void switch_new_context(struct mm_struct *mm) -{ - unsigned long flags; - - __new_context(mm); - - local_irq_save(flags); - cpu_switch_mm(mm->pgd, mm); - local_irq_restore(flags); -} - -static inline void check_and_switch_context(struct mm_struct *mm, - struct task_struct *tsk) -{ - if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq)) - __check_kvm_seq(mm); - - /* - * Required during context switch to avoid speculative page table - * walking with the wrong TTBR. - */ - cpu_set_reserved_ttbr0(); - - if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) - /* - * The ASID is from the current generation, just switch to the - * new pgd. This condition is only true for calls from - * context_switch() and interrupts are already disabled. - */ - cpu_switch_mm(mm->pgd, mm); - else if (irqs_disabled()) - /* - * Defer the new ASID allocation until after the context - * switch critical region since __new_context() cannot be - * called with interrupts disabled (it sends IPIs). - */ - set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); - else - /* - * That is a direct call to switch_mm() or activate_mm() with - * interrupts enabled and a new context. 
- */ - switch_new_context(mm); -} - -#define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0) - -#define finish_arch_post_lock_switch \ - finish_arch_post_lock_switch -static inline void finish_arch_post_lock_switch(void) -{ - if (test_and_clear_thread_flag(TIF_SWITCH_MM)) - switch_new_context(current->mm); -} +void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); +#define init_new_context(tsk,mm) ({ mm->context.id = 0; }) #else /* !CONFIG_CPU_HAS_ASID */ @@ -143,6 +67,7 @@ static inline void finish_arch_post_lock_switch(void) #endif /* CONFIG_CPU_HAS_ASID */ #define destroy_context(mm) do { } while(0) +#define activate_mm(prev,next) switch_mm(prev, next, NULL) /* * This is called when "tsk" is about to enter lazy TLB mode. @@ -186,6 +111,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, } #define deactivate_mm(tsk,mm) do { } while (0) -#define activate_mm(prev,next) switch_mm(prev, next, NULL) #endif -- cgit v1.2.3 From b8db6b886a1fecd6a5b1d13b190f3149247305ef Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Tue, 6 Nov 2012 01:58:07 +0100 Subject: ARM: 7547/4: cache-l2x0: add support for Aurora L2 cache ctrl Aurora Cache Controller was designed to be compatible with the ARM L2 Cache Controller. It comes with some difference or improvement such as: - no cache id part number available through hardware (need to get it by the DT). - always write through mode available. - two flavors of the controller outer cache and system cache (meaning maintenance operations on L1 are broadcasted to the L2 and L2 performs the same operation). - in outer cache mode, the cache maintenance operations are improved and can be done on a range inside a page and are not limited to a cache line. Tested-and-Reviewed-by: Lior Amsalem Signed-off-by: Gregory CLEMENT Signed-off-by: Yehuda Yitschak Reviewed-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/hardware/cache-l2x0.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 5f2c7b44fda..3b2c40b5bfa 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -102,6 +102,10 @@ #define L2X0_ADDR_FILTER_EN 1 +#define L2X0_CTRL_EN 1 + +#define L2X0_WAY_SIZE_SHIFT 3 + #ifndef __ASSEMBLY__ extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask); #if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF) -- cgit v1.2.3 From e50c54189f7c6211a99539156e3978474f0b1a0b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 13 Sep 2012 16:40:46 +0100 Subject: ARM: perf: add guest vs host discrimination Add minimal guest support to perf, so it can distinguish whether the PMU interrupt was in the host or the guest, as well as collecting some very basic information (guest PC, user vs kernel mode). This is not feature complete though, as it doesn't support backtracing in the guest. Based on the x86 implementation, tested with KVM/ARM. 
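[The hunk below only exports the new hooks; as a rough sketch of what a perf_misc_flags() implementation following the x86 pattern mentioned above typically looks like (illustration only, not the exact code from arch/arm/kernel):]

#include <linux/perf_event.h>
#include <asm/ptrace.h>

unsigned long example_perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* PMU interrupt taken while a guest was running */
		misc |= perf_guest_cbs->is_user_mode()
			? PERF_RECORD_MISC_GUEST_USER
			: PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		misc |= user_mode(regs) ? PERF_RECORD_MISC_USER
					: PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}
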
Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm/include/asm/perf_event.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index 625cd621a43..00416edecea 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -21,4 +21,9 @@ #define C(_x) PERF_COUNT_HW_CACHE_##_x #define CACHE_OP_UNSUPPORTED 0xFFFF +struct pt_regs; +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_misc_flags(struct pt_regs *regs); +#define perf_misc_flags(regs) perf_misc_flags(regs) + #endif /* __ARM_PERF_EVENT_H__ */ -- cgit v1.2.3 From ed6f2a522398c26559f4da23a80aa6195e6284c7 Mon Sep 17 00:00:00 2001 From: Sudeep KarkadaNagesha Date: Mon, 30 Jul 2012 12:00:02 +0100 Subject: ARM: perf: consistently use struct perf_event in arm_pmu functions The arm_pmu functions have wildly varied parameters which can often be derived from struct perf_event. This patch changes the arm_pmu function prototypes so that struct perf_event pointers are passed in preference to fields that can be derived from the event. Signed-off-by: Sudeep KarkadaNagesha Signed-off-by: Will Deacon --- arch/arm/include/asm/pmu.h | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index a26170dce02..a209a384dbc 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -67,19 +67,19 @@ struct arm_pmu { cpumask_t active_irqs; char *name; irqreturn_t (*handle_irq)(int irq_num, void *dev); - void (*enable)(struct hw_perf_event *evt, int idx); - void (*disable)(struct hw_perf_event *evt, int idx); + void (*enable)(struct perf_event *event); + void (*disable)(struct perf_event *event); int (*get_event_idx)(struct pmu_hw_events *hw_events, - struct hw_perf_event *hwc); + struct perf_event *event); int (*set_event_filter)(struct hw_perf_event *evt, struct perf_event_attr *attr); - u32 (*read_counter)(int idx); - void (*write_counter)(int idx, u32 val); - void (*start)(void); - void (*stop)(void); + u32 (*read_counter)(struct perf_event *event); + void (*write_counter)(struct perf_event *event, u32 val); + void (*start)(struct arm_pmu *); + void (*stop)(struct arm_pmu *); void (*reset)(void *); - int (*request_irq)(irq_handler_t handler); - void (*free_irq)(void); + int (*request_irq)(struct arm_pmu *, irq_handler_t handler); + void (*free_irq)(struct arm_pmu *); int (*map_event)(struct perf_event *event); int num_events; atomic_t active_events; @@ -95,13 +95,9 @@ extern const struct dev_pm_ops armpmu_dev_pm_ops; int armpmu_register(struct arm_pmu *armpmu, char *name, int type); -u64 armpmu_event_update(struct perf_event *event, - struct hw_perf_event *hwc, - int idx); +u64 armpmu_event_update(struct perf_event *event); -int armpmu_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, - int idx); +int armpmu_event_set_period(struct perf_event *event); int armpmu_map_event(struct perf_event *event, const unsigned (*event_map)[PERF_COUNT_HW_MAX], -- cgit v1.2.3 From 0305230a3d92d6829db89c9e0c096d4d8733f317 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 21 Sep 2012 14:23:47 +0100 Subject: ARM: perf: consistently use arm_pmu->name for PMU name Perf has three ways to name a PMU: either by passing an explicit char *, reading arm_pmu->name or accessing arm_pmu->pmu.name. 
Just use arm_pmu->name consistently in the ARM backend. Signed-off-by: Will Deacon --- arch/arm/include/asm/pmu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index a209a384dbc..f24edad26c7 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -93,7 +93,7 @@ struct arm_pmu { extern const struct dev_pm_ops armpmu_dev_pm_ops; -int armpmu_register(struct arm_pmu *armpmu, char *name, int type); +int armpmu_register(struct arm_pmu *armpmu, int type); u64 armpmu_event_update(struct perf_event *event); -- cgit v1.2.3 From 9e962f76602dbd293a57030f4ce5a4b57853e2ea Mon Sep 17 00:00:00 2001 From: Dietmar Eggemann Date: Wed, 26 Sep 2012 17:28:47 +0100 Subject: ARM: hw_breakpoint: use CRn as argument for debug reg accessor macros The coprocessor register CRn for accesses to the debug register can be a different one than C0. Take this into account for the ARM_DBG_READ and the ARM_DBG_WRITE macro. The inline assembler calls which used a coprocessor register CRn other than C0 are replaced by the ARM_DBG_READ or ARM_DBG_WRITE macro. Tested-by: Stephen Boyd Signed-off-by: Dietmar Eggemann Signed-off-by: Will Deacon --- arch/arm/include/asm/hw_breakpoint.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h index c190bc992f0..01169dd723f 100644 --- a/arch/arm/include/asm/hw_breakpoint.h +++ b/arch/arm/include/asm/hw_breakpoint.h @@ -98,12 +98,12 @@ static inline void decode_ctrl_reg(u32 reg, #define ARM_BASE_WCR 112 /* Accessor macros for the debug registers. */ -#define ARM_DBG_READ(M, OP2, VAL) do {\ - asm volatile("mrc p14, 0, %0, c0," #M ", " #OP2 : "=r" (VAL));\ +#define ARM_DBG_READ(N, M, OP2, VAL) do {\ + asm volatile("mrc p14, 0, %0, " #N "," #M ", " #OP2 : "=r" (VAL));\ } while (0) -#define ARM_DBG_WRITE(M, OP2, VAL) do {\ - asm volatile("mcr p14, 0, %0, c0," #M ", " #OP2 : : "r" (VAL));\ +#define ARM_DBG_WRITE(N, M, OP2, VAL) do {\ + asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\ } while (0) struct notifier_block; -- cgit v1.2.3 From dbf62d50067e55a782583fe53c3d2a3d98b1f6f3 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 19 Jul 2012 11:51:05 +0100 Subject: ARM: mm: introduce L_PTE_VALID for page table entries For long-descriptor translation table formats, the ARMv7 architecture defines the last two bits of the second- and third-level descriptors to be: x0b - Invalid 01b - Block (second-level), Reserved (third-level) 11b - Table (second-level), Page (third-level) This allows us to define L_PTE_PRESENT as (3 << 0) and use this value to create ptes directly. However, when determining whether a given pte value is present in the low-level page table accessors, we only need to check the least significant bit of the descriptor, allowing us to write faulting, present entries which are required for PROT_NONE mappings. This patch introduces L_PTE_VALID, which can be used to test whether a pte should fault, and updates the low-level page table accessors accordingly. 
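[The low-bit encoding described above can be summarised as follows (editorial sketch; only bit 0 decides whether the hardware walker faults):]

/* Long-descriptor (LPAE) second/third-level descriptor types, bits [1:0] */
enum ex_lpae_desc {
	EX_DESC_INVALID	= 0x0,	/* x0b: bit 0 clear -> translation fault */
	EX_DESC_BLOCK	= 0x1,	/* 01b: block (level 2); reserved at level 3 */
	EX_DESC_PAGE	= 0x3,	/* 11b: table (level 2) / page (level 3) */
};

/* Hence a "should this entry fault?" check only needs the least significant bit */
static inline int ex_desc_is_valid(unsigned long long desc)
{
	return desc & 0x1;
}
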
Signed-off-by: Will Deacon --- arch/arm/include/asm/pgtable-2level.h | 1 + arch/arm/include/asm/pgtable-3level.h | 3 ++- arch/arm/include/asm/pgtable.h | 4 +--- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index 2317a71c8f8..c44a1ecfc28 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -115,6 +115,7 @@ * The PTE table pointer refers to the hardware entries; the "Linux" * entries are stored 1024 bytes below. */ +#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */ #define L_PTE_PRESENT (_AT(pteval_t, 1) << 0) #define L_PTE_YOUNG (_AT(pteval_t, 1) << 1) #define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index b24903549d1..e32311a9abc 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -67,7 +67,8 @@ * These bits overlap with the hardware bits but the naming is preserved for * consistency with the classic page table format. */ -#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Valid */ +#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */ +#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */ #define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */ diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 08c12312a1f..ccf34b6e990 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -203,9 +203,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) #define pte_special(pte) (0) -#define pte_present_user(pte) \ - ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ - (L_PTE_PRESENT | L_PTE_USER)) +#define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER)) #if __LINUX_ARM_ARCH__ < 6 static inline void __sync_icache_dcache(pte_t pteval) -- cgit v1.2.3 From 26ffd0d43b186b0d5186354da8714a1c2d360df0 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Sat, 1 Sep 2012 05:22:12 +0100 Subject: ARM: mm: introduce present, faulting entries for PAGE_NONE PROT_NONE mappings apply the page protection attributes defined by _P000 which translate to PAGE_NONE for ARM. These attributes specify an XN, RDONLY pte that is inaccessible to userspace. However, on kernels configured without support for domains, such a pte *is* accessible to the kernel and can be read via get_user, allowing tasks to read PROT_NONE pages via syscalls such as read/write over a pipe. This patch introduces a new software pte flag, L_PTE_NONE, that is set to identify faulting, present entries. 
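[A minimal sketch of the idea (hypothetical helper; the real conversion happens in the low-level set_pte code): when the software NONE bit is set, the hardware valid bit is dropped before the descriptor is written, so the entry faults for user and kernel accesses alike while still looking present to Linux.]

#define EX_L_PTE_VALID	(1ULL << 0)	/* hardware valid bit */
#define EX_L_PTE_NONE	(1ULL << 57)	/* software PROT_NONE marker (3-level layout) */

static inline unsigned long long ex_pte_to_hw(unsigned long long pte)
{
	if (pte & EX_L_PTE_NONE)
		pte &= ~EX_L_PTE_VALID;	/* present to Linux, faulting in hardware */
	return pte;
}
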
Signed-off-by: Will Deacon --- arch/arm/include/asm/pgtable-2level.h | 1 + arch/arm/include/asm/pgtable-3level.h | 1 + arch/arm/include/asm/pgtable.h | 6 +++--- 3 files changed, 5 insertions(+), 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index c44a1ecfc28..f97ee02386e 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -124,6 +124,7 @@ #define L_PTE_USER (_AT(pteval_t, 1) << 8) #define L_PTE_XN (_AT(pteval_t, 1) << 9) #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ +#define L_PTE_NONE (_AT(pteval_t, 1) << 11) /* * These are the memory types, defined to be compatible with diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index e32311a9abc..a3f37929940 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -77,6 +77,7 @@ #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */ #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */ #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */ +#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */ /* * To be used in assembly code with the upper page attributes. diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index ccf34b6e990..9c82f988c0e 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -73,7 +73,7 @@ extern pgprot_t pgprot_kernel; #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) -#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY) +#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE) #define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN) #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER) #define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) @@ -83,7 +83,7 @@ extern pgprot_t pgprot_kernel; #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) #define PAGE_KERNEL_EXEC pgprot_kernel -#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN) +#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) #define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER) #define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) @@ -240,7 +240,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER; + const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } -- cgit v1.2.3 From b62655f4c6f3e4d21934eee14ac2ac5cd479c97c Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Tue, 6 Nov 2012 03:48:40 +0100 Subject: ARM: 7571/1: SMP: add function arch_send_wakeup_ipi_mask() Add function arch_send_wakeup_ipi_mask(), so that platform code can use it as an easy way to wake up cores that are in WFI. 
Signed-off-by: Shawn Guo Signed-off-by: Russell King --- arch/arm/include/asm/smp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 2e3be16c676..d3a22bebe6c 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -79,6 +79,7 @@ extern void cpu_die(void); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); +extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask); struct smp_operations { #ifdef CONFIG_SMP -- cgit v1.2.3 From f600b9fcd2bcb8ee0adb235f54ccdd93c729c442 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 15 Nov 2012 21:28:43 +0000 Subject: ARM: cti: fix manipulation of debug lock registers The LOCKSTATUS register for memory-mapped coresight devices indicates whether or not the device in question implements hardware locking. If not, locking is not present (i.e. LSR.SLI == 0) and LAR is write-ignore, so software doesn't actually need to check the status register at all. This patch removes the broken LSR checks. Cc: Ming Lei Reported-by: Mike Williams Signed-off-by: Will Deacon --- arch/arm/include/asm/cti.h | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h index a0ada3ea435..f2e5cad3f30 100644 --- a/arch/arm/include/asm/cti.h +++ b/arch/arm/include/asm/cti.h @@ -146,15 +146,7 @@ static inline void cti_irq_ack(struct cti *cti) */ static inline void cti_unlock(struct cti *cti) { - void __iomem *base = cti->base; - unsigned long val; - - val = __raw_readl(base + LOCKSTATUS); - - if (val & 1) { - val = LOCKCODE; - __raw_writel(val, base + LOCKACCESS); - } + __raw_writel(LOCKCODE, cti->base + LOCKACCESS); } /** @@ -166,14 +158,6 @@ static inline void cti_unlock(struct cti *cti) */ static inline void cti_lock(struct cti *cti) { - void __iomem *base = cti->base; - unsigned long val; - - val = __raw_readl(base + LOCKSTATUS); - - if (!(val & 1)) { - val = ~LOCKCODE; - __raw_writel(val, base + LOCKACCESS); - } + __raw_writel(~LOCKCODE, cti->base + LOCKACCESS); } #endif -- cgit v1.2.3 From 6920b5a791bbb54810159fbf6acf2c6ae14cdd22 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 21 Sep 2012 10:18:58 +0100 Subject: ARM: move serial_sa1100.h header file to linux/platform_data This is really driver platform data, so move it to the appropriate directory. Acked-by: Greg Kroah-Hartman Signed-off-by: Russell King --- arch/arm/include/asm/mach/serial_sa1100.h | 31 ------------------------------- 1 file changed, 31 deletions(-) delete mode 100644 arch/arm/include/asm/mach/serial_sa1100.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mach/serial_sa1100.h b/arch/arm/include/asm/mach/serial_sa1100.h deleted file mode 100644 index d09064bf95a..00000000000 --- a/arch/arm/include/asm/mach/serial_sa1100.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * arch/arm/include/asm/mach/serial_sa1100.h - * - * Author: Nicolas Pitre - * - * Moved and changed lots, Russell King - * - * Low level machine dependent UART functions. - */ - -struct uart_port; -struct uart_info; - -/* - * This is a temporary structure for registering these - * functions; it is intended to be discarded after boot. 
- */ -struct sa1100_port_fns { - void (*set_mctrl)(struct uart_port *, u_int); - u_int (*get_mctrl)(struct uart_port *); - void (*pm)(struct uart_port *, u_int, u_int); - int (*set_wake)(struct uart_port *, u_int); -}; - -#ifdef CONFIG_SERIAL_SA1100 -void sa1100_register_uart_fns(struct sa1100_port_fns *fns); -void sa1100_register_uart(int idx, int port); -#else -#define sa1100_register_uart_fns(fns) do { } while (0) -#define sa1100_register_uart(idx,port) do { } while (0) -#endif -- cgit v1.2.3 From 46000065a631c6d21452d533baf086175c384ba4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 21 Sep 2012 10:23:44 +0100 Subject: ARM: move udc_pxa2xx.h to linux/platform_data Move the PXA2xx/IXP4xx UDC header file into linux/platform_data as it only contains a driver platform data structure. Acked-by: Felipe Balbi Acked-by: Greg Kroah-Hartman Acked-by: Krzysztof Halasa Signed-off-by: Russell King --- arch/arm/include/asm/mach/udc_pxa2xx.h | 26 -------------------------- 1 file changed, 26 deletions(-) delete mode 100644 arch/arm/include/asm/mach/udc_pxa2xx.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h deleted file mode 100644 index ea297ac70bc..00000000000 --- a/arch/arm/include/asm/mach/udc_pxa2xx.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * arch/arm/include/asm/mach/udc_pxa2xx.h - * - * This supports machine-specific differences in how the PXA2xx - * USB Device Controller (UDC) is wired. - * - * It is set in linux/arch/arm/mach-pxa/.c or in - * linux/arch/mach-ixp4xx/.c and used in - * the probe routine of linux/drivers/usb/gadget/pxa2xx_udc.c - */ - -struct pxa2xx_udc_mach_info { - int (*udc_is_connected)(void); /* do we see host? */ - void (*udc_command)(int cmd); -#define PXA2XX_UDC_CMD_CONNECT 0 /* let host see us */ -#define PXA2XX_UDC_CMD_DISCONNECT 1 /* so host won't see us */ - - /* Boards following the design guidelines in the developer's manual, - * with on-chip GPIOs not Lubbock's weird hardware, can have a sane - * VBUS IRQ and omit the methods above. Store the GPIO number - * here. Note that sometimes the signals go through inverters... - */ - bool gpio_pullup_inverted; - int gpio_pullup; /* high == pullup activated */ -}; - -- cgit v1.2.3 From 95e629b761ce36996d1befe2824d5346b5a220b9 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 21 Sep 2012 10:26:40 +0100 Subject: ARM/AVR32: get rid of serial_at91.h The definitions provided by serial_at91.h are only used by the atmel_serial driver, and the function that uses it is never called from anywhere in the kernel. Therefore, these definitions are unused and/or obsolete, and can be removed. Acked-by: Greg Kroah-Hartman Acked-by: Jean-Christophe PLAGNIOL-VILLARD Acked-by: Nicolas Ferre Signed-off-by: Russell King --- arch/arm/include/asm/mach/serial_at91.h | 33 --------------------------------- 1 file changed, 33 deletions(-) delete mode 100644 arch/arm/include/asm/mach/serial_at91.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mach/serial_at91.h b/arch/arm/include/asm/mach/serial_at91.h deleted file mode 100644 index ea6d063923b..00000000000 --- a/arch/arm/include/asm/mach/serial_at91.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * arch/arm/include/asm/mach/serial_at91.h - * - * Based on serial_sa1100.h by Nicolas Pitre - * - * Copyright (C) 2002 ATMEL Rousset - * - * Low level machine dependent UART functions. 
- */ - -struct uart_port; - -/* - * This is a temporary structure for registering these - * functions; it is intended to be discarded after boot. - */ -struct atmel_port_fns { - void (*set_mctrl)(struct uart_port *, u_int); - u_int (*get_mctrl)(struct uart_port *); - void (*enable_ms)(struct uart_port *); - void (*pm)(struct uart_port *, u_int, u_int); - int (*set_wake)(struct uart_port *, u_int); - int (*open)(struct uart_port *); - void (*close)(struct uart_port *); -}; - -#if defined(CONFIG_SERIAL_ATMEL) -void atmel_register_uart_fns(struct atmel_port_fns *fns); -#else -#define atmel_register_uart_fns(fns) do { } while (0) -#endif - - -- cgit v1.2.3 From 1f59d13bee172945ccdfbc5018477ba94a0ac28e Mon Sep 17 00:00:00 2001 From: Will Drewry Date: Thu, 15 Nov 2012 22:11:29 +0100 Subject: ARM: 7577/1: arch/add syscall_get_arch Provide an ARM implementation of syscall_get_arch. This is a pre-requisite for CONFIG_HAVE_ARCH_SECCOMP_FILTER. Signed-off-by: Will Drewry Signed-off-by: Kees Cook Reviewed-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/syscall.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h index 9fdded6b108..f1d96d4e809 100644 --- a/arch/arm/include/asm/syscall.h +++ b/arch/arm/include/asm/syscall.h @@ -7,6 +7,8 @@ #ifndef _ASM_ARM_SYSCALL_H #define _ASM_ARM_SYSCALL_H +#include /* for AUDIT_ARCH_* */ +#include /* for ELF_EM */ #include #include @@ -95,4 +97,11 @@ static inline void syscall_set_arguments(struct task_struct *task, memcpy(®s->ARM_r0 + i, args, n * sizeof(args[0])); } +static inline int syscall_get_arch(struct task_struct *task, + struct pt_regs *regs) +{ + /* ARM tasks don't change audit architectures on the fly. */ + return AUDIT_ARCH_ARM; +} + #endif /* _ASM_ARM_SYSCALL_H */ -- cgit v1.2.3 From 9b790d71d58be65f9508ab60920eb978af828412 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 15 Nov 2012 22:12:00 +0100 Subject: ARM: 7578/1: arch/move secure_computing into trace There is very little difference in the TIF_SECCOMP and TIF_SYSCALL_WORK path in entry-common.S, so merge TIF_SECCOMP into TIF_SYSCALL_WORK and move seccomp into the syscall_trace_enter() handler. Expanded some of the tracehook logic into the callers to make this code more readable. Since tracehook needs to do register changing, this portion is best left in its own function instead of copy/pasting into the callers. Additionally, the return value for secure_computing() is now checked and a -1 value will result in the system call being skipped. 
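[A rough sketch of the check described above (illustration only; the real code lives in arch/arm/kernel/ptrace.c and the exact signatures may differ):]

#include <linux/seccomp.h>
#include <linux/ptrace.h>

static int example_syscall_trace_enter(struct pt_regs *regs, int scno)
{
	/* A -1 return from the seccomp filter means: skip this system call */
	if (secure_computing(scno) == -1)
		return -1;

	/* ...tracehook/audit processing would follow here... */
	return scno;
}
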
Signed-off-by: Kees Cook Acked-by: Will Drewry Reviewed-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/thread_info.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 8477b4c1d39..cddda1f41f0 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -151,10 +151,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_SYSCALL_TRACE 8 #define TIF_SYSCALL_AUDIT 9 #define TIF_SYSCALL_TRACEPOINT 10 +#define TIF_SECCOMP 11 /* seccomp syscall filtering active */ #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 20 -#define TIF_SECCOMP 21 #define TIF_SWITCH_MM 22 /* deferred switch_mm */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) @@ -163,11 +163,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) -#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) /* Checks for any syscall work in entry-common.S */ -#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT) +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) /* * Change these and you break ASM code in entry-common.S -- cgit v1.2.3 From e8d432c9cf0a3d569b2d00877d12c9ffe9e55286 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Tue, 6 Nov 2012 11:57:43 +0000 Subject: ARM: kernel: add MIDR to per-CPU information data The advent of big.LITTLE ARM platforms requires the kernel to be able to identify the MIDRs of all online CPUs upon request. MIDRs are stashed at boot time so that kernel subsystems can detect the MIDR of online CPUs by simply retrieving per-CPU data updated by all booted CPUs. Signed-off-by: Lorenzo Pieralisi Acked-by: Nicolas Pitre --- arch/arm/include/asm/cpu.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h index d797223b39d..2744f060255 100644 --- a/arch/arm/include/asm/cpu.h +++ b/arch/arm/include/asm/cpu.h @@ -15,6 +15,7 @@ struct cpuinfo_arm { struct cpu cpu; + u32 cpuid; #ifdef CONFIG_SMP unsigned int loops_per_jiffy; #endif -- cgit v1.2.3 From dca463daa0151c5bbbd8ec8fd42882a3966d3c44 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 15 Nov 2012 17:30:32 +0000 Subject: ARM: kernel: enhance MPIDR macro definitions Kernel subsystems other than the topology layer need the MPIDR mask definitions to access the MPIDR without relying on hardcoded masks. This patch moves the MPIDR register masks definition to a header file and defines a macro to simplify access to MPIDR bit fields representing affinity levels. 
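[For example, a hypothetical debug helper could use the accessor macro added below to pull the individual affinity fields out of the MPIDR:]

#include <linux/types.h>
#include <linux/printk.h>
#include <asm/cputype.h>

static void example_dump_affinity(void)
{
	u32 mpidr = read_cpuid_mpidr();

	pr_info("aff0=%u aff1=%u aff2=%u\n",
		MPIDR_AFFINITY_LEVEL(mpidr, 0),	/* e.g. core within a cluster */
		MPIDR_AFFINITY_LEVEL(mpidr, 1),	/* e.g. cluster number */
		MPIDR_AFFINITY_LEVEL(mpidr, 2));
}
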
Signed-off-by: Lorenzo Pieralisi Acked-by: Will Deacon Acked-by: Nicolas Pitre --- arch/arm/include/asm/cputype.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index cb47d28cbe1..a59dcb5ab5f 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -25,6 +25,19 @@ #define CPUID_EXT_ISAR4 "c2, 4" #define CPUID_EXT_ISAR5 "c2, 5" +#define MPIDR_SMP_BITMASK (0x3 << 30) +#define MPIDR_SMP_VALUE (0x2 << 30) + +#define MPIDR_MT_BITMASK (0x1 << 24) + +#define MPIDR_HWID_BITMASK 0xFFFFFF + +#define MPIDR_LEVEL_BITS 8 +#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1) + +#define MPIDR_AFFINITY_LEVEL(mpidr, level) \ + ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK) + extern unsigned int processor_id; #ifdef CONFIG_CPU_CP15 -- cgit v1.2.3 From a0ae02405076ac32bd17ece976e914b5b6075bb0 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 17 Nov 2011 17:31:51 +0000 Subject: ARM: kernel: add device tree init map function When booting through a device tree, the kernel cpu logical id map can be initialized using device tree data passed by FW or through an embedded blob. This patch adds a function that parses device tree "cpu" nodes and retrieves the corresponding CPUs hardware identifiers (MPIDR). It sets the possible cpus and the cpu logical map values according to the number of CPUs defined in the device tree and respective properties. The device tree HW identifiers are considered valid if all CPU nodes contain a "reg" property, there are no duplicate "reg" entries and the DT defines a CPU node whose "reg" property matches the MPIDR[23:0] of the boot CPU. The primary CPU is assigned cpu logical number 0 to keep the current convention valid. Current bindings documentation is included in the patch: Documentation/devicetree/bindings/arm/cpus.txt Signed-off-by: Lorenzo Pieralisi Acked-by: Nicolas Pitre --- arch/arm/include/asm/prom.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h index aeae9c609df..8dd51dc1a36 100644 --- a/arch/arm/include/asm/prom.h +++ b/arch/arm/include/asm/prom.h @@ -15,6 +15,7 @@ extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys); extern void arm_dt_memblock_reserve(void); +extern void __init arm_dt_init_cpu_maps(void); #else /* CONFIG_OF */ @@ -24,6 +25,7 @@ static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys) } static inline void arm_dt_memblock_reserve(void) { } +static inline void arm_dt_init_cpu_maps(void) { } #endif /* CONFIG_OF */ #endif /* ASMARM_PROM_H */ -- cgit v1.2.3 From 7f124aaf01439d2fa54283f3c375ce3b9fc776d4 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 17 Nov 2011 17:36:24 +0000 Subject: ARM: kernel: add logical mappings look-up In ARM SMP systems the MPIDR register ([23:0] bits) is used to uniquely identify CPUs. In order to retrieve the logical CPU index corresponding to a given MPIDR value and guarantee a consistent translation throughout the kernel, this patch adds a look-up based on the MPIDR[23:0] so that kernel subsystems can use it whenever the logical cpu index corresponding to a given MPIDR value is needed. 
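[A hypothetical user of the new look-up, translating a hardware MPIDR back into a logical CPU number:]

#include <linux/printk.h>
#include <asm/cputype.h>
#include <asm/smp_plat.h>

static int example_cpu_from_mpidr(u32 mpidr)
{
	int cpu = get_logical_index(mpidr & MPIDR_HWID_BITMASK);

	if (cpu < 0)
		pr_warn("no logical CPU registered for MPIDR 0x%x\n", mpidr);

	return cpu;
}
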
Signed-off-by: Lorenzo Pieralisi Acked-by: Will Deacon Acked-by: Nicolas Pitre --- arch/arm/include/asm/smp_plat.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index 558d6c80aca..aaa61b6f50f 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -5,6 +5,9 @@ #ifndef __ASMARM_SMP_PLAT_H #define __ASMARM_SMP_PLAT_H +#include +#include + #include /* @@ -48,5 +51,19 @@ static inline int cache_ops_need_broadcast(void) */ extern int __cpu_logical_map[]; #define cpu_logical_map(cpu) __cpu_logical_map[cpu] +/* + * Retrieve logical cpu index corresponding to a given MPIDR[23:0] + * - mpidr: MPIDR[23:0] to be used for the look-up + * + * Returns the cpu logical index or -EINVAL on look-up error + */ +static inline int get_logical_index(u32 mpidr) +{ + int cpu; + for (cpu = 0; cpu < nr_cpu_ids; cpu++) + if (cpu_logical_map(cpu) == mpidr) + return cpu; + return -EINVAL; +} #endif -- cgit v1.2.3 From c7cc504bc351e41e871e317ca7f032f4562f34ad Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 22 Nov 2012 13:05:55 +0100 Subject: ARM: 7584/1: perf: fix link error when CONFIG_HW_PERF_EVENTS is not selected Commit e50c541 (ARM: perf: add guest vs host discrimination) broken the link as perf_instruction_pointer and perf_misc_flags are not defined when CONFIG_HW_PERF_EVENTS is not selected. As it make little sense to try and profile a guest without any HW event, just fallback to the original code when this config option is not selected. Reported-by: Russell King Acked-by: Will Deacon Signed-off-by: Marc Zyngier Signed-off-by: Russell King --- arch/arm/include/asm/perf_event.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index 00416edecea..755877527cf 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -21,9 +21,11 @@ #define C(_x) PERF_COUNT_HW_CACHE_##_x #define CACHE_OP_UNSUPPORTED 0xFFFF +#ifdef CONFIG_HW_PERF_EVENTS struct pt_regs; extern unsigned long perf_instruction_pointer(struct pt_regs *regs); extern unsigned long perf_misc_flags(struct pt_regs *regs); #define perf_misc_flags(regs) perf_misc_flags(regs) +#endif #endif /* __ARM_PERF_EVENT_H__ */ -- cgit v1.2.3 From 3e99675af1b25a191c467700499b1cbe5585a778 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Sun, 25 Nov 2012 03:24:32 +0100 Subject: ARM: 7582/2: rename kvm_seq to vmalloc_seq so to avoid confusion with KVM The kvm_seq value has nothing to do what so ever with this other KVM. Given that KVM support on ARM is imminent, it's best to rename kvm_seq into something else to clearly identify what it is about i.e. a sequence number for vmalloc section mappings. 
Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/mmu.h | 2 +- arch/arm/include/asm/mmu_context.h | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index 5b53b53ab5c..9f77e7804f3 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -7,7 +7,7 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID u64 id; #endif - unsigned int kvm_seq; + unsigned int vmalloc_seq; } mm_context_t; #ifdef CONFIG_CPU_HAS_ASID diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index a64f61cb23d..e1f644bc7cc 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -20,7 +20,7 @@ #include #include -void __check_kvm_seq(struct mm_struct *mm); +void __check_vmalloc_seq(struct mm_struct *mm); #ifdef CONFIG_CPU_HAS_ASID @@ -34,8 +34,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); static inline void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) { - if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq)) - __check_kvm_seq(mm); + if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) + __check_vmalloc_seq(mm); if (irqs_disabled()) /* -- cgit v1.2.3 From 14318efb322e2fe1a034c69463d725209eb9d548 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 29 Nov 2012 20:39:54 +0100 Subject: ARM: 7587/1: implement optimized percpu variable access Use the previously unused TPIDRPRW register to store percpu offsets. TPIDRPRW is only accessible in PL1, so it can only be used in the kernel. This replaces 2 loads with a mrc instruction for each percpu variable access. With hackbench, the performance improvement is 1.4% on Cortex-A9 (highbank). Taking an average of 30 runs of "hackbench -l 1000" yields: Before: 6.2191 After: 6.1348 Will Deacon reported similar delta on v6 with 11MPCore. The asm "memory clobber" are needed here to ensure the percpu offset gets reloaded. Testing by Will found that this would not happen in __schedule() which is a bit of a special case as preemption is disabled but the execution can move cores. Signed-off-by: Rob Herring Acked-by: Will Deacon Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/Kbuild | 1 - arch/arm/include/asm/percpu.h | 45 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 arch/arm/include/asm/percpu.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index f70ae175a3d..2ffdaacd461 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -16,7 +16,6 @@ generic-y += local64.h generic-y += msgbuf.h generic-y += param.h generic-y += parport.h -generic-y += percpu.h generic-y += poll.h generic-y += resource.h generic-y += sections.h diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h new file mode 100644 index 00000000000..968c0a14e0a --- /dev/null +++ b/arch/arm/include/asm/percpu.h @@ -0,0 +1,45 @@ +/* + * Copyright 2012 Calxeda, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ +#ifndef _ASM_ARM_PERCPU_H_ +#define _ASM_ARM_PERCPU_H_ + +/* + * Same as asm-generic/percpu.h, except that we store the per cpu offset + * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7 + */ +#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6) +static inline void set_my_cpu_offset(unsigned long off) +{ + /* Set TPIDRPRW */ + asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory"); +} + +static inline unsigned long __my_cpu_offset(void) +{ + unsigned long off; + /* Read TPIDRPRW */ + asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory"); + return off; +} +#define __my_cpu_offset __my_cpu_offset() +#else +#define set_my_cpu_offset(x) do {} while(0) + +#endif /* CONFIG_SMP */ + +#include + +#endif /* _ASM_ARM_PERCPU_H_ */ -- cgit v1.2.3 From 1ecec696c8bb9b4cefb09495d81d081d1c81b578 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Mon, 10 Dec 2012 18:35:22 +0100 Subject: ARM: 7599/1: head: Remove boot-time HYP mode check for v5 and below The kernel can only be entered on HYP mode on CPUs which actually support it, i.e. >= ARMv7. pre-v6 platform support cannot coexist in the same kernel as support for v7 and higher, so there is no advantage in having the HYP mode check on pre-v6 hardware. At least one pre-v6 board is known to fail when the HYP mode check code is present, although the exact cause remains unknown and may be unrelated. [1] This patch restores the old behaviour for pre-v6 platforms, whereby the CPSR is forced directly to SVC mode with IRQs and FIQs masked. All kernels capable of booting on v7 hardware will retain the check, so this should not impair functionality. [1] http://lists.arm.linux.org.uk/lurker/message/20121130.013814.19218413.en.html ([ARM] head.S change broke platform device registration?) Signed-off-by: Dave Martin Signed-off-by: Russell King --- arch/arm/include/asm/assembler.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 2ef95813fce..eb87200aa4b 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -250,6 +250,7 @@ * Beware, it also clobers LR. */ .macro safe_svcmode_maskall reg:req +#if __LINUX_ARM_ARCH__ >= 6 mrs \reg , cpsr mov lr , \reg and lr , lr , #MODE_MASK @@ -266,6 +267,13 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) __ERET 1: msr cpsr_c, \reg 2: +#else +/* + * workaround for possibly broken pre-v6 hardware + * (akita, Sharp Zaurus C-1000, PXA270-based) + */ + setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg +#endif .endm /* -- cgit v1.2.3