Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r-- | arch/arm/include/asm/cacheflush.h        |  75 |
-rw-r--r-- | arch/arm/include/asm/cp15.h              |  14 |
-rw-r--r-- | arch/arm/include/asm/hardware/timer-sp.h |  15 |
-rw-r--r-- | arch/arm/include/asm/irq.h               |   5 |
-rw-r--r-- | arch/arm/include/asm/mach/arch.h         |   5 |
-rw-r--r-- | arch/arm/include/asm/mach/irq.h          |  36 |
-rw-r--r-- | arch/arm/include/asm/mcpm.h              | 209 |
-rw-r--r-- | arch/arm/include/asm/psci.h              |  12 |
8 files changed, 329 insertions(+), 42 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e1489c54cd12..bff71388e72a 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 	flush_cache_all();
 }
 
+/*
+ * Memory synchronization helpers for mixed cached vs non cached accesses.
+ *
+ * Some synchronization algorithms have to set states in memory with the
+ * cache enabled or disabled depending on the code path.  It is crucial
+ * to always ensure proper cache maintenance to update main memory right
+ * away in that case.
+ *
+ * Any cached write must be followed by a cache clean operation.
+ * Any cached read must be preceded by a cache invalidate operation.
+ * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
+ * operation is needed to avoid discarding possible concurrent writes to the
+ * accessed memory.
+ *
+ * Also, in order to prevent a cached writer from interfering with an
+ * adjacent non-cached writer, each state variable must be located to
+ * a separate cache line.
+ */
+
+/*
+ * This needs to be >= the max cache writeback size of all
+ * supported platforms included in the current kernel configuration.
+ * This is used to align state variables to their own cache lines.
+ */
+#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
+#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
+
+/*
+ * There is no __cpuc_clean_dcache_area but we use it anyway for
+ * code intent clarity, and alias it to __cpuc_flush_dcache_area.
+ */
+#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
+
+/*
+ * Ensure preceding writes to *p by this CPU are visible to
+ * subsequent reads by other CPUs:
+ */
+static inline void __sync_cache_range_w(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+	__cpuc_clean_dcache_area(_p, size);
+	outer_clean_range(__pa(_p), __pa(_p + size));
+}
+
+/*
+ * Ensure preceding writes to *p by other CPUs are visible to
+ * subsequent reads by this CPU.  We must be careful not to
+ * discard data simultaneously written by another CPU, hence the
+ * usage of flush rather than invalidate operations.
+ */
+static inline void __sync_cache_range_r(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+#ifdef CONFIG_OUTER_CACHE
+	if (outer_cache.flush_range) {
+		/*
+		 * Ensure dirty data migrated from other CPUs into our cache
+		 * are cleaned out safely before the outer cache is cleaned:
+		 */
+		__cpuc_clean_dcache_area(_p, size);
+
+		/* Clean and invalidate stale data for *p from outer ... */
+		outer_flush_range(__pa(_p), __pa(_p + size));
+	}
+#endif
+
+	/* ... and inner cache: */
+	__cpuc_flush_dcache_area(_p, size);
+}
+
+#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
+#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+
 #endif
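Usage sketch for the helpers above (not part of the patch; power_state and both functions are hypothetical). A cached writer publishes a state update with sync_cache_w() so that a reader running with its cache disabled sees it in main memory; a cached reader uses sync_cache_r() before looking at a value a non-cached writer may have changed. The alignment keeps the variable on its own cache line, as the comment block requires:

    /* hypothetical shared state, placed on its own cache writeback granule */
    static int power_state __aligned(__CACHE_WRITEBACK_GRANULE);

    static void publish_state(int new_state)
    {
    	power_state = new_state;
    	sync_cache_w(&power_state);	/* clean inner and outer caches */
    }

    static int observe_state(void)
    {
    	sync_cache_r(&power_state);	/* flush, so concurrent writes survive */
    	return power_state;
    }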
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 5ef4d8015a60..ce4d01c03e6c 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -59,6 +59,20 @@ static inline void set_cr(unsigned int val)
 	isb();
 }
 
+static inline unsigned int get_auxcr(void)
+{
+	unsigned int val;
+	asm("mrc p15, 0, %0, c1, c0, 1	@ get AUXCR" : "=r" (val));
+	return val;
+}
+
+static inline void set_auxcr(unsigned int val)
+{
+	asm volatile("mcr p15, 0, %0, c1, c0, 1	@ set AUXCR"
+	  : : "r" (val));
+	isb();
+}
+
 #ifndef CONFIG_SMP
 extern void adjust_cr(unsigned long mask, unsigned long set);
 #endif
diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h
index 2dd9d3f83f29..4cba3e6d8aed 100644
--- a/arch/arm/include/asm/hardware/timer-sp.h
+++ b/arch/arm/include/asm/hardware/timer-sp.h
@@ -1,15 +1,18 @@
 void __sp804_clocksource_and_sched_clock_init(void __iomem *,
-					      const char *, int);
+					      const char *, struct clk *, int);
 
-static inline void sp804_clocksource_init(void __iomem *base, const char *name)
+static inline void sp804_clocksource_init(void __iomem *base, const char *name,
+					  struct clk *clk)
 {
-	__sp804_clocksource_and_sched_clock_init(base, name, 0);
+	__sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
 }
 
 static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base,
-							  const char *name)
+							  const char *name,
+							  struct clk *clk)
 {
-	__sp804_clocksource_and_sched_clock_init(base, name, 1);
+	__sp804_clocksource_and_sched_clock_init(base, name, clk, 1);
 }
 
-void sp804_clockevents_init(void __iomem *, unsigned int, const char *);
+void sp804_clockevents_init(void __iomem *, unsigned int, const char *,
+			    struct clk *);
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 35c21c375d81..53c15dec7af6 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -30,6 +30,11 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);
 void init_IRQ(void);
 
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+extern void (*handle_arch_irq)(struct pt_regs *);
+extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
+#endif
+
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 308ad7d6f98b..75bf07910b81 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -8,6 +8,8 @@
  * published by the Free Software Foundation.
 
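The smp_init hook added to struct machine_desc below allows a machine to pick its SMP operations at boot instead of committing statically to the .smp field. A hedged sketch of a board file using it, assuming the convention that returning true means the hook installed its own ops and the default .smp should be skipped; the board name, my_board_has_mcpm() and my_smp_ops are hypothetical:

    static bool __init my_smp_init(void)
    {
    	if (!my_board_has_mcpm())	/* hypothetical platform check */
    		return false;		/* fall back to .smp below */

    	mcpm_smp_set_ops();		/* install MCPM-based SMP operations */
    	return true;
    }

    MACHINE_START(MYBOARD, "Hypothetical board")
    	.smp		= smp_ops(my_smp_ops),
    	.smp_init	= smp_init_ops(my_smp_init),
    MACHINE_END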
 */
 
+#include <linux/types.h>
+
 #ifndef __ASSEMBLY__
 
 struct tag;
@@ -16,8 +18,10 @@ struct pt_regs;
 struct smp_operations;
 #ifdef CONFIG_SMP
 #define smp_ops(ops) (&(ops))
+#define smp_init_ops(ops) (&(ops))
 #else
 #define smp_ops(ops) (struct smp_operations *)NULL
+#define smp_init_ops(ops) (bool (*)(void))NULL
 #endif
 
 struct machine_desc {
@@ -41,6 +45,7 @@ struct machine_desc {
 	unsigned char		reserve_lp2 :1;	/* never has lp2	*/
 	char			restart_mode;	/* default restart mode	*/
 	struct smp_operations	*smp;		/* SMP operations	*/
+	bool			(*smp_init)(void);
 	void			(*fixup)(struct tag *, char **,
 					 struct meminfo *);
 	void			(*reserve)(void);	/* reserve mem blocks	*/
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 18c883023339..2092ee1e1300 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -20,11 +20,6 @@ struct seq_file;
 extern void init_FIQ(int);
 extern int show_fiq_list(struct seq_file *, int);
 
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-extern void (*handle_arch_irq)(struct pt_regs *);
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-#endif
-
 /*
  * This is for easy migration, but should be changed in the source
  */
@@ -35,35 +30,4 @@ do {							\
 	raw_spin_unlock(&desc->lock);			\
 } while(0)
 
-#ifndef __ASSEMBLY__
-/*
- * Entry/exit functions for chained handlers where the primary IRQ chip
- * may implement either fasteoi or level-trigger flow control.
- */
-static inline void chained_irq_enter(struct irq_chip *chip,
-				     struct irq_desc *desc)
-{
-	/* FastEOI controllers require no action on entry. */
-	if (chip->irq_eoi)
-		return;
-
-	if (chip->irq_mask_ack) {
-		chip->irq_mask_ack(&desc->irq_data);
-	} else {
-		chip->irq_mask(&desc->irq_data);
-		if (chip->irq_ack)
-			chip->irq_ack(&desc->irq_data);
-	}
-}
-
-static inline void chained_irq_exit(struct irq_chip *chip,
-				    struct irq_desc *desc)
-{
-	if (chip->irq_eoi)
-		chip->irq_eoi(&desc->irq_data);
-	else
-		chip->irq_unmask(&desc->irq_data);
-}
-#endif
-
 #endif
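The chained_irq helpers removed above implement the enter/exit protocol around a cascaded (chained) interrupt handler; the removal here suggests relocation to a generic header rather than deletion. A minimal sketch of the pattern they support, with all my_mux_* names being placeholders:

    static void my_mux_handle_irq(unsigned int irq, struct irq_desc *desc)
    {
    	struct irq_chip *chip = irq_desc_get_chip(desc);
    	unsigned long pending;

    	chained_irq_enter(chip, desc);		/* mask/ack unless fasteoi */

    	pending = my_mux_read_pending();	/* hypothetical register read */
    	while (pending) {
    		unsigned int bit = __ffs(pending);

    		generic_handle_irq(my_mux_irq_base + bit);
    		pending &= ~(1UL << bit);
    	}

    	chained_irq_exit(chip, desc);		/* eoi, or unmask otherwise */
    }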
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
new file mode 100644
index 000000000000..0f7b7620e9a5
--- /dev/null
+++ b/arch/arm/include/asm/mcpm.h
@@ -0,0 +1,209 @@
+/*
+ * arch/arm/include/asm/mcpm.h
+ *
+ * Created by:  Nicolas Pitre, April 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MCPM_H
+#define MCPM_H
+
+/*
+ * Maximum number of possible clusters / CPUs per cluster.
+ *
+ * This should be sufficient for quite a while, while keeping the
+ * (assembly) code simpler.  When this starts to grow then we'll have
+ * to consider dynamic allocation.
+ */
+#define MAX_CPUS_PER_CLUSTER	4
+#define MAX_NR_CLUSTERS		2
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Platform specific code should use this symbol to set up secondary
+ * entry location for processors to use when released from reset.
+ */
+extern void mcpm_entry_point(void);
+
+/*
+ * This is used to indicate where the given CPU from given cluster should
+ * branch once it is ready to re-enter the kernel using ptr, or NULL if it
+ * should be gated.  A gated CPU is held in a WFE loop until its vector
+ * becomes non NULL.
+ */
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
+
+/*
+ * CPU/cluster power operations API for higher subsystems to use.
+ */
+
+/**
+ * mcpm_cpu_power_up - make given CPU in given cluster runnable
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * The identified CPU is brought out of reset.  If the cluster was powered
+ * down then it is brought up as well, taking care not to let the other CPUs
+ * in the cluster run, and ensuring appropriate cluster setup.
+ *
+ * Caller must ensure the appropriate entry vector is initialized with
+ * mcpm_set_entry_vector() prior to calling this.
+ *
+ * This must be called in a sleepable context.  However, the implementation
+ * is strongly encouraged to return early and let the operation happen
+ * asynchronously, especially when significant delays are expected.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
+
+/**
+ * mcpm_cpu_power_down - power the calling CPU down
+ *
+ * The calling CPU is powered down.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster is prepared for power-down too.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return.  Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_power_down(void);
+
+/**
+ * mcpm_cpu_suspend - bring the calling CPU into a suspended state
+ *
+ * @expected_residency: duration in microseconds the CPU is expected
+ *			to remain suspended, or 0 if unknown/infinity.
+ *
+ * The calling CPU is suspended.  The expected residency argument is used
+ * as a hint by the platform specific backend to implement the appropriate
+ * sleep state level according to the knowledge it has on wake-up latency
+ * for the given hardware.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster may be prepared for power-down too, if the expected
+ * residency makes it worthwhile.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return.  Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_suspend(u64 expected_residency);
+
+/**
+ * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
+ *
+ * This lets the platform specific backend code perform needed housekeeping
+ * work.  This must be called by the newly activated CPU as soon as it is
+ * fully operational in kernel space, before it enables interrupts.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_powered_up(void);
+
+/*
+ * Platform specific methods used in the implementation of the above API.
+ */
+struct mcpm_platform_ops {
+	int (*power_up)(unsigned int cpu, unsigned int cluster);
+	void (*power_down)(void);
+	void (*suspend)(u64);
+	void (*powered_up)(void);
+};
+
+/**
+ * mcpm_platform_register - register platform specific power methods
+ *
+ * @ops: mcpm_platform_ops structure to register
+ *
+ * An error is returned if the registration has been done previously.
+ */
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
+
+/* Synchronisation structures for coordinating safe cluster setup/teardown: */
+
+/*
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+	/* individual CPU states */
+	struct {
+		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+	} cpus[MAX_CPUS_PER_CLUSTER];
+
+	/* cluster state */
+	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+	/* inbound-side state */
+	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
+extern unsigned long sync_phys;	/* physical address of *mcpm_sync */
+
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
+bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
+int __mcpm_cluster_state(unsigned int cluster);
+
+int __init mcpm_sync_init(
+	void (*power_up_setup)(unsigned int affinity_level));
+
+void __init mcpm_smp_set_ops(void);
+
+#else
+
+/*
+ * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
+ * cannot be included in asm files.  Let's work around the conflict like this.
+ */
+#include <asm/asm-offsets.h>
+#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
+
+#endif /* ! __ASSEMBLY__ */
+
+/* Definitions for mcpm_sync_struct */
+#define CPU_DOWN		0x11
+#define CPU_COMING_UP		0x12
+#define CPU_UP			0x13
+#define CPU_GOING_DOWN		0x14
+
+#define CLUSTER_DOWN		0x21
+#define CLUSTER_UP		0x22
+#define CLUSTER_GOING_DOWN	0x23
+
+#define INBOUND_NOT_COMING_UP	0x31
+#define INBOUND_COMING_UP	0x32
+
+/*
+ * Offsets for the mcpm_sync_struct members, for use in asm.
+ * We don't want to make them global to the kernel via asm-offsets.c.
+ */
+#define MCPM_SYNC_CLUSTER_CPUS	0
+#define MCPM_SYNC_CPU_SIZE	__CACHE_WRITEBACK_GRANULE
+#define MCPM_SYNC_CLUSTER_CLUSTER \
+	(MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
+#define MCPM_SYNC_CLUSTER_INBOUND \
+	(MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
+#define MCPM_SYNC_CLUSTER_SIZE \
+	(MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
+
+#endif
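Putting the MCPM API above together, a hedged sketch of the expected call sequence: the platform backend registers its methods once, and the SMP boot path later sets the entry vector before releasing a CPU from reset, as the mcpm_cpu_power_up() documentation requires. All my_pm_* names are assumptions; secondary_startup is the conventional ARM secondary-CPU entry point:

    static const struct mcpm_platform_ops my_pm_ops = {
    	.power_up	= my_pm_power_up,	/* hypothetical backend methods */
    	.power_down	= my_pm_power_down,
    	.suspend	= my_pm_suspend,
    	.powered_up	= my_pm_powered_up,
    };

    static int __init my_pm_init(void)
    {
    	return mcpm_platform_register(&my_pm_ops);	/* errors if repeated */
    }

    extern void secondary_startup(void);

    static int my_boot_secondary(unsigned int cpu, unsigned int cluster)
    {
    	/* the entry vector must be set before the CPU is released */
    	mcpm_set_entry_vector(cpu, cluster, secondary_startup);
    	return mcpm_cpu_power_up(cpu, cluster);
    }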
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index ce0dbe7c1625..a079cbee4275 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -16,6 +16,10 @@
 
 #define PSCI_POWER_STATE_TYPE_STANDBY		0
 #define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
+#define PSCI_POWER_STATE_AFFINITY_LEVEL0	0
+#define PSCI_POWER_STATE_AFFINITY_LEVEL1	1
+#define PSCI_POWER_STATE_AFFINITY_LEVEL2	2
+#define PSCI_POWER_STATE_AFFINITY_LEVEL3	3
 
 struct psci_power_state {
 	u16	id;
@@ -33,4 +37,12 @@ struct psci_operations {
 
 extern struct psci_operations psci_ops;
 
+#ifdef CONFIG_ARM_PSCI
+extern int __init psci_probe(void);
+#else
+static inline int psci_probe(void)
+{
+	return -ENODEV;
+}
+#endif
 #endif /* __ASM_ARM_PSCI_H */
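Finally, a hedged example of the new psci_probe() interface: because the !CONFIG_ARM_PSCI stub returns -ENODEV, callers may probe unconditionally, assuming the real psci_probe() returns 0 once PSCI firmware support has been detected. The surrounding init hook is hypothetical:

    static int __init my_psci_setup(void)
    {
    	int err = psci_probe();

    	if (err)
    		return err;	/* PSCI absent, or not configured in */

    	/* PSCI is available: the psci_ops callbacks may now be used */
    	return 0;
    }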