author     Russell King <rmk+kernel@arm.linux.org.uk>    2011-01-06 22:32:52 +0000
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2011-01-06 22:32:52 +0000
commit     4073723acb9cdcdbe4df9c0e0c376c65d1697e43 (patch)
tree       f41c17eac157b1223ce104845cf9b1e5a9e6a83d /arch/arm
parent     58daf18cdcab550262a5f4681e1f1e073e21965a (diff)
parent     4ec3eb13634529c0bc7466658d84d0bbe3244aea (diff)
Merge branch 'misc' into devel
Conflicts:
	arch/arm/Kconfig
	arch/arm/common/Makefile
	arch/arm/kernel/Makefile
	arch/arm/kernel/smp.c
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig | 65
-rw-r--r--  arch/arm/Kconfig.debug | 2
-rw-r--r--  arch/arm/common/Kconfig | 4
-rw-r--r--  arch/arm/common/clkdev.c | 179
-rw-r--r--  arch/arm/common/dmabounce.c | 16
-rw-r--r--  arch/arm/include/asm/assembler.h | 35
-rw-r--r--  arch/arm/include/asm/cache.h | 2
-rw-r--r--  arch/arm/include/asm/clkdev.h | 22
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 93
-rw-r--r--  arch/arm/include/asm/domain.h | 31
-rw-r--r--  arch/arm/include/asm/entry-macro-multi.S | 44
-rw-r--r--  arch/arm/include/asm/futex.h | 9
-rw-r--r--  arch/arm/include/asm/hardirq.h | 18
-rw-r--r--  arch/arm/include/asm/localtimer.h | 12
-rw-r--r--  arch/arm/include/asm/mach/arch.h | 9
-rw-r--r--  arch/arm/include/asm/mach/irq.h | 8
-rw-r--r--  arch/arm/include/asm/mach/time.h | 1
-rw-r--r--  arch/arm/include/asm/smp.h | 17
-rw-r--r--  arch/arm/include/asm/smp_mpidr.h | 17
-rw-r--r--  arch/arm/include/asm/smp_twd.h | 1
-rw-r--r--  arch/arm/include/asm/system.h | 7
-rw-r--r--  arch/arm/include/asm/traps.h | 2
-rw-r--r--  arch/arm/include/asm/uaccess.h | 16
-rw-r--r--  arch/arm/kernel/Makefile | 4
-rw-r--r--  arch/arm/kernel/entry-armv.S | 52
-rw-r--r--  arch/arm/kernel/fiq.c | 10
-rw-r--r--  arch/arm/kernel/head.S | 50
-rw-r--r--  arch/arm/kernel/irq.c | 30
-rw-r--r--  arch/arm/kernel/setup.c | 37
-rw-r--r--  arch/arm/kernel/smp.c | 409
-rw-r--r--  arch/arm/kernel/smp_tlb.c | 139
-rw-r--r--  arch/arm/kernel/smp_twd.c | 10
-rw-r--r--  arch/arm/kernel/swp_emulate.c | 267
-rw-r--r--  arch/arm/kernel/time.c | 4
-rw-r--r--  arch/arm/kernel/traps.c | 14
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/arm/lib/getuser.S | 13
-rw-r--r--  arch/arm/lib/putuser.S | 29
-rw-r--r--  arch/arm/lib/uaccess.S | 83
-rw-r--r--  arch/arm/mach-bcmring/clock.c | 3
-rw-r--r--  arch/arm/mach-bcmring/core.c | 2
-rw-r--r--  arch/arm/mach-cns3xxx/Kconfig | 1
-rw-r--r--  arch/arm/mach-davinci/clock.h | 2
-rw-r--r--  arch/arm/mach-ep93xx/clock.c | 2
-rw-r--r--  arch/arm/mach-imx/clock-imx1.c | 3
-rw-r--r--  arch/arm/mach-imx/clock-imx21.c | 2
-rw-r--r--  arch/arm/mach-imx/clock-imx27.c | 2
-rw-r--r--  arch/arm/mach-integrator/Kconfig | 1
-rw-r--r--  arch/arm/mach-integrator/core.c | 3
-rw-r--r--  arch/arm/mach-integrator/impd1.c | 3
-rw-r--r--  arch/arm/mach-integrator/integrator_cp.c | 3
-rw-r--r--  arch/arm/mach-iop13xx/include/mach/memory.h | 6
-rw-r--r--  arch/arm/mach-ks8695/Kconfig | 1
-rw-r--r--  arch/arm/mach-ks8695/include/mach/memory.h | 8
-rw-r--r--  arch/arm/mach-lpc32xx/clock.c | 3
-rw-r--r--  arch/arm/mach-mmp/clock.h | 2
-rw-r--r--  arch/arm/mach-msm/include/mach/smp.h | 4
-rw-r--r--  arch/arm/mach-mx25/clock.c | 3
-rw-r--r--  arch/arm/mach-mx3/clock-imx31.c | 2
-rw-r--r--  arch/arm/mach-mx3/clock-imx35.c | 3
-rw-r--r--  arch/arm/mach-mx5/clock-mx51.c | 2
-rw-r--r--  arch/arm/mach-mxc91231/clock.c | 2
-rw-r--r--  arch/arm/mach-nomadik/clock.c | 2
-rw-r--r--  arch/arm/mach-nuc93x/clock.h | 2
-rw-r--r--  arch/arm/mach-omap1/clock.c | 2
-rw-r--r--  arch/arm/mach-omap2/dpll3xxx.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap-hotplug.c | 14
-rw-r--r--  arch/arm/mach-omap2/omap-smp.c | 66
-rw-r--r--  arch/arm/mach-pnx4008/clock.c | 3
-rw-r--r--  arch/arm/mach-pxa/Kconfig | 1
-rw-r--r--  arch/arm/mach-pxa/clock.c | 2
-rw-r--r--  arch/arm/mach-pxa/clock.h | 2
-rw-r--r--  arch/arm/mach-realview/core.c | 3
-rw-r--r--  arch/arm/mach-realview/hotplug.c | 44
-rw-r--r--  arch/arm/mach-realview/include/mach/smp.h | 5
-rw-r--r--  arch/arm/mach-realview/platsmp.c | 114
-rw-r--r--  arch/arm/mach-s3c2412/Kconfig | 2
-rw-r--r--  arch/arm/mach-s5pv310/hotplug.c | 44
-rw-r--r--  arch/arm/mach-s5pv310/include/mach/smp.h | 5
-rw-r--r--  arch/arm/mach-s5pv310/platsmp.c | 66
-rw-r--r--  arch/arm/mach-shmobile/Kconfig | 6
-rw-r--r--  arch/arm/mach-shmobile/clock-sh7367.c | 2
-rw-r--r--  arch/arm/mach-shmobile/clock-sh7372.c | 2
-rw-r--r--  arch/arm/mach-shmobile/clock-sh7377.c | 2
-rw-r--r--  arch/arm/mach-tcc8k/clock.c | 3
-rw-r--r--  arch/arm/mach-tegra/clock.c | 2
-rw-r--r--  arch/arm/mach-tegra/clock.h | 2
-rw-r--r--  arch/arm/mach-tegra/hotplug.c | 44
-rw-r--r--  arch/arm/mach-tegra/include/mach/smp.h | 12
-rw-r--r--  arch/arm/mach-tegra/platsmp.c | 33
-rw-r--r--  arch/arm/mach-tegra/tegra2_clocks.c | 3
-rw-r--r--  arch/arm/mach-u300/clock.c | 2
-rw-r--r--  arch/arm/mach-ux500/clock.c | 3
-rw-r--r--  arch/arm/mach-ux500/headsmp.S | 1
-rw-r--r--  arch/arm/mach-ux500/hotplug.c | 18
-rw-r--r--  arch/arm/mach-ux500/include/mach/smp.h | 5
-rw-r--r--  arch/arm/mach-ux500/platsmp.c | 73
-rw-r--r--  arch/arm/mach-versatile/Kconfig | 1
-rw-r--r--  arch/arm/mach-versatile/core.c | 3
-rw-r--r--  arch/arm/mach-vexpress/Makefile | 1
-rw-r--r--  arch/arm/mach-vexpress/ct-ca9x4.c | 3
-rw-r--r--  arch/arm/mach-vexpress/hotplug.c | 128
-rw-r--r--  arch/arm/mach-vexpress/include/mach/smp.h | 5
-rw-r--r--  arch/arm/mach-vexpress/platsmp.c | 74
-rw-r--r--  arch/arm/mach-vexpress/v2m.c | 3
-rw-r--r--  arch/arm/mach-w90x900/clock.h | 2
-rw-r--r--  arch/arm/mm/Kconfig | 35
-rw-r--r--  arch/arm/mm/dma-mapping.c | 28
-rw-r--r--  arch/arm/mm/ioremap.c | 8
-rw-r--r--  arch/arm/mm/mmu.c | 6
-rw-r--r--  arch/arm/mm/proc-macros.S | 7
-rw-r--r--  arch/arm/mm/proc-v7.S | 9
-rw-r--r--  arch/arm/plat-omap/Kconfig | 4
-rw-r--r--  arch/arm/plat-omap/include/plat/clkdev_omap.h | 2
-rw-r--r--  arch/arm/plat-omap/include/plat/memory.h | 8
-rw-r--r--  arch/arm/plat-omap/include/plat/smp.h | 5
-rw-r--r--  arch/arm/plat-spear/include/plat/clock.h | 2
-rw-r--r--  arch/arm/plat-stmp3xxx/clock.c | 2
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 24
119 files changed, 1514 insertions, 1263 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2e86b8f6210..32cbf3e888f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2,6 +2,7 @@ config ARM
bool
default y
select HAVE_AOUT
+ select HAVE_DMA_API_DEBUG
select HAVE_IDE
select HAVE_MEMBLOCK
select RTC_LIB
@@ -36,6 +37,9 @@ config ARM
config HAVE_PWM
bool
+config MIGHT_HAVE_PCI
+ bool
+
config SYS_SUPPORTS_APM_EMULATION
bool
@@ -226,7 +230,7 @@ config ARCH_INTEGRATOR
bool "ARM Ltd. Integrator family"
select ARM_AMBA
select ARCH_HAS_CPUFREQ
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select ICST
select GENERIC_CLOCKEVENTS
select PLAT_VERSATILE
@@ -236,7 +240,7 @@ config ARCH_INTEGRATOR
config ARCH_REALVIEW
bool "ARM Ltd. RealView family"
select ARM_AMBA
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select HAVE_SCHED_CLOCK
select ICST
select GENERIC_CLOCKEVENTS
@@ -251,7 +255,7 @@ config ARCH_VERSATILE
bool "ARM Ltd. Versatile family"
select ARM_AMBA
select ARM_VIC
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select HAVE_SCHED_CLOCK
select ICST
select GENERIC_CLOCKEVENTS
@@ -266,7 +270,7 @@ config ARCH_VEXPRESS
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_AMBA
select ARM_TIMER_SP804
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
select HAVE_CLK
select HAVE_SCHED_CLOCK
@@ -288,7 +292,7 @@ config ARCH_BCMRING
depends on MMU
select CPU_V6
select ARM_AMBA
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
help
@@ -306,6 +310,7 @@ config ARCH_CNS3XXX
select CPU_V6
select GENERIC_CLOCKEVENTS
select ARM_GIC
+ select MIGHT_HAVE_PCI
select PCI_DOMAINS if PCI
help
Support for Cavium Networks CNS3XXX platform.
@@ -335,7 +340,7 @@ config ARCH_EP93XX
select CPU_ARM920T
select ARM_AMBA
select ARM_VIC
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_USES_GETTIMEOFFSET
@@ -355,14 +360,14 @@ config ARCH_MXC
bool "Freescale MXC/iMX-based"
select GENERIC_CLOCKEVENTS
select ARCH_REQUIRE_GPIOLIB
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
help
Support for Freescale MXC/iMX-based family of processors
config ARCH_STMP3XXX
bool "Freescale STMP3xxx"
select CPU_ARM926T
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
select USB_ARCH_HAS_EHCI
@@ -442,6 +447,7 @@ config ARCH_IXP4XX
select GENERIC_GPIO
select GENERIC_CLOCKEVENTS
select HAVE_SCHED_CLOCK
+ select MIGHT_HAVE_PCI
select DMABOUNCE if PCI
help
Support for Intel's IXP4XX (XScale) family of processors.
@@ -481,7 +487,7 @@ config ARCH_LPC32XX
select HAVE_IDE
select ARM_AMBA
select USB_ARCH_HAS_OHCI
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
help
@@ -515,7 +521,7 @@ config ARCH_MMP
bool "Marvell PXA168/910/MMP2"
depends on MMU
select ARCH_REQUIRE_GPIOLIB
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
select HAVE_SCHED_CLOCK
select TICK_ONESHOT
@@ -549,7 +555,7 @@ config ARCH_W90X900
bool "Nuvoton W90X900 CPU"
select CPU_ARM926T
select ARCH_REQUIRE_GPIOLIB
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
help
Support for Nuvoton (Winbond logic dept.) ARM9 processor,
@@ -563,19 +569,19 @@ config ARCH_W90X900
config ARCH_NUC93X
bool "Nuvoton NUC93X CPU"
select CPU_ARM926T
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
help
Support for Nuvoton (Winbond logic dept.) NUC93X MCU,The NUC93X is a
low-power and high performance MPEG-4/JPEG multimedia controller chip.
config ARCH_TEGRA
bool "NVIDIA Tegra"
+ select CLKDEV_LOOKUP
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select GENERIC_GPIO
select HAVE_CLK
select HAVE_SCHED_CLOCK
- select COMMON_CLKDEV
select ARCH_HAS_BARRIERS if CACHE_L2X0
select ARCH_HAS_CPUFREQ
help
@@ -585,7 +591,7 @@ config ARCH_TEGRA
config ARCH_PNX4008
bool "Philips Nexperia PNX4008 Mobile"
select CPU_ARM926T
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select ARCH_USES_GETTIMEOFFSET
help
This enables support for Philips PNX4008 mobile platform.
@@ -595,7 +601,7 @@ config ARCH_PXA
depends on MMU
select ARCH_MTD_XIP
select ARCH_HAS_CPUFREQ
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
select HAVE_SCHED_CLOCK
@@ -774,7 +780,7 @@ config ARCH_TCC_926
bool "Telechips TCC ARM926-based systems"
select CPU_ARM926T
select HAVE_CLK
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
help
Support for Telechips TCC ARM926-based systems.
@@ -799,7 +805,7 @@ config ARCH_U300
select ARM_AMBA
select ARM_VIC
select GENERIC_CLOCKEVENTS
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select GENERIC_GPIO
help
Support for ST-Ericsson U300 series mobile platforms.
@@ -809,7 +815,7 @@ config ARCH_U8500
select CPU_V7
select ARM_AMBA
select GENERIC_CLOCKEVENTS
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select ARCH_REQUIRE_GPIOLIB
help
Support for ST-Ericsson's Ux500 architecture
@@ -819,7 +825,7 @@ config ARCH_NOMADIK
select ARM_AMBA
select ARM_VIC
select CPU_ARM926T
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
select ARCH_REQUIRE_GPIOLIB
help
@@ -831,7 +837,7 @@ config ARCH_DAVINCI
select ARCH_REQUIRE_GPIOLIB
select ZONE_DMA
select HAVE_IDE
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select GENERIC_ALLOCATOR
select ARCH_HAS_HOLES_MEMORYMODEL
help
@@ -852,7 +858,7 @@ config PLAT_SPEAR
bool "ST SPEAr"
select ARM_AMBA
select ARCH_REQUIRE_GPIOLIB
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
select HAVE_CLK
help
@@ -1034,6 +1040,11 @@ config CPU_HAS_PMU
default y
bool
+config MULTI_IRQ_HANDLER
+ bool
+ help
+ Allow each machine to specify it's own IRQ handler at run time.
+
if !MMU
source "arch/arm/Kconfig-nommu"
endif
@@ -1181,7 +1192,7 @@ config ISA_DMA_API
bool
config PCI
- bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB || ARCH_IXP4XX || ARCH_KS8695 || MACH_ARMCORE || ARCH_CNS3XXX || SA1100_NANOENGINE
+ bool "PCI support" if MIGHT_HAVE_PCI
help
Find out whether you have a PCI motherboard. PCI is the name of a
bus system, i.e. the way the CPU talks to the other stuff inside
@@ -1253,7 +1264,7 @@ config SMP
config SMP_ON_UP
bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
depends on EXPERIMENTAL
- depends on SMP && !XIP && !THUMB2_KERNEL
+ depends on SMP && !XIP
default y
help
SMP kernels contain instructions which fail on non-SMP processors.
@@ -1272,6 +1283,7 @@ config HAVE_ARM_SCU
config HAVE_ARM_TWD
bool
depends on SMP
+ select TICK_ONESHOT
help
This options enables support for the ARM timer and watchdog unit
@@ -1335,7 +1347,7 @@ config HZ
default 100
config THUMB2_KERNEL
- bool "Compile the kernel in Thumb-2 mode"
+ bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL
select AEABI
select ARM_ASM_UNIFIED
@@ -1549,6 +1561,7 @@ config SECCOMP
config CC_STACKPROTECTOR
bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
help
This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of functions, a canary value on
@@ -1745,7 +1758,7 @@ config CPU_FREQ_S3C
Internal configuration node for common cpufreq on Samsung SoC
config CPU_FREQ_S3C24XX
- bool "CPUfreq driver for Samsung S3C24XX series CPUs"
+ bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)"
depends on ARCH_S3C2410 && CPU_FREQ && EXPERIMENTAL
select CPU_FREQ_S3C
help
@@ -1757,7 +1770,7 @@ config CPU_FREQ_S3C24XX
If in doubt, say N.
config CPU_FREQ_S3C24XX_PLL
- bool "Support CPUfreq changing of PLL frequency"
+ bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)"
depends on CPU_FREQ_S3C24XX && EXPERIMENTAL
help
Compile in support for changing the PLL frequency from the
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index eac62085f5b..494224a9b45 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -31,7 +31,7 @@ config FRAME_POINTER
reported is severely limited.
config ARM_UNWIND
- bool "Enable stack unwinding support"
+ bool "Enable stack unwinding support (EXPERIMENTAL)"
depends on AEABI && EXPERIMENTAL
default y
help
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 0a34c818692..778655f0257 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -37,7 +37,3 @@ config SHARP_PARAM
config SHARP_SCOOP
bool
-
-config COMMON_CLKDEV
- bool
- select HAVE_CLK
diff --git a/arch/arm/common/clkdev.c b/arch/arm/common/clkdev.c
deleted file mode 100644
index e2b2bb66e09..00000000000
--- a/arch/arm/common/clkdev.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * arch/arm/common/clkdev.c
- *
- * Copyright (C) 2008 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Helper for the clk API to assist looking up a struct clk.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/string.h>
-#include <linux/mutex.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-
-#include <asm/clkdev.h>
-#include <mach/clkdev.h>
-
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-
-/*
- * Find the correct struct clk for the device and connection ID.
- * We do slightly fuzzy matching here:
- * An entry with a NULL ID is assumed to be a wildcard.
- * If an entry has a device ID, it must match
- * If an entry has a connection ID, it must match
- * Then we take the most specific entry - with the following
- * order of precedence: dev+con > dev only > con only.
- */
-static struct clk *clk_find(const char *dev_id, const char *con_id)
-{
- struct clk_lookup *p;
- struct clk *clk = NULL;
- int match, best = 0;
-
- list_for_each_entry(p, &clocks, node) {
- match = 0;
- if (p->dev_id) {
- if (!dev_id || strcmp(p->dev_id, dev_id))
- continue;
- match += 2;
- }
- if (p->con_id) {
- if (!con_id || strcmp(p->con_id, con_id))
- continue;
- match += 1;
- }
-
- if (match > best) {
- clk = p->clk;
- if (match != 3)
- best = match;
- else
- break;
- }
- }
- return clk;
-}
-
-struct clk *clk_get_sys(const char *dev_id, const char *con_id)
-{
- struct clk *clk;
-
- mutex_lock(&clocks_mutex);
- clk = clk_find(dev_id, con_id);
- if (clk && !__clk_get(clk))
- clk = NULL;
- mutex_unlock(&clocks_mutex);
-
- return clk ? clk : ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get_sys);
-
-struct clk *clk_get(struct device *dev, const char *con_id)
-{
- const char *dev_id = dev ? dev_name(dev) : NULL;
-
- return clk_get_sys(dev_id, con_id);
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
- __clk_put(clk);
-}
-EXPORT_SYMBOL(clk_put);
-
-void clkdev_add(struct clk_lookup *cl)
-{
- mutex_lock(&clocks_mutex);
- list_add_tail(&cl->node, &clocks);
- mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL(clkdev_add);
-
-void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
-{
- mutex_lock(&clocks_mutex);
- while (num--) {
- list_add_tail(&cl->node, &clocks);
- cl++;
- }
- mutex_unlock(&clocks_mutex);
-}
-
-#define MAX_DEV_ID 20
-#define MAX_CON_ID 16
-
-struct clk_lookup_alloc {
- struct clk_lookup cl;
- char dev_id[MAX_DEV_ID];
- char con_id[MAX_CON_ID];
-};
-
-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
- const char *dev_fmt, ...)
-{
- struct clk_lookup_alloc *cla;
-
- cla = kzalloc(sizeof(*cla), GFP_KERNEL);
- if (!cla)
- return NULL;
-
- cla->cl.clk = clk;
- if (con_id) {
- strlcpy(cla->con_id, con_id, sizeof(cla->con_id));
- cla->cl.con_id = cla->con_id;
- }
-
- if (dev_fmt) {
- va_list ap;
-
- va_start(ap, dev_fmt);
- vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
- cla->cl.dev_id = cla->dev_id;
- va_end(ap);
- }
-
- return &cla->cl;
-}
-EXPORT_SYMBOL(clkdev_alloc);
-
-int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
- struct device *dev)
-{
- struct clk *r = clk_get(dev, id);
- struct clk_lookup *l;
-
- if (IS_ERR(r))
- return PTR_ERR(r);
-
- l = clkdev_alloc(r, alias, alias_dev_name);
- clk_put(r);
- if (!l)
- return -ENODEV;
- clkdev_add(l);
- return 0;
-}
-EXPORT_SYMBOL(clk_add_alias);
-
-/*
- * clkdev_drop - remove a clock dynamically allocated
- */
-void clkdev_drop(struct clk_lookup *cl)
-{
- mutex_lock(&clocks_mutex);
- list_del(&cl->node);
- mutex_unlock(&clocks_mutex);
- kfree(cl);
-}
-EXPORT_SYMBOL(clkdev_drop);
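For context, a minimal standalone C sketch of the dev_id/con_id matching precedence that the clk_find() removed above implements (dev+con beats dev-only beats con-only); struct clk_lookup_ex, match_score() and the sample table are illustrative stand-ins for this sketch, not kernel APIs:

/*
 * Sketch of the clk_find() precedence rules, reduced to a scoring
 * function: an entry scores 3 for dev+con, 2 for dev only, 1 for con
 * only, and 0 if any populated field mismatches; the highest score wins.
 */
#include <stdio.h>
#include <string.h>

struct clk_lookup_ex {			/* stand-in for struct clk_lookup */
	const char *dev_id;
	const char *con_id;
	const char *clk_name;		/* stands in for struct clk * */
};

static int match_score(const struct clk_lookup_ex *p,
		       const char *dev_id, const char *con_id)
{
	int match = 0;

	if (p->dev_id) {
		if (!dev_id || strcmp(p->dev_id, dev_id))
			return 0;	/* entry names a device that does not match */
		match += 2;
	}
	if (p->con_id) {
		if (!con_id || strcmp(p->con_id, con_id))
			return 0;	/* entry names a connection that does not match */
		match += 1;
	}
	return match;
}

int main(void)
{
	static const struct clk_lookup_ex table[] = {
		{ NULL,        "uartclk", "clk_a" },	/* con only  */
		{ "dev0.uart", NULL,      "clk_b" },	/* dev only  */
		{ "dev0.uart", "uartclk", "clk_c" },	/* dev + con */
	};
	const char *best_clk = NULL;
	int i, best = 0;

	for (i = 0; i < 3; i++) {
		int m = match_score(&table[i], "dev0.uart", "uartclk");
		if (m > best) {
			best = m;
			best_clk = table[i].clk_name;
		}
	}
	printf("best match: %s (score %d)\n", best_clk, best);	/* clk_c, 3 */
	return 0;
}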
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index cc0a932bbea..e5681636626 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -328,7 +328,7 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction dir)
{
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -338,7 +338,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
return map_single(dev, ptr, size, dir);
}
-EXPORT_SYMBOL(dma_map_single);
+EXPORT_SYMBOL(__dma_map_single);
/*
* see if a mapped address was really a "safe" buffer and if so, copy
@@ -346,7 +346,7 @@ EXPORT_SYMBOL(dma_map_single);
* the safe buffer. (basically return things back to the way they
* should be)
*/
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -354,9 +354,9 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
unmap_single(dev, dma_addr, size, dir);
}
-EXPORT_SYMBOL(dma_unmap_single);
+EXPORT_SYMBOL(__dma_unmap_single);
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
+dma_addr_t __dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
@@ -372,7 +372,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
return map_single(dev, page_address(page) + offset, size, dir);
}
-EXPORT_SYMBOL(dma_map_page);
+EXPORT_SYMBOL(__dma_map_page);
/*
* see if a mapped address was really a "safe" buffer and if so, copy
@@ -380,7 +380,7 @@ EXPORT_SYMBOL(dma_map_page);
* the safe buffer. (basically return things back to the way they
* should be)
*/
-void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -388,7 +388,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
unmap_single(dev, dma_addr, size, dir);
}
-EXPORT_SYMBOL(dma_unmap_page);
+EXPORT_SYMBOL(__dma_unmap_page);
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
unsigned long off, size_t sz, enum dma_data_direction dir)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 749bb662240..bc2d2d75f70 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -18,6 +18,7 @@
#endif
#include <asm/ptrace.h>
+#include <asm/domain.h>
/*
* Endian independent macros for shifting bytes within registers.
@@ -157,16 +158,24 @@
#ifdef CONFIG_SMP
#define ALT_SMP(instr...) \
9998: instr
+/*
+ * Note: if you get assembler errors from ALT_UP() when building with
+ * CONFIG_THUMB2_KERNEL, you almost certainly need to use
+ * ALT_SMP( W(instr) ... )
+ */
#define ALT_UP(instr...) \
.pushsection ".alt.smp.init", "a" ;\
.long 9998b ;\
- instr ;\
+9997: instr ;\
+ .if . - 9997b != 4 ;\
+ .error "ALT_UP() content must assemble to exactly 4 bytes";\
+ .endif ;\
.popsection
#define ALT_UP_B(label) \
.equ up_b_offset, label - 9998b ;\
.pushsection ".alt.smp.init", "a" ;\
.long 9998b ;\
- b . + up_b_offset ;\
+ W(b) . + up_b_offset ;\
.popsection
#else
#define ALT_SMP(instr...)
@@ -177,16 +186,24 @@
/*
* SMP data memory barrier
*/
- .macro smp_dmb
+ .macro smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
+ .ifeqs "\mode","arm"
ALT_SMP(dmb)
+ .else
+ ALT_SMP(W(dmb))
+ .endif
#elif __LINUX_ARM_ARCH__ == 6
ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
#else
#error Incompatible SMP platform
#endif
+ .ifeqs "\mode","arm"
ALT_UP(nop)
+ .else
+ ALT_UP(W(nop))
+ .endif
#endif
.endm
@@ -206,12 +223,12 @@
*/
#ifdef CONFIG_THUMB2_KERNEL
- .macro usraccoff, instr, reg, ptr, inc, off, cond, abort
+ .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
9999:
.if \inc == 1
- \instr\cond\()bt \reg, [\ptr, #\off]
+ \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
.elseif \inc == 4
- \instr\cond\()t \reg, [\ptr, #\off]
+ \instr\cond\()\t\().w \reg, [\ptr, #\off]
.else
.error "Unsupported inc macro argument"
.endif
@@ -246,13 +263,13 @@
#else /* !CONFIG_THUMB2_KERNEL */
- .macro usracc, instr, reg, ptr, inc, cond, rept, abort
+ .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
.rept \rept
9999:
.if \inc == 1
- \instr\cond\()bt \reg, [\ptr], #\inc
+ \instr\cond\()b\()\t \reg, [\ptr], #\inc
.elseif \inc == 4
- \instr\cond\()t \reg, [\ptr], #\inc
+ \instr\cond\()\t \reg, [\ptr], #\inc
.else
.error "Unsupported inc macro argument"
.endif
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 9d6122096fb..75fe66bc02b 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -23,4 +23,6 @@
#define ARCH_SLAB_MINALIGN 8
#endif
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
#endif
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index b56c1389b6f..765d3322236 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -12,23 +12,13 @@
#ifndef __ASM_CLKDEV_H
#define __ASM_CLKDEV_H
-struct clk;
-struct device;
+#include <linux/slab.h>
-struct clk_lookup {
- struct list_head node;
- const char *dev_id;
- const char *con_id;
- struct clk *clk;
-};
+#include <mach/clkdev.h>
-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
- const char *dev_fmt, ...);
-
-void clkdev_add(struct clk_lookup *cl);
-void clkdev_drop(struct clk_lookup *cl);
-
-void clkdev_add_table(struct clk_lookup *, size_t);
-int clk_add_alias(const char *, const char *, char *, struct device *);
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+ return kzalloc(size, GFP_KERNEL);
+}
#endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c568da7dcae..4fff837363e 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,24 +5,29 @@
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
+#ifdef __arch_page_to_dma
+#error Please update to __arch_pfn_to_dma
+#endif
+
/*
- * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
- * used internally by the DMA-mapping API to provide DMA addresses. They
- * must not be used by drivers.
+ * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
+ * functions used internally by the DMA-mapping API to provide DMA
+ * addresses. They must not be used by drivers.
*/
-#ifndef __arch_page_to_dma
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+#ifndef __arch_pfn_to_dma
+static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
- return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
+ return (dma_addr_t)__pfn_to_bus(pfn);
}
-static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
- return pfn_to_page(__bus_to_pfn(addr));
+ return __bus_to_pfn(addr);
}
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
@@ -35,14 +40,14 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
- return __arch_page_to_dma(dev, page);
+ return __arch_pfn_to_dma(dev, pfn);
}
-static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
- return __arch_dma_to_page(dev, addr);
+ return __arch_dma_to_pfn(dev, addr);
}
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
@@ -293,13 +298,13 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
/*
* The DMA API, implemented by dmabounce.c. See below for descriptions.
*/
-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
enum dma_data_direction);
-extern dma_addr_t dma_map_page(struct device *, struct page *,
+extern dma_addr_t __dma_map_page(struct device *, struct page *,
unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
enum dma_data_direction);
/*
@@ -323,6 +328,34 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
}
+static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_single_cpu_to_dev(cpu_addr, size, dir);
+ return virt_to_dma(dev, cpu_addr);
+}
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+ return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+ handle & ~PAGE_MASK, size, dir);
+}
+#endif /* CONFIG_DMABOUNCE */
+
/**
* dma_map_single - map a single buffer for streaming DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -340,11 +373,16 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
size_t size, enum dma_data_direction dir)
{
+ dma_addr_t addr;
+
BUG_ON(!valid_dma_direction(dir));
- __dma_single_cpu_to_dev(cpu_addr, size, dir);
+ addr = __dma_map_single(dev, cpu_addr, size, dir);
+ debug_dma_map_page(dev, virt_to_page(cpu_addr),
+ (unsigned long)cpu_addr & ~PAGE_MASK, size,
+ dir, addr, true);
- return virt_to_dma(dev, cpu_addr);
+ return addr;
}
/**
@@ -364,11 +402,14 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
+ dma_addr_t addr;
+
BUG_ON(!valid_dma_direction(dir));
- __dma_page_cpu_to_dev(page, offset, size, dir);
+ addr = __dma_map_page(dev, page, offset, size, dir);
+ debug_dma_map_page(dev, page, offset, size, dir, addr, false);
- return page_to_dma(dev, page) + offset;
+ return addr;
}
/**
@@ -388,7 +429,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
- __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+ debug_dma_unmap_page(dev, handle, size, dir, true);
+ __dma_unmap_single(dev, handle, size, dir);
}
/**
@@ -408,10 +450,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
- __dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
- size, dir);
+ debug_dma_unmap_page(dev, handle, size, dir, false);
+ __dma_unmap_page(dev, handle, size, dir);
}
-#endif /* CONFIG_DMABOUNCE */
/**
* dma_sync_single_range_for_cpu
@@ -437,6 +478,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
{
BUG_ON(!valid_dma_direction(dir));
+ debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+
if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
return;
@@ -449,6 +492,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
{
BUG_ON(!valid_dma_direction(dir));
+ debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
+
if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
return;
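To show the layering the dma-mapping.h hunks above set up -- the public dma_map_single()-style wrappers now do the debug_dma_* bookkeeping and delegate the real work to a double-underscore implementation (the inline versions here, or the dmabounce ones) -- here is a plain-C sketch of that shape; every fake_* name is an illustrative stand-in, not a kernel symbol:

/*
 * Wrapper-plus-hook layering: the public entry point calls the low-level
 * mapping helper, then reports the result to a debug layer.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef unsigned long fake_dma_addr_t;

static fake_dma_addr_t __fake_dma_map_single(void *cpu_addr, size_t size)
{
	/* stands in for cache maintenance plus virt_to_dma() */
	return (fake_dma_addr_t)(uintptr_t)cpu_addr;
}

static void fake_debug_dma_map(fake_dma_addr_t addr, size_t size)
{
	/* stands in for debug_dma_map_page() bookkeeping */
	printf("debug: mapped %zu bytes -> %#lx\n", size, addr);
}

static fake_dma_addr_t fake_dma_map_single(void *cpu_addr, size_t size)
{
	fake_dma_addr_t addr = __fake_dma_map_single(cpu_addr, size);

	fake_debug_dma_map(addr, size);
	return addr;
}

int main(void)
{
	char buf[64];

	fake_dma_map_single(buf, sizeof(buf));
	return 0;
}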
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index cc7ef408071..af18ceaacf5 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -45,13 +45,17 @@
*/
#define DOMAIN_NOACCESS 0
#define DOMAIN_CLIENT 1
+#ifdef CONFIG_CPU_USE_DOMAINS
#define DOMAIN_MANAGER 3
+#else
+#define DOMAIN_MANAGER 1
+#endif
#define domain_val(dom,type) ((type) << (2*(dom)))
#ifndef __ASSEMBLY__
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_USE_DOMAINS
#define set_domain(x) \
do { \
__asm__ __volatile__( \
@@ -74,5 +78,28 @@
#define modify_domain(dom,type) do { } while (0)
#endif
+/*
+ * Generate the T (user) versions of the LDR/STR and related
+ * instructions (inline assembly)
+ */
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define T(instr) #instr "t"
+#else
+#define T(instr) #instr
#endif
-#endif /* !__ASSEMBLY__ */
+
+#else /* __ASSEMBLY__ */
+
+/*
+ * Generate the T (user) versions of the LDR/STR and related
+ * instructions
+ */
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define T(instr) instr ## t
+#else
+#define T(instr) instr
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* !__ASM_PROC_DOMAIN_H */
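To make the T() convention above concrete, a tiny self-contained sketch (C side only) of how the string-pasting variant appends the "t" (user/translated-access) suffix to a mnemonic when domain switching is in use; MY_CONFIG_CPU_USE_DOMAINS stands in for the real CONFIG_CPU_USE_DOMAINS so the snippet builds on its own:

/*
 * With domains enabled, T(ldr) pastes to "ldr" "t" == "ldrt"; without,
 * it stays "ldr".  This mirrors the futex.h/uaccess.h usage, e.g.
 * "1: " T(ldr) " %1, [%2]\n".
 */
#include <stdio.h>

#define MY_CONFIG_CPU_USE_DOMAINS 1

#ifdef MY_CONFIG_CPU_USE_DOMAINS
#define T(instr)	#instr "t"
#else
#define T(instr)	#instr
#endif

int main(void)
{
	const char *insn = "1: " T(ldr) " %1, [%2]\n";

	fputs(insn, stdout);	/* prints "1: ldrt %1, [%2]" here */
	return 0;
}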
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
new file mode 100644
index 00000000000..ec0bbf79c71
--- /dev/null
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -0,0 +1,44 @@
+/*
+ * Interrupt handling. Preserves r7, r8, r9
+ */
+ .macro arch_irq_handler_default
+ get_irqnr_preamble r5, lr
+1: get_irqnr_and_base r0, r6, r5, lr
+ movne r1, sp
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ adrne lr, BSYM(1b)
+ bne asm_do_IRQ
+
+#ifdef CONFIG_SMP
+ /*
+ * XXX
+ *
+ * this macro assumes that irqstat (r6) and base (r5) are
+ * preserved from get_irqnr_and_base above
+ */
+ ALT_SMP(test_for_ipi r0, r6, r5, lr)
+ ALT_UP_B(9997f)
+ movne r1, sp
+ adrne lr, BSYM(1b)
+ bne do_IPI
+
+#ifdef CONFIG_LOCAL_TIMERS
+ test_for_ltirq r0, r6, r5, lr
+ movne r0, sp
+ adrne lr, BSYM(1b)
+ bne do_local_timer
+#endif
+#endif
+9997:
+ .endm
+
+ .macro arch_irq_handler, symbol_name
+ .align 5
+ .global \symbol_name
+\symbol_name:
+ mov r4, lr
+ arch_irq_handler_default
+ mov pc, r4
+ .endm
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 540a044153a..b33fe7065b3 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -13,12 +13,13 @@
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
+#include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile__( \
- "1: ldrt %1, [%2]\n" \
+ "1: " T(ldr) " %1, [%2]\n" \
" " insn "\n" \
- "2: strt %0, [%2]\n" \
+ "2: " T(str) " %0, [%2]\n" \
" mov %0, #0\n" \
"3:\n" \
" .pushsection __ex_table,\"a\"\n" \
@@ -97,10 +98,10 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
pagefault_disable(); /* implies preempt_disable() */
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
- "1: ldrt %0, [%3]\n"
+ "1: " T(ldr) " %0, [%3]\n"
" teq %0, %1\n"
" it eq @ explicit IT needed for the 2b label\n"
- "2: streqt %2, [%3]\n"
+ "2: " T(streq) " %2, [%3]\n"
"3:\n"
" .pushsection __ex_table,\"a\"\n"
" .align 3\n"
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 6d7485aff95..89ad1805e57 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,13 +5,31 @@
#include <linux/threads.h>
#include <asm/irq.h>
+#define NR_IPI 5
+
typedef struct {
unsigned int __softirq_pending;
+#ifdef CONFIG_LOCAL_TIMERS
unsigned int local_timer_irqs;
+#endif
+#ifdef CONFIG_SMP
+ unsigned int ipi_irqs[NR_IPI];
+#endif
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
+#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
+
+#ifdef CONFIG_SMP
+u64 smp_irq_stat_cpu(unsigned int cpu);
+#else
+#define smp_irq_stat_cpu(cpu) 0
+#endif
+
+#define arch_irq_stat_cpu smp_irq_stat_cpu
+
#if NR_IRQS > 512
#define HARDIRQ_BITS 10
#elif NR_IRQS > 256
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
index 50c7e7cfd67..6bc63ab498c 100644
--- a/arch/arm/include/asm/localtimer.h
+++ b/arch/arm/include/asm/localtimer.h
@@ -30,7 +30,6 @@ asmlinkage void do_local_timer(struct pt_regs *);
#include "smp_twd.h"
#define local_timer_ack() twd_timer_ack()
-#define local_timer_stop() twd_timer_stop()
#else
@@ -40,11 +39,6 @@ asmlinkage void do_local_timer(struct pt_regs *);
*/
int local_timer_ack(void);
-/*
- * Stop a local timer interrupt.
- */
-void local_timer_stop(void);
-
#endif
/*
@@ -52,12 +46,6 @@ void local_timer_stop(void);
*/
void local_timer_setup(struct clock_event_device *);
-#else
-
-static inline void local_timer_stop(void)
-{
-}
-
#endif
#endif
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index d97a964207f..3a0893a76a3 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -37,12 +37,21 @@ struct machine_desc {
struct meminfo *);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */
+ void (*init_early)(void);
void (*init_irq)(void);
struct sys_timer *timer; /* system tick timer */
void (*init_machine)(void);
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+ void (*handle_irq)(struct pt_regs *);
+#endif
};
/*
+ * Current machine - only accessible during boot.
+ */
+extern struct machine_desc *machine_desc;
+
+/*
* Set of macros to define architecture features. This is built into
* a table by the linker.
*/
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index ce3eee9fe26..22ac140edd9 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -17,10 +17,12 @@ struct seq_file;
/*
* This is internal. Do not use it.
*/
-extern unsigned int arch_nr_irqs;
-extern void (*init_arch_irq)(void);
extern void init_FIQ(void);
-extern int show_fiq_list(struct seq_file *, void *);
+extern int show_fiq_list(struct seq_file *, int);
+
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+extern void (*handle_arch_irq)(struct pt_regs *);
+#endif
/*
* This is for easy migration, but should be changed in the source
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 35d408f6dcc..883f6be5117 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -43,7 +43,6 @@ struct sys_timer {
#endif
};
-extern struct sys_timer *system_timer;
extern void timer_tick(void);
#endif
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 3d05190797c..96ed521f240 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -33,27 +33,23 @@ struct seq_file;
/*
* generate IPI list text
*/
-extern void show_ipi_list(struct seq_file *p);
+extern void show_ipi_list(struct seq_file *, int);
/*
* Called from assembly code, this handles an IPI.
*/
-asmlinkage void do_IPI(struct pt_regs *regs);
+asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
/*
* Setup the set of possible CPUs (via set_cpu_possible)
*/
extern void smp_init_cpus(void);
-/*
- * Move global data into per-processor storage.
- */
-extern void smp_store_cpu_info(unsigned int cpuid);
/*
* Raise an IPI cross call on CPUs in callmap.
*/
-extern void smp_cross_call(const struct cpumask *mask);
+extern void smp_cross_call(const struct cpumask *mask, int ipi);
/*
* Boot a secondary CPU, and assign it the specified idle task.
@@ -73,6 +69,11 @@ asmlinkage void secondary_start_kernel(void);
extern void platform_secondary_init(unsigned int cpu);
/*
+ * Initialize cpu_possible map, and enable coherency
+ */
+extern void platform_smp_prepare_cpus(unsigned int);
+
+/*
* Initial data for bringing up a secondary CPU.
*/
struct secondary_data {
@@ -97,6 +98,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
/*
* show local interrupt info
*/
-extern void show_local_irqs(struct seq_file *);
+extern void show_local_irqs(struct seq_file *, int);
#endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/arch/arm/include/asm/smp_mpidr.h b/arch/arm/include/asm/smp_mpidr.h
deleted file mode 100644
index 6a9307d6490..00000000000
--- a/arch/arm/include/asm/smp_mpidr.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef ASMARM_SMP_MIDR_H
-#define ASMARM_SMP_MIDR_H
-
-#define hard_smp_processor_id() \
- ({ \
- unsigned int cpunum; \
- __asm__("\n" \
- "1: mrc p15, 0, %0, c0, c0, 5\n" \
- " .pushsection \".alt.smp.init\", \"a\"\n"\
- " .long 1b\n" \
- " mov %0, #0\n" \
- " .popsection" \
- : "=r" (cpunum)); \
- cpunum &= 0x0F; \
- })
-
-#endif
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
index 634f357be6b..fed9981fba0 100644
--- a/arch/arm/include/asm/smp_twd.h
+++ b/arch/arm/include/asm/smp_twd.h
@@ -22,7 +22,6 @@ struct clock_event_device;
extern void __iomem *twd_base;
-void twd_timer_stop(void);
int twd_timer_ack(void);
void twd_timer_setup(struct clock_event_device *);
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 3222ab8b344..97f6d60297d 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -124,6 +124,13 @@ extern unsigned int user_debug;
#define vectors_high() (0)
#endif
+#if __LINUX_ARM_ARCH__ >= 7 || \
+ (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
+#define sev() __asm__ __volatile__ ("sev" : : : "memory")
+#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
+#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
+#endif
+
#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 124475afb00..1b960d5ef6a 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -46,4 +46,6 @@ static inline int in_exception_text(unsigned long ptr)
extern void __init early_trap_init(void);
extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
+extern void *vectors_page;
+
#endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 33e4a48fe10..b293616a1a1 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -227,7 +227,7 @@ do { \
#define __get_user_asm_byte(x,addr,err) \
__asm__ __volatile__( \
- "1: ldrbt %1,[%2]\n" \
+ "1: " T(ldrb) " %1,[%2],#0\n" \
"2:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -263,7 +263,7 @@ do { \
#define __get_user_asm_word(x,addr,err) \
__asm__ __volatile__( \
- "1: ldrt %1,[%2]\n" \
+ "1: " T(ldr) " %1,[%2],#0\n" \
"2:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -308,7 +308,7 @@ do { \
#define __put_user_asm_byte(x,__pu_addr,err) \
__asm__ __volatile__( \
- "1: strbt %1,[%2]\n" \
+ "1: " T(strb) " %1,[%2],#0\n" \
"2:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -341,7 +341,7 @@ do { \
#define __put_user_asm_word(x,__pu_addr,err) \
__asm__ __volatile__( \
- "1: strt %1,[%2]\n" \
+ "1: " T(str) " %1,[%2],#0\n" \
"2:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -366,10 +366,10 @@ do { \
#define __put_user_asm_dword(x,__pu_addr,err) \
__asm__ __volatile__( \
- ARM( "1: strt " __reg_oper1 ", [%1], #4\n" ) \
- ARM( "2: strt " __reg_oper0 ", [%1]\n" ) \
- THUMB( "1: strt " __reg_oper1 ", [%1]\n" ) \
- THUMB( "2: strt " __reg_oper0 ", [%1, #4]\n" ) \
+ ARM( "1: " T(str) " " __reg_oper1 ", [%1], #4\n" ) \
+ ARM( "2: " T(str) " " __reg_oper0 ", [%1]\n" ) \
+ THUMB( "1: " T(str) " " __reg_oper1 ", [%1]\n" ) \
+ THUMB( "2: " T(str) " " __reg_oper0 ", [%1, #4]\n" ) \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index fd3ec49bfba..7c33e6f29bc 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -30,7 +30,7 @@ obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
-obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
@@ -44,6 +44,8 @@ obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_ARM_UNWIND) += unwind.o
obj-$(CONFIG_HAVE_TCM) += tcm.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
+CFLAGS_swp_emulate.o := -Wa,-march=armv7-a
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bb96a7d4bbf..27f64489c1c 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -25,42 +25,22 @@
#include <asm/tls.h>
#include "entry-header.S"
+#include <asm/entry-macro-multi.S>
/*
* Interrupt handling. Preserves r7, r8, r9
*/
.macro irq_handler
- get_irqnr_preamble r5, lr
-1: get_irqnr_and_base r0, r6, r5, lr
- movne r1, sp
- @
- @ routine called with r0 = irq number, r1 = struct pt_regs *
- @
- adrne lr, BSYM(1b)
- bne asm_do_IRQ
-
-#ifdef CONFIG_SMP
- /*
- * XXX
- *
- * this macro assumes that irqstat (r6) and base (r5) are
- * preserved from get_irqnr_and_base above
- */
- ALT_SMP(test_for_ipi r0, r6, r5, lr)
- ALT_UP_B(9997f)
- movne r0, sp
- adrne lr, BSYM(1b)
- bne do_IPI
-
-#ifdef CONFIG_LOCAL_TIMERS
- test_for_ltirq r0, r6, r5, lr
- movne r0, sp
- adrne lr, BSYM(1b)
- bne do_local_timer
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+ ldr r5, =handle_arch_irq
+ mov r0, sp
+ ldr r5, [r5]
+ adr lr, BSYM(9997f)
+ teq r5, #0
+ movne pc, r5
#endif
+ arch_irq_handler_default
9997:
-#endif
-
.endm
#ifdef CONFIG_KPROBES
@@ -735,7 +715,7 @@ ENTRY(__switch_to)
THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
THUMB( str sp, [ip], #4 )
THUMB( str lr, [ip], #4 )
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_USE_DOMAINS
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
set_tls r3, r4, r5
@@ -744,7 +724,7 @@ ENTRY(__switch_to)
ldr r8, =__stack_chk_guard
ldr r7, [r7, #TSK_STACK_CANARY]
#endif
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_USE_DOMAINS
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#endif
mov r5, r0
@@ -842,7 +822,7 @@ __kuser_helper_start:
*/
__kuser_memory_barrier: @ 0xffff0fa0
- smp_dmb
+ smp_dmb arm
usr_ret lr
.align 5
@@ -959,7 +939,7 @@ kuser_cmpxchg_fixup:
#else
- smp_dmb
+ smp_dmb arm
1: ldrex r3, [r2]
subs r3, r3, r0
strexeq r3, r1, [r2]
@@ -1245,3 +1225,9 @@ cr_alignment:
.space 4
cr_no_alignment:
.space 4
+
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+ .globl handle_arch_irq
+handle_arch_irq:
+ .space 4
+#endif
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 6ff7919613d..e72dc34eea1 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -45,6 +45,7 @@
#include <asm/fiq.h>
#include <asm/irq.h>
#include <asm/system.h>
+#include <asm/traps.h>
static unsigned long no_fiq_insn;
@@ -67,17 +68,22 @@ static struct fiq_handler default_owner = {
static struct fiq_handler *current_fiq = &default_owner;
-int show_fiq_list(struct seq_file *p, void *v)
+int show_fiq_list(struct seq_file *p, int prec)
{
if (current_fiq != &default_owner)
- seq_printf(p, "FIQ: %s\n", current_fiq->name);
+ seq_printf(p, "%*s: %s\n", prec, "FIQ",
+ current_fiq->name);
return 0;
}
void set_fiq_handler(void *start, unsigned int length)
{
+#if defined(CONFIG_CPU_USE_DOMAINS)
memcpy((void *)0xffff001c, start, length);
+#else
+ memcpy(vectors_page + 0x1c, start, length);
+#endif
flush_icache_range(0xffff001c, 0xffff001c + length);
if (!vectors_high())
flush_icache_range(0x1c, 0x1c + length);
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 6bd82d25683..f17d9a09e8f 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -91,6 +91,11 @@ ENTRY(stext)
movs r8, r5 @ invalid machine (r5=0)?
THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_a @ yes, error 'a'
+
+ /*
+ * r1 = machine no, r2 = atags,
+ * r8 = machinfo, r9 = cpuid, r10 = procinfo
+ */
bl __vet_atags
#ifdef CONFIG_SMP_ON_UP
bl __fixup_smp
@@ -387,19 +392,19 @@ ENDPROC(__turn_mmu_on)
#ifdef CONFIG_SMP_ON_UP
__fixup_smp:
- mov r7, #0x00070000
- orr r6, r7, #0xff000000 @ mask 0xff070000
- orr r7, r7, #0x41000000 @ val 0x41070000
- and r0, r9, r6
- teq r0, r7 @ ARM CPU and ARMv6/v7?
+ mov r4, #0x00070000
+ orr r3, r4, #0xff000000 @ mask 0xff070000
+ orr r4, r4, #0x41000000 @ val 0x41070000
+ and r0, r9, r3
+ teq r0, r4 @ ARM CPU and ARMv6/v7?
bne __fixup_smp_on_up @ no, assume UP
- orr r6, r6, #0x0000ff00
- orr r6, r6, #0x000000f0 @ mask 0xff07fff0
- orr r7, r7, #0x0000b000
- orr r7, r7, #0x00000020 @ val 0x4107b020
- and r0, r9, r6
- teq r0, r7 @ ARM 11MPCore?
+ orr r3, r3, #0x0000ff00
+ orr r3, r3, #0x000000f0 @ mask 0xff07fff0
+ orr r4, r4, #0x0000b000
+ orr r4, r4, #0x00000020 @ val 0x4107b020
+ and r0, r9, r3
+ teq r0, r4 @ ARM 11MPCore?
moveq pc, lr @ yes, assume SMP
mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
@@ -408,15 +413,22 @@ __fixup_smp:
__fixup_smp_on_up:
adr r0, 1f
- ldmia r0, {r3, r6, r7}
+ ldmia r0, {r3 - r5}
sub r3, r0, r3
- add r6, r6, r3
- add r7, r7, r3
-2: cmp r6, r7
- ldmia r6!, {r0, r4}
- strlo r4, [r0, r3]
- blo 2b
- mov pc, lr
+ add r4, r4, r3
+ add r5, r5, r3
+2: cmp r4, r5
+ movhs pc, lr
+ ldmia r4!, {r0, r6}
+ ARM( str r6, [r0, r3] )
+ THUMB( add r0, r0, r3 )
+#ifdef __ARMEB__
+ THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
+#endif
+ THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
+ THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
+ THUMB( strh r6, [r0] )
+ b 2b
ENDPROC(__fixup_smp)
.align
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 6d616333340..8135438b881 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -38,6 +38,7 @@
#include <linux/ftrace.h>
#include <asm/system.h>
+#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
@@ -48,8 +49,6 @@
#define irq_finish(irq) do { } while (0)
#endif
-unsigned int arch_nr_irqs;
-void (*init_arch_irq)(void) __initdata = NULL;
unsigned long irq_err_count;
int show_interrupts(struct seq_file *p, void *v)
@@ -58,11 +57,20 @@ int show_interrupts(struct seq_file *p, void *v)
struct irq_desc *desc;
struct irqaction * action;
unsigned long flags;
+ int prec, n;
+
+ for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++)
+ n *= 10;
+
+#ifdef CONFIG_SMP
+ if (prec < 4)
+ prec = 4;
+#endif
if (i == 0) {
char cpuname[12];
- seq_printf(p, " ");
+ seq_printf(p, "%*s ", prec, "");
for_each_present_cpu(cpu) {
sprintf(cpuname, "CPU%d", cpu);
seq_printf(p, " %10s", cpuname);
@@ -77,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (!action)
goto unlock;
- seq_printf(p, "%3d: ", i);
+ seq_printf(p, "%*d: ", prec, i);
for_each_present_cpu(cpu)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
seq_printf(p, " %10s", desc->chip->name ? : "-");
@@ -90,13 +98,15 @@ unlock:
raw_spin_unlock_irqrestore(&desc->lock, flags);
} else if (i == nr_irqs) {
#ifdef CONFIG_FIQ
- show_fiq_list(p, v);
+ show_fiq_list(p, prec);
#endif
#ifdef CONFIG_SMP
- show_ipi_list(p);
- show_local_irqs(p);
+ show_ipi_list(p, prec);
+#endif
+#ifdef CONFIG_LOCAL_TIMERS
+ show_local_irqs(p, prec);
#endif
- seq_printf(p, "Err: %10lu\n", irq_err_count);
+ seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
}
return 0;
}
@@ -156,13 +166,13 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
void __init init_IRQ(void)
{
- init_arch_irq();
+ machine_desc->init_irq();
}
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
- nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
+ nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
return nr_irqs;
}
#endif
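The show_interrupts() hunk above sizes the label column at run time instead of hard-coding "%3d". A standalone sketch of that width computation (irq_label_width() is an illustrative name, not a kernel function): at least three columns, one more per extra decimal digit in nr_irqs (capped at ten), and at least four on SMP so the "IPIn" rows line up:

/*
 * Field width for the leftmost /proc/interrupts column, as computed in
 * the patched show_interrupts().
 */
#include <stdio.h>

static int irq_label_width(int nr_irqs, int smp)
{
	int prec, n;

	for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++)
		n *= 10;

	if (smp && prec < 4)
		prec = 4;

	return prec;
}

int main(void)
{
	printf("%d\n", irq_label_width(96, 0));		/* 3 */
	printf("%d\n", irq_label_width(1024, 0));	/* 4 */
	printf("%d\n", irq_label_width(96, 1));		/* 4 */
	return 0;
}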
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 336f14e0e5c..3455ad33de4 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -75,9 +75,9 @@ extern void reboot_setup(char *str);
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
-unsigned int __machine_arch_type;
+unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
-unsigned int cacheid;
+unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);
unsigned int __atags_pointer __initdata;
@@ -91,24 +91,24 @@ EXPORT_SYMBOL(system_serial_low);
unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);
-unsigned int elf_hwcap;
+unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);
#ifdef MULTI_CPU
-struct processor processor;
+struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
-struct cpu_tlb_fns cpu_tlb;
+struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
-struct cpu_user_fns cpu_user;
+struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
-struct cpu_cache_fns cpu_cache;
+struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
-struct outer_cache_fns outer_cache;
+struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif
@@ -126,6 +126,7 @@ EXPORT_SYMBOL(elf_platform);
static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
+struct machine_desc *machine_desc __initdata;
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
@@ -708,13 +709,11 @@ static struct init_tags {
{ 0, ATAG_NONE }
};
-static void (*init_machine)(void) __initdata;
-
static int __init customize_machine(void)
{
/* customizes platform devices, or adds new ones */
- if (init_machine)
- init_machine();
+ if (machine_desc->init_machine)
+ machine_desc->init_machine();
return 0;
}
arch_initcall(customize_machine);
@@ -809,6 +808,7 @@ void __init setup_arch(char **cmdline_p)
setup_processor();
mdesc = setup_machine(machine_arch_type);
+ machine_desc = mdesc;
machine_name = mdesc->name;
if (mdesc->soft_reboot)
@@ -868,13 +868,9 @@ void __init setup_arch(char **cmdline_p)
cpu_init();
tcm_init();
- /*
- * Set up various architecture-specific pointers
- */
- arch_nr_irqs = mdesc->nr_irqs;
- init_arch_irq = mdesc->init_irq;
- system_timer = mdesc->timer;
- init_machine = mdesc->init_machine;
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+ handle_arch_irq = mdesc->handle_irq;
+#endif
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
@@ -884,6 +880,9 @@ void __init setup_arch(char **cmdline_p)
#endif
#endif
early_trap_init();
+
+ if (mdesc->init_early)
+ mdesc->init_early();
}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b6b78b22031..5ec79b4ff95 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -25,6 +25,7 @@
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
+#include <linux/completion.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
@@ -38,7 +39,6 @@
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
-#include <asm/smp_plat.h>
/*
* as from 2.5, kernels no longer have an init_tasks structure
@@ -47,22 +47,8 @@
*/
struct secondary_data secondary_data;
-/*
- * structures for inter-processor calls
- * - A collection of single bit ipi messages.
- */
-struct ipi_data {
- spinlock_t lock;
- unsigned long ipi_count;
- unsigned long bits;
-};
-
-static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
- .lock = SPIN_LOCK_UNLOCKED,
-};
-
enum ipi_msg_type {
- IPI_TIMER,
+ IPI_TIMER = 2,
IPI_RESCHEDULE,
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
@@ -178,8 +164,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
barrier();
}
- if (!cpu_online(cpu))
+ if (!cpu_online(cpu)) {
+ pr_crit("CPU%u: failed to come online\n", cpu);
ret = -EIO;
+ }
+ } else {
+ pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
}
secondary_data.stack = NULL;
@@ -195,18 +185,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
pgd_free(&init_mm, pgd);
- if (ret) {
- printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
-
- /*
- * FIXME: We need to clean up the new idle thread. --rmk
- */
- }
-
return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
+static void percpu_timer_stop(void);
+
/*
* __cpu_disable runs on the processor to be shutdown.
*/
@@ -234,7 +218,7 @@ int __cpu_disable(void)
/*
* Stop the local timer for this CPU.
*/
- local_timer_stop();
+ percpu_timer_stop();
/*
* Flush user cache and TLB mappings, and then remove this CPU
@@ -253,12 +237,20 @@ int __cpu_disable(void)
return 0;
}
+static DECLARE_COMPLETION(cpu_died);
+
/*
* called on the thread which is asking for a CPU to be shutdown -
* waits until shutdown has completed, or it is timed out.
*/
void __cpu_die(unsigned int cpu)
{
+ if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+ pr_err("CPU%u: cpu didn't die\n", cpu);
+ return;
+ }
+ printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+
if (!platform_cpu_kill(cpu))
printk("CPU%u: unable to kill\n", cpu);
}
@@ -275,12 +267,17 @@ void __ref cpu_die(void)
{
unsigned int cpu = smp_processor_id();
- local_irq_disable();
idle_task_exit();
+ local_irq_disable();
+ mb();
+
+ /* Tell __cpu_die() that this CPU is now safe to dispose of */
+ complete(&cpu_died);
+
/*
* actual CPU shutdown procedure is at least platform (if not
- * CPU) specific
+ * CPU) specific.
*/
platform_cpu_die(cpu);
@@ -290,6 +287,7 @@ void __ref cpu_die(void)
* to be repeated to undo the effects of taking the CPU offline.
*/
__asm__("mov sp, %0\n"
+ " mov fp, #0\n"
" b secondary_start_kernel"
:
: "r" (task_stack_page(current) + THREAD_SIZE - 8));
@@ -297,6 +295,17 @@ void __ref cpu_die(void)
#endif /* CONFIG_HOTPLUG_CPU */
/*
+ * Called by both boot and secondaries to move global data into
+ * per-processor storage.
+ */
+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+ struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
+
+ cpu_info->loops_per_jiffy = loops_per_jiffy;
+}
+
+/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
*/
@@ -320,6 +329,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
cpu_init();
preempt_disable();
+ trace_hardirqs_off();
/*
* Give the platform a chance to do its own initialisation.
@@ -353,17 +363,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
cpu_idle();
}
-/*
- * Called by both boot and secondaries to move global data into
- * per-processor storage.
- */
-void __cpuinit smp_store_cpu_info(unsigned int cpuid)
-{
- struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
-
- cpu_info->loops_per_jiffy = loops_per_jiffy;
-}
-
void __init smp_cpus_done(unsigned int max_cpus)
{
int cpu;
@@ -386,61 +385,80 @@ void __init smp_prepare_boot_cpu(void)
per_cpu(cpu_data, cpu).idle = current;
}
-static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
+void __init smp_prepare_cpus(unsigned int max_cpus)
{
- unsigned long flags;
- unsigned int cpu;
-
- local_irq_save(flags);
+ unsigned int ncores = num_possible_cpus();
- for_each_cpu(cpu, mask) {
- struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
-
- spin_lock(&ipi->lock);
- ipi->bits |= 1 << msg;
- spin_unlock(&ipi->lock);
- }
+ smp_store_cpu_info(smp_processor_id());
/*
- * Call the platform specific cross-CPU call function.
+ * are we trying to boot more cores than exist?
*/
- smp_cross_call(mask);
+ if (max_cpus > ncores)
+ max_cpus = ncores;
+
+ if (max_cpus > 1) {
+ /*
+ * Enable the local timer or broadcast device for the
+ * boot CPU, but only if we have more than one CPU.
+ */
+ percpu_timer_setup();
- local_irq_restore(flags);
+ /*
+ * Initialise the SCU if there is more than one CPU
+ * and let them know where to start.
+ */
+ platform_smp_prepare_cpus(max_cpus);
+ }
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- send_ipi_message(mask, IPI_CALL_FUNC);
+ smp_cross_call(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
- send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
-void show_ipi_list(struct seq_file *p)
+static const char *ipi_types[NR_IPI] = {
+#define S(x,s) [x - IPI_TIMER] = s
+ S(IPI_TIMER, "Timer broadcast interrupts"),
+ S(IPI_RESCHEDULE, "Rescheduling interrupts"),
+ S(IPI_CALL_FUNC, "Function call interrupts"),
+ S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
+ S(IPI_CPU_STOP, "CPU stop interrupts"),
+};
+
+void show_ipi_list(struct seq_file *p, int prec)
{
- unsigned int cpu;
+ unsigned int cpu, i;
- seq_puts(p, "IPI:");
+ for (i = 0; i < NR_IPI; i++) {
+ seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
- for_each_present_cpu(cpu)
- seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
+ for_each_present_cpu(cpu)
+ seq_printf(p, "%10u ",
+ __get_irq_stat(cpu, ipi_irqs[i]));
- seq_putc(p, '\n');
+ seq_printf(p, " %s\n", ipi_types[i]);
+ }
}
-void show_local_irqs(struct seq_file *p)
+u64 smp_irq_stat_cpu(unsigned int cpu)
{
- unsigned int cpu;
+ u64 sum = 0;
+ int i;
- seq_printf(p, "LOC: ");
+ for (i = 0; i < NR_IPI; i++)
+ sum += __get_irq_stat(cpu, ipi_irqs[i]);
- for_each_present_cpu(cpu)
- seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);
+#ifdef CONFIG_LOCAL_TIMERS
+ sum += __get_irq_stat(cpu, local_timer_irqs);
+#endif
- seq_putc(p, '\n');
+ return sum;
}
/*
@@ -463,18 +481,30 @@ asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
int cpu = smp_processor_id();
if (local_timer_ack()) {
- irq_stat[cpu].local_timer_irqs++;
+ __inc_irq_stat(cpu, local_timer_irqs);
ipi_timer();
}
set_irq_regs(old_regs);
}
+
+void show_local_irqs(struct seq_file *p, int prec)
+{
+ unsigned int cpu;
+
+ seq_printf(p, "%*s: ", prec, "LOC");
+
+ for_each_present_cpu(cpu)
+ seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));
+
+ seq_printf(p, " Local timer interrupts\n");
+}
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
- send_ipi_message(mask, IPI_TIMER);
+ smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast NULL
@@ -511,6 +541,21 @@ void __cpuinit percpu_timer_setup(void)
local_timer_setup(evt);
}
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The generic clock events code purposely does not stop the local timer
+ * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
+ * manually here.
+ */
+static void percpu_timer_stop(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
+
+ evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+}
+#endif
+
static DEFINE_SPINLOCK(stop_lock);
/*
@@ -537,216 +582,76 @@ static void ipi_cpu_stop(unsigned int cpu)
/*
* Main handler for inter-processor interrupts
- *
- * For ARM, the ipimask now only identifies a single
- * category of IPI (Bit 1 IPIs have been replaced by a
- * different mechanism):
- *
- * Bit 0 - Inter-processor function call
*/
-asmlinkage void __exception_irq_entry do_IPI(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
- struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
struct pt_regs *old_regs = set_irq_regs(regs);
- ipi->ipi_count++;
-
- for (;;) {
- unsigned long msgs;
+ if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
+ __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);
- spin_lock(&ipi->lock);
- msgs = ipi->bits;
- ipi->bits = 0;
- spin_unlock(&ipi->lock);
-
- if (!msgs)
- break;
-
- do {
- unsigned nextmsg;
-
- nextmsg = msgs & -msgs;
- msgs &= ~nextmsg;
- nextmsg = ffz(~nextmsg);
-
- switch (nextmsg) {
- case IPI_TIMER:
- ipi_timer();
- break;
+ switch (ipinr) {
+ case IPI_TIMER:
+ ipi_timer();
+ break;
- case IPI_RESCHEDULE:
- /*
- * nothing more to do - eveything is
- * done on the interrupt return path
- */
- break;
+ case IPI_RESCHEDULE:
+ /*
+ * nothing more to do - everything is
+ * done on the interrupt return path
+ */
+ break;
- case IPI_CALL_FUNC:
- generic_smp_call_function_interrupt();
- break;
+ case IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
- case IPI_CALL_FUNC_SINGLE:
- generic_smp_call_function_single_interrupt();
- break;
+ case IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
- case IPI_CPU_STOP:
- ipi_cpu_stop(cpu);
- break;
+ case IPI_CPU_STOP:
+ ipi_cpu_stop(cpu);
+ break;
- default:
- printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
- cpu, nextmsg);
- break;
- }
- } while (msgs);
+ default:
+ printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
+ cpu, ipinr);
+ break;
}
-
set_irq_regs(old_regs);
}
void smp_send_reschedule(int cpu)
{
- send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+ smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
- cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
- if (!cpus_empty(mask))
- send_ipi_message(&mask, IPI_CPU_STOP);
-}
+ unsigned long timeout;
-/*
- * not supported here
- */
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
+ if (num_online_cpus() > 1) {
+ cpumask_t mask = cpu_online_map;
+ cpu_clear(smp_processor_id(), mask);
-static void
-on_each_cpu_mask(void (*func)(void *), void *info, int wait,
- const struct cpumask *mask)
-{
- preempt_disable();
+ smp_cross_call(&mask, IPI_CPU_STOP);
+ }
- smp_call_function_many(mask, func, info, wait);
- if (cpumask_test_cpu(smp_processor_id(), mask))
- func(info);
+ /* Wait up to one second for other CPUs to stop */
+ timeout = USEC_PER_SEC;
+ while (num_online_cpus() > 1 && timeout--)
+ udelay(1);
- preempt_enable();
+ if (num_online_cpus() > 1)
+ pr_warning("SMP: failed to stop secondary CPUs\n");
}
-/**********************************************************************/
-
/*
- * TLB operations
+ * not supported here
*/
-struct tlb_args {
- struct vm_area_struct *ta_vma;
- unsigned long ta_start;
- unsigned long ta_end;
-};
-
-static inline void ipi_flush_tlb_all(void *ignored)
-{
- local_flush_tlb_all();
-}
-
-static inline void ipi_flush_tlb_mm(void *arg)
-{
- struct mm_struct *mm = (struct mm_struct *)arg;
-
- local_flush_tlb_mm(mm);
-}
-
-static inline void ipi_flush_tlb_page(void *arg)
-{
- struct tlb_args *ta = (struct tlb_args *)arg;
-
- local_flush_tlb_page(ta->ta_vma, ta->ta_start);
-}
-
-static inline void ipi_flush_tlb_kernel_page(void *arg)
-{
- struct tlb_args *ta = (struct tlb_args *)arg;
-
- local_flush_tlb_kernel_page(ta->ta_start);
-}
-
-static inline void ipi_flush_tlb_range(void *arg)
-{
- struct tlb_args *ta = (struct tlb_args *)arg;
-
- local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
-}
-
-static inline void ipi_flush_tlb_kernel_range(void *arg)
-{
- struct tlb_args *ta = (struct tlb_args *)arg;
-
- local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
-}
-
-void flush_tlb_all(void)
-{
- if (tlb_ops_need_broadcast())
- on_each_cpu(ipi_flush_tlb_all, NULL, 1);
- else
- local_flush_tlb_all();
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
- if (tlb_ops_need_broadcast())
- on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
- else
- local_flush_tlb_mm(mm);
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
-{
- if (tlb_ops_need_broadcast()) {
- struct tlb_args ta;
- ta.ta_vma = vma;
- ta.ta_start = uaddr;
- on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
- } else
- local_flush_tlb_page(vma, uaddr);
-}
-
-void flush_tlb_kernel_page(unsigned long kaddr)
-{
- if (tlb_ops_need_broadcast()) {
- struct tlb_args ta;
- ta.ta_start = kaddr;
- on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
- } else
- local_flush_tlb_kernel_page(kaddr);
-}
-
-void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- if (tlb_ops_need_broadcast()) {
- struct tlb_args ta;
- ta.ta_vma = vma;
- ta.ta_start = start;
- ta.ta_end = end;
- on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
- } else
- local_flush_tlb_range(vma, start, end);
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+int setup_profiling_timer(unsigned int multiplier)
{
- if (tlb_ops_need_broadcast()) {
- struct tlb_args ta;
- ta.ta_start = start;
- ta.ta_end = end;
- on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
- } else
- local_flush_tlb_kernel_range(start, end);
+ return -EINVAL;
}
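The smp.c changes above drop the per-platform cpu_killed completions in favour of a single cpu_died completion: the dying CPU signals it from cpu_die(), and __cpu_die() waits on it with a five-second timeout before asking platform_cpu_kill() to finish the job. A minimal userspace sketch of that wait-with-timeout handshake, using POSIX threads in place of kernel completions (illustrative names only, not kernel API; build with cc -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  died = PTHREAD_COND_INITIALIZER;
static int cpu_has_died;			/* the "completion" state */

static void *dying_cpu(void *arg)
{
	/* stand-in for cpu_die(): finish teardown, then signal the waiter */
	sleep(1);
	pthread_mutex_lock(&lock);
	cpu_has_died = 1;
	pthread_cond_signal(&died);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	struct timespec deadline;
	pthread_t t;
	int err = 0;

	pthread_create(&t, NULL, dying_cpu, NULL);

	/* stand-in for __cpu_die(): wait up to 5 seconds for the signal */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;

	pthread_mutex_lock(&lock);
	while (!cpu_has_died && err != ETIMEDOUT)
		err = pthread_cond_timedwait(&died, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	if (!cpu_has_died)
		fprintf(stderr, "CPU didn't die\n");
	else
		printf("CPU shutdown complete\n");

	pthread_join(t, NULL);
	return 0;
}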
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
new file mode 100644
index 00000000000..7dcb35285be
--- /dev/null
+++ b/arch/arm/kernel/smp_tlb.c
@@ -0,0 +1,139 @@
+/*
+ * linux/arch/arm/kernel/smp_tlb.c
+ *
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/preempt.h>
+#include <linux/smp.h>
+
+#include <asm/smp_plat.h>
+#include <asm/tlbflush.h>
+
+static void on_each_cpu_mask(void (*func)(void *), void *info, int wait,
+ const struct cpumask *mask)
+{
+ preempt_disable();
+
+ smp_call_function_many(mask, func, info, wait);
+ if (cpumask_test_cpu(smp_processor_id(), mask))
+ func(info);
+
+ preempt_enable();
+}
+
+/**********************************************************************/
+
+/*
+ * TLB operations
+ */
+struct tlb_args {
+ struct vm_area_struct *ta_vma;
+ unsigned long ta_start;
+ unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_all(void *ignored)
+{
+ local_flush_tlb_all();
+}
+
+static inline void ipi_flush_tlb_mm(void *arg)
+{
+ struct mm_struct *mm = (struct mm_struct *)arg;
+
+ local_flush_tlb_mm(mm);
+}
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+ struct tlb_args *ta = (struct tlb_args *)arg;
+
+ local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_kernel_page(void *arg)
+{
+ struct tlb_args *ta = (struct tlb_args *)arg;
+
+ local_flush_tlb_kernel_page(ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+ struct tlb_args *ta = (struct tlb_args *)arg;
+
+ local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+ struct tlb_args *ta = (struct tlb_args *)arg;
+
+ local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+ if (tlb_ops_need_broadcast())
+ on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+ else
+ local_flush_tlb_all();
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (tlb_ops_need_broadcast())
+ on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
+ else
+ local_flush_tlb_mm(mm);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ if (tlb_ops_need_broadcast()) {
+ struct tlb_args ta;
+ ta.ta_vma = vma;
+ ta.ta_start = uaddr;
+ on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
+ } else
+ local_flush_tlb_page(vma, uaddr);
+}
+
+void flush_tlb_kernel_page(unsigned long kaddr)
+{
+ if (tlb_ops_need_broadcast()) {
+ struct tlb_args ta;
+ ta.ta_start = kaddr;
+ on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+ } else
+ local_flush_tlb_kernel_page(kaddr);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (tlb_ops_need_broadcast()) {
+ struct tlb_args ta;
+ ta.ta_vma = vma;
+ ta.ta_start = start;
+ ta.ta_end = end;
+ on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
+ } else
+ local_flush_tlb_range(vma, start, end);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ if (tlb_ops_need_broadcast()) {
+ struct tlb_args ta;
+ ta.ta_start = start;
+ ta.ta_end = end;
+ on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+ } else
+ local_flush_tlb_kernel_range(start, end);
+}
+
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 67f933ec417..dd790745b3e 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -145,13 +145,3 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
clockevents_register_device(clk);
}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * take a local timer down
- */
-void twd_timer_stop(void)
-{
- __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
-}
-#endif
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
new file mode 100644
index 00000000000..7a576092291
--- /dev/null
+++ b/arch/arm/kernel/swp_emulate.c
@@ -0,0 +1,267 @@
+/*
+ * linux/arch/arm/kernel/swp_emulate.c
+ *
+ * Copyright (C) 2009 ARM Limited
+ * __user_* functions adapted from include/asm/uaccess.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Implements emulation of the SWP/SWPB instructions using load-exclusive and
+ * store-exclusive for processors that have them disabled (or future ones that
+ * might not implement them).
+ *
+ * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
+ * Where: Rt = destination
+ * Rt2 = source
+ * Rn = address
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/perf_event.h>
+
+#include <asm/traps.h>
+#include <asm/uaccess.h>
+
+/*
+ * Error-checking SWP macros implemented using ldrex{b}/strex{b}
+ */
+#define __user_swpX_asm(data, addr, res, temp, B) \
+ __asm__ __volatile__( \
+ " mov %2, %1\n" \
+ "0: ldrex"B" %1, [%3]\n" \
+ "1: strex"B" %0, %2, [%3]\n" \
+ " cmp %0, #0\n" \
+ " movne %0, %4\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %0, %5\n" \
+ " b 2b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 0b, 3b\n" \
+ " .long 1b, 3b\n" \
+ " .previous" \
+ : "=&r" (res), "+r" (data), "=&r" (temp) \
+ : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
+ : "cc", "memory")
+
+#define __user_swp_asm(data, addr, res, temp) \
+ __user_swpX_asm(data, addr, res, temp, "")
+#define __user_swpb_asm(data, addr, res, temp) \
+ __user_swpX_asm(data, addr, res, temp, "b")
+
+/*
+ * Macros/defines for extracting register numbers from instruction.
+ */
+#define EXTRACT_REG_NUM(instruction, offset) \
+ (((instruction) & (0xf << (offset))) >> (offset))
+#define RN_OFFSET 16
+#define RT_OFFSET 12
+#define RT2_OFFSET 0
+/*
+ * Bit 22 of the instruction encoding distinguishes between
+ * the SWP and SWPB variants (bit set means SWPB).
+ */
+#define TYPE_SWPB (1 << 22)
+
+static unsigned long swpcounter;
+static unsigned long swpbcounter;
+static unsigned long abtcounter;
+static pid_t previous_pid;
+
+#ifdef CONFIG_PROC_FS
+static int proc_read_status(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ char *p = page;
+ int len;
+
+ p += sprintf(p, "Emulated SWP:\t\t%lu\n", swpcounter);
+ p += sprintf(p, "Emulated SWPB:\t\t%lu\n", swpbcounter);
+ p += sprintf(p, "Aborted SWP{B}:\t\t%lu\n", abtcounter);
+ if (previous_pid != 0)
+ p += sprintf(p, "Last process:\t\t%d\n", previous_pid);
+
+ len = (p - page) - off;
+ if (len < 0)
+ len = 0;
+
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+
+ return len;
+}
+#endif
+
+/*
+ * Set up process info to signal segmentation fault - called on access error.
+ */
+static void set_segfault(struct pt_regs *regs, unsigned long addr)
+{
+ siginfo_t info;
+
+ if (find_vma(current->mm, addr) == NULL)
+ info.si_code = SEGV_MAPERR;
+ else
+ info.si_code = SEGV_ACCERR;
+
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_addr = (void *) instruction_pointer(regs);
+
+ pr_debug("SWP{B} emulation: access caused memory abort!\n");
+ arm_notify_die("Illegal memory access", regs, &info, 0, 0);
+
+ abtcounter++;
+}
+
+static int emulate_swpX(unsigned int address, unsigned int *data,
+ unsigned int type)
+{
+ unsigned int res = 0;
+
+ if ((type != TYPE_SWPB) && (address & 0x3)) {
+ /* SWP to unaligned address not permitted */
+ pr_debug("SWP instruction on unaligned pointer!\n");
+ return -EFAULT;
+ }
+
+ while (1) {
+ unsigned long temp;
+
+ /*
+ * Barrier required between accessing protected resource and
+ * releasing a lock for it. Legacy code might not have done
+ * this, and we cannot determine that this is not the case
+ * being emulated, so insert always.
+ */
+ smp_mb();
+
+ if (type == TYPE_SWPB)
+ __user_swpb_asm(*data, address, res, temp);
+ else
+ __user_swp_asm(*data, address, res, temp);
+
+ if (likely(res != -EAGAIN) || signal_pending(current))
+ break;
+
+ cond_resched();
+ }
+
+ if (res == 0) {
+ /*
+ * Barrier also required between acquiring a lock for a
+ * protected resource and accessing the resource. Inserted for
+ * same reason as above.
+ */
+ smp_mb();
+
+ if (type == TYPE_SWPB)
+ swpbcounter++;
+ else
+ swpcounter++;
+ }
+
+ return res;
+}
+
+/*
+ * swp_handler logs the id of calling process, dissects the instruction, sanity
+ * checks the memory location, calls emulate_swpX for the actual operation and
+ * deals with fixup/error handling before returning
+ */
+static int swp_handler(struct pt_regs *regs, unsigned int instr)
+{
+ unsigned int address, destreg, data, type;
+ unsigned int res = 0;
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
+
+ if (current->pid != previous_pid) {
+ pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
+ current->comm, (unsigned long)current->pid);
+ previous_pid = current->pid;
+ }
+
+ address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)];
+ data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
+ destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);
+
+ type = instr & TYPE_SWPB;
+
+ pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
+ EXTRACT_REG_NUM(instr, RN_OFFSET), address,
+ destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);
+
+ /* Check access in reasonable access range for both SWP and SWPB */
+ if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+ pr_debug("SWP{B} emulation: access to %p not allowed!\n",
+ (void *)address);
+ res = -EFAULT;
+ } else {
+ res = emulate_swpX(address, &data, type);
+ }
+
+ if (res == 0) {
+ /*
+ * On successful emulation, revert the adjustment to the PC
+ * made in kernel/traps.c in order to resume execution at the
+ * instruction following the SWP{B}.
+ */
+ regs->ARM_pc += 4;
+ regs->uregs[destreg] = data;
+ } else if (res == -EFAULT) {
+ /*
+ * Memory errors do not mean emulation failed.
+ * Set up signal info to return SEGV, then return OK
+ */
+ set_segfault(regs, address);
+ }
+
+ return 0;
+}
+
+/*
+ * Only emulate SWP/SWPB executed in ARM state/User mode.
+ * The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE.
+ */
+static struct undef_hook swp_hook = {
+ .instr_mask = 0x0fb00ff0,
+ .instr_val = 0x01000090,
+ .cpsr_mask = MODE_MASK | PSR_T_BIT | PSR_J_BIT,
+ .cpsr_val = USR_MODE,
+ .fn = swp_handler
+};
+
+/*
+ * Register handler and create status file in /proc/cpu
+ * Invoked as late_initcall, since not needed before init spawned.
+ */
+static int __init swp_emulation_init(void)
+{
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *res;
+
+ res = create_proc_entry("cpu/swp_emulation", S_IRUGO, NULL);
+
+ if (!res)
+ return -ENOMEM;
+
+ res->read_proc = proc_read_status;
+#endif /* CONFIG_PROC_FS */
+
+ printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n");
+ register_undef_hook(&swp_hook);
+
+ return 0;
+}
+
+late_initcall(swp_emulation_init);
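For reference, the instruction being emulated above is an atomic exchange: SWP{B} stores Rt2 to [Rn] and returns the previous contents in Rt as one atomic operation, which is why the handler loops on ldrex/strex until the store-exclusive succeeds. A small userspace analogue using C11 atomics (an illustration of the semantics only, not the emulation path; on ARMv7 the compiler lowers atomic_exchange() to a similar ldrex/strex retry loop):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	_Atomic unsigned int lock_word = 0;	/* shared word, e.g. a spinlock */
	unsigned int old;

	/* Equivalent of "SWP r0, r1, [rN]" with r1 == 1: store 1, return old value */
	old = atomic_exchange(&lock_word, 1u);

	printf("old value %u, new value %u\n", old,
	       atomic_load(&lock_word));	/* prints: old value 0, new value 1 */
	return 0;
}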
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 38c261f9951..f1e2eb19a67 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -30,12 +30,13 @@
#include <asm/leds.h>
#include <asm/thread_info.h>
#include <asm/stacktrace.h>
+#include <asm/mach/arch.h>
#include <asm/mach/time.h>
/*
* Our system timer.
*/
-struct sys_timer *system_timer;
+static struct sys_timer *system_timer;
#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
/* this needs a better home */
@@ -160,6 +161,7 @@ device_initcall(timer_init_sysfs);
void __init time_init(void)
{
+ system_timer = machine_desc->timer;
system_timer->init();
}
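The time.c hunk above makes system_timer private and has time_init() pull the timer from the machine descriptor instead of relying on board code to assign it. A stripped-down sketch of that descriptor indirection (hypothetical, cut-down types; the real struct machine_desc and struct sys_timer carry many more fields):

#include <stdio.h>

/* hypothetical, cut-down equivalents of sys_timer / machine_desc */
struct sys_timer {
	void (*init)(void);
};

struct machine_desc {
	const char *name;
	struct sys_timer *timer;
};

static void board_timer_init(void)
{
	printf("board timer initialised\n");
}

static struct sys_timer board_timer = { .init = board_timer_init };

/* one descriptor per supported board; selected at boot in the real kernel */
static struct machine_desc board_desc = {
	.name  = "example-board",
	.timer = &board_timer,
};

static struct machine_desc *machine_desc = &board_desc;
static struct sys_timer *system_timer;		/* now private to time.c */

static void time_init(void)
{
	system_timer = machine_desc->timer;	/* pick the timer from the descriptor */
	system_timer->init();
}

int main(void)
{
	time_init();
	return 0;
}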
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 446aee97436..e02f4f7537c 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -37,6 +37,8 @@
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+void *vectors_page;
+
#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;
@@ -756,7 +758,11 @@ static void __init kuser_get_tls_init(unsigned long vectors)
void __init early_trap_init(void)
{
+#if defined(CONFIG_CPU_USE_DOMAINS)
unsigned long vectors = CONFIG_VECTORS_BASE;
+#else
+ unsigned long vectors = (unsigned long)vectors_page;
+#endif
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
extern char __kuser_helper_start[], __kuser_helper_end[];
@@ -780,10 +786,10 @@ void __init early_trap_init(void)
* Copy signal return handlers into the vector page, and
* set sigreturn to be a pointer to these.
*/
- memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
- sizeof(sigreturn_codes));
- memcpy((void *)KERN_RESTART_CODE, syscall_restart_code,
- sizeof(syscall_restart_code));
+ memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
+ sigreturn_codes, sizeof(sigreturn_codes));
+ memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+ syscall_restart_code, sizeof(syscall_restart_code));
flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 897c1a8f169..86b66f3f203 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -168,6 +168,7 @@ SECTIONS
NOSAVE_DATA
CACHELINE_ALIGNED_DATA(32)
+ READ_MOSTLY_DATA(32)
/*
* The exception fixup table (might need resorting at runtime)
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index b1631a7dbe7..1b049cd7a49 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -28,20 +28,21 @@
*/
#include <linux/linkage.h>
#include <asm/errno.h>
+#include <asm/domain.h>
ENTRY(__get_user_1)
-1: ldrbt r2, [r0]
+1: T(ldrb) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__get_user_1)
ENTRY(__get_user_2)
#ifdef CONFIG_THUMB2_KERNEL
-2: ldrbt r2, [r0]
-3: ldrbt r3, [r0, #1]
+2: T(ldrb) r2, [r0]
+3: T(ldrb) r3, [r0, #1]
#else
-2: ldrbt r2, [r0], #1
-3: ldrbt r3, [r0]
+2: T(ldrb) r2, [r0], #1
+3: T(ldrb) r3, [r0]
#endif
#ifndef __ARMEB__
orr r2, r2, r3, lsl #8
@@ -53,7 +54,7 @@ ENTRY(__get_user_2)
ENDPROC(__get_user_2)
ENTRY(__get_user_4)
-4: ldrt r2, [r0]
+4: T(ldr) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__get_user_4)
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 5a01a23c6c0..c023fc11e86 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -28,9 +28,10 @@
*/
#include <linux/linkage.h>
#include <asm/errno.h>
+#include <asm/domain.h>
ENTRY(__put_user_1)
-1: strbt r2, [r0]
+1: T(strb) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__put_user_1)
@@ -39,19 +40,19 @@ ENTRY(__put_user_2)
mov ip, r2, lsr #8
#ifdef CONFIG_THUMB2_KERNEL
#ifndef __ARMEB__
-2: strbt r2, [r0]
-3: strbt ip, [r0, #1]
+2: T(strb) r2, [r0]
+3: T(strb) ip, [r0, #1]
#else
-2: strbt ip, [r0]
-3: strbt r2, [r0, #1]
+2: T(strb) ip, [r0]
+3: T(strb) r2, [r0, #1]
#endif
#else /* !CONFIG_THUMB2_KERNEL */
#ifndef __ARMEB__
-2: strbt r2, [r0], #1
-3: strbt ip, [r0]
+2: T(strb) r2, [r0], #1
+3: T(strb) ip, [r0]
#else
-2: strbt ip, [r0], #1
-3: strbt r2, [r0]
+2: T(strb) ip, [r0], #1
+3: T(strb) r2, [r0]
#endif
#endif /* CONFIG_THUMB2_KERNEL */
mov r0, #0
@@ -59,18 +60,18 @@ ENTRY(__put_user_2)
ENDPROC(__put_user_2)
ENTRY(__put_user_4)
-4: strt r2, [r0]
+4: T(str) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__put_user_4)
ENTRY(__put_user_8)
#ifdef CONFIG_THUMB2_KERNEL
-5: strt r2, [r0]
-6: strt r3, [r0, #4]
+5: T(str) r2, [r0]
+6: T(str) r3, [r0, #4]
#else
-5: strt r2, [r0], #4
-6: strt r3, [r0]
+5: T(str) r2, [r0], #4
+6: T(str) r3, [r0]
#endif
mov r0, #0
mov pc, lr
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
index fee9f6f88ad..d0ece2aeb70 100644
--- a/arch/arm/lib/uaccess.S
+++ b/arch/arm/lib/uaccess.S
@@ -14,6 +14,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
+#include <asm/domain.h>
.text
@@ -31,11 +32,11 @@
rsb ip, ip, #4
cmp ip, #2
ldrb r3, [r1], #1
-USER( strbt r3, [r0], #1) @ May fault
+USER( T(strb) r3, [r0], #1) @ May fault
ldrgeb r3, [r1], #1
-USER( strgebt r3, [r0], #1) @ May fault
+USER( T(strgeb) r3, [r0], #1) @ May fault
ldrgtb r3, [r1], #1
-USER( strgtbt r3, [r0], #1) @ May fault
+USER( T(strgtb) r3, [r0], #1) @ May fault
sub r2, r2, ip
b .Lc2u_dest_aligned
@@ -58,7 +59,7 @@ ENTRY(__copy_to_user)
addmi ip, r2, #4
bmi .Lc2u_0nowords
ldr r3, [r1], #4
-USER( strt r3, [r0], #4) @ May fault
+USER( T(str) r3, [r0], #4) @ May fault
mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
rsb ip, ip, #0
movs ip, ip, lsr #32 - PAGE_SHIFT
@@ -87,18 +88,18 @@ USER( strt r3, [r0], #4) @ May fault
stmneia r0!, {r3 - r4} @ Shouldnt fault
tst ip, #4
ldrne r3, [r1], #4
- strnet r3, [r0], #4 @ Shouldnt fault
+ T(strne) r3, [r0], #4 @ Shouldnt fault
ands ip, ip, #3
beq .Lc2u_0fupi
.Lc2u_0nowords: teq ip, #0
beq .Lc2u_finished
.Lc2u_nowords: cmp ip, #2
ldrb r3, [r1], #1
-USER( strbt r3, [r0], #1) @ May fault
+USER( T(strb) r3, [r0], #1) @ May fault
ldrgeb r3, [r1], #1
-USER( strgebt r3, [r0], #1) @ May fault
+USER( T(strgeb) r3, [r0], #1) @ May fault
ldrgtb r3, [r1], #1
-USER( strgtbt r3, [r0], #1) @ May fault
+USER( T(strgtb) r3, [r0], #1) @ May fault
b .Lc2u_finished
.Lc2u_not_enough:
@@ -119,7 +120,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
mov r3, r7, pull #8
ldr r7, [r1], #4
orr r3, r3, r7, push #24
-USER( strt r3, [r0], #4) @ May fault
+USER( T(str) r3, [r0], #4) @ May fault
mov ip, r0, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0
movs ip, ip, lsr #32 - PAGE_SHIFT
@@ -154,18 +155,18 @@ USER( strt r3, [r0], #4) @ May fault
movne r3, r7, pull #8
ldrne r7, [r1], #4
orrne r3, r3, r7, push #24
- strnet r3, [r0], #4 @ Shouldnt fault
+ T(strne) r3, [r0], #4 @ Shouldnt fault
ands ip, ip, #3
beq .Lc2u_1fupi
.Lc2u_1nowords: mov r3, r7, get_byte_1
teq ip, #0
beq .Lc2u_finished
cmp ip, #2
-USER( strbt r3, [r0], #1) @ May fault
+USER( T(strb) r3, [r0], #1) @ May fault
movge r3, r7, get_byte_2
-USER( strgebt r3, [r0], #1) @ May fault
+USER( T(strgeb) r3, [r0], #1) @ May fault
movgt r3, r7, get_byte_3
-USER( strgtbt r3, [r0], #1) @ May fault
+USER( T(strgtb) r3, [r0], #1) @ May fault
b .Lc2u_finished
.Lc2u_2fupi: subs r2, r2, #4
@@ -174,7 +175,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
mov r3, r7, pull #16
ldr r7, [r1], #4
orr r3, r3, r7, push #16
-USER( strt r3, [r0], #4) @ May fault
+USER( T(str) r3, [r0], #4) @ May fault
mov ip, r0, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0
movs ip, ip, lsr #32 - PAGE_SHIFT
@@ -209,18 +210,18 @@ USER( strt r3, [r0], #4) @ May fault
movne r3, r7, pull #16
ldrne r7, [r1], #4
orrne r3, r3, r7, push #16
- strnet r3, [r0], #4 @ Shouldnt fault
+ T(strne) r3, [r0], #4 @ Shouldnt fault
ands ip, ip, #3
beq .Lc2u_2fupi
.Lc2u_2nowords: mov r3, r7, get_byte_2
teq ip, #0
beq .Lc2u_finished
cmp ip, #2
-USER( strbt r3, [r0], #1) @ May fault
+USER( T(strb) r3, [r0], #1) @ May fault
movge r3, r7, get_byte_3
-USER( strgebt r3, [r0], #1) @ May fault
+USER( T(strgeb) r3, [r0], #1) @ May fault
ldrgtb r3, [r1], #0
-USER( strgtbt r3, [r0], #1) @ May fault
+USER( T(strgtb) r3, [r0], #1) @ May fault
b .Lc2u_finished
.Lc2u_3fupi: subs r2, r2, #4
@@ -229,7 +230,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
mov r3, r7, pull #24
ldr r7, [r1], #4
orr r3, r3, r7, push #8
-USER( strt r3, [r0], #4) @ May fault
+USER( T(str) r3, [r0], #4) @ May fault
mov ip, r0, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0
movs ip, ip, lsr #32 - PAGE_SHIFT
@@ -264,18 +265,18 @@ USER( strt r3, [r0], #4) @ May fault
movne r3, r7, pull #24
ldrne r7, [r1], #4
orrne r3, r3, r7, push #8
- strnet r3, [r0], #4 @ Shouldnt fault
+ T(strne) r3, [r0], #4 @ Shouldnt fault
ands ip, ip, #3
beq .Lc2u_3fupi
.Lc2u_3nowords: mov r3, r7, get_byte_3
teq ip, #0
beq .Lc2u_finished
cmp ip, #2
-USER( strbt r3, [r0], #1) @ May fault
+USER( T(strb) r3, [r0], #1) @ May fault
ldrgeb r3, [r1], #1
-USER( strgebt r3, [r0], #1) @ May fault
+USER( T(strgeb) r3, [r0], #1) @ May fault
ldrgtb r3, [r1], #0
-USER( strgtbt r3, [r0], #1) @ May fault
+USER( T(strgtb) r3, [r0], #1) @ May fault
b .Lc2u_finished
ENDPROC(__copy_to_user)
@@ -294,11 +295,11 @@ ENDPROC(__copy_to_user)
.Lcfu_dest_not_aligned:
rsb ip, ip, #4
cmp ip, #2
-USER( ldrbt r3, [r1], #1) @ May fault
+USER( T(ldrb) r3, [r1], #1) @ May fault
strb r3, [r0], #1
-USER( ldrgebt r3, [r1], #1) @ May fault
+USER( T(ldrgeb) r3, [r1], #1) @ May fault
strgeb r3, [r0], #1
-USER( ldrgtbt r3, [r1], #1) @ May fault
+USER( T(ldrgtb) r3, [r1], #1) @ May fault
strgtb r3, [r0], #1
sub r2, r2, ip
b .Lcfu_dest_aligned
@@ -321,7 +322,7 @@ ENTRY(__copy_from_user)
.Lcfu_0fupi: subs r2, r2, #4
addmi ip, r2, #4
bmi .Lcfu_0nowords
-USER( ldrt r3, [r1], #4)
+USER( T(ldr) r3, [r1], #4)
str r3, [r0], #4
mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
rsb ip, ip, #0
@@ -350,18 +351,18 @@ USER( ldrt r3, [r1], #4)
ldmneia r1!, {r3 - r4} @ Shouldnt fault
stmneia r0!, {r3 - r4}
tst ip, #4
- ldrnet r3, [r1], #4 @ Shouldnt fault
+ T(ldrne) r3, [r1], #4 @ Shouldnt fault
strne r3, [r0], #4
ands ip, ip, #3
beq .Lcfu_0fupi
.Lcfu_0nowords: teq ip, #0
beq .Lcfu_finished
.Lcfu_nowords: cmp ip, #2
-USER( ldrbt r3, [r1], #1) @ May fault
+USER( T(ldrb) r3, [r1], #1) @ May fault
strb r3, [r0], #1
-USER( ldrgebt r3, [r1], #1) @ May fault
+USER( T(ldrgeb) r3, [r1], #1) @ May fault
strgeb r3, [r0], #1
-USER( ldrgtbt r3, [r1], #1) @ May fault
+USER( T(ldrgtb) r3, [r1], #1) @ May fault
strgtb r3, [r0], #1
b .Lcfu_finished
@@ -374,7 +375,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
.Lcfu_src_not_aligned:
bic r1, r1, #3
-USER( ldrt r7, [r1], #4) @ May fault
+USER( T(ldr) r7, [r1], #4) @ May fault
cmp ip, #2
bgt .Lcfu_3fupi
beq .Lcfu_2fupi
@@ -382,7 +383,7 @@ USER( ldrt r7, [r1], #4) @ May fault
addmi ip, r2, #4
bmi .Lcfu_1nowords
mov r3, r7, pull #8
-USER( ldrt r7, [r1], #4) @ May fault
+USER( T(ldr) r7, [r1], #4) @ May fault
orr r3, r3, r7, push #24
str r3, [r0], #4
mov ip, r1, lsl #32 - PAGE_SHIFT
@@ -417,7 +418,7 @@ USER( ldrt r7, [r1], #4) @ May fault
stmneia r0!, {r3 - r4}
tst ip, #4
movne r3, r7, pull #8
-USER( ldrnet r7, [r1], #4) @ May fault
+USER( T(ldrne) r7, [r1], #4) @ May fault
orrne r3, r3, r7, push #24
strne r3, [r0], #4
ands ip, ip, #3
@@ -437,7 +438,7 @@ USER( ldrnet r7, [r1], #4) @ May fault
addmi ip, r2, #4
bmi .Lcfu_2nowords
mov r3, r7, pull #16
-USER( ldrt r7, [r1], #4) @ May fault
+USER( T(ldr) r7, [r1], #4) @ May fault
orr r3, r3, r7, push #16
str r3, [r0], #4
mov ip, r1, lsl #32 - PAGE_SHIFT
@@ -473,7 +474,7 @@ USER( ldrt r7, [r1], #4) @ May fault
stmneia r0!, {r3 - r4}
tst ip, #4
movne r3, r7, pull #16
-USER( ldrnet r7, [r1], #4) @ May fault
+USER( T(ldrne) r7, [r1], #4) @ May fault
orrne r3, r3, r7, push #16
strne r3, [r0], #4
ands ip, ip, #3
@@ -485,7 +486,7 @@ USER( ldrnet r7, [r1], #4) @ May fault
strb r3, [r0], #1
movge r3, r7, get_byte_3
strgeb r3, [r0], #1
-USER( ldrgtbt r3, [r1], #0) @ May fault
+USER( T(ldrgtb) r3, [r1], #0) @ May fault
strgtb r3, [r0], #1
b .Lcfu_finished
@@ -493,7 +494,7 @@ USER( ldrgtbt r3, [r1], #0) @ May fault
addmi ip, r2, #4
bmi .Lcfu_3nowords
mov r3, r7, pull #24
-USER( ldrt r7, [r1], #4) @ May fault
+USER( T(ldr) r7, [r1], #4) @ May fault
orr r3, r3, r7, push #8
str r3, [r0], #4
mov ip, r1, lsl #32 - PAGE_SHIFT
@@ -528,7 +529,7 @@ USER( ldrt r7, [r1], #4) @ May fault
stmneia r0!, {r3 - r4}
tst ip, #4
movne r3, r7, pull #24
-USER( ldrnet r7, [r1], #4) @ May fault
+USER( T(ldrne) r7, [r1], #4) @ May fault
orrne r3, r3, r7, push #8
strne r3, [r0], #4
ands ip, ip, #3
@@ -538,9 +539,9 @@ USER( ldrnet r7, [r1], #4) @ May fault
beq .Lcfu_finished
cmp ip, #2
strb r3, [r0], #1
-USER( ldrgebt r3, [r1], #1) @ May fault
+USER( T(ldrgeb) r3, [r1], #1) @ May fault
strgeb r3, [r0], #1
-USER( ldrgtbt r3, [r1], #1) @ May fault
+USER( T(ldrgtb) r3, [r1], #1) @ May fault
strgtb r3, [r0], #1
b .Lcfu_finished
ENDPROC(__copy_from_user)
diff --git a/arch/arm/mach-bcmring/clock.c b/arch/arm/mach-bcmring/clock.c
index 14bafc38f2d..ad237a42d26 100644
--- a/arch/arm/mach-bcmring/clock.c
+++ b/arch/arm/mach-bcmring/clock.c
@@ -21,13 +21,12 @@
#include <linux/string.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
+#include <linux/clkdev.h>
#include <mach/csp/hw_cfg.h>
#include <mach/csp/chipcHw_def.h>
#include <mach/csp/chipcHw_reg.h>
#include <mach/csp/chipcHw_inline.h>
-#include <asm/clkdev.h>
-
#include "clock.h"
#define clk_is_primary(x) ((x)->type & CLK_TYPE_PRIMARY)
diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c
index b91b1b0fdff..8fc2035759f 100644
--- a/arch/arm/mach-bcmring/core.c
+++ b/arch/arm/mach-bcmring/core.c
@@ -30,10 +30,10 @@
#include <linux/amba/bus.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/clkdev.h>
#include <mach/csp/mm_addr.h>
#include <mach/hardware.h>
-#include <asm/clkdev.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/hardware/arm_timer.h>
diff --git a/arch/arm/mach-cns3xxx/Kconfig b/arch/arm/mach-cns3xxx/Kconfig
index 9ebfcc46feb..29b13f249aa 100644
--- a/arch/arm/mach-cns3xxx/Kconfig
+++ b/arch/arm/mach-cns3xxx/Kconfig
@@ -3,6 +3,7 @@ menu "CNS3XXX platform type"
config MACH_CNS3420VB
bool "Support for CNS3420 Validation Board"
+ select MIGHT_HAVE_PCI
help
Include support for the Cavium Networks CNS3420 MPCore Platform
Baseboard.
diff --git a/arch/arm/mach-davinci/clock.h b/arch/arm/mach-davinci/clock.h
index 11099980b58..0dd22031ec6 100644
--- a/arch/arm/mach-davinci/clock.h
+++ b/arch/arm/mach-davinci/clock.h
@@ -68,7 +68,7 @@
#ifndef __ASSEMBLER__
#include <linux/list.h>
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
#define PLLSTAT_GOSTAT BIT(0)
#define PLLCMD_GOSET BIT(0)
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index ef06c66a6f1..ca4de710509 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -19,10 +19,10 @@
#include <linux/string.h>
#include <linux/io.h>
#include <linux/spinlock.h>
+#include <linux/clkdev.h>
#include <mach/hardware.h>
-#include <asm/clkdev.h>
#include <asm/div64.h>
diff --git a/arch/arm/mach-imx/clock-imx1.c b/arch/arm/mach-imx/clock-imx1.c
index daca30b2d5b..3938a563b28 100644
--- a/arch/arm/mach-imx/clock-imx1.c
+++ b/arch/arm/mach-imx/clock-imx1.c
@@ -22,8 +22,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
-
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
#include <mach/clock.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-imx/clock-imx21.c b/arch/arm/mach-imx/clock-imx21.c
index cf15ea516a7..d7056559715 100644
--- a/arch/arm/mach-imx/clock-imx21.c
+++ b/arch/arm/mach-imx/clock-imx21.c
@@ -21,11 +21,11 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/clkdev.h>
#include <mach/clock.h>
#include <mach/hardware.h>
#include <mach/common.h>
-#include <asm/clkdev.h>
#include <asm/div64.h>
#define IO_ADDR_CCM(off) (MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
diff --git a/arch/arm/mach-imx/clock-imx27.c b/arch/arm/mach-imx/clock-imx27.c
index 98a25bada78..ca1017b9028 100644
--- a/arch/arm/mach-imx/clock-imx27.c
+++ b/arch/arm/mach-imx/clock-imx27.c
@@ -21,8 +21,8 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/clkdev.h>
-#include <asm/clkdev.h>
#include <asm/div64.h>
#include <mach/clock.h>
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index 27db275b367..769b0f10c83 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -4,6 +4,7 @@ menu "Integrator Options"
config ARCH_INTEGRATOR_AP
bool "Support Integrator/AP and Integrator/PP2 platforms"
+ select MIGHT_HAVE_PCI
help
Include support for the ARM(R) Integrator/AP and
Integrator/PP2 platforms.
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 8f4fb6d638f..b8e884b450d 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -21,9 +21,8 @@
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/io.h>
+#include <linux/clkdev.h>
-#include <asm/clkdev.h>
-#include <mach/clkdev.h>
#include <mach/hardware.h>
#include <mach/platform.h>
#include <asm/irq.h>
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index fd684bf205e..5db574f8ae3 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -22,9 +22,8 @@
#include <linux/amba/clcd.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/clkdev.h>
-#include <asm/clkdev.h>
-#include <mach/clkdev.h>
#include <asm/hardware/icst.h>
#include <mach/lm.h>
#include <mach/impd1.h>
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 1713ecf0c95..85e48a5f77b 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -21,9 +21,8 @@
#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <linux/gfp.h>
+#include <linux/clkdev.h>
-#include <asm/clkdev.h>
-#include <mach/clkdev.h>
#include <mach/hardware.h>
#include <mach/platform.h>
#include <asm/irq.h>
diff --git a/arch/arm/mach-iop13xx/include/mach/memory.h b/arch/arm/mach-iop13xx/include/mach/memory.h
index 7415e433865..3ad45531886 100644
--- a/arch/arm/mach-iop13xx/include/mach/memory.h
+++ b/arch/arm/mach-iop13xx/include/mach/memory.h
@@ -58,13 +58,13 @@ static inline unsigned long __lbus_to_virt(dma_addr_t x)
__dma; \
})
-#define __arch_page_to_dma(dev, page) \
+#define __arch_pfn_to_dma(dev, pfn) \
({ \
/* __is_lbus_virt() can never be true for RAM pages */ \
- (dma_addr_t)page_to_phys(page); \
+ (dma_addr_t)__pfn_to_phys(pfn); \
})
-#define __arch_dma_to_page(dev, addr) phys_to_page(addr)
+#define __arch_dma_to_pfn(dev, addr) __phys_to_pfn(addr)
#endif /* CONFIG_ARCH_IOP13XX */
#endif /* !ASSEMBLY */
diff --git a/arch/arm/mach-ks8695/Kconfig b/arch/arm/mach-ks8695/Kconfig
index fe0c82e30b2..f5c39a8c2b0 100644
--- a/arch/arm/mach-ks8695/Kconfig
+++ b/arch/arm/mach-ks8695/Kconfig
@@ -4,6 +4,7 @@ menu "Kendin/Micrel KS8695 Implementations"
config MACH_KS8695
bool "KS8695 development board"
+ select MIGHT_HAVE_PCI
help
Say 'Y' here if you want your kernel to run on the original
Kendin-Micrel KS8695 development board.
diff --git a/arch/arm/mach-ks8695/include/mach/memory.h b/arch/arm/mach-ks8695/include/mach/memory.h
index ffa19aae6e0..bace9a681ad 100644
--- a/arch/arm/mach-ks8695/include/mach/memory.h
+++ b/arch/arm/mach-ks8695/include/mach/memory.h
@@ -35,17 +35,17 @@ extern struct bus_type platform_bus_type;
__phys_to_virt(x) : __bus_to_virt(x)); })
#define __arch_virt_to_dma(dev, x) ({ is_lbus_device(dev) ? \
(dma_addr_t)__virt_to_phys(x) : (dma_addr_t)__virt_to_bus(x); })
-#define __arch_page_to_dma(dev, x) \
- ({ dma_addr_t __dma = page_to_phys(page); \
+#define __arch_pfn_to_dma(dev, pfn) \
+ ({ dma_addr_t __dma = __pfn_to_phys(pfn); \
if (!is_lbus_device(dev)) \
__dma = __dma - PHYS_OFFSET + KS8695_PCIMEM_PA; \
__dma; })
-#define __arch_dma_to_page(dev, x) \
+#define __arch_dma_to_pfn(dev, x) \
({ dma_addr_t __dma = x; \
if (!is_lbus_device(dev)) \
__dma += PHYS_OFFSET - KS8695_PCIMEM_PA; \
- phys_to_page(__dma); \
+ __phys_to_pfn(__dma); \
})
#endif
diff --git a/arch/arm/mach-lpc32xx/clock.c b/arch/arm/mach-lpc32xx/clock.c
index 32d63796430..da0e6498110 100644
--- a/arch/arm/mach-lpc32xx/clock.c
+++ b/arch/arm/mach-lpc32xx/clock.c
@@ -90,10 +90,9 @@
#include <linux/clk.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
+#include <linux/clkdev.h>
#include <mach/hardware.h>
-#include <asm/clkdev.h>
-#include <mach/clkdev.h>
#include <mach/platform.h>
#include "clock.h"
#include "common.h"
diff --git a/arch/arm/mach-mmp/clock.h b/arch/arm/mach-mmp/clock.h
index 016ae94691c..9b027d7491f 100644
--- a/arch/arm/mach-mmp/clock.h
+++ b/arch/arm/mach-mmp/clock.h
@@ -6,7 +6,7 @@
* published by the Free Software Foundation.
*/
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
struct clkops {
void (*enable)(struct clk *);
diff --git a/arch/arm/mach-msm/include/mach/smp.h b/arch/arm/mach-msm/include/mach/smp.h
index 3ff7bf5e679..a95f7b9efe3 100644
--- a/arch/arm/mach-msm/include/mach/smp.h
+++ b/arch/arm/mach-msm/include/mach/smp.h
@@ -31,9 +31,9 @@
#include <asm/hardware/gic.h>
-static inline void smp_cross_call(const struct cpumask *mask)
+static inline void smp_cross_call(const struct cpumask *mask, int ipi)
{
- gic_raise_softirq(mask, 1);
+ gic_raise_softirq(mask, ipi);
}
#endif
diff --git a/arch/arm/mach-mx25/clock.c b/arch/arm/mach-mx25/clock.c
index 9e4a5578c2f..00dcb08019e 100644
--- a/arch/arm/mach-mx25/clock.c
+++ b/arch/arm/mach-mx25/clock.c
@@ -21,8 +21,7 @@
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/io.h>
-
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
#include <mach/clock.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-mx3/clock-imx31.c b/arch/arm/mach-mx3/clock-imx31.c
index 109e98f323e..1cd8b40b767 100644
--- a/arch/arm/mach-mx3/clock-imx31.c
+++ b/arch/arm/mach-mx3/clock-imx31.c
@@ -23,8 +23,8 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/clkdev.h>
-#include <asm/clkdev.h>
#include <asm/div64.h>
#include <mach/clock.h>
diff --git a/arch/arm/mach-mx3/clock-imx35.c b/arch/arm/mach-mx3/clock-imx35.c
index 61e4a318980..819dd809615 100644
--- a/arch/arm/mach-mx3/clock-imx35.c
+++ b/arch/arm/mach-mx3/clock-imx35.c
@@ -21,8 +21,7 @@
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/io.h>
-
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
#include <mach/clock.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c
index 8ac36d88292..5975edb47de 100644
--- a/arch/arm/mach-mx5/clock-mx51.c
+++ b/arch/arm/mach-mx5/clock-mx51.c
@@ -14,8 +14,8 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/clkdev.h>
-#include <asm/clkdev.h>
#include <asm/div64.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-mxc91231/clock.c b/arch/arm/mach-mxc91231/clock.c
index 5c85075d8a5..9fab505f1eb 100644
--- a/arch/arm/mach-mxc91231/clock.c
+++ b/arch/arm/mach-mxc91231/clock.c
@@ -2,12 +2,12 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/clkdev.h>
#include <mach/clock.h>
#include <mach/hardware.h>
#include <mach/common.h>
-#include <asm/clkdev.h>
#include <asm/bug.h>
#include <asm/div64.h>
diff --git a/arch/arm/mach-nomadik/clock.c b/arch/arm/mach-nomadik/clock.c
index 89f793adf77..48a59f24e10 100644
--- a/arch/arm/mach-nomadik/clock.c
+++ b/arch/arm/mach-nomadik/clock.c
@@ -7,7 +7,7 @@
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/clk.h>
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
#include "clock.h"
/*
diff --git a/arch/arm/mach-nuc93x/clock.h b/arch/arm/mach-nuc93x/clock.h
index 18e51be4816..4de1f1da9dc 100644
--- a/arch/arm/mach-nuc93x/clock.h
+++ b/arch/arm/mach-nuc93x/clock.h
@@ -10,7 +10,7 @@
* the Free Software Foundation; either version 2 of the License.
*/
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
void nuc93x_clk_enable(struct clk *clk, int enable);
void clks_register(struct clk_lookup *clks, size_t num);
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index b8c7fb9d792..84ef70476b5 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -17,9 +17,9 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/clkdev.h>
#include <asm/mach-types.h>
-#include <asm/clkdev.h>
#include <plat/cpu.h>
#include <plat/usb.h>
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index ed8d330522f..ebb888f5936 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -26,10 +26,10 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/bitops.h>
+#include <linux/clkdev.h>
#include <plat/cpu.h>
#include <plat/clock.h>
-#include <asm/clkdev.h>
#include "clock.h"
#include "prm.h"
diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c
index 6cee456ca54..4976b9393e4 100644
--- a/arch/arm/mach-omap2/omap-hotplug.c
+++ b/arch/arm/mach-omap2/omap-hotplug.c
@@ -17,16 +17,13 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
-#include <linux/completion.h>
#include <asm/cacheflush.h>
#include <mach/omap4-common.h>
-static DECLARE_COMPLETION(cpu_killed);
-
int platform_cpu_kill(unsigned int cpu)
{
- return wait_for_completion_timeout(&cpu_killed, 5000);
+ return 1;
}
/*
@@ -35,15 +32,6 @@ int platform_cpu_kill(unsigned int cpu)
*/
void platform_cpu_die(unsigned int cpu)
{
- unsigned int this_cpu = hard_smp_processor_id();
-
- if (cpu != this_cpu) {
- pr_crit("platform_cpu_die running on %u, should be %u\n",
- this_cpu, cpu);
- BUG();
- }
- pr_notice("CPU%u: shutdown\n", cpu);
- complete(&cpu_killed);
flush_cache_all();
dsb();
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 9fbac2c3910..b66cfe8bc46 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -21,7 +21,6 @@
#include <linux/io.h>
#include <asm/cacheflush.h>
-#include <asm/localtimer.h>
#include <asm/smp_scu.h>
#include <mach/hardware.h>
#include <mach/omap4-common.h>
@@ -29,22 +28,10 @@
/* SCU base address */
static void __iomem *scu_base;
-/*
- * Use SCU config register to count number of cores
- */
-static inline unsigned int get_core_count(void)
-{
- if (scu_base)
- return scu_get_core_count(scu_base);
- return 1;
-}
-
static DEFINE_SPINLOCK(boot_lock);
void __cpuinit platform_secondary_init(unsigned int cpu)
{
- trace_hardirqs_off();
-
/*
* If any interrupts are already enabled for the primary
* core (e.g. timer irq), then they will not have been enabled
@@ -76,7 +63,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
omap_modify_auxcoreboot0(0x200, 0xfffffdff);
flush_cache_all();
smp_wmb();
- smp_cross_call(cpumask_of(cpu));
+ smp_cross_call(cpumask_of(cpu), 1);
/*
* Now the secondary core is starting up let it run its
@@ -118,25 +105,9 @@ void __init smp_init_cpus(void)
scu_base = ioremap(OMAP44XX_SCU_BASE, SZ_256);
BUG_ON(!scu_base);
- ncores = get_core_count();
-
- for (i = 0; i < ncores; i++)
- set_cpu_possible(i, true);
-}
-
-void __init smp_prepare_cpus(unsigned int max_cpus)
-{
- unsigned int ncores = get_core_count();
- unsigned int cpu = smp_processor_id();
- int i;
+ ncores = scu_get_core_count(scu_base);
/* sanity check */
- if (ncores == 0) {
- printk(KERN_ERR
- "OMAP4: strange core count of 0? Default to 1\n");
- ncores = 1;
- }
-
if (ncores > NR_CPUS) {
printk(KERN_WARNING
"OMAP4: no. of cores (%d) greater than configured "
@@ -144,13 +115,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
ncores, NR_CPUS);
ncores = NR_CPUS;
}
- smp_store_cpu_info(cpu);
- /*
- * are we trying to boot more cores than exist?
- */
- if (max_cpus > ncores)
- max_cpus = ncores;
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+}
+
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+ int i;
/*
* Initialise the present map, which describes the set of CPUs
@@ -159,18 +131,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
- if (max_cpus > 1) {
- /*
- * Enable the local timer or broadcast device for the
- * boot CPU, but only if we have more than one CPU.
- */
- percpu_timer_setup();
-
- /*
- * Initialise the SCU and wake up the secondary core using
- * wakeup_secondary().
- */
- scu_enable(scu_base);
- wakeup_secondary();
- }
+ /*
+ * Initialise the SCU and wake up the secondary core using
+ * wakeup_secondary().
+ */
+ scu_enable(scu_base);
+ wakeup_secondary();
}
diff --git a/arch/arm/mach-pnx4008/clock.c b/arch/arm/mach-pnx4008/clock.c
index 9d1975fa4d9..a4a3819c96c 100644
--- a/arch/arm/mach-pnx4008/clock.c
+++ b/arch/arm/mach-pnx4008/clock.c
@@ -21,8 +21,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/io.h>
-
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
#include <mach/hardware.h>
#include <mach/clock.h>
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index c93e73d54dd..c98d81ff250 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -94,6 +94,7 @@ config MACH_ARMCORE
select PXA27x
select IWMMXT
select PXA25x
+ select MIGHT_HAVE_PCI
config MACH_EM_X270
bool "CompuLab EM-x270 platform"
diff --git a/arch/arm/mach-pxa/clock.c b/arch/arm/mach-pxa/clock.c
index abba0089a2a..4e4a84be96b 100644
--- a/arch/arm/mach-pxa/clock.c
+++ b/arch/arm/mach-pxa/clock.c
@@ -11,8 +11,8 @@
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/clkdev.h>
-#include <asm/clkdev.h>
#include <mach/pxa2xx-regs.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-pxa/clock.h b/arch/arm/mach-pxa/clock.h
index d8488742b80..12cc0e87e6c 100644
--- a/arch/arm/mach-pxa/clock.h
+++ b/arch/arm/mach-pxa/clock.h
@@ -1,4 +1,4 @@
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
struct clkops {
void (*enable)(struct clk *);
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index aad806c5cb1..1c6602cf50e 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -30,8 +30,8 @@
#include <linux/ata_platform.h>
#include <linux/amba/mmci.h>
#include <linux/gfp.h>
+#include <linux/clkdev.h>
-#include <asm/clkdev.h>
#include <asm/system.h>
#include <mach/hardware.h>
#include <asm/irq.h>
@@ -47,7 +47,6 @@
#include <asm/hardware/gic.h>
-#include <mach/clkdev.h>
#include <mach/platform.h>
#include <mach/irqs.h>
#include <asm/hardware/timer-sp.h>
diff --git a/arch/arm/mach-realview/hotplug.c b/arch/arm/mach-realview/hotplug.c
index f95521a5e5c..a87523d095e 100644
--- a/arch/arm/mach-realview/hotplug.c
+++ b/arch/arm/mach-realview/hotplug.c
@@ -11,14 +11,11 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
-#include <linux/completion.h>
#include <asm/cacheflush.h>
extern volatile int pen_release;
-static DECLARE_COMPLETION(cpu_killed);
-
static inline void cpu_enter_lowpower(void)
{
unsigned int v;
@@ -34,10 +31,10 @@ static inline void cpu_enter_lowpower(void)
" bic %0, %0, #0x20\n"
" mcr p15, 0, %0, c1, c0, 1\n"
" mrc p15, 0, %0, c1, c0, 0\n"
- " bic %0, %0, #0x04\n"
+ " bic %0, %0, %2\n"
" mcr p15, 0, %0, c1, c0, 0\n"
: "=&r" (v)
- : "r" (0)
+ : "r" (0), "Ir" (CR_C)
: "cc");
}
@@ -46,17 +43,17 @@ static inline void cpu_leave_lowpower(void)
unsigned int v;
asm volatile( "mrc p15, 0, %0, c1, c0, 0\n"
- " orr %0, %0, #0x04\n"
+ " orr %0, %0, %1\n"
" mcr p15, 0, %0, c1, c0, 0\n"
" mrc p15, 0, %0, c1, c0, 1\n"
" orr %0, %0, #0x20\n"
" mcr p15, 0, %0, c1, c0, 1\n"
: "=&r" (v)
- :
+ : "Ir" (CR_C)
: "cc");
}
-static inline void platform_do_lowpower(unsigned int cpu)
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
/*
* there is no power-control hardware on this platform, so all
@@ -80,22 +77,19 @@ static inline void platform_do_lowpower(unsigned int cpu)
}
/*
- * getting here, means that we have come out of WFI without
+ * Getting here, means that we have come out of WFI without
* having been woken up - this shouldn't happen
*
- * The trouble is, letting people know about this is not really
- * possible, since we are currently running incoherently, and
- * therefore cannot safely call printk() or anything else
+ * Just note it happening - when we're woken, we can report
+ * its occurrence.
*/
-#ifdef DEBUG
- printk("CPU%u: spurious wakeup call\n", cpu);
-#endif
+ (*spurious)++;
}
}
int platform_cpu_kill(unsigned int cpu)
{
- return wait_for_completion_timeout(&cpu_killed, 5000);
+ return 1;
}
/*
@@ -105,30 +99,22 @@ int platform_cpu_kill(unsigned int cpu)
*/
void platform_cpu_die(unsigned int cpu)
{
-#ifdef DEBUG
- unsigned int this_cpu = hard_smp_processor_id();
-
- if (cpu != this_cpu) {
- printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
- this_cpu, cpu);
- BUG();
- }
-#endif
-
- printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
- complete(&cpu_killed);
+ int spurious = 0;
/*
* we're ready for shutdown now, so do it
*/
cpu_enter_lowpower();
- platform_do_lowpower(cpu);
+ platform_do_lowpower(cpu, &spurious);
/*
* bring this CPU back into the world of cache
* coherency, and then restore interrupts
*/
cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
int platform_cpu_disable(unsigned int cpu)
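In the cpu_enter_lowpower()/cpu_leave_lowpower() hunks above, the hard-coded #0x04 becomes the CR_C constant fed through an "Ir" constraint, which lets the compiler emit the operand either as an ARM data-processing immediate or from a register. An ARM-only sketch of that constraint in isolation (a hypothetical helper, not code from the patch; build with an ARM cross compiler such as arm-linux-gnueabi-gcc -O2):

#include <stdio.h>

#define CR_C	(1u << 2)	/* same bit the hotplug code clears and restores */

static unsigned int set_bits(unsigned int v, unsigned int bits)
{
	/*
	 * "Ir": the operand may be an ARM data-processing immediate ("I")
	 * or a register ("r"); with a constant such as CR_C the compiler
	 * can emit "orr %0, %0, #4" directly.
	 */
	asm("orr %0, %0, %1" : "+r" (v) : "Ir" (bits));
	return v;
}

int main(void)
{
	printf("0x%x\n", set_bits(0x1u, CR_C));	/* prints 0x5 */
	return 0;
}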
diff --git a/arch/arm/mach-realview/include/mach/smp.h b/arch/arm/mach-realview/include/mach/smp.h
index d3cd265cb05..c8221b38ee7 100644
--- a/arch/arm/mach-realview/include/mach/smp.h
+++ b/arch/arm/mach-realview/include/mach/smp.h
@@ -2,14 +2,13 @@
#define ASMARM_ARCH_SMP_H
#include <asm/hardware/gic.h>
-#include <asm/smp_mpidr.h>
/*
* We use IRQ1 as the IPI
*/
-static inline void smp_cross_call(const struct cpumask *mask)
+static inline void smp_cross_call(const struct cpumask *mask, int ipi)
{
- gic_raise_softirq(mask, 1);
+ gic_raise_softirq(mask, ipi);
}
#endif
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index 6da8a2e53c4..a22bf67f2f7 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -19,7 +19,6 @@
#include <asm/cacheflush.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
-#include <asm/localtimer.h>
#include <asm/unified.h>
#include <mach/board-eb.h>
@@ -37,6 +36,19 @@ extern void realview_secondary_startup(void);
*/
volatile int __cpuinitdata pen_release = -1;
+/*
+ * Write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whether they're taking part in coherency
+ * or not. This is necessary for the hotplug code to work reliably.
+ */
+static void write_pen_release(int val)
+{
+ pen_release = val;
+ smp_wmb();
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+}
+
static void __iomem *scu_base_addr(void)
{
if (machine_is_realview_eb_mp())
@@ -50,20 +62,10 @@ static void __iomem *scu_base_addr(void)
return (void __iomem *)0;
}
-static inline unsigned int get_core_count(void)
-{
- void __iomem *scu_base = scu_base_addr();
- if (scu_base)
- return scu_get_core_count(scu_base);
- return 1;
-}
-
static DEFINE_SPINLOCK(boot_lock);
void __cpuinit platform_secondary_init(unsigned int cpu)
{
- trace_hardirqs_off();
-
/*
* if any interrupts are already enabled for the primary
* core (e.g. timer irq), then they will not have been enabled
@@ -75,8 +77,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
- pen_release = -1;
- smp_wmb();
+ write_pen_release(-1);
/*
* Synchronise with the boot thread.
@@ -103,20 +104,14 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
* Note that "pen_release" is the hardware CPU ID, whereas
* "cpu" is Linux's internal ID.
*/
- pen_release = cpu;
- flush_cache_all();
+ write_pen_release(cpu);
/*
- * XXX
- *
- * This is a later addition to the booting protocol: the
- * bootMonitor now puts secondary cores into WFI, so
- * poke_milo() no longer gets the cores moving; we need
- * to send a soft interrupt to wake the secondary core.
- * Use smp_cross_call() for this, since there's little
- * point duplicating the code here
+ * Send the secondary CPU a soft interrupt, thereby causing
+ * the boot monitor to read the system wide flags register,
+ * and branch to the address found there.
*/
- smp_cross_call(cpumask_of(cpu));
+ smp_cross_call(cpumask_of(cpu), 1);
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
@@ -136,48 +131,18 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
return pen_release != -1 ? -ENOSYS : 0;
}
-static void __init poke_milo(void)
-{
- /* nobody is to be released from the pen yet */
- pen_release = -1;
-
- /*
- * Write the address of secondary startup into the system-wide flags
- * register. The BootMonitor waits for this register to become
- * non-zero.
- */
- __raw_writel(BSYM(virt_to_phys(realview_secondary_startup)),
- __io_address(REALVIEW_SYS_FLAGSSET));
-
- mb();
-}
-
/*
* Initialise the CPU possible map early - this describes the CPUs
* which may be present or become present in the system.
*/
void __init smp_init_cpus(void)
{
- unsigned int i, ncores = get_core_count();
+ void __iomem *scu_base = scu_base_addr();
+ unsigned int i, ncores;
- for (i = 0; i < ncores; i++)
- set_cpu_possible(i, true);
-}
-
-void __init smp_prepare_cpus(unsigned int max_cpus)
-{
- unsigned int ncores = get_core_count();
- unsigned int cpu = smp_processor_id();
- int i;
+ ncores = scu_base ? scu_get_core_count(scu_base) : 1;
/* sanity check */
- if (ncores == 0) {
- printk(KERN_ERR
- "Realview: strange CM count of 0? Default to 1\n");
-
- ncores = 1;
- }
-
if (ncores > NR_CPUS) {
printk(KERN_WARNING
"Realview: no. of cores (%d) greater than configured "
@@ -186,13 +151,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
ncores = NR_CPUS;
}
- smp_store_cpu_info(cpu);
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+}
- /*
- * are we trying to boot more cores than exist?
- */
- if (max_cpus > ncores)
- max_cpus = ncores;
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+ int i;
/*
* Initialise the present map, which describes the set of CPUs
@@ -201,21 +166,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
+ scu_enable(scu_base_addr());
+
/*
- * Initialise the SCU if there are more than one CPU and let
- * them know where to start. Note that, on modern versions of
- * MILO, the "poke" doesn't actually do anything until each
- * individual core is sent a soft interrupt to get it out of
- * WFI
+ * Write the address of secondary startup into the
+ * system-wide flags register. The BootMonitor waits
+ * until it receives a soft interrupt, and then the
+ * secondary CPU branches to this address.
*/
- if (max_cpus > 1) {
- /*
- * Enable the local timer or broadcast device for the
- * boot CPU, but only if we have more than one CPU.
- */
- percpu_timer_setup();
-
- scu_enable(scu_base_addr());
- poke_milo();
- }
+ __raw_writel(BSYM(virt_to_phys(realview_secondary_startup)),
+ __io_address(REALVIEW_SYS_FLAGSSET));
}
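
write_pen_release() exists because the observer of pen_release is a CPU that is not (yet, or no longer) taking part in coherency: a plain store plus smp_wmb() only orders the write, it does not push it out to where an incoherent core can see it, hence the explicit D-cache flush and outer-cache clean. The consumer is the holding-pen loop the secondary runs in assembly (the pen: loop in the ux500 headsmp.S hunk further down is the same idea). A conceptual C rendering of that loop, for orientation only - the real code is assembly and the hw_cpu_id name is an assumption:

    /* conceptual equivalent of the holding-pen wait in *_secondary_startup */
    extern volatile int pen_release;

    static void holding_pen_wait(int hw_cpu_id)
    {
    	/* the boot CPU cleaned pen_release to RAM via write_pen_release() */
    	while (pen_release != hw_cpu_id)
    		;	/* caches/MMU are not up yet, so each read goes to memory */

    	/* released: continue into platform_secondary_init(), which writes -1 back */
    }
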
diff --git a/arch/arm/mach-s3c2412/Kconfig b/arch/arm/mach-s3c2412/Kconfig
index 6983cb4d4ca..e82ab4aa7ab 100644
--- a/arch/arm/mach-s3c2412/Kconfig
+++ b/arch/arm/mach-s3c2412/Kconfig
@@ -59,7 +59,7 @@ config MACH_JIVE
Say Y here if you are using the Logitech Jive.
config MACH_JIVE_SHOW_BOOTLOADER
- bool "Allow access to bootloader partitions in MTD"
+ bool "Allow access to bootloader partitions in MTD (EXPERIMENTAL)"
depends on MACH_JIVE && EXPERIMENTAL
config MACH_SMDK2413
diff --git a/arch/arm/mach-s5pv310/hotplug.c b/arch/arm/mach-s5pv310/hotplug.c
index 03652c3605f..afa5392d9fc 100644
--- a/arch/arm/mach-s5pv310/hotplug.c
+++ b/arch/arm/mach-s5pv310/hotplug.c
@@ -13,14 +13,11 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
-#include <linux/completion.h>
#include <asm/cacheflush.h>
extern volatile int pen_release;
-static DECLARE_COMPLETION(cpu_killed);
-
static inline void cpu_enter_lowpower(void)
{
unsigned int v;
@@ -33,13 +30,13 @@ static inline void cpu_enter_lowpower(void)
* Turn off coherency
*/
" mrc p15, 0, %0, c1, c0, 1\n"
- " bic %0, %0, #0x20\n"
+ " bic %0, %0, %2\n"
" mcr p15, 0, %0, c1, c0, 1\n"
" mrc p15, 0, %0, c1, c0, 0\n"
" bic %0, %0, #0x04\n"
" mcr p15, 0, %0, c1, c0, 0\n"
: "=&r" (v)
- : "r" (0)
+ : "r" (0), "Ir" (CR_C)
: "cc");
}
@@ -49,17 +46,17 @@ static inline void cpu_leave_lowpower(void)
asm volatile(
"mrc p15, 0, %0, c1, c0, 0\n"
- " orr %0, %0, #0x04\n"
+ " orr %0, %0, %1\n"
" mcr p15, 0, %0, c1, c0, 0\n"
" mrc p15, 0, %0, c1, c0, 1\n"
" orr %0, %0, #0x20\n"
" mcr p15, 0, %0, c1, c0, 1\n"
: "=&r" (v)
- :
+ : "Ir" (CR_C)
: "cc");
}
-static inline void platform_do_lowpower(unsigned int cpu)
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
/*
* there is no power-control hardware on this platform, so all
@@ -83,22 +80,19 @@ static inline void platform_do_lowpower(unsigned int cpu)
}
/*
- * getting here, means that we have come out of WFI without
+ * Getting here means that we have come out of WFI without
* having been woken up - this shouldn't happen
*
- * The trouble is, letting people know about this is not really
- * possible, since we are currently running incoherently, and
- * therefore cannot safely call printk() or anything else
+ * Just note it happening - when we're woken, we can report
+ * its occurrence.
*/
-#ifdef DEBUG
- printk(KERN_WARN "CPU%u: spurious wakeup call\n", cpu);
-#endif
+ (*spurious)++;
}
}
int platform_cpu_kill(unsigned int cpu)
{
- return wait_for_completion_timeout(&cpu_killed, 5000);
+ return 1;
}
/*
@@ -108,30 +102,22 @@ int platform_cpu_kill(unsigned int cpu)
*/
void platform_cpu_die(unsigned int cpu)
{
-#ifdef DEBUG
- unsigned int this_cpu = hard_smp_processor_id();
-
- if (cpu != this_cpu) {
- printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
- this_cpu, cpu);
- BUG();
- }
-#endif
-
- printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
- complete(&cpu_killed);
+ int spurious = 0;
/*
* we're ready for shutdown now, so do it
*/
cpu_enter_lowpower();
- platform_do_lowpower(cpu);
+ platform_do_lowpower(cpu, &spurious);
/*
* bring this CPU back into the world of cache
* coherency, and then restore interrupts
*/
cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
int platform_cpu_disable(unsigned int cpu)
diff --git a/arch/arm/mach-s5pv310/include/mach/smp.h b/arch/arm/mach-s5pv310/include/mach/smp.h
index e1cc6a251c6..393ccbd52c4 100644
--- a/arch/arm/mach-s5pv310/include/mach/smp.h
+++ b/arch/arm/mach-s5pv310/include/mach/smp.h
@@ -7,14 +7,13 @@
#define ASM_ARCH_SMP_H __FILE__
#include <asm/hardware/gic.h>
-#include <asm/smp_mpidr.h>
/*
* We use IRQ1 as the IPI
*/
-static inline void smp_cross_call(const struct cpumask *mask)
+static inline void smp_cross_call(const struct cpumask *mask, int ipi)
{
- gic_raise_softirq(mask, 1);
+ gic_raise_softirq(mask, ipi);
}
#endif
diff --git a/arch/arm/mach-s5pv310/platsmp.c b/arch/arm/mach-s5pv310/platsmp.c
index 15929c169f8..34093b069f6 100644
--- a/arch/arm/mach-s5pv310/platsmp.c
+++ b/arch/arm/mach-s5pv310/platsmp.c
@@ -22,7 +22,6 @@
#include <linux/io.h>
#include <asm/cacheflush.h>
-#include <asm/localtimer.h>
#include <asm/smp_scu.h>
#include <asm/unified.h>
@@ -38,6 +37,19 @@ extern void s5pv310_secondary_startup(void);
volatile int __cpuinitdata pen_release = -1;
+/*
+ * Write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whether they're taking part in coherency
+ * or not. This is necessary for the hotplug code to work reliably.
+ */
+static void write_pen_release(int val)
+{
+ pen_release = val;
+ smp_wmb();
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+}
+
static void __iomem *scu_base_addr(void)
{
return (void __iomem *)(S5P_VA_SCU);
@@ -47,8 +59,6 @@ static DEFINE_SPINLOCK(boot_lock);
void __cpuinit platform_secondary_init(unsigned int cpu)
{
- trace_hardirqs_off();
-
/*
* if any interrupts are already enabled for the primary
* core (e.g. timer irq), then they will not have been enabled
@@ -60,8 +70,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
- pen_release = -1;
- smp_wmb();
+ write_pen_release(-1);
/*
* Synchronise with the boot thread.
@@ -88,16 +97,14 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
* Note that "pen_release" is the hardware CPU ID, whereas
* "cpu" is Linux's internal ID.
*/
- pen_release = cpu;
- __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
- outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+ write_pen_release(cpu);
/*
* Send the secondary CPU a soft interrupt, thereby causing
* the boot monitor to read the system wide flags register,
* and branch to the address found there.
*/
- smp_cross_call(cpumask_of(cpu));
+ smp_cross_call(cpumask_of(cpu), 1);
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
@@ -130,13 +137,6 @@ void __init smp_init_cpus(void)
ncores = scu_base ? scu_get_core_count(scu_base) : 1;
/* sanity check */
- if (ncores == 0) {
- printk(KERN_ERR
- "S5PV310: strange CM count of 0? Default to 1\n");
-
- ncores = 1;
- }
-
if (ncores > NR_CPUS) {
printk(KERN_WARNING
"S5PV310: no. of cores (%d) greater than configured "
@@ -149,18 +149,10 @@ void __init smp_init_cpus(void)
set_cpu_possible(i, true);
}
-void __init smp_prepare_cpus(unsigned int max_cpus)
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
- unsigned int ncores = num_possible_cpus();
- unsigned int cpu = smp_processor_id();
int i;
- smp_store_cpu_info(cpu);
-
- /* are we trying to boot more cores than exist? */
- if (max_cpus > ncores)
- max_cpus = ncores;
-
/*
* Initialise the present map, which describes the set of CPUs
* actually populated at the present time.
@@ -168,25 +160,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
+ scu_enable(scu_base_addr());
+
/*
- * Initialise the SCU if there are more than one CPU and let
- * them know where to start.
+ * Write the address of secondary startup into the
+ * system-wide flags register. The boot monitor waits
+ * until it receives a soft interrupt, and then the
+ * secondary CPU branches to this address.
*/
- if (max_cpus > 1) {
- /*
- * Enable the local timer or broadcast device for the
- * boot CPU, but only if we have more than one CPU.
- */
- percpu_timer_setup();
-
- scu_enable(scu_base_addr());
-
- /*
- * Write the address of secondary startup into the
- * system-wide flags register. The boot monitor waits
- * until it receives a soft interrupt, and then the
- * secondary CPU branches to this address.
- */
__raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)), S5P_VA_SYSRAM);
- }
}
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 51dcd59eda6..63293335724 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -6,7 +6,7 @@ config ARCH_SH7367
bool "SH-Mobile G3 (SH7367)"
select CPU_V6
select HAVE_CLK
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select SH_CLK_CPG
select GENERIC_CLOCKEVENTS
@@ -14,7 +14,7 @@ config ARCH_SH7377
bool "SH-Mobile G4 (SH7377)"
select CPU_V7
select HAVE_CLK
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select SH_CLK_CPG
select GENERIC_CLOCKEVENTS
@@ -22,7 +22,7 @@ config ARCH_SH7372
bool "SH-Mobile AP4 (SH7372)"
select CPU_V7
select HAVE_CLK
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
select SH_CLK_CPG
select GENERIC_CLOCKEVENTS
diff --git a/arch/arm/mach-shmobile/clock-sh7367.c b/arch/arm/mach-shmobile/clock-sh7367.c
index 9f78729098f..6b186aefcbd 100644
--- a/arch/arm/mach-shmobile/clock-sh7367.c
+++ b/arch/arm/mach-shmobile/clock-sh7367.c
@@ -20,8 +20,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
+#include <linux/clkdev.h>
#include <mach/common.h>
-#include <asm/clkdev.h>
/* SH7367 registers */
#define RTFRQCR 0xe6150000
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 3aa02606943..d98deb497c2 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -20,8 +20,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
+#include <linux/clkdev.h>
#include <mach/common.h>
-#include <asm/clkdev.h>
/* SH7372 registers */
#define FRQCRA 0xe6150000
diff --git a/arch/arm/mach-shmobile/clock-sh7377.c b/arch/arm/mach-shmobile/clock-sh7377.c
index f91395aeb9a..95942466e63 100644
--- a/arch/arm/mach-shmobile/clock-sh7377.c
+++ b/arch/arm/mach-shmobile/clock-sh7377.c
@@ -20,8 +20,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
+#include <linux/clkdev.h>
#include <mach/common.h>
-#include <asm/clkdev.h>
/* SH7377 registers */
#define RTFRQCR 0xe6150000
diff --git a/arch/arm/mach-tcc8k/clock.c b/arch/arm/mach-tcc8k/clock.c
index ba32a15127a..3970a9cdce2 100644
--- a/arch/arm/mach-tcc8k/clock.c
+++ b/arch/arm/mach-tcc8k/clock.c
@@ -12,8 +12,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/spinlock.h>
-
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
#include <mach/clock.h>
#include <mach/irqs.h>
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index ae19f95585b..77948e0f490 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/regulator/consumer.h>
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
#include "clock.h"
#include "board.h"
diff --git a/arch/arm/mach-tegra/clock.h b/arch/arm/mach-tegra/clock.h
index 94fd859770f..083a4cfc6cf 100644
--- a/arch/arm/mach-tegra/clock.h
+++ b/arch/arm/mach-tegra/clock.h
@@ -21,7 +21,7 @@
#define __MACH_TEGRA_CLOCK_H
#include <linux/list.h>
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
#define DIV_BUS (1 << 0)
#define DIV_U71 (1 << 1)
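
The string of include changes from here on is the mechanical side of moving from the ARM-private <asm/clkdev.h> to the generic <linux/clkdev.h>, matching the COMMON_CLKDEV -> CLKDEV_LOOKUP rename in the Kconfig hunks above. The registration API itself is unchanged; a small example of the usual pattern, with the clock and device names invented for illustration:

    #include <linux/clkdev.h>

    /* "dev:uart0" is a made-up device id; struct clk layout is platform-specific */
    static struct clk uart0_clk;

    static struct clk_lookup uart0_lookup =
    	CLKDEV_INIT("dev:uart0", NULL, &uart0_clk);

    static void __init example_clk_init(void)
    {
    	clkdev_add(&uart0_lookup);
    	/* drivers keep finding it with clk_get(&pdev->dev, NULL) as before */
    }
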
diff --git a/arch/arm/mach-tegra/hotplug.c b/arch/arm/mach-tegra/hotplug.c
index 8e7f115aa21..a5cb1ce76ff 100644
--- a/arch/arm/mach-tegra/hotplug.c
+++ b/arch/arm/mach-tegra/hotplug.c
@@ -11,12 +11,9 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
-#include <linux/completion.h>
#include <asm/cacheflush.h>
-static DECLARE_COMPLETION(cpu_killed);
-
static inline void cpu_enter_lowpower(void)
{
unsigned int v;
@@ -29,13 +26,13 @@ static inline void cpu_enter_lowpower(void)
* Turn off coherency
*/
" mrc p15, 0, %0, c1, c0, 1\n"
- " bic %0, %0, #0x20\n"
+ " bic %0, %0, %2\n"
" mcr p15, 0, %0, c1, c0, 1\n"
" mrc p15, 0, %0, c1, c0, 0\n"
" bic %0, %0, #0x04\n"
" mcr p15, 0, %0, c1, c0, 0\n"
: "=&r" (v)
- : "r" (0)
+ : "r" (0), "Ir" (CR_C)
: "cc");
}
@@ -45,17 +42,17 @@ static inline void cpu_leave_lowpower(void)
asm volatile(
"mrc p15, 0, %0, c1, c0, 0\n"
- " orr %0, %0, #0x04\n"
+ " orr %0, %0, %1\n"
" mcr p15, 0, %0, c1, c0, 0\n"
" mrc p15, 0, %0, c1, c0, 1\n"
" orr %0, %0, #0x20\n"
" mcr p15, 0, %0, c1, c0, 1\n"
: "=&r" (v)
- :
+ : "Ir" (CR_C)
: "cc");
}
-static inline void platform_do_lowpower(unsigned int cpu)
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
/*
* there is no power-control hardware on this platform, so all
@@ -79,22 +76,19 @@ static inline void platform_do_lowpower(unsigned int cpu)
/*}*/
/*
- * getting here, means that we have come out of WFI without
+ * Getting here means that we have come out of WFI without
* having been woken up - this shouldn't happen
*
- * The trouble is, letting people know about this is not really
- * possible, since we are currently running incoherently, and
- * therefore cannot safely call printk() or anything else
+ * Just note it happening - when we're woken, we can report
+ * its occurrence.
*/
-#ifdef DEBUG
- printk(KERN_WARN "CPU%u: spurious wakeup call\n", cpu);
-#endif
+ (*spurious)++;
}
}
int platform_cpu_kill(unsigned int cpu)
{
- return wait_for_completion_timeout(&cpu_killed, 5000);
+ return 1;
}
/*
@@ -104,30 +98,22 @@ int platform_cpu_kill(unsigned int cpu)
*/
void platform_cpu_die(unsigned int cpu)
{
-#ifdef DEBUG
- unsigned int this_cpu = hard_smp_processor_id();
-
- if (cpu != this_cpu) {
- printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
- this_cpu, cpu);
- BUG();
- }
-#endif
-
- printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
- complete(&cpu_killed);
+ int spurious = 0;
/*
* we're ready for shutdown now, so do it
*/
cpu_enter_lowpower();
- platform_do_lowpower(cpu);
+ platform_do_lowpower(cpu, &spurious);
/*
* bring this CPU back into the world of cache
* coherency, and then restore interrupts
*/
cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
int platform_cpu_disable(unsigned int cpu)
diff --git a/arch/arm/mach-tegra/include/mach/smp.h b/arch/arm/mach-tegra/include/mach/smp.h
index e4a34a35a54..c8221b38ee7 100644
--- a/arch/arm/mach-tegra/include/mach/smp.h
+++ b/arch/arm/mach-tegra/include/mach/smp.h
@@ -2,21 +2,13 @@
#define ASMARM_ARCH_SMP_H
#include <asm/hardware/gic.h>
-#include <asm/smp_mpidr.h>
/*
* We use IRQ1 as the IPI
*/
-static inline void smp_cross_call(const struct cpumask *mask)
-{
- gic_raise_softirq(mask, 1);
-}
-
-/*
- * Do nothing on MPcore.
- */
-static inline void smp_cross_call_done(cpumask_t callmap)
+static inline void smp_cross_call(const struct cpumask *mask, int ipi)
{
+ gic_raise_softirq(mask, ipi);
}
#endif
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 3b7376c4f35..ec1f68924ed 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -22,7 +22,6 @@
#include <asm/cacheflush.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
-#include <asm/localtimer.h>
#include <asm/smp_scu.h>
#include <mach/iomap.h>
@@ -41,8 +40,6 @@ static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE);
void __cpuinit platform_secondary_init(unsigned int cpu)
{
- trace_hardirqs_off();
-
/*
* if any interrupts are already enabled for the primary
* core (e.g. timer irq), then they will not have been enabled
@@ -117,24 +114,20 @@ void __init smp_init_cpus(void)
{
unsigned int i, ncores = scu_get_core_count(scu_base);
+ if (ncores > NR_CPUS) {
+ printk(KERN_ERR "Tegra: no. of cores (%u) greater than configured (%u), clipping\n",
+ ncores, NR_CPUS);
+ ncores = NR_CPUS;
+ }
+
for (i = 0; i < ncores; i++)
cpu_set(i, cpu_possible_map);
}
-void __init smp_prepare_cpus(unsigned int max_cpus)
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
- unsigned int ncores = scu_get_core_count(scu_base);
- unsigned int cpu = smp_processor_id();
int i;
- smp_store_cpu_info(cpu);
-
- /*
- * are we trying to boot more cores than exist?
- */
- if (max_cpus > ncores)
- max_cpus = ncores;
-
/*
* Initialise the present map, which describes the set of CPUs
* actually populated at the present time.
@@ -142,15 +135,5 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
- /*
- * Initialise the SCU if there are more than one CPU and let
- * them know where to start. Note that, on modern versions of
- * MILO, the "poke" doesn't actually do anything until each
- * individual core is sent a soft interrupt to get it out of
- * WFI
- */
- if (max_cpus > 1) {
- percpu_timer_setup();
- scu_enable(scu_base);
- }
+ scu_enable(scu_base);
}
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index ae3b308e22a..f0dae6d8ba5 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -24,8 +24,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/hrtimer.h>
-
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
#include <mach/iomap.h>
diff --git a/arch/arm/mach-u300/clock.c b/arch/arm/mach-u300/clock.c
index 7458fc6df5c..fabcc49abe8 100644
--- a/arch/arm/mach-u300/clock.c
+++ b/arch/arm/mach-u300/clock.c
@@ -25,8 +25,8 @@
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/seq_file.h>
+#include <linux/clkdev.h>
-#include <asm/clkdev.h>
#include <mach/hardware.h>
#include <mach/syscon.h>
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index 1675047daf2..531de5c6364 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -13,8 +13,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
-
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
#include <plat/mtu.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-ux500/headsmp.S b/arch/arm/mach-ux500/headsmp.S
index a6be2cdf2b2..64fa451edcf 100644
--- a/arch/arm/mach-ux500/headsmp.S
+++ b/arch/arm/mach-ux500/headsmp.S
@@ -23,7 +23,6 @@ ENTRY(u8500_secondary_startup)
ldmia r4, {r5, r6}
sub r4, r4, r5
add r6, r6, r4
- dsb
pen: ldr r7, [r6]
cmp r7, r0
bne pen
diff --git a/arch/arm/mach-ux500/hotplug.c b/arch/arm/mach-ux500/hotplug.c
index b782a03024b..dd8037ebccf 100644
--- a/arch/arm/mach-ux500/hotplug.c
+++ b/arch/arm/mach-ux500/hotplug.c
@@ -11,14 +11,11 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
-#include <linux/completion.h>
#include <asm/cacheflush.h>
extern volatile int pen_release;
-static DECLARE_COMPLETION(cpu_killed);
-
static inline void platform_do_lowpower(unsigned int cpu)
{
flush_cache_all();
@@ -38,7 +35,7 @@ static inline void platform_do_lowpower(unsigned int cpu)
int platform_cpu_kill(unsigned int cpu)
{
- return wait_for_completion_timeout(&cpu_killed, 5000);
+ return 1;
}
/*
@@ -48,19 +45,6 @@ int platform_cpu_kill(unsigned int cpu)
*/
void platform_cpu_die(unsigned int cpu)
{
-#ifdef DEBUG
- unsigned int this_cpu = hard_smp_processor_id();
-
- if (cpu != this_cpu) {
- printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
- this_cpu, cpu);
- BUG();
- }
-#endif
-
- printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
- complete(&cpu_killed);
-
/* directly enter low power state, skipping secure registers */
platform_do_lowpower(cpu);
}
diff --git a/arch/arm/mach-ux500/include/mach/smp.h b/arch/arm/mach-ux500/include/mach/smp.h
index 197e8417375..ca2b15b1b3b 100644
--- a/arch/arm/mach-ux500/include/mach/smp.h
+++ b/arch/arm/mach-ux500/include/mach/smp.h
@@ -10,7 +10,6 @@
#define ASMARM_ARCH_SMP_H
#include <asm/hardware/gic.h>
-#include <asm/smp_mpidr.h>
/* This is required to wakeup the secondary core */
extern void u8500_secondary_startup(void);
@@ -18,8 +17,8 @@ extern void u8500_secondary_startup(void);
/*
* We use IRQ1 as the IPI
*/
-static inline void smp_cross_call(const struct cpumask *mask)
+static inline void smp_cross_call(const struct cpumask *mask, int ipi)
{
- gic_raise_softirq(mask, 1);
+ gic_raise_softirq(mask, ipi);
}
#endif
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index b5077b4c419..2115a0cf07b 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -18,7 +18,6 @@
#include <linux/io.h>
#include <asm/cacheflush.h>
-#include <asm/localtimer.h>
#include <asm/smp_scu.h>
#include <mach/hardware.h>
@@ -28,17 +27,23 @@
*/
volatile int __cpuinitdata pen_release = -1;
-static unsigned int __init get_core_count(void)
+/*
+ * Write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whether they're taking part in coherency
+ * or not. This is necessary for the hotplug code to work reliably.
+ */
+static void write_pen_release(int val)
{
- return scu_get_core_count(__io_address(UX500_SCU_BASE));
+ pen_release = val;
+ smp_wmb();
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}
static DEFINE_SPINLOCK(boot_lock);
void __cpuinit platform_secondary_init(unsigned int cpu)
{
- trace_hardirqs_off();
-
/*
* if any interrupts are already enabled for the primary
* core (e.g. timer irq), then they will not have been enabled
@@ -50,7 +55,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
- pen_release = -1;
+ write_pen_release(-1);
/*
* Synchronise with the boot thread.
@@ -74,11 +79,9 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
* the holding pen - release it, then wait for it to flag
* that it has been released by resetting pen_release.
*/
- pen_release = cpu;
- __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
- outer_clean_range(__pa(&pen_release), __pa(&pen_release) + 1);
+ write_pen_release(cpu);
- smp_cross_call(cpumask_of(cpu));
+ smp_cross_call(cpumask_of(cpu), 1);
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
@@ -97,9 +100,6 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
static void __init wakeup_secondary(void)
{
- /* nobody is to be released from the pen yet */
- pen_release = -1;
-
/*
* write the address of secondary startup into the backup ram register
* at offset 0x1FF4, then write the magic number 0xA1FEED01 to the
@@ -126,40 +126,26 @@ static void __init wakeup_secondary(void)
*/
void __init smp_init_cpus(void)
{
- unsigned int i, ncores = get_core_count();
+ unsigned int i, ncores;
- for (i = 0; i < ncores; i++)
- set_cpu_possible(i, true);
-}
-
-void __init smp_prepare_cpus(unsigned int max_cpus)
-{
- unsigned int ncores = get_core_count();
- unsigned int cpu = smp_processor_id();
- int i;
+ ncores = scu_get_core_count(__io_address(UX500_SCU_BASE));
/* sanity check */
- if (ncores == 0) {
- printk(KERN_ERR
- "U8500: strange CM count of 0? Default to 1\n");
- ncores = 1;
- }
-
- if (ncores > num_possible_cpus()) {
+ if (ncores > NR_CPUS) {
printk(KERN_WARNING
"U8500: no. of cores (%d) greater than configured "
"maximum of %d - clipping\n",
- ncores, num_possible_cpus());
- ncores = num_possible_cpus();
+ ncores, NR_CPUS);
+ ncores = NR_CPUS;
}
- smp_store_cpu_info(cpu);
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+}
- /*
- * are we trying to boot more cores than exist?
- */
- if (max_cpus > ncores)
- max_cpus = ncores;
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+ int i;
/*
* Initialise the present map, which describes the set of CPUs
@@ -168,13 +154,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
- if (max_cpus > 1) {
- /*
- * Enable the local timer or broadcast device for the
- * boot CPU, but only if we have more than one CPU.
- */
- percpu_timer_setup();
- scu_enable(__io_address(UX500_SCU_BASE));
- wakeup_secondary();
- }
+ scu_enable(__io_address(UX500_SCU_BASE));
+ wakeup_secondary();
}
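
The platsmp.c hunks all follow the same split: smp_init_cpus() now only derives the possible map from the SCU core count (with the NR_CPUS clip), while the new platform_smp_prepare_cpus() hook fills the present map and does the platform-specific poke; the dropped smp_store_cpu_info(), max_cpus clamping and percpu_timer_setup() calls are presumably absorbed by the generic smp_prepare_cpus() in arch/arm/kernel/smp.c (only visible in the diffstat). A skeleton of the new division of labour for a hypothetical platform - the my_* names are placeholders:

    void __init smp_init_cpus(void)
    {
    	unsigned int i, ncores = scu_get_core_count(my_scu_base);

    	if (ncores > NR_CPUS)
    		ncores = NR_CPUS;	/* clip, as the files above do */

    	for (i = 0; i < ncores; i++)
    		set_cpu_possible(i, true);
    }

    void __init platform_smp_prepare_cpus(unsigned int max_cpus)
    {
    	int i;

    	for (i = 0; i < max_cpus; i++)
    		set_cpu_present(i, true);

    	scu_enable(my_scu_base);	/* now done unconditionally */
    	/* ...then tell the boot monitor/backup RAM where the secondaries start */
    }
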
diff --git a/arch/arm/mach-versatile/Kconfig b/arch/arm/mach-versatile/Kconfig
index c781f30c836..3f7b5e9d83c 100644
--- a/arch/arm/mach-versatile/Kconfig
+++ b/arch/arm/mach-versatile/Kconfig
@@ -4,6 +4,7 @@ menu "Versatile platform type"
config ARCH_VERSATILE_PB
bool "Support Versatile/PB platform"
select CPU_ARM926T
+ select MIGHT_HAVE_PCI
default y
help
Include support for the ARM(R) Versatile/PB platform.
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 40a024c71e4..13a83e45a33 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -31,8 +31,8 @@
#include <linux/amba/pl022.h>
#include <linux/io.h>
#include <linux/gfp.h>
+#include <linux/clkdev.h>
-#include <asm/clkdev.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/leds.h>
@@ -46,7 +46,6 @@
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/mach/map.h>
-#include <mach/clkdev.h>
#include <mach/hardware.h>
#include <mach/platform.h>
#include <asm/hardware/timer-sp.h>
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 1b71b77ade2..2c0ac7de281 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -5,4 +5,5 @@
obj-y := v2m.o
obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index cb5793ef3ee..e628402b754 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -8,8 +8,8 @@
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
+#include <linux/clkdev.h>
-#include <asm/clkdev.h>
#include <asm/pgtable.h>
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/cache-l2x0.h>
@@ -18,7 +18,6 @@
#include <asm/pmu.h>
#include <asm/smp_twd.h>
-#include <mach/clkdev.h>
#include <mach/ct-ca9x4.h>
#include <asm/hardware/timer-sp.h>
diff --git a/arch/arm/mach-vexpress/hotplug.c b/arch/arm/mach-vexpress/hotplug.c
new file mode 100644
index 00000000000..ea4cbfb90a6
--- /dev/null
+++ b/arch/arm/mach-vexpress/hotplug.c
@@ -0,0 +1,128 @@
+/*
+ * linux/arch/arm/mach-vexpress/hotplug.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+
+extern volatile int pen_release;
+
+static inline void cpu_enter_lowpower(void)
+{
+ unsigned int v;
+
+ flush_cache_all();
+ asm volatile(
+ "mcr p15, 0, %1, c7, c5, 0\n"
+ " mcr p15, 0, %1, c7, c10, 4\n"
+ /*
+ * Turn off coherency
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %3\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "r" (0), "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+ unsigned int v;
+
+ asm volatile(
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " orr %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+ /*
+ * there is no power-control hardware on this platform, so all
+ * we can do is put the core into WFI; this is safe as the calling
+ * code will have already disabled interrupts
+ */
+ for (;;) {
+ /*
+ * here's the WFI
+ */
+ asm(".word 0xe320f003\n"
+ :
+ :
+ : "memory", "cc");
+
+ if (pen_release == cpu) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+ break;
+ }
+
+ /*
+ * Getting here means that we have come out of WFI without
+ * having been woken up - this shouldn't happen
+ *
+ * Just note it happening - when we're woken, we can report
+ * its occurrence.
+ */
+ (*spurious)++;
+ }
+}
+
+int platform_cpu_kill(unsigned int cpu)
+{
+ return 1;
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void platform_cpu_die(unsigned int cpu)
+{
+ int spurious = 0;
+
+ /*
+ * we're ready for shutdown now, so do it
+ */
+ cpu_enter_lowpower();
+ platform_do_lowpower(cpu, &spurious);
+
+ /*
+ * bring this CPU back into the world of cache
+ * coherency, and then restore interrupts
+ */
+ cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+ /*
+ * we don't allow CPU 0 to be shut down (it is still too special
+ * e.g. clock tick interrupts)
+ */
+ return cpu == 0 ? -EPERM : 0;
+}
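
The new vexpress hotplug.c borrows the realview low-power sequence but passes the constants as asm operands instead of literals. As an annotation of what those operands are (the macro names here are mine, not from this series):

    /* annotation only: the bits toggled by cpu_enter/leave_lowpower() above */
    #define SCTLR_C_BIT	(1 << 2)	/* CR_C: data cache enable, System Control Register */
    #define ACTLR_SMP_BIT	(1 << 6)	/* the 0x40 operand: coherency ("SMP") bit in the
    					   Cortex-A9 auxiliary control register */
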
diff --git a/arch/arm/mach-vexpress/include/mach/smp.h b/arch/arm/mach-vexpress/include/mach/smp.h
index 5a6da4fd247..4c05e4a9713 100644
--- a/arch/arm/mach-vexpress/include/mach/smp.h
+++ b/arch/arm/mach-vexpress/include/mach/smp.h
@@ -2,13 +2,12 @@
#define __MACH_SMP_H
#include <asm/hardware/gic.h>
-#include <asm/smp_mpidr.h>
/*
* We use IRQ1 as the IPI
*/
-static inline void smp_cross_call(const struct cpumask *mask)
+static inline void smp_cross_call(const struct cpumask *mask, int ipi)
{
- gic_raise_softirq(mask, 1);
+ gic_raise_softirq(mask, ipi);
}
#endif
diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c
index dfb591031d1..b1687b6abe6 100644
--- a/arch/arm/mach-vexpress/platsmp.c
+++ b/arch/arm/mach-vexpress/platsmp.c
@@ -17,7 +17,6 @@
#include <linux/io.h>
#include <asm/cacheflush.h>
-#include <asm/localtimer.h>
#include <asm/smp_scu.h>
#include <asm/unified.h>
@@ -35,6 +34,19 @@ extern void vexpress_secondary_startup(void);
*/
volatile int __cpuinitdata pen_release = -1;
+/*
+ * Write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whether they're taking part in coherency
+ * or not. This is necessary for the hotplug code to work reliably.
+ */
+static void write_pen_release(int val)
+{
+ pen_release = val;
+ smp_wmb();
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+}
+
static void __iomem *scu_base_addr(void)
{
return MMIO_P2V(A9_MPCORE_SCU);
@@ -44,8 +56,6 @@ static DEFINE_SPINLOCK(boot_lock);
void __cpuinit platform_secondary_init(unsigned int cpu)
{
- trace_hardirqs_off();
-
/*
* if any interrupts are already enabled for the primary
* core (e.g. timer irq), then they will not have been enabled
@@ -57,8 +67,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
- pen_release = -1;
- smp_wmb();
+ write_pen_release(-1);
/*
* Synchronise with the boot thread.
@@ -83,16 +92,14 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
* since we haven't sent them a soft interrupt, they shouldn't
* be there.
*/
- pen_release = cpu;
- __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
- outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+ write_pen_release(cpu);
/*
* Send the secondary CPU a soft interrupt, thereby causing
* the boot monitor to read the system wide flags register,
* and branch to the address found there.
*/
- smp_cross_call(cpumask_of(cpu));
+ smp_cross_call(cpumask_of(cpu), 1);
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
@@ -124,13 +131,6 @@ void __init smp_init_cpus(void)
ncores = scu_base ? scu_get_core_count(scu_base) : 1;
/* sanity check */
- if (ncores == 0) {
- printk(KERN_ERR
- "vexpress: strange CM count of 0? Default to 1\n");
-
- ncores = 1;
- }
-
if (ncores > NR_CPUS) {
printk(KERN_WARNING
"vexpress: no. of cores (%d) greater than configured "
@@ -143,20 +143,10 @@ void __init smp_init_cpus(void)
set_cpu_possible(i, true);
}
-void __init smp_prepare_cpus(unsigned int max_cpus)
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
- unsigned int ncores = num_possible_cpus();
- unsigned int cpu = smp_processor_id();
int i;
- smp_store_cpu_info(cpu);
-
- /*
- * are we trying to boot more cores than exist?
- */
- if (max_cpus > ncores)
- max_cpus = ncores;
-
/*
* Initialise the present map, which describes the set of CPUs
* actually populated at the present time.
@@ -164,27 +154,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
+ scu_enable(scu_base_addr());
+
/*
- * Initialise the SCU if there are more than one CPU and let
- * them know where to start.
+ * Write the address of secondary startup into the
+ * system-wide flags register. The boot monitor waits
+ * until it receives a soft interrupt, and then the
+ * secondary CPU branches to this address.
*/
- if (max_cpus > 1) {
- /*
- * Enable the local timer or broadcast device for the
- * boot CPU, but only if we have more than one CPU.
- */
- percpu_timer_setup();
-
- scu_enable(scu_base_addr());
-
- /*
- * Write the address of secondary startup into the
- * system-wide flags register. The boot monitor waits
- * until it receives a soft interrupt, and then the
- * secondary CPU branches to this address.
- */
- writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
- writel(BSYM(virt_to_phys(vexpress_secondary_startup)),
- MMIO_P2V(V2M_SYS_FLAGSSET));
- }
+ writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
+ writel(BSYM(virt_to_phys(vexpress_secondary_startup)),
+ MMIO_P2V(V2M_SYS_FLAGSSET));
}
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index de13603dc02..a9ed3428a2f 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -11,8 +11,8 @@
#include <linux/spinlock.h>
#include <linux/sysdev.h>
#include <linux/usb/isp1760.h>
+#include <linux/clkdev.h>
-#include <asm/clkdev.h>
#include <asm/sizes.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
@@ -20,7 +20,6 @@
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/timer-sp.h>
-#include <mach/clkdev.h>
#include <mach/motherboard.h>
#include <plat/sched_clock.h>
diff --git a/arch/arm/mach-w90x900/clock.h b/arch/arm/mach-w90x900/clock.h
index c56ddab3d91..b88a1b16b2e 100644
--- a/arch/arm/mach-w90x900/clock.h
+++ b/arch/arm/mach-w90x900/clock.h
@@ -10,7 +10,7 @@
* the Free Software Foundation; either version 2 of the License.
*/
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
void nuc900_clk_enable(struct clk *clk, int enable);
void nuc900_subclk_enable(struct clk *clk, int enable);
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 4414a01e1e8..8493ed04797 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -599,6 +599,14 @@ config CPU_CP15_MPU
help
Processor has the CP15 register, which has MPU related registers.
+config CPU_USE_DOMAINS
+ bool
+ depends on MMU
+ default y if !CPU_32v6K
+ help
+ This option enables or disables the use of domain switching
+ via the set_fs() function.
+
#
# CPU supports 36-bit I/O
#
@@ -628,6 +636,33 @@ config ARM_THUMBEE
Say Y here if you have a CPU with the ThumbEE extension and code to
make use of it. Say N for code that can run on CPUs without ThumbEE.
+config SWP_EMULATE
+ bool "Emulate SWP/SWPB instructions"
+ depends on CPU_V7
+ select HAVE_PROC_CPU if PROC_FS
+ default y if SMP
+ help
+ ARMv6 architecture deprecates use of the SWP/SWPB instructions.
+ ARMv7 multiprocessing extensions introduce the ability to disable
+ these instructions, triggering an undefined instruction exception
+ when executed. Say Y here to enable software emulation of these
+ instructions for userspace (not kernel) using LDREX/STREX.
+ Also creates /proc/cpu/swp_emulation for statistics.
+
+ In some older versions of glibc [<=2.8] SWP is used during futex
+ trylock() operations with the assumption that the code will not
+ be preempted. This invalid assumption may be more likely to fail
+ with SWP emulation enabled, leading to deadlock of the user
+ application.
+
+ NOTE: when accessing uncached shared regions, LDREX/STREX rely
+ on an external transaction monitoring block called a global
+ monitor to maintain update atomicity. If your system does not
+ implement a global monitor, this option can cause programs that
+ perform SWP operations to uncached memory to deadlock.
+
+ If unsure, say Y.
+
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
depends on ARCH_SUPPORTS_BIG_ENDIAN
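
The SWP_EMULATE help text above describes the mechanism: the ARMv7 SW bit makes SWP/SWPB trap as undefined instructions, and the kernel then performs the swap on the faulting task's behalf with exclusive accesses. The emulation itself is in the new arch/arm/kernel/swp_emulate.c (present in the diffstat, not in this excerpt); as a sketch of the underlying primitive only, an LDREX/STREX-based swap looks like this:

    /* sketch of an exclusive-monitor swap, the primitive SWP emulation is built on */
    static inline unsigned int ldrex_strex_swap(unsigned int newval, unsigned int *ptr)
    {
    	unsigned int oldval, failed;

    	do {
    		asm volatile(
    		"	ldrex	%0, [%3]\n"	/* load and mark the address exclusive */
    		"	strex	%1, %2, [%3]\n"	/* store only if still exclusive */
    		: "=&r" (oldval), "=&r" (failed)
    		: "r" (newval), "r" (ptr)
    		: "memory", "cc");
    	} while (failed);			/* retry if another observer intervened */

    	return oldval;
    }
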
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 809f1bf9fa2..6b48e0a3d7a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -312,7 +312,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
addr = page_address(page);
if (addr)
- *handle = page_to_dma(dev, page);
+ *handle = pfn_to_dma(dev, page_to_pfn(page));
return addr;
}
@@ -407,7 +407,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
if (!arch_is_coherent())
__dma_free_remap(cpu_addr, size);
- __dma_free_buffer(dma_to_page(dev, handle), size);
+ __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
}
EXPORT_SYMBOL(dma_free_coherent);
@@ -555,17 +555,20 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
struct scatterlist *s;
int i, j;
+ BUG_ON(!valid_dma_direction(dir));
+
for_each_sg(sg, s, nents, i) {
- s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+ s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
s->length, dir);
if (dma_mapping_error(dev, s->dma_address))
goto bad_mapping;
}
+ debug_dma_map_sg(dev, sg, nents, nents, dir);
return nents;
bad_mapping:
for_each_sg(sg, s, i, j)
- dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
return 0;
}
EXPORT_SYMBOL(dma_map_sg);
@@ -586,8 +589,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
struct scatterlist *s;
int i;
+ debug_dma_unmap_sg(dev, sg, nents, dir);
+
for_each_sg(sg, s, nents, i)
- dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);
@@ -612,6 +617,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
__dma_page_dev_to_cpu(sg_page(s), s->offset,
s->length, dir);
}
+
+ debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
@@ -636,5 +643,16 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
__dma_page_cpu_to_dev(sg_page(s), s->offset,
s->length, dir);
}
+
+ debug_dma_sync_sg_for_device(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES 4096
+
+static int __init dma_debug_do_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+ return 0;
+}
+fs_initcall(dma_debug_do_init);
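
These dma-mapping.c hunks hook the scatterlist paths into the generic DMA-API debug facility (the debug_dma_* calls) and register it via fs_initcall with a 4096-entry pool. Nothing changes for drivers; with CONFIG_DMA_API_DEBUG enabled, ordinary usage such as the following now gets checked for unmapped entries and direction mismatches - a generic usage sketch, not code from this diff:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* generic dma_map_sg() usage; dev/sgl/nents are supplied by the caller */
    static int example_map(struct device *dev, struct scatterlist *sgl, int nents)
    {
    	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

    	if (count == 0)
    		return -ENOMEM;			/* mapping failed, nothing to unmap */

    	/* ... hand the mapped list to the hardware and wait for completion ... */

    	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);	/* note: the original nents */
    	return 0;
    }
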
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 55c17a6fb22..ab506272b2d 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -204,12 +204,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
/*
* Don't allow RAM to be mapped - this causes problems with ARMv6+
*/
- if (pfn_valid(pfn)) {
- printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
- "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
- "will fail in the next kernel release. Please fix your driver.\n");
- WARN_ON(1);
- }
+ if (WARN_ON(pfn_valid(pfn)))
+ return NULL;
type = get_mem_type(mtype);
if (!type)
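
After this change, ioremap() of an address that pfn_valid() recognises as system RAM fails outright (WARN_ON plus a NULL return) instead of merely warning, so callers have to treat the NULL as fatal. A minimal caller-side sketch - EXAMPLE_PHYS_BASE/EXAMPLE_SIZE are placeholders:

    static int __init example_probe(void)
    {
    	void __iomem *regs = ioremap(EXAMPLE_PHYS_BASE, EXAMPLE_SIZE);

    	if (!regs)		/* now also taken when the area turns out to be RAM */
    		return -ENOMEM;

    	/* ... use readl()/writel() on regs ... */

    	iounmap(regs);
    	return 0;
    }
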
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 72ad3e1f56c..79c01f540cb 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -24,6 +24,7 @@
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
+#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -914,12 +915,11 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
{
struct map_desc map;
unsigned long addr;
- void *vectors;
/*
* Allocate the vector page early.
*/
- vectors = early_alloc(PAGE_SIZE);
+ vectors_page = early_alloc(PAGE_SIZE);
for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
@@ -959,7 +959,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
* location (0xffff0000). If we aren't using high-vectors, also
* create a mapping at the low-vectors virtual address.
*/
- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+ map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
map.type = MT_HIGH_VECTORS;
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index b795afd0a2c..f8f777df8d7 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -109,6 +109,10 @@
* 110x 0 1 0 r/w r/o
* 11x0 0 1 0 r/w r/o
* 1111 0 1 1 r/w r/w
+ *
+ * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
+ * 110x 1 1 1 r/o r/o
+ * 11x0 1 1 1 r/o r/o
*/
.macro armv6_mt_table pfx
\pfx\()_mt_table:
@@ -148,8 +152,11 @@
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+ @ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX
bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
tst r1, #L_PTE_EXEC
orreq r3, r3, #PTE_EXT_XN
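
The CONFIG_CPU_USE_DOMAINS guards added here and in proc-v7.S below pair up with the new CPU_USE_DOMAINS option in mm/Kconfig: with domains in use, the kernel retains read/write access to read-only user pages through the manager domain, so set_pte_ext keeps the APX/AP0 tweak; without domains, the user accessors presumably switch to the unprivileged load/store forms instead, as the getuser/putuser/uaccess changes in the diffstat suggest. A compact illustration of that distinction - the function is invented, not part of this series:

    /* illustration only: one user-space load under the two permission models */
    static inline unsigned int user_load_example(const unsigned int __user *p)
    {
    	unsigned int val;

    #ifdef CONFIG_CPU_USE_DOMAINS
    	/* privileged load: the manager domain bypasses the page's access bits */
    	asm("ldr	%0, [%1]" : "=r" (val) : "r" (p));
    #else
    	/* unprivileged load: the hardware checks user-mode permissions directly */
    	asm("ldrt	%0, [%1]" : "=r" (val) : "r" (p));
    #endif
    	return val;
    }
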
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 9b9ff5d949f..7401f4d7e67 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -148,8 +148,11 @@ ENTRY(cpu_v7_set_pte_ext)
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+ @ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX
bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
tst r1, #L_PTE_EXEC
orreq r3, r3, #PTE_EXT_XN
@@ -273,8 +276,6 @@ __v7_setup:
ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
- mov r10, #0x1f @ domains 0, 1 = manager
- mcr p15, 0, r10, c3, c0, 0 @ load domain access register
/*
* Memory region attributes with SCTLR.TRE=1
*
@@ -313,6 +314,10 @@ __v7_setup:
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r6, r6, #1 << 25 @ big-endian page tables
#endif
+#ifdef CONFIG_SWP_EMULATE
+ orr r5, r5, #(1 << 10) @ set SW bit in "clear"
+ bic r6, r6, #(1 << 10) @ clear it in "mmuset"
+#endif
mrc p15, 0, r0, c1, c0, 0 @ read control register
bic r0, r0, r5 @ clear the bits in r5
orr r0, r0, r6 @ set the bits in r6
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 92c5bb7909f..c9408434a85 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -11,13 +11,13 @@ choice
config ARCH_OMAP1
bool "TI OMAP1"
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
help
"Systems based on omap7xx, omap15xx or omap16xx"
config ARCH_OMAP2PLUS
bool "TI OMAP2/3/4"
- select COMMON_CLKDEV
+ select CLKDEV_LOOKUP
help
"Systems based on OMAP2, OMAP3 or OMAP4"
diff --git a/arch/arm/plat-omap/include/plat/clkdev_omap.h b/arch/arm/plat-omap/include/plat/clkdev_omap.h
index bb937f3fabe..4b2028ab4d2 100644
--- a/arch/arm/plat-omap/include/plat/clkdev_omap.h
+++ b/arch/arm/plat-omap/include/plat/clkdev_omap.h
@@ -8,7 +8,7 @@
#ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_CLKDEV_OMAP_H
#define __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_CLKDEV_OMAP_H
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
struct omap_clk {
u16 cpu;
diff --git a/arch/arm/plat-omap/include/plat/memory.h b/arch/arm/plat-omap/include/plat/memory.h
index d5306bee44b..f8d922fb558 100644
--- a/arch/arm/plat-omap/include/plat/memory.h
+++ b/arch/arm/plat-omap/include/plat/memory.h
@@ -61,17 +61,17 @@
#define lbus_to_virt(x) ((x) - OMAP1510_LB_OFFSET + PAGE_OFFSET)
#define is_lbus_device(dev) (cpu_is_omap15xx() && dev && (strncmp(dev_name(dev), "ohci", 4) == 0))
-#define __arch_page_to_dma(dev, page) \
- ({ dma_addr_t __dma = page_to_phys(page); \
+#define __arch_pfn_to_dma(dev, pfn) \
+ ({ dma_addr_t __dma = __pfn_to_phys(pfn); \
if (is_lbus_device(dev)) \
__dma = __dma - PHYS_OFFSET + OMAP1510_LB_OFFSET; \
__dma; })
-#define __arch_dma_to_page(dev, addr) \
+#define __arch_dma_to_pfn(dev, addr) \
({ dma_addr_t __dma = addr; \
if (is_lbus_device(dev)) \
__dma += PHYS_OFFSET - OMAP1510_LB_OFFSET; \
- phys_to_page(__dma); \
+ __phys_to_pfn(__dma); \
})
#define __arch_dma_to_virt(dev, addr) ({ (void *) (is_lbus_device(dev) ? \
diff --git a/arch/arm/plat-omap/include/plat/smp.h b/arch/arm/plat-omap/include/plat/smp.h
index ecd6a488c49..7a10257909e 100644
--- a/arch/arm/plat-omap/include/plat/smp.h
+++ b/arch/arm/plat-omap/include/plat/smp.h
@@ -18,7 +18,6 @@
#define OMAP_ARCH_SMP_H
#include <asm/hardware/gic.h>
-#include <asm/smp_mpidr.h>
/* Needed for secondary core boot */
extern void omap_secondary_startup(void);
@@ -29,9 +28,9 @@ extern u32 omap_read_auxcoreboot0(void);
/*
* We use Soft IRQ1 as the IPI
*/
-static inline void smp_cross_call(const struct cpumask *mask)
+static inline void smp_cross_call(const struct cpumask *mask, int ipi)
{
- gic_raise_softirq(mask, 1);
+ gic_raise_softirq(mask, ipi);
}
#endif
diff --git a/arch/arm/plat-spear/include/plat/clock.h b/arch/arm/plat-spear/include/plat/clock.h
index 298bafc0a52..2572260f990 100644
--- a/arch/arm/plat-spear/include/plat/clock.h
+++ b/arch/arm/plat-spear/include/plat/clock.h
@@ -15,7 +15,7 @@
#define __PLAT_CLOCK_H
#include <linux/list.h>
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
#include <linux/types.h>
/* clk structure flags */
diff --git a/arch/arm/plat-stmp3xxx/clock.c b/arch/arm/plat-stmp3xxx/clock.c
index e593a2a801c..2e712e17ce7 100644
--- a/arch/arm/plat-stmp3xxx/clock.c
+++ b/arch/arm/plat-stmp3xxx/clock.c
@@ -25,9 +25,9 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/clkdev.h>
#include <asm/mach-types.h>
-#include <asm/clkdev.h>
#include <mach/platform.h>
#include <mach/regs-clkctrl.h>
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 8063a322c79..0797cb528b4 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -10,9 +10,12 @@
*/
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/cpu.h>
#include <linux/kernel.h>
+#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/init.h>
#include <asm/cputype.h>
@@ -484,7 +487,24 @@ void vfp_flush_hwstate(struct thread_info *thread)
put_cpu();
}
-#include <linux/smp.h>
+/*
+ * VFP hardware can lose all context when a CPU goes offline.
+ * Safely clear our held state when a CPU has been killed, and
+ * re-enable access to VFP when the CPU comes back online.
+ *
+ * Both CPU_DYING and CPU_STARTING are called on the CPU which
+ * is being offlined/onlined.
+ */
+static int vfp_hotplug(struct notifier_block *b, unsigned long action,
+ void *hcpu)
+{
+ if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
+ unsigned int cpu = (long)hcpu;
+ last_VFP_context[cpu] = NULL;
+ } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+ vfp_enable(NULL);
+ return NOTIFY_OK;
+}
/*
* VFP support code initialisation.
@@ -514,6 +534,8 @@ static int __init vfp_init(void)
else if (vfpsid & FPSID_NODOUBLE) {
printk("no double precision support\n");
} else {
+ hotcpu_notifier(vfp_hotplug, 0);
+
smp_call_function(vfp_enable, NULL, 1);
VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */