author	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-20 11:26:56 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-20 11:26:56 -0800
commit	8793422fd9ac5037f5047f80473007301df3689f (patch)
tree	f5aa3b3a564f053e1b5604c45db80193abc734a4 /arch
parent	b3cdda2b4f541439ca4205793040aa2e1c852e3b (diff)
parent	10baf04e95fbf7eb6089410220a547211dd2ffa7 (diff)
Merge tag 'pm+acpi-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull ACPI and power management updates from Rafael Wysocki:

 - Rework of the ACPI namespace scanning code from Rafael J. Wysocki with
   contributions from Bjorn Helgaas, Jiang Liu, Mika Westerberg, Toshi Kani,
   and Yinghai Lu.

 - ACPI power resources handling and ACPI device PM update from
   Rafael J Wysocki.

 - ACPICA update to version 20130117 from Bob Moore and Lv Zheng with
   contributions from Aaron Lu, Chao Guan, Jesper Juhl, and Tim Gardner.

 - Support for Intel Lynxpoint LPSS from Mika Westerberg.

 - cpuidle update from Len Brown including Intel Haswell support, C1 state
   for intel_idle, removal of global pm_idle.

 - cpuidle fixes and cleanups from Daniel Lezcano.

 - cpufreq fixes and cleanups from Viresh Kumar and Fabio Baltieri with
   contributions from Stratos Karafotis and Rickard Andersson.

 - Intel P-states driver for Sandy Bridge processors from Dirk Brandewie.

 - cpufreq driver for Marvell Kirkwood SoCs from Andrew Lunn.

 - cpufreq fixes related to ordering issues between acpi-cpufreq and
   powernow-k8 from Borislav Petkov and Matthew Garrett.

 - cpufreq support for Calxeda Highbank processors from Mark Langsdorf
   and Rob Herring.

 - cpufreq driver for the Freescale i.MX6Q SoC and cpufreq-cpu0 update
   from Shawn Guo.

 - cpufreq Exynos fixes and cleanups from Jonghwan Choi, Sachin Kamat,
   and Inderpal Singh.

 - Support for "lightweight suspend" from Zhang Rui.

 - Removal of the deprecated power trace API from Paul Gortmaker.

 - Assorted updates from Andreas Fleig, Colin Ian King, Davidlohr Bueso,
   Joseph Salisbury, Kees Cook, Li Fei, Nishanth Menon, ShuoX Liu,
   Srinivas Pandruvada, Tejun Heo, Thomas Renninger, and Yasuaki Ishimatsu.

* tag 'pm+acpi-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (267 commits)
  PM idle: remove global declaration of pm_idle
  unicore32 idle: delete stray pm_idle comment
  openrisc idle: delete pm_idle
  mn10300 idle: delete pm_idle
  microblaze idle: delete pm_idle
  m32r idle: delete pm_idle, and other dead idle code
  ia64 idle: delete pm_idle
  cris idle: delete idle and pm_idle
  ARM64 idle: delete pm_idle
  ARM idle: delete pm_idle
  blackfin idle: delete pm_idle
  sparc idle: rename pm_idle to sparc_idle
  sh idle: rename global pm_idle to static sh_idle
  x86 idle: rename global pm_idle to static x86_idle
  APM idle: register apm_cpu_idle via cpuidle
  cpufreq / intel_pstate: Add kernel command line option disable intel_pstate.
  cpufreq / intel_pstate: Change to disallow module build
  tools/power turbostat: display SMI count by default
  intel_idle: export both C1 and C1E
  ACPI / hotplug: Fix concurrency issues and memory leaks
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/dts/highbank.dts               |  10
-rw-r--r--  arch/arm/kernel/process.c                    |  13
-rw-r--r--  arch/arm/kernel/smp_twd.c                    |  53
-rw-r--r--  arch/arm/mach-davinci/cpuidle.c              |  84
-rw-r--r--  arch/arm/mach-exynos/include/mach/cpufreq.h  |  19
-rw-r--r--  arch/arm/mach-highbank/Kconfig               |   4
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c                 |   2
-rw-r--r--  arch/arm/mach-tegra/cpu-tegra.c              |   3
-rw-r--r--  arch/arm64/kernel/process.c                  |  13
-rw-r--r--  arch/blackfin/kernel/process.c               |   7
-rw-r--r--  arch/cris/kernel/process.c                   |  11
-rw-r--r--  arch/ia64/hp/common/aml_nfw.c                |   2
-rw-r--r--  arch/ia64/include/asm/acpi.h                 |   4
-rw-r--r--  arch/ia64/kernel/process.c                   |   3
-rw-r--r--  arch/ia64/kernel/setup.c                     |   1
-rw-r--r--  arch/m32r/kernel/process.c                   |  51
-rw-r--r--  arch/microblaze/kernel/process.c             |   3
-rw-r--r--  arch/mn10300/kernel/process.c                |   7
-rw-r--r--  arch/openrisc/kernel/idle.c                  |   5
-rw-r--r--  arch/sh/kernel/idle.c                        |  12
-rw-r--r--  arch/sparc/include/asm/processor_32.h        |   1
-rw-r--r--  arch/sparc/kernel/apc.c                      |   3
-rw-r--r--  arch/sparc/kernel/leon_pmc.c                 |   5
-rw-r--r--  arch/sparc/kernel/pmc.c                      |   3
-rw-r--r--  arch/sparc/kernel/process_32.c               |   7
-rw-r--r--  arch/unicore32/kernel/process.c              |   5
-rw-r--r--  arch/x86/Kconfig                             |  11
-rw-r--r--  arch/x86/include/asm/acpi.h                  |   4
-rw-r--r--  arch/x86/include/asm/mwait.h                 |   3
-rw-r--r--  arch/x86/include/asm/processor.h             |  18
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h        |   3
-rw-r--r--  arch/x86/kernel/apm_32.c                     |  57
-rw-r--r--  arch/x86/kernel/cpu/bugs.c                   |  27
-rw-r--r--  arch/x86/kernel/cpu/proc.c                   |   2
-rw-r--r--  arch/x86/kernel/process.c                    | 122
-rw-r--r--  arch/x86/kernel/smpboot.c                    |   2
-rw-r--r--  arch/x86/platform/olpc/olpc-xo15-sci.c       |   2
-rw-r--r--  arch/x86/xen/setup.c                         |   5
38 files changed, 180 insertions, 407 deletions
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts
index 5927a8df562..6aad34ad951 100644
--- a/arch/arm/boot/dts/highbank.dts
+++ b/arch/arm/boot/dts/highbank.dts
@@ -37,6 +37,16 @@
next-level-cache = <&L2>;
clocks = <&a9pll>;
clock-names = "cpu";
+ operating-points = <
+ /* kHz ignored */
+ 1300000 1000000
+ 1200000 1000000
+ 1100000 1000000
+ 800000 1000000
+ 400000 1000000
+ 200000 1000000
+ >;
+ clock-latency = <100000>;
};
cpu@901 {
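
[Note: the operating-points property added above is a flat list of <kHz microvolt> pairs consumed by the generic OPP library; per the "kHz ignored" comment, the voltage column is unused on Highbank. A minimal sketch of how the OPP core of this kernel generation walks such a table -- roughly what of_init_opp_table() does; register_dt_opps is an illustrative name and error handling is trimmed:

	#include <linux/device.h>
	#include <linux/of.h>
	#include <linux/opp.h>

	static int register_dt_opps(struct device *dev, struct device_node *np)
	{
		const struct property *prop;
		const __be32 *val;
		int nr;

		prop = of_find_property(np, "operating-points", NULL);
		if (!prop || !prop->value)
			return -ENODEV;

		nr = prop->length / sizeof(u32);
		if (nr % 2)			/* must be <frequency, voltage> pairs */
			return -EINVAL;

		val = prop->value;
		while (nr) {
			unsigned long freq = be32_to_cpup(val++) * 1000;	/* kHz -> Hz */
			unsigned long volt = be32_to_cpup(val++);		/* uV */

			if (opp_add(dev, freq, volt))	/* register one OPP */
				dev_warn(dev, "failed to add OPP %lu Hz\n", freq);
			nr -= 2;
		}
		return 0;
	}
]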
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index c6dec5fc20a..047d3e40e47 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -172,14 +172,9 @@ static void default_idle(void)
local_irq_enable();
}
-void (*pm_idle)(void) = default_idle;
-EXPORT_SYMBOL(pm_idle);
-
/*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way. The only difference
- * is that we always respect 'hlt_counter' to prevent low power idle.
+ * The idle thread.
+ * We always respect 'hlt_counter' to prevent low power idle.
*/
void cpu_idle(void)
{
@@ -210,10 +205,10 @@ void cpu_idle(void)
} else if (!need_resched()) {
stop_critical_timings();
if (cpuidle_idle_call())
- pm_idle();
+ default_idle();
start_critical_timings();
/*
- * pm_idle functions must always
+ * default_idle functions must always
* return with IRQs enabled.
*/
WARN_ON(irqs_disabled());
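
[Note: with pm_idle gone, the fallback in the hunk above is direct: cpuidle_idle_call() returns nonzero when no cpuidle driver is registered, and the loop then drops into the architecture's built-in WFI idle. In sketch form:

	if (cpuidle_idle_call())	/* nonzero: no cpuidle driver available */
		default_idle();		/* built-in WFI idle; returns with IRQs enabled */
]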
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 49f335d301b..ae0c7bb39ae 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -31,7 +31,6 @@ static void __iomem *twd_base;
static struct clk *twd_clk;
static unsigned long twd_timer_rate;
-static bool common_setup_called;
static DEFINE_PER_CPU(bool, percpu_setup_called);
static struct clock_event_device __percpu **twd_evt;
@@ -239,25 +238,28 @@ static irqreturn_t twd_handler(int irq, void *dev_id)
return IRQ_NONE;
}
-static struct clk *twd_get_clock(void)
+static void twd_get_clock(struct device_node *np)
{
- struct clk *clk;
int err;
- clk = clk_get_sys("smp_twd", NULL);
- if (IS_ERR(clk)) {
- pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
- return clk;
+ if (np)
+ twd_clk = of_clk_get(np, 0);
+ else
+ twd_clk = clk_get_sys("smp_twd", NULL);
+
+ if (IS_ERR(twd_clk)) {
+ pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk));
+ return;
}
- err = clk_prepare_enable(clk);
+ err = clk_prepare_enable(twd_clk);
if (err) {
pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
- clk_put(clk);
- return ERR_PTR(err);
+ clk_put(twd_clk);
+ return;
}
- return clk;
+ twd_timer_rate = clk_get_rate(twd_clk);
}
/*
@@ -280,26 +282,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
}
per_cpu(percpu_setup_called, cpu) = true;
- /*
- * This stuff only need to be done once for the entire TWD cluster
- * during the runtime of the system.
- */
- if (!common_setup_called) {
- twd_clk = twd_get_clock();
-
- /*
- * We use IS_ERR_OR_NULL() here, because if the clock stubs
- * are active we will get a valid clk reference which is
- * however NULL and will return the rate 0. In that case we
- * need to calibrate the rate instead.
- */
- if (!IS_ERR_OR_NULL(twd_clk))
- twd_timer_rate = clk_get_rate(twd_clk);
- else
- twd_calibrate_rate();
-
- common_setup_called = true;
- }
+ twd_calibrate_rate();
/*
* The following is done once per CPU the first time .setup() is
@@ -330,7 +313,7 @@ static struct local_timer_ops twd_lt_ops __cpuinitdata = {
.stop = twd_timer_stop,
};
-static int __init twd_local_timer_common_register(void)
+static int __init twd_local_timer_common_register(struct device_node *np)
{
int err;
@@ -350,6 +333,8 @@ static int __init twd_local_timer_common_register(void)
if (err)
goto out_irq;
+ twd_get_clock(np);
+
return 0;
out_irq:
@@ -373,7 +358,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)
if (!twd_base)
return -ENOMEM;
- return twd_local_timer_common_register();
+ return twd_local_timer_common_register(NULL);
}
#ifdef CONFIG_OF
@@ -405,7 +390,7 @@ void __init twd_local_timer_of_register(void)
goto out;
}
- err = twd_local_timer_common_register();
+ err = twd_local_timer_common_register(np);
out:
WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
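
[Note: the refactor above moves the clock lookup to registration time and makes the per-CPU calibration call unconditional; twd_calibrate_rate() only measures when twd_timer_rate is still zero, so a rate obtained from the clock survives. A condensed sketch of the resulting lookup order, assuming a failed lookup simply leaves the rate to be calibrated:

	struct clk *clk = np ? of_clk_get(np, 0) : clk_get_sys("smp_twd", NULL);

	if (!IS_ERR(clk) && !clk_prepare_enable(clk))
		twd_timer_rate = clk_get_rate(clk);	/* else calibrated per CPU later */
]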
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 9107691adbd..5ac9e9384b1 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -25,35 +25,44 @@
#define DAVINCI_CPUIDLE_MAX_STATES 2
-struct davinci_ops {
- void (*enter) (u32 flags);
- void (*exit) (u32 flags);
- u32 flags;
-};
+static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
+static void __iomem *ddr2_reg_base;
+static bool ddr2_pdown;
+
+static void davinci_save_ddr_power(int enter, bool pdown)
+{
+ u32 val;
+
+ val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
+
+ if (enter) {
+ if (pdown)
+ val |= DDR2_SRPD_BIT;
+ else
+ val &= ~DDR2_SRPD_BIT;
+ val |= DDR2_LPMODEN_BIT;
+ } else {
+ val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
+ }
+
+ __raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
+}
/* Actual code that puts the SoC in different idle states */
static int davinci_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
- struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
-
- if (ops && ops->enter)
- ops->enter(ops->flags);
+ davinci_save_ddr_power(1, ddr2_pdown);
index = cpuidle_wrap_enter(dev, drv, index,
arm_cpuidle_simple_enter);
- if (ops && ops->exit)
- ops->exit(ops->flags);
+ davinci_save_ddr_power(0, ddr2_pdown);
return index;
}
-/* fields in davinci_ops.flags */
-#define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN BIT(0)
-
static struct cpuidle_driver davinci_idle_driver = {
.name = "cpuidle-davinci",
.owner = THIS_MODULE,
@@ -70,45 +79,6 @@ static struct cpuidle_driver davinci_idle_driver = {
.state_count = DAVINCI_CPUIDLE_MAX_STATES,
};
-static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
-static void __iomem *ddr2_reg_base;
-
-static void davinci_save_ddr_power(int enter, bool pdown)
-{
- u32 val;
-
- val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);
-
- if (enter) {
- if (pdown)
- val |= DDR2_SRPD_BIT;
- else
- val &= ~DDR2_SRPD_BIT;
- val |= DDR2_LPMODEN_BIT;
- } else {
- val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
- }
-
- __raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
-}
-
-static void davinci_c2state_enter(u32 flags)
-{
- davinci_save_ddr_power(1, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
-}
-
-static void davinci_c2state_exit(u32 flags)
-{
- davinci_save_ddr_power(0, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
-}
-
-static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
- [1] = {
- .enter = davinci_c2state_enter,
- .exit = davinci_c2state_exit,
- },
-};
-
static int __init davinci_cpuidle_probe(struct platform_device *pdev)
{
int ret;
@@ -124,11 +94,7 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
ddr2_reg_base = pdata->ddr2_ctlr_base;
- if (pdata->ddr2_pdown)
- davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
- cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
-
- device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
+ ddr2_pdown = pdata->ddr2_pdown;
ret = cpuidle_register_driver(&davinci_idle_driver);
if (ret) {
diff --git a/arch/arm/mach-exynos/include/mach/cpufreq.h b/arch/arm/mach-exynos/include/mach/cpufreq.h
index 7517c3f417a..b5d39dd03b2 100644
--- a/arch/arm/mach-exynos/include/mach/cpufreq.h
+++ b/arch/arm/mach-exynos/include/mach/cpufreq.h
@@ -18,12 +18,25 @@ enum cpufreq_level_index {
L20,
};
+#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
+ { \
+ .freq = (f) * 1000, \
+ .clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
+ (a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
+ .clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
+ .mps = ((m) << 16 | (p) << 8 | (s)), \
+ }
+
+struct apll_freq {
+ unsigned int freq;
+ u32 clk_div_cpu0;
+ u32 clk_div_cpu1;
+ u32 mps;
+};
+
struct exynos_dvfs_info {
unsigned long mpll_freq_khz;
unsigned int pll_safe_idx;
- unsigned int pm_lock_idx;
- unsigned int max_support_idx;
- unsigned int min_support_idx;
struct clk *cpu_clk;
unsigned int *volt_table;
struct cpufreq_frequency_table *freq_table;
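
[Note: the APLL_FREQ() macro added above packs one operating point -- frequency in MHz plus the CPU0/CPU1 clock-divider nibbles and the PLL M/P/S values -- into a struct apll_freq entry. An illustrative usage sketch; the divider and PLL numbers below are placeholders, not real Exynos settings:

	static struct apll_freq apll_freq_example[] = {
		/*         f  a0 a1 a2 a3 a4 a5 a6 a7  b0 b1 b2    m  p  s */
		APLL_FREQ(1400, 0, 3, 7, 3, 6, 1, 2, 0,  6, 0, 7, 175, 3, 0),
		APLL_FREQ( 200, 0, 1, 3, 1, 3, 1, 0, 0,  3, 0, 4, 100, 3, 2),
	};
	/* APLL_FREQ(1400, ...) expands to .freq = 1400000 (kHz), with the
	 * a0..a7 nibbles packed into clk_div_cpu0, b0..b2 into clk_div_cpu1,
	 * and m/p/s into mps as (m << 16 | p << 8 | s). */
]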
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index 551c97e87a7..44b12f9c158 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -1,5 +1,7 @@
config ARCH_HIGHBANK
bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7
+ select ARCH_HAS_CPUFREQ
+ select ARCH_HAS_OPP
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_AMBA
select ARM_GIC
@@ -11,5 +13,7 @@ config ARCH_HIGHBANK
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU
select HAVE_SMP
+ select MAILBOX
+ select PL320_MBOX
select SPARSE_IRQ
select USE_OF
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 7be3622cfc8..2d93d8b2383 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -351,12 +351,10 @@ static void omap3_pm_idle(void)
if (omap_irq_pending())
goto out;
- trace_power_start(POWER_CSTATE, 1, smp_processor_id());
trace_cpu_idle(1, smp_processor_id());
omap_sram_idle();
- trace_power_end(smp_processor_id());
trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
out:
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
index a74d3c7d2e2..a36a03d3c9a 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -243,8 +243,7 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
/* FIXME: what's the actual transition time? */
policy->cpuinfo.transition_latency = 300 * 1000;
- policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
- cpumask_copy(policy->related_cpus, cpu_possible_mask);
+ cpumask_copy(policy->cpus, cpu_possible_mask);
if (policy->cpu == 0)
register_pm_notifier(&tegra_cpu_pm_notifier);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index cb0956bc96e..c7002d40a9b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -97,14 +97,9 @@ static void default_idle(void)
local_irq_enable();
}
-void (*pm_idle)(void) = default_idle;
-EXPORT_SYMBOL_GPL(pm_idle);
-
/*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way. The only difference
- * is that we always respect 'hlt_counter' to prevent low power idle.
+ * The idle thread.
+ * We always respect 'hlt_counter' to prevent low power idle.
*/
void cpu_idle(void)
{
@@ -122,10 +117,10 @@ void cpu_idle(void)
local_irq_disable();
if (!need_resched()) {
stop_critical_timings();
- pm_idle();
+ default_idle();
start_critical_timings();
/*
- * pm_idle functions should always return
+ * default_idle functions should always return
* with IRQs enabled.
*/
WARN_ON(irqs_disabled());
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 3e16ad9b0a9..8061426b7df 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -39,12 +39,6 @@ int nr_l1stack_tasks;
void *l1_stack_base;
unsigned long l1_stack_len;
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void) = NULL;
-EXPORT_SYMBOL(pm_idle);
-
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
@@ -81,7 +75,6 @@ void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
- void (*idle)(void) = pm_idle;
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id()))
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 7f65be6f7f1..104ff4dd9b9 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -54,11 +54,6 @@ void enable_hlt(void)
EXPORT_SYMBOL(enable_hlt);
-/*
- * The following aren't currently used.
- */
-void (*pm_idle)(void);
-
extern void default_idle(void);
void (*pm_power_off)(void);
@@ -77,16 +72,12 @@ void cpu_idle (void)
while (1) {
rcu_idle_enter();
while (!need_resched()) {
- void (*idle)(void);
/*
* Mark this as an RCU critical section so that
* synchronize_kernel() in the unload path waits
* for our completion.
*/
- idle = pm_idle;
- if (!idle)
- idle = default_idle;
- idle();
+ default_idle();
}
rcu_idle_exit();
schedule_preempt_disabled();
diff --git a/arch/ia64/hp/common/aml_nfw.c b/arch/ia64/hp/common/aml_nfw.c
index 6192f718865..916ffe770bc 100644
--- a/arch/ia64/hp/common/aml_nfw.c
+++ b/arch/ia64/hp/common/aml_nfw.c
@@ -191,7 +191,7 @@ static int aml_nfw_add(struct acpi_device *device)
return aml_nfw_add_global_handler();
}
-static int aml_nfw_remove(struct acpi_device *device, int type)
+static int aml_nfw_remove(struct acpi_device *device)
{
return aml_nfw_remove_global_handler();
}
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 359e68a03ca..faa1bf0da81 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -52,10 +52,6 @@
/* Asm macros */
-#define ACPI_ASM_MACROS
-#define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() local_irq_disable()
-#define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE()
static inline int
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 31360cbbd5f..e34f565f595 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -57,8 +57,6 @@ void (*ia64_mark_idle)(int);
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
-void (*pm_idle) (void);
-EXPORT_SYMBOL(pm_idle);
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);
@@ -301,7 +299,6 @@ cpu_idle (void)
if (mark_idle)
(*mark_idle)(1);
- idle = pm_idle;
if (!idle)
idle = default_idle;
(*idle)();
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index aaefd9b94f2..2029cc0d2fc 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1051,7 +1051,6 @@ cpu_init (void)
max_num_phys_stacked = num_phys_stacked;
}
platform_cpu_init();
- pm_idle = default_idle;
}
void __init
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index 765d0f57c78..bde899e155d 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -44,36 +44,10 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return tsk->thread.lr;
}
-/*
- * Powermanagement idle function, if any..
- */
-static void (*pm_idle)(void) = NULL;
-
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
/*
- * We use this is we don't have any better
- * idle routine..
- */
-static void default_idle(void)
-{
- /* M32R_FIXME: Please use "cpu_sleep" mode. */
- cpu_relax();
-}
-
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle (void)
-{
- /* M32R_FIXME */
- cpu_relax();
-}
-
-/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
@@ -84,14 +58,8 @@ void cpu_idle (void)
/* endless idle loop with no priority at all */
while (1) {
rcu_idle_enter();
- while (!need_resched()) {
- void (*idle)(void) = pm_idle;
-
- if (!idle)
- idle = default_idle;
-
- idle();
- }
+ while (!need_resched())
+ cpu_relax();
rcu_idle_exit();
schedule_preempt_disabled();
}
@@ -120,21 +88,6 @@ void machine_power_off(void)
/* M32R_FIXME */
}
-static int __init idle_setup (char *str)
-{
- if (!strncmp(str, "poll", 4)) {
- printk("using poll in idle threads.\n");
- pm_idle = poll_idle;
- } else if (!strncmp(str, "sleep", 4)) {
- printk("using sleep in idle threads.\n");
- pm_idle = default_idle;
- }
-
- return 1;
-}
-
-__setup("idle=", idle_setup);
-
void show_regs(struct pt_regs * regs)
{
printk("\n");
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index a5b74f729e5..6ff2dcff341 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -41,7 +41,6 @@ void show_regs(struct pt_regs *regs)
regs->msr, regs->ear, regs->esr, regs->fsr);
}
-void (*pm_idle)(void);
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
@@ -98,8 +97,6 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- void (*idle)(void) = pm_idle;
-
if (!idle)
idle = default_idle;
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index eb09f5a552f..84f4e97e307 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -37,12 +37,6 @@
#include "internal.h"
/*
- * power management idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
-/*
* return saved PC of a blocked thread.
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
@@ -113,7 +107,6 @@ void cpu_idle(void)
void (*idle)(void);
smp_rmb();
- idle = pm_idle;
if (!idle) {
#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
idle = poll_idle;
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
index 7d618feb1b7..5e8a3b6d6bc 100644
--- a/arch/openrisc/kernel/idle.c
+++ b/arch/openrisc/kernel/idle.c
@@ -39,11 +39,6 @@
void (*powersave) (void) = NULL;
-static inline void pm_idle(void)
-{
- barrier();
-}
-
void cpu_idle(void)
{
set_thread_flag(TIF_POLLING_NRFLAG);
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 0c910163caa..3d5a1b387cc 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -22,7 +22,7 @@
#include <asm/smp.h>
#include <asm/bl_bit.h>
-void (*pm_idle)(void);
+static void (*sh_idle)(void);
static int hlt_counter;
@@ -103,9 +103,9 @@ void cpu_idle(void)
/* Don't trace irqs off for idle */
stop_critical_timings();
if (cpuidle_idle_call())
- pm_idle();
+ sh_idle();
/*
- * Sanity check to ensure that pm_idle() returns
+ * Sanity check to ensure that sh_idle() returns
* with IRQs enabled
*/
WARN_ON(irqs_disabled());
@@ -123,13 +123,13 @@ void __init select_idle_routine(void)
/*
* If a platform has set its own idle routine, leave it alone.
*/
- if (pm_idle)
+ if (sh_idle)
return;
if (hlt_works())
- pm_idle = default_idle;
+ sh_idle = default_idle;
else
- pm_idle = poll_idle;
+ sh_idle = poll_idle;
}
void stop_this_cpu(void *unused)
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index c1e01914fd9..2c7baa4c450 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -118,6 +118,7 @@ extern unsigned long get_wchan(struct task_struct *);
extern struct task_struct *last_task_used_math;
#define cpu_relax() barrier()
+extern void (*sparc_idle)(void);
#endif
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 348fa1aeabc..eefda32b595 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -20,6 +20,7 @@
#include <asm/uaccess.h>
#include <asm/auxio.h>
#include <asm/apc.h>
+#include <asm/processor.h>
/* Debugging
*
@@ -158,7 +159,7 @@ static int apc_probe(struct platform_device *op)
/* Assign power management IDLE handler */
if (!apc_no_idle)
- pm_idle = apc_swift_idle;
+ sparc_idle = apc_swift_idle;
printk(KERN_INFO "%s: power management initialized%s\n",
APC_DEVNAME, apc_no_idle ? " (CPU idle disabled)" : "");
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index 4e174321097..708bca43521 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -9,6 +9,7 @@
#include <asm/leon_amba.h>
#include <asm/cpu_type.h>
#include <asm/leon.h>
+#include <asm/processor.h>
/* List of Systems that need fixup instructions around power-down instruction */
unsigned int pmc_leon_fixup_ids[] = {
@@ -69,9 +70,9 @@ static int __init leon_pmc_install(void)
if (sparc_cpu_model == sparc_leon) {
/* Assign power management IDLE handler */
if (pmc_leon_need_fixup())
- pm_idle = pmc_leon_idle_fixup;
+ sparc_idle = pmc_leon_idle_fixup;
else
- pm_idle = pmc_leon_idle;
+ sparc_idle = pmc_leon_idle;
printk(KERN_INFO "leon: power management initialized\n");
}
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index dcbb62f6306..8b7297faca7 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -17,6 +17,7 @@
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/auxio.h>
+#include <asm/processor.h>
/* Debug
*
@@ -63,7 +64,7 @@ static int pmc_probe(struct platform_device *op)
#ifndef PMC_NO_IDLE
/* Assign power management IDLE handler */
- pm_idle = pmc_swift_idle;
+ sparc_idle = pmc_swift_idle;
#endif
printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME);
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index be8e862bada..62eede13831 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -43,8 +43,7 @@
* Power management idle function
* Set in pm platform drivers (apc.c and pmc.c)
*/
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
+void (*sparc_idle)(void);
/*
* Power-off handler instantiation for pm.h compliance
@@ -75,8 +74,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
for (;;) {
while (!need_resched()) {
- if (pm_idle)
- (*pm_idle)();
+ if (sparc_idle)
+ (*sparc_idle)();
else
cpu_relax();
}
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 62bad9fed03..872d7e22d84 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -45,11 +45,6 @@ static const char * const processor_modes[] = {
"UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR"
};
-/*
- * The idle thread, has rather strange semantics for calling pm_idle,
- * but this is what x86 does and we need to do the same, so that
- * things like cpuidle get called in the same way.
- */
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 260857a53b8..f7a27fdb509 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -469,6 +469,16 @@ config X86_MDFLD
endif
+config X86_INTEL_LPSS
+ bool "Intel Low Power Subsystem Support"
+ depends on ACPI
+ select COMMON_CLK
+ ---help---
+ Select to build support for Intel Low Power Subsystem such as
+ found on Intel Lynxpoint PCH. Selecting this option enables
+ things like clock tree (common clock framework) which are needed
+ by the LPSS peripheral drivers.
+
config X86_RDC321X
bool "RDC R-321x SoC"
depends on X86_32
@@ -1927,6 +1937,7 @@ config APM_DO_ENABLE
this feature.
config APM_CPU_IDLE
+ depends on CPU_IDLE
bool "Make CPU Idle calls when idle"
---help---
Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop.
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 0c44630d178..b31bf97775f 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -49,10 +49,6 @@
/* Asm macros */
-#define ACPI_ASM_MACROS
-#define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() local_irq_disable()
-#define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE() wbinvd()
int __acpi_acquire_global_lock(unsigned int *lock);
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index bcdff997668..2f366d0ac6b 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -4,7 +4,8 @@
#define MWAIT_SUBSTATE_MASK 0xf
#define MWAIT_CSTATE_MASK 0xf
#define MWAIT_SUBSTATE_SIZE 4
-#define MWAIT_MAX_NUM_CSTATES 8
+#define MWAIT_HINT2CSTATE(hint) (((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
+#define MWAIT_HINT2SUBSTATE(hint) ((hint) & MWAIT_CSTATE_MASK)
#define CPUID_MWAIT_LEAF 5
#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
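
[Note: MWAIT_MAX_NUM_CSTATES gives way to hint-decoding helpers above: an MWAIT hint encodes the C-state index in the upper nibble and the sub-state in the lower. A worked example with a hypothetical hint value:

	u32 hint = 0x21;				/* hypothetical MWAIT hint */
	u32 cstate = MWAIT_HINT2CSTATE(hint);		/* (0x21 >> 4) & 0xf == 2 */
	u32 sub = MWAIT_HINT2SUBSTATE(hint);		/* 0x21 & 0xf == 1 */
]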
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cf500543f6f..d172588efae 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -89,7 +89,6 @@ struct cpuinfo_x86 {
char wp_works_ok; /* It doesn't on 386's */
/* Problems on some 486Dx4's and old 386's: */
- char hlt_works_ok;
char hard_math;
char rfu;
char fdiv_bug;
@@ -165,15 +164,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
extern const struct seq_operations cpuinfo_op;
-static inline int hlt_works(int cpu)
-{
-#ifdef CONFIG_X86_32
- return cpu_data(cpu).hlt_works_ok;
-#else
- return 1;
-#endif
-}
-
#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
extern void cpu_detect(struct cpuinfo_x86 *c);
@@ -725,7 +715,7 @@ extern unsigned long boot_option_idle_override;
extern bool amd_e400_c1e_detected;
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
- IDLE_POLL, IDLE_FORCE_MWAIT};
+ IDLE_POLL};
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
@@ -998,7 +988,11 @@ extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void);
-bool set_pm_idle_to_default(void);
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void);
+#else
+#define xen_set_default_idle 0
+#endif
void stop_this_cpu(void *dummy);
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 075a4025559..f26d2771846 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -103,6 +103,8 @@
#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
+#define MSR_IA32_POWER_CTL 0x000001fc
+
#define MSR_IA32_MC0_CTL 0x00000400
#define MSR_IA32_MC0_STATUS 0x00000401
#define MSR_IA32_MC0_ADDR 0x00000402
@@ -274,6 +276,7 @@
#define MSR_IA32_PLATFORM_ID 0x00000017
#define MSR_IA32_EBL_CR_POWERON 0x0000002a
#define MSR_EBC_FREQUENCY_ID 0x0000002c
+#define MSR_SMI_COUNT 0x00000034
#define MSR_IA32_FEATURE_CONTROL 0x0000003a
#define MSR_IA32_TSC_ADJUST 0x0000003b
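
[Note: MSR_SMI_COUNT (0x34), added above, backs the "tools/power turbostat: display SMI count by default" commit in this pull. A kernel-side sketch of reading it on the local CPU; turbostat itself reads the MSR from userspace via /dev/cpu/*/msr:

	u64 smi;

	rdmsrl(MSR_SMI_COUNT, smi);	/* SMIs since reset, on this CPU */
	pr_info("SMI count: %llu\n", (unsigned long long)smi);
]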
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 8d7012b7f40..66b5faffe14 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -232,6 +232,7 @@
#include <linux/acpi.h>
#include <linux/syscore_ops.h>
#include <linux/i8253.h>
+#include <linux/cpuidle.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
@@ -360,13 +361,35 @@ struct apm_user {
* idle percentage above which bios idle calls are done
*/
#ifdef CONFIG_APM_CPU_IDLE
-#warning deprecated CONFIG_APM_CPU_IDLE will be deleted in 2012
#define DEFAULT_IDLE_THRESHOLD 95
#else
#define DEFAULT_IDLE_THRESHOLD 100
#endif
#define DEFAULT_IDLE_PERIOD (100 / 3)
+static int apm_cpu_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index);
+
+static struct cpuidle_driver apm_idle_driver = {
+ .name = "apm_idle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .states = {
+ { /* entry 0 is for polling */ },
+ { /* entry 1 is for APM idle */
+ .name = "APM",
+ .desc = "APM idle",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 250, /* WAG */
+ .target_residency = 500, /* WAG */
+ .enter = &apm_cpu_idle
+ },
+ },
+ .state_count = 2,
+};
+
+static struct cpuidle_device apm_cpuidle_device;
+
/*
* Local variables
*/
@@ -377,7 +400,6 @@ static struct {
static int clock_slowed;
static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
-static int set_pm_idle;
static int suspends_pending;
static int standbys_pending;
static int ignore_sys_suspend;
@@ -884,8 +906,6 @@ static void apm_do_busy(void)
#define IDLE_CALC_LIMIT (HZ * 100)
#define IDLE_LEAKY_MAX 16
-static void (*original_pm_idle)(void) __read_mostly;
-
/**
* apm_cpu_idle - cpu idling for APM capable Linux
*
@@ -894,7 +914,8 @@ static void (*original_pm_idle)(void) __read_mostly;
* Furthermore it calls the system default idle routine.
*/
-static void apm_cpu_idle(void)
+static int apm_cpu_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
static int use_apm_idle; /* = 0 */
static unsigned int last_jiffies; /* = 0 */
@@ -905,7 +926,6 @@ static void apm_cpu_idle(void)
unsigned int jiffies_since_last_check = jiffies - last_jiffies;
unsigned int bucket;
- WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
recalc:
task_cputime(current, NULL, &stime);
if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
@@ -951,10 +971,7 @@ recalc:
break;
}
}
- if (original_pm_idle)
- original_pm_idle();
- else
- default_idle();
+ default_idle();
local_irq_disable();
jiffies_since_last_check = jiffies - last_jiffies;
if (jiffies_since_last_check > idle_period)
@@ -964,7 +981,7 @@ recalc:
if (apm_idle_done)
apm_do_busy();
- local_irq_enable();
+ return index;
}
/**
@@ -2382,9 +2399,9 @@ static int __init apm_init(void)
if (HZ != 100)
idle_period = (idle_period * HZ) / 100;
if (idle_threshold < 100) {
- original_pm_idle = pm_idle;
- pm_idle = apm_cpu_idle;
- set_pm_idle = 1;
+ if (!cpuidle_register_driver(&apm_idle_driver))
+ if (cpuidle_register_device(&apm_cpuidle_device))
+ cpuidle_unregister_driver(&apm_idle_driver);
}
return 0;
@@ -2394,15 +2411,9 @@ static void __exit apm_exit(void)
{
int error;
- if (set_pm_idle) {
- pm_idle = original_pm_idle;
- /*
- * We are about to unload the current idle thread pm callback
- * (pm_idle), Wait for all processors to update cached/local
- * copies of pm_idle before proceeding.
- */
- kick_all_cpus_sync();
- }
+ cpuidle_unregister_device(&apm_cpuidle_device);
+ cpuidle_unregister_driver(&apm_idle_driver);
+
if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
&& (apm_info.connection_version > 0x0100)) {
error = apm_engage_power_management(APM_DEVICE_ALL, 0);
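
[Note: the apm_init()/apm_exit() changes above follow the standard cpuidle registration contract: register the driver first, then the per-CPU device, and unwind the driver if the device fails; teardown mirrors this in reverse. The same flow with explicit error propagation -- the helper name is illustrative:

	static int __init register_apm_idle(void)
	{
		int err;

		err = cpuidle_register_driver(&apm_idle_driver);
		if (err)
			return err;

		err = cpuidle_register_device(&apm_cpuidle_device);
		if (err)
			cpuidle_unregister_driver(&apm_idle_driver);

		return err;
	}
]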
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 92dfec986a4..af6455e3fcc 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -17,15 +17,6 @@
#include <asm/paravirt.h>
#include <asm/alternative.h>
-static int __init no_halt(char *s)
-{
- WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
- boot_cpu_data.hlt_works_ok = 0;
- return 1;
-}
-
-__setup("no-hlt", no_halt);
-
static int __init no_387(char *s)
{
boot_cpu_data.hard_math = 0;
@@ -89,23 +80,6 @@ static void __init check_fpu(void)
pr_warn("Hmm, FPU with FDIV bug\n");
}
-static void __init check_hlt(void)
-{
- if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
- return;
-
- pr_info("Checking 'hlt' instruction... ");
- if (!boot_cpu_data.hlt_works_ok) {
- pr_cont("disabled\n");
- return;
- }
- halt();
- halt();
- halt();
- halt();
- pr_cont("OK\n");
-}
-
/*
* Check whether we are able to run this kernel safely on SMP.
*
@@ -129,7 +103,6 @@ void __init check_bugs(void)
print_cpu_info(&boot_cpu_data);
#endif
check_config();
- check_hlt();
init_utsname()->machine[1] =
'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
alternative_instructions();
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 3286a92e662..e280253f6f9 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -28,7 +28,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
{
seq_printf(m,
"fdiv_bug\t: %s\n"
- "hlt_bug\t\t: %s\n"
"f00f_bug\t: %s\n"
"coma_bug\t: %s\n"
"fpu\t\t: %s\n"
@@ -36,7 +35,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
"cpuid level\t: %d\n"
"wp\t\t: %s\n",
c->fdiv_bug ? "yes" : "no",
- c->hlt_works_ok ? "no" : "yes",
c->f00f_bug ? "yes" : "no",
c->coma_bug ? "yes" : "no",
c->hard_math ? "yes" : "no",
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2ed787f15bf..14ae10031ff 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -268,13 +268,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-#ifdef CONFIG_APM_MODULE
-EXPORT_SYMBOL(pm_idle);
-#endif
+static void (*x86_idle)(void);
#ifndef CONFIG_SMP
static inline void play_dead(void)
@@ -351,7 +345,7 @@ void cpu_idle(void)
rcu_idle_enter();
if (cpuidle_idle_call())
- pm_idle();
+ x86_idle();
rcu_idle_exit();
start_critical_timings();
@@ -375,7 +369,6 @@ void cpu_idle(void)
*/
void default_idle(void)
{
- trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
trace_cpu_idle_rcuidle(1, smp_processor_id());
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -389,21 +382,22 @@ void default_idle(void)
else
local_irq_enable();
current_thread_info()->status |= TS_POLLING;
- trace_power_end_rcuidle(smp_processor_id());
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
-bool set_pm_idle_to_default(void)
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void)
{
- bool ret = !!pm_idle;
+ bool ret = !!x86_idle;
- pm_idle = default_idle;
+ x86_idle = default_idle;
return ret;
}
+#endif
void stop_this_cpu(void *dummy)
{
local_irq_disable();
@@ -413,31 +407,8 @@ void stop_this_cpu(void *dummy)
set_cpu_online(smp_processor_id(), false);
disable_local_APIC();
- for (;;) {
- if (hlt_works(smp_processor_id()))
- halt();
- }
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
- if (!need_resched()) {
- trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
- trace_cpu_idle_rcuidle(1, smp_processor_id());
- if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
- clflush((void *)&current_thread_info()->flags);
-
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __sti_mwait(0, 0);
- else
- local_irq_enable();
- trace_power_end_rcuidle(smp_processor_id());
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
- } else
- local_irq_enable();
+ for (;;)
+ halt();
}
/*
@@ -447,62 +418,13 @@ static void mwait_idle(void)
*/
static void poll_idle(void)
{
- trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
trace_cpu_idle_rcuidle(0, smp_processor_id());
local_irq_enable();
while (!need_resched())
cpu_relax();
- trace_power_end_rcuidle(smp_processor_id());
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
-/*
- * mwait selection logic:
- *
- * It depends on the CPU. For AMD CPUs that support MWAIT this is
- * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
- * then depend on a clock divisor and current Pstate of the core. If
- * all cores of a processor are in halt state (C1) the processor can
- * enter the C1E (C1 enhanced) state. If mwait is used this will never
- * happen.
- *
- * idle=mwait overrides this decision and forces the usage of mwait.
- */
-
-#define MWAIT_INFO 0x05
-#define MWAIT_ECX_EXTENDED_INFO 0x01
-#define MWAIT_EDX_C1 0xf0
-
-int mwait_usable(const struct cpuinfo_x86 *c)
-{
- u32 eax, ebx, ecx, edx;
-
- /* Use mwait if idle=mwait boot option is given */
- if (boot_option_idle_override == IDLE_FORCE_MWAIT)
- return 1;
-
- /*
- * Any idle= boot option other than idle=mwait means that we must not
- * use mwait. Eg: idle=halt or idle=poll or idle=nomwait
- */
- if (boot_option_idle_override != IDLE_NO_OVERRIDE)
- return 0;
-
- if (c->cpuid_level < MWAIT_INFO)
- return 0;
-
- cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
- /* Check, whether EDX has extended info about MWAIT */
- if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
- return 1;
-
- /*
- * edx enumeratios MONITOR/MWAIT extensions. Check, whether
- * C1 supports MWAIT
- */
- return (edx & MWAIT_EDX_C1);
-}
-
bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);
@@ -567,31 +489,24 @@ static void amd_e400_idle(void)
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
- if (pm_idle == poll_idle && smp_num_siblings > 1) {
+ if (x86_idle == poll_idle && smp_num_siblings > 1)
pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
- }
#endif
- if (pm_idle)
+ if (x86_idle)
return;
- if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
- /*
- * One CPU supports mwait => All CPUs supports mwait
- */
- pr_info("using mwait in idle threads\n");
- pm_idle = mwait_idle;
- } else if (cpu_has_amd_erratum(amd_erratum_400)) {
+ if (cpu_has_amd_erratum(amd_erratum_400)) {
/* E400: APIC timer interrupt does not wake up CPU from C1e */
pr_info("using AMD E400 aware idle routine\n");
- pm_idle = amd_e400_idle;
+ x86_idle = amd_e400_idle;
} else
- pm_idle = default_idle;
+ x86_idle = default_idle;
}
void __init init_amd_e400_c1e_mask(void)
{
/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
- if (pm_idle == amd_e400_idle)
+ if (x86_idle == amd_e400_idle)
zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}
@@ -602,11 +517,8 @@ static int __init idle_setup(char *str)
if (!strcmp(str, "poll")) {
pr_info("using polling idle threads\n");
- pm_idle = poll_idle;
+ x86_idle = poll_idle;
boot_option_idle_override = IDLE_POLL;
- } else if (!strcmp(str, "mwait")) {
- boot_option_idle_override = IDLE_FORCE_MWAIT;
- WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
} else if (!strcmp(str, "halt")) {
/*
* When the boot option of idle=halt is added, halt is
@@ -615,7 +527,7 @@ static int __init idle_setup(char *str)
* To continue to load the CPU idle driver, don't touch
* the boot_option_idle_override.
*/
- pm_idle = default_idle;
+ x86_idle = default_idle;
boot_option_idle_override = IDLE_HALT;
} else if (!strcmp(str, "nomwait")) {
/*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ed0fe385289..a6ceaedc396 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1369,7 +1369,7 @@ static inline void mwait_play_dead(void)
void *mwait_ptr;
struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
- if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
+ if (!this_cpu_has(X86_FEATURE_MWAIT))
return;
if (!this_cpu_has(X86_FEATURE_CLFLSH))
return;
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 2fdca25905a..fef7d0ba7e3 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -195,7 +195,7 @@ err_sysfs:
return r;
}
-static int xo15_sci_remove(struct acpi_device *device, int type)
+static int xo15_sci_remove(struct acpi_device *device)
{
acpi_disable_gpe(NULL, xo15_sci_gpe);
acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 8971a26d21a..94eac5c85cd 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -556,12 +556,9 @@ void __init xen_arch_setup(void)
COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
/* Set up idle, making sure it calls safe_halt() pvop */
-#ifdef CONFIG_X86_32
- boot_cpu_data.hlt_works_ok = 1;
-#endif
disable_cpuidle();
disable_cpufreq();
- WARN_ON(set_pm_idle_to_default());
+ WARN_ON(xen_set_default_idle());
fiddle_vdso();
#ifdef CONFIG_NUMA
numa_off = 1;