author		Arnd Bergmann <arnd@arndb.de>	2011-10-31 14:08:10 +0100
committer	Arnd Bergmann <arnd@arndb.de>	2011-10-31 14:08:10 +0100
commit		08cab72f91c8b28ffabfd143119bccdd4a115ad7 (patch)
tree		ccd5583971caecd82bf2d1e62691bf6e0362d650 /kernel
parent		86c1e5a74af11e4817ffa6d7748d9ac1353b5b53 (diff)
parent		f37a53cc5d8a8fb199e41386d125d8c2ed9e54ef (diff)
Merge branch 'dt/gic' into next/dt
Conflicts:
	arch/arm/include/asm/localtimer.h
	arch/arm/mach-msm/board-msm8x60.c
	arch/arm/mach-omap2/board-generic.c
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/Makefile	1
-rw-r--r--	kernel/cpu_pm.c	233
-rw-r--r--	kernel/events/core.c	4
-rw-r--r--	kernel/irq/chip.c	64
-rw-r--r--	kernel/irq/internals.h	19
-rw-r--r--	kernel/irq/irqdesc.c	32
-rw-r--r--	kernel/irq/irqdomain.c	12
-rw-r--r--	kernel/irq/manage.c	218
-rw-r--r--	kernel/irq/settings.h	7
-rw-r--r--	kernel/power/Kconfig	4
10 files changed, 564 insertions(+), 30 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index eca595e2fd5..988cb3da703 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_TRACEPOINTS) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
+obj-$(CONFIG_CPU_PM) += cpu_pm.o
obj-$(CONFIG_PERF_EVENTS) += events/
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
new file mode 100644
index 00000000000..249152e1530
--- /dev/null
+++ b/kernel/cpu_pm.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu_pm.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+
+static DEFINE_RWLOCK(cpu_pm_notifier_lock);
+static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+
+static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+{
+ int ret;
+
+ ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+ nr_to_call, nr_calls);
+
+ return notifier_to_errno(ret);
+}
+
+/**
+ * cpu_pm_register_notifier - register a driver with cpu_pm
+ * @nb: notifier block to register
+ *
+ * Add a driver to a list of drivers that are notified about
+ * CPU and CPU cluster low power entry and exit.
+ *
+ * This function may sleep, and has the same return conditions as
+ * raw_notifier_chain_register.
+ */
+int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret;
+
+ write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+ ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
+ write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
+
+/**
+ * cpu_pm_unregister_notifier - unregister a driver with cpu_pm
+ * @nb: notifier block to be unregistered
+ *
+ * Remove a driver from the CPU PM notifier list.
+ *
+ * This function may sleep, and has the same return conditions as
+ * raw_notifier_chain_unregister.
+ */
+int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret;
+
+ write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+ ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
+ write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
+
+/**
+ * cpu_pm_enter - CPU low power entry notifier
+ *
+ * Notifies listeners that a single CPU is entering a low power state that may
+ * cause some blocks in the same power domain as the cpu to reset.
+ *
+ * Must be called on the affected CPU with interrupts disabled. Platform is
+ * responsible for ensuring that cpu_pm_enter is not called twice on the same
+ * CPU before cpu_pm_exit is called. Notified drivers can include the VFP
+ * co-processor, the interrupt controller and its PM extensions, and local
+ * CPU timers whose context save/restore must not be interrupted. Hence it
+ * must be called with interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_pm_enter(void)
+{
+ int nr_calls;
+ int ret = 0;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ /*
+ * Inform the (nr_calls - 1) listeners that were already notified of
+ * CPU PM entry that the entry has failed, so they can undo their
+ * preparations.
+ */
+ cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_enter);
+
+/**
+ * cpu_pm_exit - CPU low power exit notifier
+ *
+ * Notifies listeners that a single CPU is exiting a low power state that may
+ * have caused some blocks in the same power domain as the cpu to reset.
+ *
+ * Notified drivers can include the VFP co-processor, the interrupt
+ * controller and its PM extensions, and local CPU timers whose context
+ * save/restore must not be interrupted. Hence it must be called with
+ * interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_pm_exit(void)
+{
+ int ret;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_exit);
+
+/**
+ * cpu_cluster_pm_enter - CPU cluster low power entry notifier
+ *
+ * Notifies listeners that all cpus in a power domain are entering a low power
+ * state that may cause some blocks in the same power domain to reset.
+ *
+ * Must be called after cpu_pm_enter has been called on all cpus in the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain. Notified drivers can include the VFP co-processor, the interrupt
+ * controller and its PM extensions, and local CPU timers whose context
+ * save/restore must not be interrupted. Hence it must be called with
+ * interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_cluster_pm_enter(void)
+{
+ int nr_calls;
+ int ret = 0;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ /*
+ * Inform the (nr_calls - 1) listeners that were already notified of
+ * CPU cluster PM entry that the entry has failed, so they can undo
+ * their preparations.
+ */
+ cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
+
+/**
+ * cpu_cluster_pm_exit - CPU cluster low power exit notifier
+ *
+ * Notifies listeners that all cpus in a power domain are exiting from a
+ * low power state that may have caused some blocks in the same power domain
+ * to reset.
+ *
+ * Must be called after cpu_cluster_pm_enter has been called for the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain. Notified drivers can include the VFP co-processor, the interrupt
+ * controller and its PM extensions, and local CPU timers whose context
+ * save/restore must not be interrupted. Hence it must be called with
+ * interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_cluster_pm_exit(void)
+{
+ int ret;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
+
+#ifdef CONFIG_PM
+static int cpu_pm_suspend(void)
+{
+ int ret;
+
+ ret = cpu_pm_enter();
+ if (ret)
+ return ret;
+
+ ret = cpu_cluster_pm_enter();
+ return ret;
+}
+
+static void cpu_pm_resume(void)
+{
+ cpu_cluster_pm_exit();
+ cpu_pm_exit();
+}
+
+static struct syscore_ops cpu_pm_syscore_ops = {
+ .suspend = cpu_pm_suspend,
+ .resume = cpu_pm_resume,
+};
+
+static int cpu_pm_init(void)
+{
+ register_syscore_ops(&cpu_pm_syscore_ops);
+ return 0;
+}
+core_initcall(cpu_pm_init);
+#endif
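
For context (not part of the patch): a typical consumer of this API registers a notifier block and saves/restores per-CPU hardware state on the enter/exit events, much like the VFP and GIC users this series targets. A minimal sketch, with all foo_* helpers hypothetical:

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>

extern void foo_save_context(void);	/* hypothetical helpers */
extern void foo_restore_context(void);

static int foo_cpu_pm_notify(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		foo_save_context();	/* CPU context may be lost after this */
		break;
	case CPU_PM_ENTER_FAILED:	/* low power entry aborted: undo */
	case CPU_PM_EXIT:
		foo_restore_context();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block foo_cpu_pm_nb = {
	.notifier_call = foo_cpu_pm_notify,
};

static int __init foo_init(void)
{
	return cpu_pm_register_notifier(&foo_cpu_pm_nb);
}
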
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0f857782d06..fbe38f2e8ed 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5758,6 +5758,7 @@ struct pmu *perf_init_event(struct perf_event *event)
pmu = idr_find(&pmu_idr, event->attr.type);
rcu_read_unlock();
if (pmu) {
+ event->pmu = pmu;
ret = pmu->event_init(event);
if (ret)
pmu = ERR_PTR(ret);
@@ -5765,6 +5766,7 @@ struct pmu *perf_init_event(struct perf_event *event)
}
list_for_each_entry_rcu(pmu, &pmus, entry) {
+ event->pmu = pmu;
ret = pmu->event_init(event);
if (!ret)
goto unlock;
@@ -5891,8 +5893,6 @@ done:
return ERR_PTR(err);
}
- event->pmu = pmu;
-
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
jump_label_inc(&perf_sched_events);
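
The two hunks above move the event->pmu assignment so that it happens before pmu->event_init() is called, letting an event_init implementation reach its driver state through event->pmu. A hedged sketch of the dependency (struct foo_pmu and its fields are hypothetical):

#include <linux/perf_event.h>

struct foo_pmu {
	struct pmu pmu;
	void __iomem *base;		/* driver-private state */
};

static int foo_event_init(struct perf_event *event)
{
	/* Relies on perf_init_event() setting event->pmu before calling us */
	struct foo_pmu *foo = container_of(event->pmu, struct foo_pmu, pmu);

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (!foo->base)			/* driver state reached via event->pmu */
		return -ENODEV;

	return 0;
}
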
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index dc5114b4c16..f7c543a801d 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -26,7 +26,7 @@
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(irq_set_chip);
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
int ret = 0;
if (!desc)
@@ -78,7 +78,7 @@ EXPORT_SYMBOL(irq_set_irq_type);
int irq_set_handler_data(unsigned int irq, void *data)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(irq_set_handler_data);
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
@@ -119,7 +119,7 @@ int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
int irq_set_chip_data(unsigned int irq, void *data)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return -EINVAL;
@@ -204,6 +204,24 @@ void irq_disable(struct irq_desc *desc)
}
}
+void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
+{
+ if (desc->irq_data.chip->irq_enable)
+ desc->irq_data.chip->irq_enable(&desc->irq_data);
+ else
+ desc->irq_data.chip->irq_unmask(&desc->irq_data);
+ cpumask_set_cpu(cpu, desc->percpu_enabled);
+}
+
+void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
+{
+ if (desc->irq_data.chip->irq_disable)
+ desc->irq_data.chip->irq_disable(&desc->irq_data);
+ else
+ desc->irq_data.chip->irq_mask(&desc->irq_data);
+ cpumask_clear_cpu(cpu, desc->percpu_enabled);
+}
+
static inline void mask_ack_irq(struct irq_desc *desc)
{
if (desc->irq_data.chip->irq_mask_ack)
@@ -544,12 +562,44 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
+/**
+ * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ *
+ * Per CPU interrupts on SMP machines without locking requirements. Same as
+ * handle_percpu_irq() above but with the following extra:
+ *
+ * action->percpu_dev_id is a pointer to a percpu variable that
+ * contains the real device id for the cpu on which this handler is
+ * called.
+ */
+void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irqaction *action = desc->action;
+ void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
+ irqreturn_t res;
+
+ kstat_incr_irqs_this_cpu(irq, desc);
+
+ if (chip->irq_ack)
+ chip->irq_ack(&desc->irq_data);
+
+ trace_irq_handler_entry(irq, action);
+ res = action->handler(irq, dev_id);
+ trace_irq_handler_exit(irq, action, res);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
if (!desc)
return;
@@ -593,7 +643,7 @@ irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
if (!desc)
return;
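
Together with irq_set_percpu_devid() (added to irqdesc.c below), an interrupt controller driver wires a banked per-CPU interrupt, such as an ARM PPI, to the new flow handler. A hedged sketch, with the foo_* names hypothetical:

#include <linux/irq.h>

static struct irq_chip foo_irq_chip;	/* hypothetical chip implementation */

static void __init foo_irqchip_init(void)
{
	unsigned int irq = 16;		/* hypothetical banked per-cpu irq (PPI) */

	irq_set_percpu_devid(irq);	/* allocate desc->percpu_enabled, set flags */
	irq_set_chip_and_handler(irq, &foo_irq_chip, handle_percpu_devid_irq);
}
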
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 6546431447d..a73dd6c7372 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -71,6 +71,8 @@ extern int irq_startup(struct irq_desc *desc);
extern void irq_shutdown(struct irq_desc *desc);
extern void irq_enable(struct irq_desc *desc);
extern void irq_disable(struct irq_desc *desc);
+extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
+extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
extern void mask_irq(struct irq_desc *desc);
extern void unmask_irq(struct irq_desc *desc);
@@ -114,14 +116,21 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
}
+#define _IRQ_DESC_CHECK (1 << 0)
+#define _IRQ_DESC_PERCPU (1 << 1)
+
+#define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK)
+#define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
+
struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus);
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+ unsigned int check);
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
static inline struct irq_desc *
-irq_get_desc_buslock(unsigned int irq, unsigned long *flags)
+irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
{
- return __irq_get_desc_lock(irq, flags, true);
+ return __irq_get_desc_lock(irq, flags, true, check);
}
static inline void
@@ -131,9 +140,9 @@ irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
}
static inline struct irq_desc *
-irq_get_desc_lock(unsigned int irq, unsigned long *flags)
+irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
{
- return __irq_get_desc_lock(irq, flags, false);
+ return __irq_get_desc_lock(irq, flags, false, check);
}
static inline void
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 039b889ea05..1550e8447a1 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -424,11 +424,22 @@ unsigned int irq_get_next_irq(unsigned int offset)
}
struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+ unsigned int check)
{
struct irq_desc *desc = irq_to_desc(irq);
if (desc) {
+ if (check & _IRQ_DESC_CHECK) {
+ if ((check & _IRQ_DESC_PERCPU) &&
+ !irq_settings_is_per_cpu_devid(desc))
+ return NULL;
+
+ if (!(check & _IRQ_DESC_PERCPU) &&
+ irq_settings_is_per_cpu_devid(desc))
+ return NULL;
+ }
+
if (bus)
chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, *flags);
@@ -443,6 +454,25 @@ void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
chip_bus_sync_unlock(desc);
}
+int irq_set_percpu_devid(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (!desc)
+ return -EINVAL;
+
+ if (desc->percpu_enabled)
+ return -EINVAL;
+
+ desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
+
+ if (!desc->percpu_enabled)
+ return -ENOMEM;
+
+ irq_set_percpu_devid_flags(irq);
+ return 0;
+}
+
/**
* dynamic_irq_cleanup - cleanup a dynamically allocated irq
* @irq: irq number to initialize
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index b57a3776de4..200ce832c58 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -20,15 +20,15 @@ static DEFINE_MUTEX(irq_domain_mutex);
void irq_domain_add(struct irq_domain *domain)
{
struct irq_data *d;
- int hwirq;
+ int hwirq, irq;
/*
* This assumes that the irq_domain owner has already allocated
* the irq_descs. This block will be removed when support for dynamic
* allocation of irq_descs is added to irq_domain.
*/
- for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
- d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
+ irq_domain_for_each_irq(domain, hwirq, irq) {
+ d = irq_get_irq_data(irq);
if (!d) {
WARN(1, "error: assigning domain to non-existent irq_desc");
return;
@@ -54,15 +54,15 @@ void irq_domain_add(struct irq_domain *domain)
void irq_domain_del(struct irq_domain *domain)
{
struct irq_data *d;
- int hwirq;
+ int hwirq, irq;
mutex_lock(&irq_domain_mutex);
list_del(&domain->list);
mutex_unlock(&irq_domain_mutex);
/* Clear the irq_domain assignments */
- for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
- d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
+ irq_domain_for_each_irq(domain, hwirq, irq) {
+ d = irq_get_irq_data(irq);
d->domain = NULL;
}
}
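
irq_domain_for_each_irq() comes from include/linux/irqdomain.h in the same series; it walks the domain's hwirq range while keeping the corresponding Linux irq number in step, roughly along these lines (a sketch of the semantics, not the verbatim definition):

#define irq_domain_for_each_irq(d, hwirq, irq)				\
	for (hwirq = (d)->hwirq_base, irq = irq_domain_to_irq(d, hwirq); \
	     hwirq < (d)->hwirq_base + (d)->nr_irq;			\
	     hwirq++, irq = irq_domain_to_irq(d, hwirq))
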
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9b956fa2030..67ce837ae52 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -195,7 +195,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
@@ -356,7 +356,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
static int __disable_irq_nosync(unsigned int irq)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return -EINVAL;
@@ -448,7 +448,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
void enable_irq(unsigned int irq)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
if (!desc)
return;
@@ -467,6 +467,9 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
struct irq_desc *desc = irq_to_desc(irq);
int ret = -ENXIO;
+ if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
+ return 0;
+
if (desc->irq_data.chip->irq_set_wake)
ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
@@ -488,7 +491,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
int ret = 0;
if (!desc)
@@ -529,7 +532,7 @@ EXPORT_SYMBOL(irq_set_irq_wake);
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
unsigned long flags;
- struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
int canrequest = 0;
if (!desc)
@@ -1118,6 +1121,8 @@ int setup_irq(unsigned int irq, struct irqaction *act)
int retval;
struct irq_desc *desc = irq_to_desc(irq);
+ if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+ return -EINVAL;
chip_bus_lock(desc);
retval = __setup_irq(irq, desc, act);
chip_bus_sync_unlock(desc);
@@ -1126,7 +1131,7 @@ int setup_irq(unsigned int irq, struct irqaction *act)
}
EXPORT_SYMBOL_GPL(setup_irq);
- /*
+/*
* Internal function to unregister an irqaction - used to free
* regular and special interrupts that are part of the architecture.
*/
@@ -1224,7 +1229,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
*/
void remove_irq(unsigned int irq, struct irqaction *act)
{
- __free_irq(irq, act->dev_id);
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+ __free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
@@ -1246,7 +1254,7 @@ void free_irq(unsigned int irq, void *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
- if (!desc)
+ if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return;
#ifdef CONFIG_SMP
@@ -1324,7 +1332,8 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
if (!desc)
return -EINVAL;
- if (!irq_settings_can_request(desc))
+ if (!irq_settings_can_request(desc) ||
+ WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return -EINVAL;
if (!handler) {
@@ -1409,3 +1418,194 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
+
+void enable_percpu_irq(unsigned int irq, unsigned int type)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+ if (!desc)
+ return;
+
+ type &= IRQ_TYPE_SENSE_MASK;
+ if (type != IRQ_TYPE_NONE) {
+ int ret;
+
+ ret = __irq_set_trigger(desc, irq, type);
+
+ if (ret) {
+ WARN(1, "failed to set type for IRQ%d\n", irq);
+ goto out;
+ }
+ }
+
+ irq_percpu_enable(desc, cpu);
+out:
+ irq_put_desc_unlock(desc, flags);
+}
+
+void disable_percpu_irq(unsigned int irq)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+ if (!desc)
+ return;
+
+ irq_percpu_disable(desc, cpu);
+ irq_put_desc_unlock(desc, flags);
+}
+
+/*
+ * Internal function to unregister a percpu irqaction.
+ */
+static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irqaction *action;
+ unsigned long flags;
+
+ WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
+
+ if (!desc)
+ return NULL;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ action = desc->action;
+ if (!action || action->percpu_dev_id != dev_id) {
+ WARN(1, "Trying to free already-free IRQ %d\n", irq);
+ goto bad;
+ }
+
+ if (!cpumask_empty(desc->percpu_enabled)) {
+ WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
+ irq, cpumask_first(desc->percpu_enabled));
+ goto bad;
+ }
+
+ /* Found it - now remove it from the list of entries: */
+ desc->action = NULL;
+
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ unregister_handler_proc(irq, action);
+
+ module_put(desc->owner);
+ return action;
+
+bad:
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ return NULL;
+}
+
+/**
+ * remove_percpu_irq - free a per-cpu interrupt
+ * @irq: Interrupt line to free
+ * @act: irqaction for the interrupt
+ *
+ * Used to remove interrupts statically set up by the early boot process.
+ */
+void remove_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc && irq_settings_is_per_cpu_devid(desc))
+ __free_percpu_irq(irq, act->percpu_dev_id);
+}
+
+/**
+ * free_percpu_irq - free an interrupt allocated with request_percpu_irq
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * Remove a percpu interrupt handler. The handler is removed, but
+ * the interrupt line is not disabled. This must be done on each
+ * CPU before calling this function. The function does not return
+ * until any executing interrupts for this IRQ have completed.
+ *
+ * This function must not be called from interrupt context.
+ */
+void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (!desc || !irq_settings_is_per_cpu_devid(desc))
+ return;
+
+ chip_bus_lock(desc);
+ kfree(__free_percpu_irq(irq, dev_id));
+ chip_bus_sync_unlock(desc);
+}
+
+/**
+ * setup_percpu_irq - setup a per-cpu interrupt
+ * @irq: Interrupt line to setup
+ * @act: irqaction for the interrupt
+ *
+ * Used to statically set up per-cpu interrupts in the early boot process.
+ */
+int setup_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ int retval;
+
+ if (!desc || !irq_settings_is_per_cpu_devid(desc))
+ return -EINVAL;
+ chip_bus_lock(desc);
+ retval = __setup_irq(irq, desc, act);
+ chip_bus_sync_unlock(desc);
+
+ return retval;
+}
+
+/**
+ * request_percpu_irq - allocate a percpu interrupt line
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A percpu cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources, but doesn't
+ * automatically enable the interrupt. It has to be done on each
+ * CPU using enable_percpu_irq().
+ *
+ * Dev_id must be globally unique. It is a per-cpu variable, and
+ * the handler gets called with the interrupted CPU's instance of
+ * that variable.
+ */
+int request_percpu_irq(unsigned int irq, irq_handler_t handler,
+ const char *devname, void __percpu *dev_id)
+{
+ struct irqaction *action;
+ struct irq_desc *desc;
+ int retval;
+
+ if (!dev_id)
+ return -EINVAL;
+
+ desc = irq_to_desc(irq);
+ if (!desc || !irq_settings_can_request(desc) ||
+ !irq_settings_is_per_cpu_devid(desc))
+ return -EINVAL;
+
+ action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = IRQF_PERCPU;
+ action->name = devname;
+ action->percpu_dev_id = dev_id;
+
+ chip_bus_lock(desc);
+ retval = __setup_irq(irq, desc, action);
+ chip_bus_sync_unlock(desc);
+
+ if (retval)
+ kfree(action);
+
+ return retval;
+}
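
Consumer side of the new API, sketched as a hypothetical local timer driver (all foo_* names invented for illustration): dev_id is a percpu pointer, the handler receives the calling CPU's own instance, and each CPU must enable the interrupt for itself with enable_percpu_irq().

#include <linux/interrupt.h>
#include <linux/percpu.h>

struct foo_timer {
	void __iomem *base;		/* hypothetical per-cpu timer state */
};

extern bool foo_timer_event(struct foo_timer *evt);	/* hypothetical */

static DEFINE_PER_CPU(struct foo_timer, foo_timer_evt);

static irqreturn_t foo_timer_handler(int irq, void *dev_id)
{
	struct foo_timer *evt = dev_id;	/* this CPU's instance */

	return foo_timer_event(evt) ? IRQ_HANDLED : IRQ_NONE;
}

static int __init foo_timer_register(unsigned int irq)
{
	int err;

	err = request_percpu_irq(irq, foo_timer_handler, "foo_timer",
				 &foo_timer_evt);
	if (err)
		return err;

	/* must run on every CPU, e.g. from a CPU hotplug notifier */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
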
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index f1667833d44..1162f1030f1 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -13,6 +13,7 @@ enum {
_IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT,
_IRQ_NO_BALANCING = IRQ_NO_BALANCING,
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
+ _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
@@ -24,6 +25,7 @@ enum {
#define IRQ_NOTHREAD GOT_YOU_MORON
#define IRQ_NOAUTOEN GOT_YOU_MORON
#define IRQ_NESTED_THREAD GOT_YOU_MORON
+#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
@@ -39,6 +41,11 @@ static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
return desc->status_use_accessors & _IRQ_PER_CPU;
}
+static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_PER_CPU_DEVID;
+}
+
static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
{
desc->status_use_accessors |= _IRQ_PER_CPU;
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 3744c594b19..80a85971cf6 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -235,3 +235,7 @@ config PM_GENERIC_DOMAINS
config PM_GENERIC_DOMAINS_RUNTIME
def_bool y
depends on PM_RUNTIME && PM_GENERIC_DOMAINS
+
+config CPU_PM
+ bool
+ depends on SUSPEND || CPU_IDLE
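
CPU_PM is a silent option with no prompt, so platforms that need the notifiers are expected to select it from their own Kconfig entries; a hedged sketch for a hypothetical platform:

config ARCH_FOO
	bool "Foo SoC family"
	select CPU_PM if SUSPEND || CPU_IDLE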