Diffstat (limited to 'drivers/clocksource/exynos_mct.c')
-rw-r--r--  drivers/clocksource/exynos_mct.c  57
1 file changed, 34 insertions(+), 23 deletions(-)
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 48f76bc05da0..ddd03f8037a7 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -98,8 +98,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
__raw_writel(value, reg_base + offset);
if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
- stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
- switch (offset & EXYNOS4_MCT_L_MASK) {
+ stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
+ switch (offset & ~EXYNOS4_MCT_L_MASK) {
case MCT_L_TCON_OFFSET:
mask = 1 << 3; /* L_TCON write status */
break;
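
The first hunk swaps which mask picks out the per-timer block base and which picks out the register offset within that block, so the write-status poll targets the correct L*_WSTAT register. Assuming the driver's usual definitions (EXYNOS4_MCT_L_MASK == 0xffffff00, EXYNOS4_MCT_L_BASE(1) == 0x400, MCT_L_TCON_OFFSET == 0x20, MCT_L_WSTAT_OFFSET == 0x40; these values are assumptions taken from elsewhere in the file, not from this hunk), the intended decomposition of a local-timer write looks like:

	/* Illustration only; the macro values are assumed, not part of this patch. */
	unsigned long offset    = 0x400 + 0x20;         /* write to L1 TCON             */
	unsigned long block     = offset &  0xffffff00; /* 0x400: per-timer block base  */
	unsigned long reg       = offset & ~0xffffff00; /* 0x20:  register inside block */
	unsigned long stat_addr = block + 0x40;         /* 0x440: that timer's WSTAT    */

With the old expressions the two halves were exchanged: under those values stat_addr ended up outside the timer block and the switch never matched a register case.
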
@@ -418,23 +418,21 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
evt->set_mode = exynos4_tick_set_mode;
evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
evt->rating = 450;
- clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
- 0xf, 0x7fffffff);
exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
if (mct_int_type == MCT_INT_SPI) {
- evt->irq = mct_irqs[MCT_L0_IRQ + cpu];
- if (request_irq(evt->irq, exynos4_mct_tick_isr,
- IRQF_TIMER | IRQF_NOBALANCING,
- evt->name, mevt)) {
- pr_err("exynos-mct: cannot register IRQ %d\n",
- evt->irq);
+
+ if (evt->irq == -1)
return -EIO;
- }
+
+ irq_force_affinity(evt->irq, cpumask_of(cpu));
+ enable_irq(evt->irq);
} else {
enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
}
+ clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
+ 0xf, 0x7fffffff);
return 0;
}
@@ -442,17 +440,18 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
static void exynos4_local_timer_stop(struct clock_event_device *evt)
{
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
- if (mct_int_type == MCT_INT_SPI)
- free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick));
- else
+ if (mct_int_type == MCT_INT_SPI) {
+ if (evt->irq != -1)
+ disable_irq_nosync(evt->irq);
+ } else {
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
+ }
}
static int exynos4_mct_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
struct mct_clock_event_device *mevt;
- unsigned int cpu;
/*
* Grab cpu pointer in each case to avoid spurious
@@ -463,12 +462,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
mevt = this_cpu_ptr(&percpu_mct_tick);
exynos4_local_timer_setup(&mevt->evt);
break;
- case CPU_ONLINE:
- cpu = (unsigned long)hcpu;
- if (mct_int_type == MCT_INT_SPI)
- irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
- cpumask_of(cpu));
- break;
case CPU_DYING:
mevt = this_cpu_ptr(&percpu_mct_tick);
exynos4_local_timer_stop(&mevt->evt);
@@ -484,7 +477,7 @@ static struct notifier_block exynos4_mct_cpu_nb = {
static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
- int err;
+ int err, cpu;
struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
struct clk *mct_clk, *tick_clk;
@@ -511,7 +504,25 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
WARN(err, "MCT: can't request IRQ %d (%d)\n",
mct_irqs[MCT_L0_IRQ], err);
} else {
- irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
+ for_each_possible_cpu(cpu) {
+ int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
+ struct mct_clock_event_device *pcpu_mevt =
+ per_cpu_ptr(&percpu_mct_tick, cpu);
+
+ pcpu_mevt->evt.irq = -1;
+
+ irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
+ if (request_irq(mct_irq,
+ exynos4_mct_tick_isr,
+ IRQF_TIMER | IRQF_NOBALANCING,
+ pcpu_mevt->name, pcpu_mevt)) {
+ pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
+ cpu);
+
+ continue;
+ }
+ pcpu_mevt->evt.irq = mct_irq;
+ }
}
err = register_cpu_notifier(&exynos4_mct_cpu_nb);
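
The remaining hunks move request_irq() for the per-cpu local timer interrupts out of the CPU_STARTING path and into exynos4_timer_resources(), marking each IRQ with IRQ_NOAUTOEN, so the hotplug callbacks only perform non-blocking work: irq_force_affinity() plus enable_irq() when a CPU comes up, and disable_irq_nosync() when it goes down, with evt->irq == -1 flagging a failed request. A minimal sketch of that pattern follows; the names (local_evt, local_timer_*) and the empty ISR are illustrative, not the driver's code.

	/*
	 * Sketch of the IRQ handling scheme this patch adopts; not the driver's
	 * code.  Names and the dummy ISR are made up for illustration.
	 */
	#include <linux/interrupt.h>
	#include <linux/irq.h>
	#include <linux/cpumask.h>
	#include <linux/percpu.h>

	struct local_evt {
		int irq;			/* -1 if request_irq() failed */
	};

	static DEFINE_PER_CPU(struct local_evt, local_evt);

	static irqreturn_t local_timer_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	/* Boot time, sleeping context allowed: request the IRQ once per CPU but
	 * keep it disabled (IRQ_NOAUTOEN) until that CPU actually comes up. */
	static void local_timer_init_irq(int cpu, int irq)
	{
		struct local_evt *evt = per_cpu_ptr(&local_evt, cpu);

		evt->irq = -1;
		irq_set_status_flags(irq, IRQ_NOAUTOEN);
		if (request_irq(irq, local_timer_isr, IRQF_TIMER | IRQF_NOBALANCING,
				"local_timer", evt))
			return;
		evt->irq = irq;
	}

	/* CPU_STARTING path, atomic context: only non-blocking calls. */
	static void local_timer_cpu_up(int cpu)
	{
		struct local_evt *evt = per_cpu_ptr(&local_evt, cpu);

		if (evt->irq == -1)
			return;
		irq_force_affinity(evt->irq, cpumask_of(cpu));
		enable_irq(evt->irq);
	}

	/* CPU_DYING path, atomic context: disable, but never free_irq(). */
	static void local_timer_cpu_down(int cpu)
	{
		struct local_evt *evt = per_cpu_ptr(&local_evt, cpu);

		if (evt->irq != -1)
			disable_irq_nosync(evt->irq);
	}

Because the IRQ is requested once at init and only enabled or disabled afterwards, the CPU_ONLINE affinity fixup and the free_irq() call in the stop path become unnecessary, which is why the patch deletes them.
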