From c13f3d378f77ce3176628ade452b0e461242faf3 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Mon, 15 Feb 2010 11:33:04 +0900 Subject: x86/gart: Unexport gart_iommu_aperture I wrongly exported gart_iommu_aperture in the commit 42590a75019a50012f25a962246498dead428433. It's not necessary so let's unexport it. Signed-off-by: FUJITA Tomonori Cc: Joerg Roedel LKML-Reference: <20100215113241P.fujita.tomonori@lab.ntt.co.jp> Signed-off-by: Ingo Molnar --- arch/x86/kernel/aperture_64.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index f147a95fd84..3704997e8b2 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -31,7 +31,6 @@ #include int gart_iommu_aperture; -EXPORT_SYMBOL_GPL(gart_iommu_aperture); int gart_iommu_aperture_disabled __initdata; int gart_iommu_aperture_allowed __initdata; -- cgit v1.2.3 From 281ff33b7c1b1ba2a5f9b03425e5f692a94913fa Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Thu, 18 Feb 2010 11:51:40 -0800 Subject: x86_64, cpa: Don't work hard in preserving kernel 2M mappings when using 4K already We currently enforce the !RW mapping for the kernel mapping that maps holes between different text, rodata and data sections. However, kernel identity mappings will have different RWX permissions to the pages mapping to text and to the pages padding (which are freed) the text, rodata sections. Hence kernel identity mappings will be broken to smaller pages. For 64-bit, kernel text and kernel identity mappings are different, so we can enable protection checks that come with CONFIG_DEBUG_RODATA, as well as retain 2MB large page mappings for kernel text. Konrad reported a boot failure with the Linux Xen paravirt guest because of this. In this paravirt guest case, the kernel text mapping and the kernel identity mapping share the same page-table pages. Thus forcing the !RW mapping for some of the kernel mappings also cause the kernel identity mappings to be read-only resulting in the boot failure. Linux Xen paravirt guest also uses 4k mappings and don't use 2M mapping. Fix this issue and retain large page performance advantage for native kernels by not working hard and not enforcing !RW for the kernel text mapping, if the current mapping is already using small page mapping. Reported-by: Konrad Rzeszutek Wilk Signed-off-by: Suresh Siddha LKML-Reference: <1266522700.2909.34.camel@sbs-t61.sc.intel.com> Tested-by: Konrad Rzeszutek Wilk Cc: stable@kernel.org [2.6.32, 2.6.33] Signed-off-by: H. Peter Anvin --- arch/x86/mm/pageattr.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 1d4eb93d333..cf07c26d9a4 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -291,8 +291,29 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, */ if (kernel_set_to_readonly && within(address, (unsigned long)_text, - (unsigned long)__end_rodata_hpage_align)) - pgprot_val(forbidden) |= _PAGE_RW; + (unsigned long)__end_rodata_hpage_align)) { + unsigned int level; + + /* + * Don't enforce the !RW mapping for the kernel text mapping, + * if the current mapping is already using small page mapping. + * No need to work hard to preserve large page mappings in this + * case. + * + * This also fixes the Linux Xen paravirt guest boot failure + * (because of unexpected read-only mappings for kernel identity + * mappings). 
In this paravirt guest case, the kernel text + * mapping and the kernel identity mapping share the same + * page-table pages. Thus we can't really use different + * protections for the kernel text and identity mappings. Also, + * these shared mappings are made of small page mappings. + * Thus this don't enforce !RW mapping for small page kernel + * text mapping logic will help Linux Xen parvirt guest boot + * aswell. + */ + if (lookup_address(address, &level) && (level != PG_LEVEL_4K)) + pgprot_val(forbidden) |= _PAGE_RW; + } #endif prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); -- cgit v1.2.3 From 3d083407a16698de86b42aee0da2ffb280b5cb7e Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 27 Feb 2010 17:24:15 +0100 Subject: x86/hw-breakpoints: Remove the name field Remove the name field from the arch_hw_breakpoint. We never deal with target symbols in the arch level, neither do we need to ever store it. It's a legacy for the previous version of the x86 breakpoint backend. Let's remove it. Signed-off-by: Frederic Weisbecker Cc: K.Prasad Cc: Linus Torvalds --- arch/x86/include/asm/hw_breakpoint.h | 1 - arch/x86/kernel/hw_breakpoint.c | 7 ------- 2 files changed, 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h index 0675a7c4c20..2a1bd8f4f23 100644 --- a/arch/x86/include/asm/hw_breakpoint.h +++ b/arch/x86/include/asm/hw_breakpoint.h @@ -10,7 +10,6 @@ * (display/resolving) */ struct arch_hw_breakpoint { - char *name; /* Contains name of the symbol to set bkpt */ unsigned long address; u8 len; u8 type; diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index dca2802c666..41e08dff016 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -343,13 +343,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp, return ret; } - /* - * For kernel-addresses, either the address or symbol name can be - * specified. - */ - if (info->name) - info->address = (unsigned long) - kallsyms_lookup_name(info->name); /* * Check that the low-order bits of the address are appropriate * for the alignment implied by len. -- cgit v1.2.3 From 1e259e0a9982078896f3404240096cbea01daca4 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 28 Feb 2010 20:51:15 +0100 Subject: hw-breakpoints: Remove stub unthrottle callback We support event unthrottling in breakpoint events. It means that if we have more than sysctl_perf_event_sample_rate/HZ, perf will throttle, ignoring subsequent events until the next tick. So if ptrace exceeds this max rate, it will omit events, which breaks the ptrace determinism that is supposed to report every triggered breakpoints. This is likely to happen if we set sysctl_perf_event_sample_rate to 1. This patch removes support for unthrottling in breakpoint events to break throttling and restore ptrace determinism. 
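A rough sketch of the throttle logic in the perf core of this era, shown only to illustrate why dropping the callback is enough (simplified from __perf_event_overflow(); the exact code may differ slightly):

	/*
	 * __perf_event_overflow() only throttles events whose pmu provides an
	 * unthrottle callback, so removing hw_breakpoint_pmu_unthrottle()
	 * opts breakpoint events out of throttling entirely:
	 *
	 *	throttle = (throttle && event->pmu->unthrottle != NULL);
	 *
	 * The per-tick limit it would otherwise apply is roughly:
	 */
	if (hwc->interrupts != MAX_INTERRUPTS) {
		hwc->interrupts++;
		if (HZ * hwc->interrupts > (u64)sysctl_perf_event_sample_rate) {
			/* ignore further overflows until the next tick */
			hwc->interrupts = MAX_INTERRUPTS;
		}
	}

With sysctl_perf_event_sample_rate set to 1 this limit is exceeded on the very first overflow of every tick, which is exactly the loss of determinism described above.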
Signed-off-by: Frederic Weisbecker Cc: 2.6.33.x Cc: Peter Zijlstra Cc: K.Prasad Cc: Paul Mackerras --- arch/x86/kernel/hw_breakpoint.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index bb6006e3e29..1e8ceadc0d6 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -531,8 +531,3 @@ void hw_breakpoint_pmu_read(struct perf_event *bp) { /* TODO */ } - -void hw_breakpoint_pmu_unthrottle(struct perf_event *bp) -{ - /* TODO */ -} -- cgit v1.2.3 From 1d6040f17d12a65b9f7ab4cb9fd6d721206b79ec Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 25 Feb 2010 19:40:46 +0100 Subject: perf, x86: make IBS macros available in perf_event.h This patch moves code from oprofile to perf_event.h to make it also available for usage by perf. Signed-off-by: Robert Richter --- arch/x86/include/asm/perf_event.h | 10 ++++++++++ arch/x86/oprofile/op_model_amd.c | 11 ----------- 2 files changed, 10 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index befd172c82a..4933ccde96c 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -117,6 +117,16 @@ union cpuid10_edx { */ #define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) +/* IbsFetchCtl bits/masks */ +#define IBS_FETCH_RAND_EN (1ULL<<57) +#define IBS_FETCH_VAL (1ULL<<49) +#define IBS_FETCH_ENABLE (1ULL<<48) +#define IBS_FETCH_CNT_MASK 0xFFFF0000ULL + +/* IbsOpCtl bits */ +#define IBS_OP_CNT_CTL (1ULL<<19) +#define IBS_OP_VAL (1ULL<<18) +#define IBS_OP_ENABLE (1ULL<<17) #ifdef CONFIG_PERF_EVENTS extern void init_hw_perf_events(void); diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 6a58256dce9..c6717491730 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -46,17 +46,6 @@ static unsigned long reset_value[NUM_VIRT_COUNTERS]; -/* IbsFetchCtl bits/masks */ -#define IBS_FETCH_RAND_EN (1ULL<<57) -#define IBS_FETCH_VAL (1ULL<<49) -#define IBS_FETCH_ENABLE (1ULL<<48) -#define IBS_FETCH_CNT_MASK 0xFFFF0000ULL - -/* IbsOpCtl bits */ -#define IBS_OP_CNT_CTL (1ULL<<19) -#define IBS_OP_VAL (1ULL<<18) -#define IBS_OP_ENABLE (1ULL<<17) - #define IBS_FETCH_SIZE 6 #define IBS_OP_SIZE 12 -- cgit v1.2.3 From a163b1099dc7016704043c7fc572ae42519f08f7 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 25 Feb 2010 19:43:07 +0100 Subject: perf, x86: add some IBS macros to perf_event.h Signed-off-by: Robert Richter --- arch/x86/include/asm/perf_event.h | 4 +++- arch/x86/oprofile/op_model_amd.c | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 4933ccde96c..c7f60e1297a 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -121,12 +121,14 @@ union cpuid10_edx { #define IBS_FETCH_RAND_EN (1ULL<<57) #define IBS_FETCH_VAL (1ULL<<49) #define IBS_FETCH_ENABLE (1ULL<<48) -#define IBS_FETCH_CNT_MASK 0xFFFF0000ULL +#define IBS_FETCH_CNT 0xFFFF0000ULL +#define IBS_FETCH_MAX_CNT 0x0000FFFFULL /* IbsOpCtl bits */ #define IBS_OP_CNT_CTL (1ULL<<19) #define IBS_OP_VAL (1ULL<<18) #define IBS_OP_ENABLE (1ULL<<17) +#define IBS_OP_MAX_CNT 0x0000FFFFULL #ifdef CONFIG_PERF_EVENTS extern void init_hw_perf_events(void); diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index c6717491730..8ddb9fa9c1b 100644 
--- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -279,7 +279,7 @@ op_amd_handle_ibs(struct pt_regs * const regs, oprofile_write_commit(&entry); /* reenable the IRQ */ - ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK); + ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT); ctl |= IBS_FETCH_ENABLE; wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl); } @@ -319,7 +319,7 @@ static inline void op_amd_start_ibs(void) return; if (ibs_config.fetch_enabled) { - val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; + val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT; val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0; val |= IBS_FETCH_ENABLE; wrmsrl(MSR_AMD64_IBSFETCHCTL, val); @@ -341,7 +341,7 @@ static inline void op_amd_start_ibs(void) * avoid underflows. */ ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET, - 0xFFFFULL); + IBS_OP_MAX_CNT); } if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops) ibs_op_ctl |= IBS_OP_CNT_CTL; -- cgit v1.2.3 From bb1165d6882f423f90fc7007a88c6c993b7c2ac4 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 1 Mar 2010 14:21:23 +0100 Subject: perf, x86: rename macro in ARCH_PERFMON_EVENTSEL_ENABLE For consistency reasons this patch renames ARCH_PERFMON_EVENTSEL0_ENABLE to ARCH_PERFMON_EVENTSEL_ENABLE. The following is performed: $ sed -i -e s/ARCH_PERFMON_EVENTSEL0_ENABLE/ARCH_PERFMON_EVENTSEL_ENABLE/g \ arch/x86/include/asm/perf_event.h arch/x86/kernel/cpu/perf_event.c \ arch/x86/kernel/cpu/perf_event_p6.c \ arch/x86/kernel/cpu/perfctr-watchdog.c \ arch/x86/oprofile/op_model_amd.c arch/x86/oprofile/op_model_ppro.c Signed-off-by: Robert Richter --- arch/x86/include/asm/perf_event.h | 2 +- arch/x86/kernel/cpu/perf_event.c | 8 ++++---- arch/x86/kernel/cpu/perf_event_p6.c | 8 ++++---- arch/x86/kernel/cpu/perfctr-watchdog.c | 2 +- arch/x86/oprofile/op_model_amd.c | 6 +++--- arch/x86/oprofile/op_model_ppro.c | 6 +++--- 6 files changed, 16 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index c7f60e1297a..80e693684f1 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -18,7 +18,7 @@ #define MSR_ARCH_PERFMON_EVENTSEL0 0x186 #define MSR_ARCH_PERFMON_EVENTSEL1 0x187 -#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) +#define ARCH_PERFMON_EVENTSEL_ENABLE (1 << 22) #define ARCH_PERFMON_EVENTSEL_ANY (1 << 21) #define ARCH_PERFMON_EVENTSEL_INT (1 << 20) #define ARCH_PERFMON_EVENTSEL_OS (1 << 17) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 641ccb9dddb..6531b4bdb22 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -553,9 +553,9 @@ static void x86_pmu_disable_all(void) if (!test_bit(idx, cpuc->active_mask)) continue; rdmsrl(x86_pmu.eventsel + idx, val); - if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE)) + if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE)) continue; - val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; + val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(x86_pmu.eventsel + idx, val); } } @@ -590,7 +590,7 @@ static void x86_pmu_enable_all(void) continue; val = event->hw.config; - val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + val |= ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(x86_pmu.eventsel + idx, val); } } @@ -853,7 +853,7 @@ void hw_perf_enable(void) static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) { (void)checking_wrmsrl(hwc->config_base + idx, - hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); + hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE); } static inline void 
x86_pmu_disable_event(struct hw_perf_event *hwc, int idx) diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c index 1ca5ba078af..a4e67b99d91 100644 --- a/arch/x86/kernel/cpu/perf_event_p6.c +++ b/arch/x86/kernel/cpu/perf_event_p6.c @@ -62,7 +62,7 @@ static void p6_pmu_disable_all(void) /* p6 only has one enable register */ rdmsrl(MSR_P6_EVNTSEL0, val); - val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; + val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(MSR_P6_EVNTSEL0, val); } @@ -72,7 +72,7 @@ static void p6_pmu_enable_all(void) /* p6 only has one enable register */ rdmsrl(MSR_P6_EVNTSEL0, val); - val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + val |= ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(MSR_P6_EVNTSEL0, val); } @@ -83,7 +83,7 @@ p6_pmu_disable_event(struct hw_perf_event *hwc, int idx) u64 val = P6_NOP_EVENT; if (cpuc->enabled) - val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + val |= ARCH_PERFMON_EVENTSEL_ENABLE; (void)checking_wrmsrl(hwc->config_base + idx, val); } @@ -95,7 +95,7 @@ static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx) val = hwc->config; if (cpuc->enabled) - val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + val |= ARCH_PERFMON_EVENTSEL_ENABLE; (void)checking_wrmsrl(hwc->config_base + idx, val); } diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 74f4e85a572..fb329e9f849 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c @@ -680,7 +680,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz) cpu_nmi_set_wd_enabled(); apic_write(APIC_LVTPC, APIC_DM_NMI); - evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; + evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE; wrmsr(evntsel_msr, evntsel, 0); intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); return 1; diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 8ddb9fa9c1b..090cbbec7db 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -171,7 +171,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, continue; } rdmsrl(msrs->controls[i].addr, val); - if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) + if (val & ARCH_PERFMON_EVENTSEL_ENABLE) op_x86_warn_in_use(i); val &= model->reserved; wrmsrl(msrs->controls[i].addr, val); @@ -398,7 +398,7 @@ static void op_amd_start(struct op_msrs const * const msrs) if (!reset_value[op_x86_phys_to_virt(i)]) continue; rdmsrl(msrs->controls[i].addr, val); - val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + val |= ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(msrs->controls[i].addr, val); } @@ -418,7 +418,7 @@ static void op_amd_stop(struct op_msrs const * const msrs) if (!reset_value[op_x86_phys_to_virt(i)]) continue; rdmsrl(msrs->controls[i].addr, val); - val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; + val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(msrs->controls[i].addr, val); } diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 5d1727ba409..2bf90fafa7b 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -88,7 +88,7 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model, continue; } rdmsrl(msrs->controls[i].addr, val); - if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) + if (val & ARCH_PERFMON_EVENTSEL_ENABLE) op_x86_warn_in_use(i); val &= model->reserved; wrmsrl(msrs->controls[i].addr, val); @@ -166,7 +166,7 @@ static void ppro_start(struct op_msrs const * const msrs) for (i = 0; i < num_counters; ++i) { if (reset_value[i]) { rdmsrl(msrs->controls[i].addr, val); - 
val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + val |= ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(msrs->controls[i].addr, val); } } @@ -184,7 +184,7 @@ static void ppro_stop(struct op_msrs const * const msrs) if (!reset_value[i]) continue; rdmsrl(msrs->controls[i].addr, val); - val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; + val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; wrmsrl(msrs->controls[i].addr, val); } } -- cgit v1.2.3 From 14be1f7454ea96ee614467a49cf018a1a383b189 Mon Sep 17 00:00:00 2001 From: Dimitri Sivanich Date: Mon, 1 Mar 2010 11:48:15 -0600 Subject: x86: Fix sched_clock_cpu for systems with unsynchronized TSC On UV systems, the TSC is not synchronized across blades. The sched_clock_cpu() function is returning values that can go backwards (I've seen as much as 8 seconds) when switching between cpus. As each cpu comes up, early_init_intel() will currently set the sched_clock_stable flag true. When mark_tsc_unstable() runs, it clears the flag, but this only occurs once (the first time a cpu comes up whose TSC is not synchronized with cpu 0). After this, early_init_intel() will set the flag again as the next cpu comes up. Only set sched_clock_stable if tsc has not been marked unstable. Signed-off-by: Dimitri Sivanich Acked-by: Venkatesh Pallipadi Acked-by: Peter Zijlstra LKML-Reference: <20100301174815.GC8224@sgi.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/intel.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 879666f4d87..7e1cca13af3 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -70,7 +70,8 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); - sched_clock_stable = 1; + if (!check_tsc_unstable()) + sched_clock_stable = 1; } /* -- cgit v1.2.3 From 320ebf09cbb6d01954c9a060266aa8e0d27f4638 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Mar 2010 12:35:37 +0100 Subject: perf, x86: Restrict the ANY flag The ANY flag can show SMT data of another task (like 'top'), so we want to disable it when system-wide profiling is disabled. Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 6531b4bdb22..aab2e1ce9de 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -503,6 +503,9 @@ static int __hw_perf_event_init(struct perf_event *event) */ if (attr->type == PERF_TYPE_RAW) { hwc->config |= x86_pmu.raw_event(attr->config); + if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) && + perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) + return -EACCES; return 0; } -- cgit v1.2.3 From b622d644c7d61a5cb95b74e7b143c263bed21f0a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 1 Feb 2010 15:36:30 +0100 Subject: perf_events, x86: Fixup fixed counter constraints Patch 1da53e0230 ("perf_events, x86: Improve x86 event scheduling") lost us one of the fixed purpose counters and then ed8777fc13 ("perf_events, x86: Fix event constraint masks") broke it even further. Widen the fixed event mask to event+umask and specify the full config for each of the 3 fixed purpose counters. Then let the init code fill out the placement for the GP regs based on the cpuid info. 
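A worked example of the new encoding, derived from the FIXED_EVENT_CONSTRAINT() hunk in the diff below (fixed counters live at index X86_PMC_IDX_FIXED = 32 and up):

	/*
	 * FIXED_EVENT_CONSTRAINT(c, n) now expands to
	 *	EVENT_CONSTRAINT(c, (1ULL << (32 + n)), INTEL_ARCH_FIXED_MASK)
	 *
	 * FIXED_EVENT_CONSTRAINT(0x00c0, 0)	INST_RETIRED.ANY
	 *	.code     = 0x00c0		full event + umask, not just the event
	 *	.idxmsk64 = 1ULL << 32		fixed counter 0
	 *
	 * FIXED_EVENT_CONSTRAINT(0x003c, 1)	CPU_CLK_UNHALTED.CORE
	 *	.code     = 0x003c
	 *	.idxmsk64 = 1ULL << 33		fixed counter 1
	 *
	 * init_hw_perf_events() then ORs (1ULL << x86_pmu.num_events) - 1 into
	 * idxmsk64 of every fixed constraint, so the event may also fall back
	 * to any generic counter reported by cpuid.
	 */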
Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/include/asm/perf_event.h | 2 +- arch/x86/kernel/cpu/perf_event.c | 25 ++++++++++++++++++------- arch/x86/kernel/cpu/perf_event_intel.c | 31 +++++++++++++++++++++---------- 3 files changed, 40 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 80e693684f1..db6109a885a 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -50,7 +50,7 @@ INTEL_ARCH_INV_MASK| \ INTEL_ARCH_EDGE_MASK|\ INTEL_ARCH_UNIT_MASK|\ - INTEL_ARCH_EVTSEL_MASK) + INTEL_ARCH_EVENT_MASK) #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index aab2e1ce9de..bfc43fa208b 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -73,10 +73,10 @@ struct debug_store { struct event_constraint { union { unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; - u64 idxmsk64[1]; + u64 idxmsk64; }; - int code; - int cmask; + u64 code; + u64 cmask; int weight; }; @@ -103,7 +103,7 @@ struct cpu_hw_events { }; #define __EVENT_CONSTRAINT(c, n, m, w) {\ - { .idxmsk64[0] = (n) }, \ + { .idxmsk64 = (n) }, \ .code = (c), \ .cmask = (m), \ .weight = (w), \ @@ -116,7 +116,7 @@ struct cpu_hw_events { EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK) #define FIXED_EVENT_CONSTRAINT(c, n) \ - EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK) + EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK) #define EVENT_CONSTRAINT_END \ EVENT_CONSTRAINT(0, 0, 0) @@ -615,8 +615,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) bitmap_zero(used_mask, X86_PMC_IDX_MAX); for (i = 0; i < n; i++) { - constraints[i] = - x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); + c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); + constraints[i] = c; } /* @@ -1350,6 +1350,7 @@ static void __init pmu_check_apic(void) void __init init_hw_perf_events(void) { + struct event_constraint *c; int err; pr_info("Performance Events: "); @@ -1398,6 +1399,16 @@ void __init init_hw_perf_events(void) __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, 0, x86_pmu.num_events); + if (x86_pmu.event_constraints) { + for_each_event_constraint(c, x86_pmu.event_constraints) { + if (c->cmask != INTEL_ARCH_FIXED_MASK) + continue; + + c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1; + c->weight += x86_pmu.num_events; + } + } + pr_info("... version: %d\n", x86_pmu.version); pr_info("... bit width: %d\n", x86_pmu.event_bits); pr_info("... generic registers: %d\n", x86_pmu.num_events); diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index cf6590cf4a5..4fbdfe5708d 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1,7 +1,7 @@ #ifdef CONFIG_CPU_SUP_INTEL /* - * Intel PerfMon v3. Used on Core2 and later. + * Intel PerfMon, used on Core and later. 
*/ static const u64 intel_perfmon_event_map[] = { @@ -27,8 +27,14 @@ static struct event_constraint intel_core_event_constraints[] = static struct event_constraint intel_core2_event_constraints[] = { - FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ - FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ + FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ + FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ + /* + * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event + * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed + * ratio between these counters. + */ + /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ @@ -37,14 +43,16 @@ static struct event_constraint intel_core2_event_constraints[] = INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */ INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */ + INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */ INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */ EVENT_CONSTRAINT_END }; static struct event_constraint intel_nehalem_event_constraints[] = { - FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ - FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ + FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ + FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ + /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ @@ -58,8 +66,9 @@ static struct event_constraint intel_nehalem_event_constraints[] = static struct event_constraint intel_westmere_event_constraints[] = { - FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ - FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ + FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ + FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ + /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */ INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ @@ -68,8 +77,9 @@ static struct event_constraint intel_westmere_event_constraints[] = static struct event_constraint intel_gen_event_constraints[] = { - FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ - FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ + FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ + FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ + /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ EVENT_CONSTRAINT_END }; @@ -935,7 +945,7 @@ static __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_nehalem_event_constraints; pr_cont("Nehalem/Corei7 events, "); break; - case 28: + case 28: /* Atom */ memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -951,6 +961,7 @@ static __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_westmere_event_constraints; pr_cont("Westmere events, "); break; + default: /* * default constraints for v2 and up -- cgit 
v1.2.3 From 29044ad1509ecc229f1d5a31aeed7a8dc61a71c4 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 3 Mar 2010 02:25:22 +0100 Subject: x86/stacktrace: Don't dereference bad frame pointers Callers of a stacktrace might pass bad frame pointers. Those are usually checked for safety in stack walking helpers before any dereferencing, but this is not the case when we need to go through one more frame pointer that backlinks the irq stack to the previous one, as we don't have any reliable address boudaries to compare this frame pointer against. This raises crashes when we record callchains for ftrace events with perf because we don't use the right helpers to capture registers there. We get wrong frame pointers as we call task_pt_regs() even on kernel threads, which is a wrong thing as it gives us the initial state of any kernel threads freshly created. This is even not what we want for user tasks. What we want is a hot snapshot of registers when the ftrace event triggers, not the state before a task entered the kernel. This requires more thoughts to do it correctly though. So first put a guardian to ensure the given frame pointer can be dereferenced to avoid crashes. We'll think about how to fix the callers in a subsequent patch. Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Thomas Gleixner Cc: H. Peter Anvin Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: 2.6.33.x Cc: Arnaldo Carvalho de Melo --- arch/x86/kernel/dumpstack_64.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 0ad9597073f..a6c906c9b19 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -125,9 +125,15 @@ fixup_bp_irq_link(unsigned long bp, unsigned long *stack, { #ifdef CONFIG_FRAME_POINTER struct stack_frame *frame = (struct stack_frame *)bp; + unsigned long next; - if (!in_irq_stack(stack, irq_stack, irq_stack_end)) - return (unsigned long)frame->next_frame; + if (!in_irq_stack(stack, irq_stack, irq_stack_end)) { + if (!probe_kernel_address(&frame->next_frame, next)) + return next; + else + WARN_ONCE(1, "Perf: bad frame pointer = %p in " + "callchain\n", &frame->next_frame); + } #endif return bp; } -- cgit v1.2.3 From dc1d628a67a8f042e711ea5accc0beedc3ef0092 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 3 Mar 2010 15:55:04 +0100 Subject: perf: Provide generic perf_sample_data initialization This makes it easier to extend perf_sample_data and fixes a bug on arm and sparc, which failed to set ->raw to NULL, which can cause crashes when combined with PERF_SAMPLE_RAW. It also optimizes PowerPC and tracepoint, because the struct initialization is forced to zero out the whole structure. Signed-off-by: Peter Zijlstra Acked-by: Jean Pihet Reviewed-by: Frederic Weisbecker Acked-by: David S. 
Miller Cc: Jamie Iles Cc: Paul Mackerras Cc: Stephane Eranian Cc: stable@kernel.org LKML-Reference: <20100304140100.315416040@chello.nl> Signed-off-by: Ingo Molnar --- arch/arm/kernel/perf_event.c | 4 ++-- arch/powerpc/kernel/perf_event.c | 8 ++++---- arch/sparc/kernel/perf_event.c | 2 +- arch/x86/kernel/cpu/perf_event.c | 3 +-- arch/x86/kernel/cpu/perf_event_intel.c | 6 ++---- 5 files changed, 10 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index c54ceb3d1f9..3875d99cc40 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -965,7 +965,7 @@ armv6pmu_handle_irq(int irq_num, */ armv6_pmcr_write(pmcr); - data.addr = 0; + perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); for (idx = 0; idx <= armpmu->num_events; ++idx) { @@ -1945,7 +1945,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) */ regs = get_irq_regs(); - data.addr = 0; + perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); for (idx = 0; idx <= armpmu->num_events; ++idx) { diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index b6cf8f1f4d3..5120bd44f69 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1164,10 +1164,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val, * Finally record data if requested. */ if (record) { - struct perf_sample_data data = { - .addr = ~0ULL, - .period = event->hw.last_period, - }; + struct perf_sample_data data; + + perf_sample_data_init(&data, ~0ULL); + data.period = event->hw.last_period; if (event->attr.sample_type & PERF_SAMPLE_ADDR) perf_get_data_addr(regs, &data.addr); diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 9f2b2bac8b2..6504208f375 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -1189,7 +1189,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, regs = args->regs; - data.addr = 0; + perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 97cddbf3293..42aafd11e17 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1097,8 +1097,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) int idx, handled = 0; u64 val; - data.addr = 0; - data.raw = NULL; + perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 73102df8bfc..44b60c85210 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -590,10 +590,9 @@ static void intel_pmu_drain_bts_buffer(void) ds->bts_index = ds->bts_buffer_base; + perf_sample_data_init(&data, 0); data.period = event->hw.last_period; - data.addr = 0; - data.raw = NULL; regs.ip = 0; /* @@ -742,8 +741,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) int bit, loops; u64 ack, status; - data.addr = 0; - data.raw = NULL; + perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); -- cgit v1.2.3 From f56e8a0765cc4374e02f4e3a79e2427b5096b075 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 5 Mar 2010 15:03:27 -0800 Subject: x86/mce: Fix RCU lockdep splats Create an rcu_dereference_check_mce() that checks for RCU-sched read side and mce_read_mutex being held on update side. 
Replace uses of rcu_dereference() in arch/x86/kernel/cpu/mcheck/mce.c with this new macro. Signed-off-by: Paul E. McKenney Cc: "H. Peter Anvin" Cc: x86@kernel.org Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <1267830207-9474-3-git-send-email-paulmck@linux.vnet.ibm.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index a8aacd4b513..4442e9e898c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -46,6 +46,11 @@ #include "mce-internal.h" +#define rcu_dereference_check_mce(p) \ + rcu_dereference_check((p), \ + rcu_read_lock_sched_held() || \ + lockdep_is_held(&mce_read_mutex)) + #define CREATE_TRACE_POINTS #include @@ -158,7 +163,7 @@ void mce_log(struct mce *mce) mce->finished = 0; wmb(); for (;;) { - entry = rcu_dereference(mcelog.next); + entry = rcu_dereference_check_mce(mcelog.next); for (;;) { /* * When the buffer fills up discard new entries. @@ -1500,7 +1505,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, return -ENOMEM; mutex_lock(&mce_read_mutex); - next = rcu_dereference(mcelog.next); + next = rcu_dereference_check_mce(mcelog.next); /* Only supports full reads right now */ if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { @@ -1565,7 +1570,7 @@ timeout: static unsigned int mce_poll(struct file *file, poll_table *wait) { poll_wait(file, &mce_wait, wait); - if (rcu_dereference(mcelog.next)) + if (rcu_dereference_check_mce(mcelog.next)) return POLLIN | POLLRDNORM; return 0; } -- cgit v1.2.3 From 10fb7f1f2d311b4d2e5d881fe2d83f1c281100f9 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Fri, 5 Mar 2010 13:10:36 -0600 Subject: x86: Reduce per cpu MCA boot up messages Don't write per cpu MCA boot up messages. Signed-of-by: Mike Travis Cc: Hidetoshi Seto Cc: x86@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce_intel.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 7c785634af2..d15df6e49bf 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c @@ -95,7 +95,7 @@ static void cmci_discover(int banks, int boot) /* Already owned by someone else? */ if (val & CMCI_EN) { - if (test_and_clear_bit(i, owned) || boot) + if (test_and_clear_bit(i, owned) && !boot) print_update("SHD", &hdr, i); __clear_bit(i, __get_cpu_var(mce_poll_banks)); continue; @@ -107,7 +107,7 @@ static void cmci_discover(int banks, int boot) /* Did the enable bit stick? -- the bank supports CMCI */ if (val & CMCI_EN) { - if (!test_and_set_bit(i, owned) || boot) + if (!test_and_set_bit(i, owned) && !boot) print_update("CMCI", &hdr, i); __clear_bit(i, __get_cpu_var(mce_poll_banks)); } else { -- cgit v1.2.3 From d6dd692168c049196f54edc2e8227c60702bb1d2 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Fri, 5 Mar 2010 13:10:38 -0600 Subject: x86: Reduce per cpu warning boot up messages Reduce warning message output to one line only instead of per cpu. 
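For context, printk_once() is essentially a per-call-site latch around printk(); a sketch of the include/linux/kernel.h helper (the real macro may differ in detail):

	#define printk_once(fmt, ...)				\
	({							\
		static bool __print_once;			\
								\
		if (!__print_once) {				\
			__print_once = true;			\
			printk(fmt, ##__VA_ARGS__);		\
		}						\
	})

The static flag is shared by every cpu that runs through select_idle_routine(), so only the first one emits the warning.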
Signed-of-by: Mike Travis Cc: Rusty Russell Cc: Frederic Weisbecker Cc: Brian Gerst Cc: x86@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/process.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index c9b3522b6b4..4e8cb4ee9fc 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -600,7 +600,7 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP if (pm_idle == poll_idle && smp_num_siblings > 1) { - printk(KERN_WARNING "WARNING: polling idle and HT enabled," + printk_once(KERN_WARNING "WARNING: polling idle and HT enabled," " performance may degrade.\n"); } #endif -- cgit v1.2.3 From 8447b360a3897bdfb0677107564d1dd9ab6e63be Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Thu, 11 Mar 2010 12:43:29 -0600 Subject: x86, UV: Fix target_cpus() in x2apic_uv_x.c target_cpu() should initially target all cpus, not just cpu 0. Otherwise systems with lots of disks can exhaust the interrupt vectors on cpu 0 if a large number of disks are discovered before the irq balancer is running. Note: UV code only... Signed-off-by: Jack Steiner LKML-Reference: <20100311184328.GA21433@sgi.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/x2apic_uv_x.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 21db3cbea7d..af0ca80e38a 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -114,11 +114,9 @@ EXPORT_SYMBOL_GPL(uv_possible_blades); unsigned long sn_rtc_cycles_per_second; EXPORT_SYMBOL(sn_rtc_cycles_per_second); -/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ - static const struct cpumask *uv_target_cpus(void) { - return cpumask_of(0); + return cpu_online_mask; } static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) -- cgit v1.2.3 From 0e152cd7c16832bd5cadee0c2e41d9959bc9b6f9 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 12 Mar 2010 15:43:03 +0100 Subject: x86, k8 nb: Fix boot crash: enable k8_northbridges unconditionally on AMD systems de957628ce7c84764ff41331111036b3ae5bad0f changed setting of the x86_init.iommu.iommu_init function ptr only when GART IOMMU is found. One side effect of it is that num_k8_northbridges is not initialized anymore if not explicitly called. This resulted in uninitialized pointers in , for example, which uses the num_k8_northbridges thing through node_to_k8_nb_misc(). Fix that through an initcall that runs right after the PCI subsystem and does all the scanning. Then, remove initialization in gart_iommu_init() which is a rootfs_initcall and we're running before that. What is more, since num_k8_northbridges is being used in other places beside GART IOMMU, include it whenever we add AMD CPU support. The previous dependency chain in kconfig contained K8_NB depends on AGP_AMD64|GART_IOMMU which was clearly incorrect. The more natural way in terms of hardware dependency should be AGP_AMD64|GART_IOMMU depends on K8_NB depends on CPU_SUP_AMD && PCI. Make it so Number One! 
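Why an fs_initcall() works here, a sketch of the relevant initcall ordering (my reading of include/linux/init.h, stated for context rather than taken from the patch):

	/*
	 * Initcall levels run in order, so with the hunk below:
	 *
	 *	subsys_initcall()	PCI bus enumeration
	 *	fs_initcall()		init_k8_nbs() -> cache_k8_northbridges()
	 *	rootfs_initcall()	gart_iommu_init(), which can now simply
	 *				test num_k8_northbridges
	 *
	 * i.e. the northbridge list is cached after the PCI subsystem is up
	 * and before any of its users run.
	 */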
Signed-off-by: Borislav Petkov Cc: FUJITA Tomonori Cc: Joerg Roedel LKML-Reference: <20100312144303.GA29262@aftab> Signed-off-by: Ingo Molnar Tested-by: Joerg Roedel --- arch/x86/Kconfig | 4 ++-- arch/x86/kernel/k8.c | 14 ++++++++++++++ arch/x86/kernel/pci-gart_64.c | 2 +- 3 files changed, 17 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index eb4092568f9..ddb52b8d38a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -627,7 +627,7 @@ config GART_IOMMU bool "GART IOMMU support" if EMBEDDED default y select SWIOTLB - depends on X86_64 && PCI + depends on X86_64 && PCI && K8_NB ---help--- Support for full DMA access of devices with 32bit memory access only on systems with more than 3GB. This is usually needed for USB, @@ -2026,7 +2026,7 @@ endif # X86_32 config K8_NB def_bool y - depends on AGP_AMD64 || (X86_64 && (GART_IOMMU || (PCI && NUMA))) + depends on CPU_SUP_AMD && PCI source "drivers/pcmcia/Kconfig" diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c index cbc4332a77b..9b895464dd0 100644 --- a/arch/x86/kernel/k8.c +++ b/arch/x86/kernel/k8.c @@ -121,3 +121,17 @@ void k8_flush_garts(void) } EXPORT_SYMBOL_GPL(k8_flush_garts); +static __init int init_k8_nbs(void) +{ + int err = 0; + + err = cache_k8_northbridges(); + + if (err < 0) + printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n"); + + return err; +} + +/* This has to go after the PCI subsystem */ +fs_initcall(init_k8_nbs); diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 34de53b46f8..f3af115a573 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -735,7 +735,7 @@ int __init gart_iommu_init(void) unsigned long scratch; long i; - if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) + if (num_k8_northbridges == 0) return 0; #ifndef CONFIG_AGP_AMD64 -- cgit v1.2.3 From 2aa2b50dd62b5d0675bd7453fbeb5732dc2d7866 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Mar 2010 08:57:03 +0100 Subject: x86/mce: Fix build bug with CONFIG_PROVE_LOCKING=y && CONFIG_X86_MCE_INTEL=y Commit f56e8a076 "x86/mce: Fix RCU lockdep splats" introduced the following build bug: arch/x86/kernel/cpu/mcheck/mce.c: In function 'mce_log': arch/x86/kernel/cpu/mcheck/mce.c:166: error: 'mce_read_mutex' undeclared (first use in this function) arch/x86/kernel/cpu/mcheck/mce.c:166: error: (Each undeclared identifier is reported only once arch/x86/kernel/cpu/mcheck/mce.c:166: error: for each function it appears in.) Move the in-the-middle-of-file lock variable up to the variable definition section, the top of the .c file. Cc: Paul E. McKenney Cc: "H. 
Peter Anvin" Cc: x86@kernel.org Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <1267830207-9474-3-git-send-email-paulmck@linux.vnet.ibm.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index bd58de4d7a2..3ab9c886b61 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -46,6 +46,8 @@ #include "mce-internal.h" +static DEFINE_MUTEX(mce_read_mutex); + #define rcu_dereference_check_mce(p) \ rcu_dereference_check((p), \ rcu_read_lock_sched_held() || \ @@ -1490,8 +1492,6 @@ static void collect_tscs(void *data) rdtscll(cpu_tsc[smp_processor_id()]); } -static DEFINE_MUTEX(mce_read_mutex); - static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off) { -- cgit v1.2.3 From 03fb256df9c960b10c0e01b7e92d2f31433675fe Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 16 Feb 2010 10:48:15 +0000 Subject: ARM: mach-shmobile: G3EVM KEYSC platform data This patch adds KEYSC platform data for the G3EVM board. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/board-g3evm.c | 55 +++++++++++++++++++++++++++++++++++ arch/arm/mach-shmobile/clock-sh7367.c | 7 +++++ 2 files changed, 62 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c index f36c9a94d32..9d326893a21 100644 --- a/arch/arm/mach-shmobile/board-g3evm.c +++ b/arch/arm/mach-shmobile/board-g3evm.c @@ -29,6 +29,8 @@ #include #include #include +#include +#include #include #include #include @@ -127,9 +129,47 @@ static struct platform_device usb_host_device = { .resource = usb_host_resources, }; +/* KEYSC */ +static struct sh_keysc_info keysc_info = { + .mode = SH_KEYSC_MODE_5, + .scan_timing = 3, + .delay = 100, + .keycodes = { + KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, KEY_G, + KEY_H, KEY_I, KEY_J, KEY_K, KEY_L, KEY_M, KEY_N, + KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U, + KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z, KEY_HOME, KEY_SLEEP, + KEY_WAKEUP, KEY_COFFEE, KEY_0, KEY_1, KEY_2, KEY_3, KEY_4, + KEY_5, KEY_6, KEY_7, KEY_8, KEY_9, KEY_STOP, KEY_COMPUTER, + }, +}; + +static struct resource keysc_resources[] = { + [0] = { + .name = "KEYSC", + .start = 0xe61b0000, + .end = 0xe61b000f, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 79, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device keysc_device = { + .name = "sh_keysc", + .num_resources = ARRAY_SIZE(keysc_resources), + .resource = keysc_resources, + .dev = { + .platform_data = &keysc_info, + }, +}; + static struct platform_device *g3evm_devices[] __initdata = { &nor_flash_device, &usb_host_device, + &keysc_device, }; static struct map_desc g3evm_io_desc[] __initdata = { @@ -196,6 +236,21 @@ static void __init g3evm_init(void) __raw_writew(0x6010, 0xe60581c6); /* CGPOSR */ __raw_writew(0x8a0a, 0xe605810c); /* USBCR2 */ + /* KEYSC @ CN7 */ + gpio_request(GPIO_FN_PORT42_KEYOUT0, NULL); + gpio_request(GPIO_FN_PORT43_KEYOUT1, NULL); + gpio_request(GPIO_FN_PORT44_KEYOUT2, NULL); + gpio_request(GPIO_FN_PORT45_KEYOUT3, NULL); + gpio_request(GPIO_FN_PORT46_KEYOUT4, NULL); + gpio_request(GPIO_FN_PORT47_KEYOUT5, NULL); + 
gpio_request(GPIO_FN_PORT48_KEYIN0_PU, NULL); + gpio_request(GPIO_FN_PORT49_KEYIN1_PU, NULL); + gpio_request(GPIO_FN_PORT50_KEYIN2_PU, NULL); + gpio_request(GPIO_FN_PORT55_KEYIN3_PU, NULL); + gpio_request(GPIO_FN_PORT56_KEYIN4_PU, NULL); + gpio_request(GPIO_FN_PORT57_KEYIN5_PU, NULL); + gpio_request(GPIO_FN_PORT58_KEYIN6_PU, NULL); + sh7367_add_standard_devices(); platform_add_devices(g3evm_devices, ARRAY_SIZE(g3evm_devices)); diff --git a/arch/arm/mach-shmobile/clock-sh7367.c b/arch/arm/mach-shmobile/clock-sh7367.c index 58bd54e1113..bb940c6e4e6 100644 --- a/arch/arm/mach-shmobile/clock-sh7367.c +++ b/arch/arm/mach-shmobile/clock-sh7367.c @@ -75,6 +75,11 @@ static struct clk usb0_clk = { .name = "usb0", }; +/* a static keysc0 clk for now - enough to get sh_keysc working */ +static struct clk keysc0_clk = { + .name = "keysc0", +}; + static struct clk_lookup lookups[] = { { .clk = &peripheral_clk, @@ -82,6 +87,8 @@ static struct clk_lookup lookups[] = { .clk = &r_clk, }, { .clk = &usb0_clk, + }, { + .clk = &keysc0_clk, } }; -- cgit v1.2.3 From 143f3b833f98271341379d813cb72deb1657a380 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 19 Feb 2010 09:54:06 +0000 Subject: ARM: mach-shmobile: G3EVM FLCTL platform data This patch adds FLCTL platform data for the G3EVM board. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/board-g3evm.c | 67 ++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c index 9d326893a21..9247503296c 100644 --- a/arch/arm/mach-shmobile/board-g3evm.c +++ b/arch/arm/mach-shmobile/board-g3evm.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -166,10 +167,53 @@ static struct platform_device keysc_device = { }, }; +static struct mtd_partition nand_partition_info[] = { + { + .name = "system", + .offset = 0, + .size = 64 * 1024 * 1024, + }, + { + .name = "userdata", + .offset = MTDPART_OFS_APPEND, + .size = 128 * 1024 * 1024, + }, + { + .name = "cache", + .offset = MTDPART_OFS_APPEND, + .size = 64 * 1024 * 1024, + }, +}; + +static struct resource nand_flash_resources[] = { + [0] = { + .start = 0xe6a30000, + .end = 0xe6a3009b, + .flags = IORESOURCE_MEM, + } +}; + +static struct sh_flctl_platform_data nand_flash_data = { + .parts = nand_partition_info, + .nr_parts = ARRAY_SIZE(nand_partition_info), + .flcmncr_val = QTSEL_E | FCKSEL_E | TYPESEL_SET | NANWF_E + | SHBUSSEL | SEL_16BIT, +}; + +static struct platform_device nand_flash_device = { + .name = "sh_flctl", + .resource = nand_flash_resources, + .num_resources = ARRAY_SIZE(nand_flash_resources), + .dev = { + .platform_data = &nand_flash_data, + }, +}; + static struct platform_device *g3evm_devices[] __initdata = { &nor_flash_device, &usb_host_device, &keysc_device, + &nand_flash_device, }; static struct map_desc g3evm_io_desc[] __initdata = { @@ -251,6 +295,29 @@ static void __init g3evm_init(void) gpio_request(GPIO_FN_PORT57_KEYIN5_PU, NULL); gpio_request(GPIO_FN_PORT58_KEYIN6_PU, NULL); + /* FLCTL */ + gpio_request(GPIO_FN_FCE0, NULL); + gpio_request(GPIO_FN_D0_ED0_NAF0, NULL); + gpio_request(GPIO_FN_D1_ED1_NAF1, NULL); + gpio_request(GPIO_FN_D2_ED2_NAF2, NULL); + gpio_request(GPIO_FN_D3_ED3_NAF3, NULL); + gpio_request(GPIO_FN_D4_ED4_NAF4, NULL); + gpio_request(GPIO_FN_D5_ED5_NAF5, NULL); + gpio_request(GPIO_FN_D6_ED6_NAF6, NULL); + gpio_request(GPIO_FN_D7_ED7_NAF7, NULL); + gpio_request(GPIO_FN_D8_ED8_NAF8, NULL); + 
gpio_request(GPIO_FN_D9_ED9_NAF9, NULL); + gpio_request(GPIO_FN_D10_ED10_NAF10, NULL); + gpio_request(GPIO_FN_D11_ED11_NAF11, NULL); + gpio_request(GPIO_FN_D12_ED12_NAF12, NULL); + gpio_request(GPIO_FN_D13_ED13_NAF13, NULL); + gpio_request(GPIO_FN_D14_ED14_NAF14, NULL); + gpio_request(GPIO_FN_D15_ED15_NAF15, NULL); + gpio_request(GPIO_FN_WE0_XWR0_FWE, NULL); + gpio_request(GPIO_FN_FRB, NULL); + /* FOE, FCDE, FSC on dedicated pins */ + __raw_writel(__raw_readl(0xe6158048) & ~(1 << 15), 0xe6158048); + sh7367_add_standard_devices(); platform_add_devices(g3evm_devices, ARRAY_SIZE(g3evm_devices)); -- cgit v1.2.3 From 6676a1701b0b135dacbb7cfeef48004315300df0 Mon Sep 17 00:00:00 2001 From: NISHIMOTO Hiroki Date: Tue, 23 Feb 2010 10:55:10 +0000 Subject: ARM: mach-shmobile: G4EVM KEYSC platform data This patch adds KEYSC platform data for the G4EVM board. Signed-off-by: NISHIMOTO Hiroki Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/board-g4evm.c | 57 ++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c index 5acd623f93e..10673a90be5 100644 --- a/arch/arm/mach-shmobile/board-g4evm.c +++ b/arch/arm/mach-shmobile/board-g4evm.c @@ -28,6 +28,8 @@ #include #include #include +#include +#include #include #include #include @@ -128,9 +130,49 @@ static struct platform_device usb_host_device = { .resource = usb_host_resources, }; +/* KEYSC */ +static struct sh_keysc_info keysc_info = { + .mode = SH_KEYSC_MODE_5, + .scan_timing = 3, + .delay = 100, + .keycodes = { + KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, + KEY_G, KEY_H, KEY_I, KEY_J, KEY_K, KEY_L, + KEY_M, KEY_N, KEY_U, KEY_P, KEY_Q, KEY_R, + KEY_S, KEY_T, KEY_U, KEY_V, KEY_W, KEY_X, + KEY_Y, KEY_Z, KEY_HOME, KEY_SLEEP, KEY_WAKEUP, KEY_COFFEE, + KEY_0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, + KEY_6, KEY_7, KEY_8, KEY_9, KEY_STOP, KEY_COMPUTER, + }, +}; + +static struct resource keysc_resources[] = { + [0] = { + .name = "KEYSC", + .start = 0xe61b0000, + .end = 0xe61b000f, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 79, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device keysc_device = { + .name = "sh_keysc", + .id = 0, /* keysc0 clock */ + .num_resources = ARRAY_SIZE(keysc_resources), + .resource = keysc_resources, + .dev = { + .platform_data = &keysc_info, + }, +}; + static struct platform_device *g4evm_devices[] __initdata = { &nor_flash_device, &usb_host_device, + &keysc_device, }; static struct map_desc g4evm_io_desc[] __initdata = { @@ -196,6 +238,21 @@ static void __init g4evm_init(void) __raw_writew(0x6010, 0xe60581c6); /* CGPOSR */ __raw_writew(0x8a0a, 0xe605810c); /* USBCR2 */ + /* KEYSC @ CN31 */ + gpio_request(GPIO_FN_PORT60_KEYOUT5, NULL); + gpio_request(GPIO_FN_PORT61_KEYOUT4, NULL); + gpio_request(GPIO_FN_PORT62_KEYOUT3, NULL); + gpio_request(GPIO_FN_PORT63_KEYOUT2, NULL); + gpio_request(GPIO_FN_PORT64_KEYOUT1, NULL); + gpio_request(GPIO_FN_PORT65_KEYOUT0, NULL); + gpio_request(GPIO_FN_PORT66_KEYIN0_PU, NULL); + gpio_request(GPIO_FN_PORT67_KEYIN1_PU, NULL); + gpio_request(GPIO_FN_PORT68_KEYIN2_PU, NULL); + gpio_request(GPIO_FN_PORT69_KEYIN3_PU, NULL); + gpio_request(GPIO_FN_PORT70_KEYIN4_PU, NULL); + gpio_request(GPIO_FN_PORT71_KEYIN5_PU, NULL); + gpio_request(GPIO_FN_PORT72_KEYIN6_PU, NULL); + sh7377_add_standard_devices(); platform_add_devices(g4evm_devices, ARRAY_SIZE(g4evm_devices)); -- cgit v1.2.3 From 9615b37c5c8fed963811c100053d495c412880fb Mon Sep 17 00:00:00 2001 From: Magnus Damm 
Date: Wed, 10 Mar 2010 05:13:12 +0000 Subject: ARM: mach-shmobile: sh7367 SDHI vector merge Merge the SDHI vectors for sh7367 using the recently merged INTC force_enable/disable feature. With this in place SDHI hotplug is supported using the drivers sh_mobile_sdhi and tmio_mmc. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/intc-sh7367.c | 46 ++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-shmobile/intc-sh7367.c b/arch/arm/mach-shmobile/intc-sh7367.c index 6a547b47aab..5ff70cadfc3 100644 --- a/arch/arm/mach-shmobile/intc-sh7367.c +++ b/arch/arm/mach-shmobile/intc-sh7367.c @@ -27,6 +27,8 @@ enum { UNUSED_INTCA = 0, + ENABLED, + DISABLED, /* interrupt sources INTCA */ IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A, @@ -46,8 +48,8 @@ enum { MSIOF2, MSIOF1, SCIFA4, SCIFA5, SCIFB, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, - SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3, - SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2, SDHI1_SDHI1I3, + SDHI0, + SDHI1, MSU_MSU, MSU_MSU2, IREM, SIU, @@ -59,7 +61,7 @@ enum { TTI20, MISTY, DDM, - SDHI2_SDHI2I0, SDHI2_SDHI2I1, SDHI2_SDHI2I2, SDHI2_SDHI2I3, + SDHI2, RWDT0, RWDT1, DMAC_1_DEI0, DMAC_1_DEI1, DMAC_1_DEI2, DMAC_1_DEI3, DMAC_2_DEI4, DMAC_2_DEI5, DMAC_2_DADERR, @@ -70,7 +72,7 @@ enum { /* interrupt groups INTCA */ DMAC_1, DMAC_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, - ETM11, ARM11, USBHS, FLCTL, IIC1, SDHI0, SDHI1, SDHI2, + ETM11, ARM11, USBHS, FLCTL, IIC1 }; static struct intc_vect intca_vectors[] = { @@ -105,10 +107,10 @@ static struct intc_vect intca_vectors[] = { INTC_VECT(SCIFB, 0x0d60), INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), - INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20), - INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60), - INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0), - INTC_VECT(SDHI1_SDHI1I2, 0x0ec0), INTC_VECT(SDHI1_SDHI1I3, 0x0ee0), + INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20), + INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60), + INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0), + INTC_VECT(SDHI1, 0x0ec0), INTC_VECT(SDHI1, 0x0ee0), INTC_VECT(MSU_MSU, 0x0f20), INTC_VECT(MSU_MSU2, 0x0f40), INTC_VECT(IREM, 0x0f60), INTC_VECT(SIU, 0x0fa0), @@ -122,8 +124,8 @@ static struct intc_vect intca_vectors[] = { INTC_VECT(TTI20, 0x1100), INTC_VECT(MISTY, 0x1120), INTC_VECT(DDM, 0x1140), - INTC_VECT(SDHI2_SDHI2I0, 0x1200), INTC_VECT(SDHI2_SDHI2I1, 0x1220), - INTC_VECT(SDHI2_SDHI2I2, 0x1240), INTC_VECT(SDHI2_SDHI2I3, 0x1260), + INTC_VECT(SDHI2, 0x1200), INTC_VECT(SDHI2, 0x1220), + INTC_VECT(SDHI2, 0x1240), INTC_VECT(SDHI2, 0x1260), INTC_VECT(RWDT0, 0x1280), INTC_VECT(RWDT1, 0x12a0), INTC_VECT(DMAC_1_DEI0, 0x2000), INTC_VECT(DMAC_1_DEI1, 0x2020), INTC_VECT(DMAC_1_DEI2, 0x2040), INTC_VECT(DMAC_1_DEI3, 0x2060), @@ -158,12 +160,6 @@ static struct intc_group intca_groups[] __initdata = { INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), - INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1, - SDHI0_SDHI0I2, SDHI0_SDHI0I3), - INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1, - SDHI1_SDHI1I2, SDHI1_SDHI1I3), - INTC_GROUP(SDHI2, SDHI2_SDHI2I0, SDHI2_SDHI2I1, - SDHI2_SDHI2I2, SDHI2_SDHI2I3), }; static struct intc_mask_reg intca_mask_registers[] = { @@ -193,10 
+189,10 @@ static struct intc_mask_reg intca_mask_registers[] = { { SCIFB, SCIFA5, SCIFA4, MSIOF1, 0, 0, MSIOF2, 0 } }, { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ - { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0, + { DISABLED, DISABLED, ENABLED, ENABLED, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ - { SDHI1_SDHI1I3, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0, + { DISABLED, DISABLED, ENABLED, ENABLED, TTI20, USBDMAC_USHDMI, SPU, SIU } }, { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, @@ -211,7 +207,7 @@ static struct intc_mask_reg intca_mask_registers[] = { { 0, 0, TPU0, TPU1, TPU2, TPU3, TPU4, 0 } }, { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */ - { SDHI2_SDHI2I3, SDHI2_SDHI2I2, SDHI2_SDHI2I1, SDHI2_SDHI2I0, + { DISABLED, DISABLED, ENABLED, ENABLED, MISTY, CMT3, RWDT1, RWDT0 } }, }; @@ -258,10 +254,14 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = { { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } }, }; -static DECLARE_INTC_DESC_ACK(intca_desc, "sh7367-intca", - intca_vectors, intca_groups, - intca_mask_registers, intca_prio_registers, - intca_sense_registers, intca_ack_registers); +static struct intc_desc intca_desc __initdata = { + .name = "sh7367-intca", + .force_enable = ENABLED, + .force_disable = DISABLED, + .hw = INTC_HW_DESC(intca_vectors, intca_groups, + intca_mask_registers, intca_prio_registers, + intca_sense_registers, intca_ack_registers), +}; void __init sh7367_init_irq(void) { -- cgit v1.2.3 From c148abfc2d807b2734e7ecd0e00c71ef7d4b7f42 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 10 Mar 2010 05:15:16 +0000 Subject: ARM: mach-shmobile: sh7377 SDHI vector merge Merge the SDHI vectors for sh7377 using the recently merged INTC force_enable/disable feature. With this in place SDHI hotplug is supported using the drivers sh_mobile_sdhi and tmio_mmc. 
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/intc-sh7377.c | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-shmobile/intc-sh7377.c b/arch/arm/mach-shmobile/intc-sh7377.c index 125021cfba5..5c781e2d189 100644 --- a/arch/arm/mach-shmobile/intc-sh7377.c +++ b/arch/arm/mach-shmobile/intc-sh7377.c @@ -27,6 +27,8 @@ enum { UNUSED_INTCA = 0, + ENABLED, + DISABLED, /* interrupt sources INTCA */ IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A, @@ -49,8 +51,8 @@ enum { MSIOF2, MSIOF1, SCIFA4, SCIFA5, SCIFB, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, - SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3, - SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2, SDHI1_SDHI1I3, + SDHI0, + SDHI1, MSU_MSU, MSU_MSU2, IRREM, MSUG, @@ -84,7 +86,7 @@ enum { /* interrupt groups INTCA */ DMAC_1, DMAC_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT, - AP_ARM1, AP_ARM2, USBHS, SPU2, FLCTL, IIC1, SDHI0, SDHI1, + AP_ARM1, AP_ARM2, USBHS, SPU2, FLCTL, IIC1, ICUSB, ICUDMC }; @@ -128,10 +130,10 @@ static struct intc_vect intca_vectors[] = { INTC_VECT(SCIFB, 0x0d60), INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), - INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20), - INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60), - INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0), - INTC_VECT(SDHI1_SDHI1I2, 0x0ec0), INTC_VECT(SDHI1_SDHI1I3, 0x0ee0), + INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20), + INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60), + INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0), + INTC_VECT(SDHI1, 0x0ec0), INTC_VECT(SDHI1, 0x0ee0), INTC_VECT(MSU_MSU, 0x0f20), INTC_VECT(MSU_MSU2, 0x0f40), INTC_VECT(IRREM, 0x0f60), INTC_VECT(MSUG, 0x0fa0), @@ -195,10 +197,6 @@ static struct intc_group intca_groups[] __initdata = { INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), - INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1, - SDHI0_SDHI0I2, SDHI0_SDHI0I3), - INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1, - SDHI1_SDHI1I2, SDHI1_SDHI1I3), INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM), INTC_GROUP(ICUSB, ICUSB_ICUSB0, ICUSB_ICUSB1), INTC_GROUP(ICUDMC, ICUDMC_ICUDMC1, ICUDMC_ICUDMC2), @@ -236,10 +234,10 @@ static struct intc_mask_reg intca_mask_registers[] = { { SCIFB, SCIFA5, SCIFA4, MSIOF1, 0, 0, MSIOF2, 0 } }, { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ - { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0, + { DISABLED, DISABLED, ENABLED, ENABLED, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ - { SDHI1_SDHI1I3, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0, + { DISABLED, DISABLED, ENABLED, ENABLED, TTI20, USBDMAC_USHDMI, 0, MSUG } }, { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, @@ -339,10 +337,14 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = { { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } }, }; -static DECLARE_INTC_DESC_ACK(intca_desc, "sh7377-intca", - intca_vectors, intca_groups, - intca_mask_registers, intca_prio_registers, - intca_sense_registers, intca_ack_registers); +static struct intc_desc intca_desc __initdata = { + .name = "sh7377-intca", 
+ .force_enable = ENABLED, + .force_disable = DISABLED, + .hw = INTC_HW_DESC(intca_vectors, intca_groups, + intca_mask_registers, intca_prio_registers, + intca_sense_registers, intca_ack_registers), +}; void __init sh7377_init_irq(void) { -- cgit v1.2.3 From c57a31abf0b469b9cab6810f4e1895bb7ef1c482 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 10 Mar 2010 05:17:24 +0000 Subject: ARM: mach-shmobile: sh7372 SDHI vector merge Merge the SDHI vectors for sh7372 using the recently merged INTC force_enable/disable feature. With this in place SDHI hotplug is supported using the drivers sh_mobile_sdhi and tmio_mmc. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/intc-sh7372.c | 46 ++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c index c57a923f97a..3ce9d9bd589 100644 --- a/arch/arm/mach-shmobile/intc-sh7372.c +++ b/arch/arm/mach-shmobile/intc-sh7372.c @@ -27,6 +27,8 @@ enum { UNUSED_INTCA = 0, + ENABLED, + DISABLED, /* interrupt sources INTCA */ IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A, @@ -47,14 +49,14 @@ enum { MSIOF2, MSIOF1, SCIFA4, SCIFA5, SCIFB, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, - SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3, - SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2, + SDHI0, + SDHI1, IRREM, IRDA, TPU0, TTI20, DDM, - SDHI2_SDHI2I0, SDHI2_SDHI2I1, SDHI2_SDHI2I2, SDHI2_SDHI2I3, + SDHI2, RWDT0, DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3, DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR, @@ -82,7 +84,7 @@ enum { /* interrupt groups INTCA */ DMAC1_1, DMAC1_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT, - AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1, SDHI0, SDHI1, SDHI2 + AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1 }; static struct intc_vect intca_vectors[] __initdata = { @@ -123,17 +125,17 @@ static struct intc_vect intca_vectors[] __initdata = { INTC_VECT(SCIFB, 0x0d60), INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), - INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20), - INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60), - INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0), - INTC_VECT(SDHI1_SDHI1I2, 0x0ec0), + INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20), + INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60), + INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0), + INTC_VECT(SDHI1, 0x0ec0), INTC_VECT(IRREM, 0x0f60), INTC_VECT(IRDA, 0x0480), INTC_VECT(TPU0, 0x04a0), INTC_VECT(TTI20, 0x1100), INTC_VECT(DDM, 0x1140), - INTC_VECT(SDHI2_SDHI2I0, 0x1200), INTC_VECT(SDHI2_SDHI2I1, 0x1220), - INTC_VECT(SDHI2_SDHI2I2, 0x1240), INTC_VECT(SDHI2_SDHI2I3, 0x1260), + INTC_VECT(SDHI2, 0x1200), INTC_VECT(SDHI2, 0x1220), + INTC_VECT(SDHI2, 0x1240), INTC_VECT(SDHI2, 0x1260), INTC_VECT(RWDT0, 0x1280), INTC_VECT(DMAC1_1_DEI0, 0x2000), INTC_VECT(DMAC1_1_DEI1, 0x2020), INTC_VECT(DMAC1_1_DEI2, 0x2040), INTC_VECT(DMAC1_1_DEI3, 0x2060), @@ -193,12 +195,6 @@ static struct intc_group intca_groups[] __initdata = { INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), - INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1, - SDHI0_SDHI0I2, SDHI0_SDHI0I3), - INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1, - SDHI1_SDHI1I2), - INTC_GROUP(SDHI2, SDHI2_SDHI2I0, 
SDHI2_SDHI2I1, - SDHI2_SDHI2I2, SDHI2_SDHI2I3), INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM), }; @@ -234,10 +230,10 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = { { SCIFB, SCIFA5, SCIFA4, MSIOF1, 0, 0, MSIOF2, 0 } }, { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ - { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0, + { DISABLED, DISABLED, ENABLED, ENABLED, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ - { 0, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0, + { 0, DISABLED, ENABLED, ENABLED, TTI20, USBHSDMAC0_USHDMI, 0, 0 } }, { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, @@ -252,7 +248,7 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = { { 0, 0, TPU0, 0, 0, 0, 0, 0 } }, { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */ - { SDHI2_SDHI2I3, SDHI2_SDHI2I2, SDHI2_SDHI2I1, SDHI2_SDHI2I0, + { DISABLED, DISABLED, ENABLED, ENABLED, 0, CMT3, 0, RWDT0 } }, { 0xe6950080, 0xe69500c0, 8, /* IMR0A3 / IMCR0A3 */ { SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, 0, @@ -358,10 +354,14 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = { { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } }, }; -static DECLARE_INTC_DESC_ACK(intca_desc, "sh7372-intca", - intca_vectors, intca_groups, - intca_mask_registers, intca_prio_registers, - intca_sense_registers, intca_ack_registers); +static struct intc_desc intca_desc __initdata = { + .name = "sh7372-intca", + .force_enable = ENABLED, + .force_disable = DISABLED, + .hw = INTC_HW_DESC(intca_vectors, intca_groups, + intca_mask_registers, intca_prio_registers, + intca_sense_registers, intca_ack_registers), +}; void __init sh7372_init_irq(void) { -- cgit v1.2.3 From 3a14d0397732b6aaa541348b5a8e8f639ecd02b7 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 10 Mar 2010 09:26:44 +0000 Subject: ARM: mach-shmobile: ap4evb SDHI0 platform data V2 Add SDHI0 platform data for the AP4EVB board V2. 
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/board-ap4evb.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index a0463d92644..1c2ec96ce26 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -206,10 +206,32 @@ static struct platform_device keysc_device = { }, }; +/* SDHI0 */ +static struct resource sdhi0_resources[] = { + [0] = { + .name = "SDHI0", + .start = 0xe6850000, + .end = 0xe68501ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 96, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device sdhi0_device = { + .name = "sh_mobile_sdhi", + .num_resources = ARRAY_SIZE(sdhi0_resources), + .resource = sdhi0_resources, + .id = 0, +}; + static struct platform_device *ap4evb_devices[] __initdata = { &nor_flash_device, &smc911x_device, &keysc_device, + &sdhi0_device, }; static struct map_desc ap4evb_io_desc[] __initdata = { @@ -286,6 +308,16 @@ static void __init ap4evb_init(void) gpio_request(GPIO_FN_KEYIN3_133, NULL); gpio_request(GPIO_FN_KEYIN4, NULL); + /* SDHI0 */ + gpio_request(GPIO_FN_SDHICD0, NULL); + gpio_request(GPIO_FN_SDHIWP0, NULL); + gpio_request(GPIO_FN_SDHICMD0, NULL); + gpio_request(GPIO_FN_SDHICLK0, NULL); + gpio_request(GPIO_FN_SDHID0_3, NULL); + gpio_request(GPIO_FN_SDHID0_2, NULL); + gpio_request(GPIO_FN_SDHID0_1, NULL); + gpio_request(GPIO_FN_SDHID0_0, NULL); + sh7372_add_standard_devices(); platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices)); -- cgit v1.2.3
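The four patches above share one idea: the per-function SDHI interrupt sources are collapsed into a single vector per controller, and the board code then exports only that one IRQ (96 for SDHI0 on AP4EVB) through sdhi0_resources. As a rough, hypothetical sketch of the consumer side (this is not the actual sh_mobile_sdhi or tmio_mmc probe code, and example_sdhi_irq, example_sdhi_probe and the "example-sdhi" name are made up for illustration), a platform driver would pick up the merged vector roughly like this:

#include <linux/platform_device.h>
#include <linux/interrupt.h>

/*
 * Hypothetical handler: with the vectors merged, card detect, command
 * completion and data transfer all arrive on this one interrupt, so the
 * driver has to read the controller's status register to tell them apart.
 */
static irqreturn_t example_sdhi_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_sdhi_probe(struct platform_device *pdev)
{
	/* IRQ 96, as defined in sdhi0_resources in the board patch above */
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	return request_irq(irq, example_sdhi_irq, 0, "example-sdhi", pdev);
}

The design choice this sketch illustrates: the driver only ever deals with one IRQ number, while the ENABLED/DISABLED marks in the intc_desc mask register tables (via force_enable/force_disable, as shown in the diffs above) decide which of the underlying hardware mask bits are kept on or off behind that single vector, which is what lets card hotplug work with the existing sh_mobile_sdhi and tmio_mmc drivers.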