Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Kconfig.cpu                      |   3
-rw-r--r--  arch/i386/Makefile                         |   8
-rw-r--r--  arch/i386/defconfig                        |  30
-rw-r--r--  arch/i386/kernel/acpi/boot.c               |  20
-rw-r--r--  arch/i386/kernel/acpi/cstate.c             | 122
-rw-r--r--  arch/i386/kernel/acpi/earlyquirk.c         |   8
-rw-r--r--  arch/i386/kernel/alternative.c             |   4
-rw-r--r--  arch/i386/kernel/apm.c                     |  39
-rw-r--r--  arch/i386/kernel/cpu/mcheck/therm_throt.c  |  21
-rw-r--r--  arch/i386/kernel/head.S                    |   2
-rw-r--r--  arch/i386/kernel/i8253.c                   |   2
-rw-r--r--  arch/i386/kernel/i8259.c                   |   7
-rw-r--r--  arch/i386/kernel/io_apic.c                 | 115
-rw-r--r--  arch/i386/kernel/irq.c                     |   2
-rw-r--r--  arch/i386/kernel/kprobes.c                 |  22
-rw-r--r--  arch/i386/kernel/microcode.c               |  10
-rw-r--r--  arch/i386/kernel/nmi.c                     |  10
-rw-r--r--  arch/i386/kernel/process.c                 |  32
-rw-r--r--  arch/i386/kernel/setup.c                   |  15
-rw-r--r--  arch/i386/kernel/syscall_table.S           |   1
-rw-r--r--  arch/i386/kernel/traps.c                   |  10
-rw-r--r--  arch/i386/kernel/tsc.c                     |   6
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S             |   9
-rw-r--r--  arch/i386/lib/usercopy.c                   |   5
-rw-r--r--  arch/i386/mach-visws/visws_apic.c          |   7
-rw-r--r--  arch/i386/mach-voyager/voyager_basic.c     |   6
-rw-r--r--  arch/i386/mach-voyager/voyager_smp.c       |  52
-rw-r--r--  arch/i386/mm/discontig.c                   |  11
-rw-r--r--  arch/i386/pci/common.c                     |  60
-rw-r--r--  arch/i386/pci/fixup.c                      |  16
-rw-r--r--  arch/i386/pci/i386.c                       |   9
-rw-r--r--  arch/i386/pci/irq.c                        |   4
-rw-r--r--  arch/i386/pci/mmconfig.c                   |  35
-rw-r--r--  arch/i386/pci/pci.h                        |   8
34 files changed, 494 insertions(+), 217 deletions(-)
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
index 21c9a4e7110..fc4f2abccf0 100644
--- a/arch/i386/Kconfig.cpu
+++ b/arch/i386/Kconfig.cpu
@@ -7,6 +7,7 @@ choice
config M386
bool "386"
+ depends on !UML
---help---
This is the processor type of your CPU. This information is used for
optimizing purposes. In order to compile a kernel that can run on
@@ -301,7 +302,7 @@ config X86_USE_PPRO_CHECKSUM
config X86_USE_3DNOW
bool
- depends on MCYRIXIII || MK7 || MGEODE_LX
+ depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
default y
config X86_OOSTORE
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 7cc0b189b82..0677908dfa0 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -42,6 +42,10 @@ cflags-$(CONFIG_REGPARM) += -mregparm=3
# temporary until string.h is fixed
cflags-y += -ffreestanding
+# this works around some issues with generating unwind tables in older gccs
+# newer gccs do it by default
+cflags-y += -maccumulate-outgoing-args
+
# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
# a lot more stack due to the lack of sharing of stacklots:
CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;)
@@ -51,8 +55,8 @@ cflags-y += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
# is .cfi_signal_frame supported too?
-cflags-y += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
-AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
+cflags-y += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,)
+AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,)
CFLAGS += $(cflags-y)
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 60c0c02574f..97aacd6bd7d 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.19-rc1
-# Thu Oct 5 13:04:53 2006
+# Linux kernel version: 2.6.19-rc2-git4
+# Sat Oct 21 03:38:56 2006
#
CONFIG_X86_32=y
CONFIG_GENERIC_TIME=y
@@ -380,8 +380,8 @@ CONFIG_INET6_XFRM_MODE_TRANSPORT=y
CONFIG_INET6_XFRM_MODE_TUNNEL=y
# CONFIG_INET6_XFRM_MODE_BEET is not set
# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
# CONFIG_IPV6_TUNNEL is not set
-# CONFIG_IPV6_SUBTREES is not set
# CONFIG_IPV6_MULTIPLE_TABLES is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
@@ -483,6 +483,13 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_ATA_OVER_ETH is not set
#
+# Misc devices
+#
+# CONFIG_IBM_ASM is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+
+#
# ATA/ATAPI/MFM/RLL support
#
CONFIG_IDE=y
@@ -1024,6 +1031,7 @@ CONFIG_HANGCHECK_TIMER=y
#
# Dallas's 1-wire bus
#
+# CONFIG_W1 is not set
#
# Hardware Monitoring support
@@ -1032,12 +1040,6 @@ CONFIG_HANGCHECK_TIMER=y
# CONFIG_HWMON_VID is not set
#
-# Misc devices
-#
-# CONFIG_IBM_ASM is not set
-# CONFIG_TIFM_CORE is not set
-
-#
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
@@ -1169,7 +1171,6 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_ATI_REMOTE2 is not set
# CONFIG_USB_KEYSPAN_REMOTE is not set
# CONFIG_USB_APPLETOUCH is not set
-# CONFIG_USB_TRANCEVIBRATOR is not set
#
# USB Imaging devices
@@ -1215,6 +1216,7 @@ CONFIG_USB_MON=y
# CONFIG_USB_APPLEDISPLAY is not set
# CONFIG_USB_SISUSBVGA is not set
# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_TEST is not set
#
@@ -1284,6 +1286,7 @@ CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
CONFIG_FS_MBCACHE=y
@@ -1307,6 +1310,7 @@ CONFIG_DNOTIFY=y
# CONFIG_AUTOFS_FS is not set
CONFIG_AUTOFS4_FS=y
# CONFIG_FUSE_FS is not set
+CONFIG_GENERIC_ACL=y
#
# CD-ROM/DVD Filesystems
@@ -1384,7 +1388,6 @@ CONFIG_SUNRPC=y
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
# CONFIG_9P_FS is not set
-CONFIG_GENERIC_ACL=y
#
# Partition Types
@@ -1437,10 +1440,6 @@ CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
#
-# Distributed Lock Manager
-#
-
-#
# Instrumentation Support
#
CONFIG_PROFILING=y
@@ -1480,6 +1479,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_UNWIND_INFO=y
CONFIG_STACK_UNWIND=y
# CONFIG_FORCED_INLINING is not set
+# CONFIG_HEADERS_CHECK is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_LKDTM is not set
CONFIG_EARLY_PRINTK=y
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 92f79cdd9a4..d12fb97a533 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -70,7 +70,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return
#define PREFIX "ACPI: "
-int acpi_noirq __initdata; /* skip ACPI IRQ initialization */
+int acpi_noirq; /* skip ACPI IRQ initialization */
int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
int acpi_ht __initdata = 1; /* enable HT */
@@ -82,6 +82,7 @@ EXPORT_SYMBOL(acpi_strict);
acpi_interrupt_flags acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;
+int acpi_use_timer_override __initdata;
#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -332,7 +333,7 @@ acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
/*
* Parse Interrupt Source Override for the ACPI SCI
*/
-static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
+static void acpi_sci_ioapic_setup(u32 bus_irq, u32 gsi, u16 polarity, u16 trigger)
{
if (trigger == 0) /* compatible SCI trigger is level */
trigger = 3;
@@ -352,13 +353,13 @@ static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
* If GSI is < 16, this will update its flags,
* else it will create a new mp_irqs[] entry.
*/
- mp_override_legacy_irq(gsi, polarity, trigger, gsi);
+ mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
/*
* stash over-ride to indicate we've been here
* and for later update of acpi_fadt
*/
- acpi_sci_override_gsi = gsi;
+ acpi_sci_override_gsi = bus_irq;
return;
}
@@ -376,7 +377,7 @@ acpi_parse_int_src_ovr(acpi_table_entry_header * header,
acpi_table_print_madt_entry(header);
if (intsrc->bus_irq == acpi_fadt.sci_int) {
- acpi_sci_ioapic_setup(intsrc->global_irq,
+ acpi_sci_ioapic_setup(intsrc->bus_irq, intsrc->global_irq,
intsrc->flags.polarity,
intsrc->flags.trigger);
return 0;
@@ -879,7 +880,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* pretend we got one so we can set the SCI flags.
*/
if (!acpi_sci_override_gsi)
- acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
+ acpi_sci_ioapic_setup(acpi_fadt.sci_int, acpi_fadt.sci_int, 0, 0);
/* Fill in identity legacy mapings where no override */
mp_config_acpi_legacy_irqs();
@@ -1300,6 +1301,13 @@ static int __init parse_acpi_skip_timer_override(char *arg)
return 0;
}
early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
+
+static int __init parse_acpi_use_timer_override(char *arg)
+{
+ acpi_use_timer_override = 1;
+ return 0;
+}
+early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
#endif /* CONFIG_X86_IO_APIC */
static int __init setup_acpi_sci(char *s)
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c
index 25db49ef177..20563e52c62 100644
--- a/arch/i386/kernel/acpi/cstate.c
+++ b/arch/i386/kernel/acpi/cstate.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/cpu.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
@@ -41,5 +42,124 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
flags->bm_check = 1;
}
}
-
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
+
+/* The code below handles cstate entry with monitor-mwait pair on Intel*/
+
+struct cstate_entry_s {
+ struct {
+ unsigned int eax;
+ unsigned int ecx;
+ } states[ACPI_PROCESSOR_MAX_POWER];
+};
+static struct cstate_entry_s *cpu_cstate_entry; /* per CPU ptr */
+
+static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
+
+#define MWAIT_SUBSTATE_MASK (0xf)
+#define MWAIT_SUBSTATE_SIZE (4)
+
+#define CPUID_MWAIT_LEAF (5)
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
+#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
+
+#define MWAIT_ECX_INTERRUPT_BREAK (0x1)
+
+#define NATIVE_CSTATE_BEYOND_HALT (2)
+
+int acpi_processor_ffh_cstate_probe(unsigned int cpu,
+ struct acpi_processor_cx *cx, struct acpi_power_register *reg)
+{
+ struct cstate_entry_s *percpu_entry;
+ struct cpuinfo_x86 *c = cpu_data + cpu;
+
+ cpumask_t saved_mask;
+ int retval;
+ unsigned int eax, ebx, ecx, edx;
+ unsigned int edx_part;
+ unsigned int cstate_type; /* C-state type and not ACPI C-state type */
+ unsigned int num_cstate_subtype;
+
+ if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF )
+ return -1;
+
+ if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
+ return -1;
+
+ percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
+ percpu_entry->states[cx->index].eax = 0;
+ percpu_entry->states[cx->index].ecx = 0;
+
+ /* Make sure we are running on right CPU */
+ saved_mask = current->cpus_allowed;
+ retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ if (retval)
+ return -1;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+ /* Check whether this particular cx_type (in CST) is supported or not */
+ cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1;
+ edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
+ num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
+
+ retval = 0;
+ if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) {
+ retval = -1;
+ goto out;
+ }
+
+ /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
+ retval = -1;
+ goto out;
+ }
+ percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
+
+ /* Use the hint in CST */
+ percpu_entry->states[cx->index].eax = cx->address;
+
+ if (!mwait_supported[cstate_type]) {
+ mwait_supported[cstate_type] = 1;
+ printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d "
+ "state\n", cx->type);
+ }
+
+out:
+ set_cpus_allowed(current, saved_mask);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
+
+void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
+{
+ unsigned int cpu = smp_processor_id();
+ struct cstate_entry_s *percpu_entry;
+
+ percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
+ mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
+ percpu_entry->states[cx->index].ecx);
+}
+EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);
+
+static int __init ffh_cstate_init(void)
+{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+ if (c->x86_vendor != X86_VENDOR_INTEL)
+ return -1;
+
+ cpu_cstate_entry = alloc_percpu(struct cstate_entry_s);
+ return 0;
+}
+
+static void __exit ffh_cstate_exit(void)
+{
+ if (cpu_cstate_entry) {
+ free_percpu(cpu_cstate_entry);
+ cpu_cstate_entry = NULL;
+ }
+}
+
+arch_initcall(ffh_cstate_init);
+__exitcall(ffh_cstate_exit);
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index fe799b11ac0..c9841692bb7 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -27,11 +27,17 @@ static int __init check_bridge(int vendor, int device)
#ifdef CONFIG_ACPI
/* According to Nvidia all timer overrides are bogus unless HPET
is enabled. */
- if (vendor == PCI_VENDOR_ID_NVIDIA) {
+ if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
nvidia_hpet_detected = 0;
acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
if (nvidia_hpet_detected == 0) {
acpi_skip_timer_override = 1;
+ printk(KERN_INFO "Nvidia board "
+ "detected. Ignoring ACPI "
+ "timer override.\n");
+ printk(KERN_INFO "If you got timer trouble "
+ "try acpi_use_timer_override\n");
+
}
}
#endif
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 28ab8064976..583c238e17f 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -344,6 +344,7 @@ void alternatives_smp_switch(int smp)
void __init alternative_instructions(void)
{
+ unsigned long flags;
if (no_replacement) {
printk(KERN_INFO "(SMP-)alternatives turned off\n");
free_init_pages("SMP alternatives",
@@ -351,6 +352,8 @@ void __init alternative_instructions(void)
(unsigned long)__smp_alt_end);
return;
}
+
+ local_irq_save(flags);
apply_alternatives(__alt_instructions, __alt_instructions_end);
/* switch to patch-once-at-boottime-only mode and free the
@@ -386,4 +389,5 @@ void __init alternative_instructions(void)
alternatives_smp_switch(0);
}
#endif
+ local_irq_restore(flags);
}
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index b42f2d914af..a60358fe9a4 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -198,7 +198,7 @@
* (APM) BIOS Interface Specification, Revision 1.2, February 1996.
*
* [This document is available from Microsoft at:
- * http://www.microsoft.com/hwdev/busbios/amp_12.htm]
+ * http://www.microsoft.com/whdc/archive/amp_12.mspx]
*/
#include <linux/module.h>
@@ -540,11 +540,30 @@ static inline void apm_restore_cpus(cpumask_t mask)
* Also, we KNOW that for the non error case of apm_bios_call, there
* is no useful data returned in the low order 8 bits of eax.
*/
-#define APM_DO_CLI \
- if (apm_info.allow_ints) \
- local_irq_enable(); \
- else \
+
+static inline unsigned long __apm_irq_save(void)
+{
+ unsigned long flags;
+ local_save_flags(flags);
+ if (apm_info.allow_ints) {
+ if (irqs_disabled_flags(flags))
+ local_irq_enable();
+ } else
+ local_irq_disable();
+
+ return flags;
+}
+
+#define apm_irq_save(flags) \
+ do { flags = __apm_irq_save(); } while (0)
+
+static inline void apm_irq_restore(unsigned long flags)
+{
+ if (irqs_disabled_flags(flags))
local_irq_disable();
+ else if (irqs_disabled())
+ local_irq_enable();
+}
#ifdef APM_ZERO_SEGS
# define APM_DECL_SEGS \
@@ -596,12 +615,11 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
save_desc_40 = gdt[0x40 / 8];
gdt[0x40 / 8] = bad_bios_desc;
- local_save_flags(flags);
- APM_DO_CLI;
+ apm_irq_save(flags);
APM_DO_SAVE_SEGS;
apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
APM_DO_RESTORE_SEGS;
- local_irq_restore(flags);
+ apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();
apm_restore_cpus(cpus);
@@ -640,12 +658,11 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
save_desc_40 = gdt[0x40 / 8];
gdt[0x40 / 8] = bad_bios_desc;
- local_save_flags(flags);
- APM_DO_CLI;
+ apm_irq_save(flags);
APM_DO_SAVE_SEGS;
error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
APM_DO_RESTORE_SEGS;
- local_irq_restore(flags);
+ apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();
apm_restore_cpus(cpus);
diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c
index 4f43047de40..2d8703b7ce6 100644
--- a/arch/i386/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c
@@ -110,17 +110,15 @@ int therm_throt_process(int curr)
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device * sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
{
- sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
- return 0;
+ return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
}
#ifdef CONFIG_HOTPLUG_CPU
-static __cpuinit int thermal_throttle_remove_dev(struct sys_device * sys_dev)
+static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
{
- sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
- return 0;
+ return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
}
/* Mutex protecting device creation against CPU hotplug */
@@ -133,12 +131,14 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
{
unsigned int cpu = (unsigned long)hcpu;
struct sys_device *sys_dev;
+ int err;
sys_dev = get_cpu_sysdev(cpu);
mutex_lock(&therm_cpu_lock);
switch (action) {
case CPU_ONLINE:
- thermal_throttle_add_dev(sys_dev);
+ err = thermal_throttle_add_dev(sys_dev);
+ WARN_ON(err);
break;
case CPU_DEAD:
thermal_throttle_remove_dev(sys_dev);
@@ -157,6 +157,7 @@ static struct notifier_block thermal_throttle_cpu_notifier =
static __init int thermal_throttle_init_device(void)
{
unsigned int cpu = 0;
+ int err;
if (!atomic_read(&therm_throt_en))
return 0;
@@ -167,8 +168,10 @@ static __init int thermal_throttle_init_device(void)
mutex_lock(&therm_cpu_lock);
#endif
/* connect live CPUs to sysfs */
- for_each_online_cpu(cpu)
- thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+ for_each_online_cpu(cpu) {
+ err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+ WARN_ON(err);
+ }
#ifdef CONFIG_HOTPLUG_CPU
mutex_unlock(&therm_cpu_lock);
#endif
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index be9d883c62c..ca31f18d277 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -317,7 +317,7 @@ is386: movl $2,%ecx # set MP
movl %eax,%gs
lldt %ax
cld # gcc2 wants the direction flag cleared at all times
- pushl %eax # fake return address
+ pushl $0 # fake return address for unwinder
#ifdef CONFIG_SMP
movb ready, %cl
movb $1, ready
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index 477b24daff5..9a0060b92e3 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -109,7 +109,7 @@ static struct clocksource clocksource_pit = {
static int __init init_pit_clocksource(void)
{
- if (num_possible_cpus() > 4) /* PIT does not scale! */
+ if (num_possible_cpus() > 1) /* PIT does not scale! */
return 0;
clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index d53eafb6daa..62996cd1708 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -113,7 +113,8 @@ void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
io_apic_irqs &= ~(1<<irq);
- set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+ set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
+ "XT");
enable_irq(irq);
}
@@ -369,8 +370,8 @@ void __init init_ISA_irqs (void)
/*
* 16 old-style INTA-cycle interrupts:
*/
- set_irq_chip_and_handler(i, &i8259A_chip,
- handle_level_irq);
+ set_irq_chip_and_handler_name(i, &i8259A_chip,
+ handle_level_irq, "XT");
} else {
/*
* 'high' PCI IRQs filled in on demand
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index cd082c36ca0..3b7a63e0ed1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -91,6 +91,46 @@ static struct irq_pin_list {
int apic, pin, next;
} irq_2_pin[PIN_MAP_SIZE];
+struct io_apic {
+ unsigned int index;
+ unsigned int unused[3];
+ unsigned int data;
+};
+
+static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
+{
+ return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
+ + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
+}
+
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+ struct io_apic __iomem *io_apic = io_apic_base(apic);
+ writel(reg, &io_apic->index);
+ return readl(&io_apic->data);
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+ struct io_apic __iomem *io_apic = io_apic_base(apic);
+ writel(reg, &io_apic->index);
+ writel(value, &io_apic->data);
+}
+
+/*
+ * Re-write a value: to be used for read-modify-write
+ * cycles where the read already set up the index register.
+ *
+ * Older SiS APIC requires we rewrite the index register
+ */
+static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+{
+ volatile struct io_apic *io_apic = io_apic_base(apic);
+ if (sis_apic_bug)
+ writel(reg, &io_apic->index);
+ writel(value, &io_apic->data);
+}
+
union entry_union {
struct { u32 w1, w2; };
struct IO_APIC_route_entry entry;
@@ -107,12 +147,34 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
return eu.entry;
}
+/*
+ * When we write a new IO APIC routing entry, we need to write the high
+ * word first! If the mask bit in the low word is clear, we will enable
+ * the interrupt, and we need to make sure the entry is fully populated
+ * before that happens.
+ */
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
unsigned long flags;
union entry_union eu;
eu.entry = e;
spin_lock_irqsave(&ioapic_lock, flags);
+ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
+ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+/*
+ * When we mask an IO APIC routing entry, we need to write the low
+ * word first, in order to set the mask bit before we change the
+ * high bits!
+ */
+static void ioapic_mask_entry(int apic, int pin)
+{
+ unsigned long flags;
+ union entry_union eu = { .entry.mask = 1 };
+
+ spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x10 + 2*pin, eu.w1);
io_apic_write(apic, 0x11 + 2*pin, eu.w2);
spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -234,9 +296,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
/*
* Disable it in the IO-APIC irq-routing table:
*/
- memset(&entry, 0, sizeof(entry));
- entry.mask = 1;
- ioapic_write_entry(apic, pin, entry);
+ ioapic_mask_entry(apic, pin);
}
static void clear_IO_APIC (void)
@@ -1225,11 +1285,13 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL)
- set_irq_chip_and_handler(irq, &ioapic_chip,
- handle_fasteoi_irq);
- else
- set_irq_chip_and_handler(irq, &ioapic_chip,
- handle_edge_irq);
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_fasteoi_irq, "fasteoi");
+ else {
+ irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
+ set_irq_chip_and_handler_name(irq, &ioapic_chip,
+ handle_edge_irq, "edge");
+ }
set_intr_gate(vector, interrupt[irq]);
}
@@ -2235,7 +2297,8 @@ static inline void check_timer(void)
printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
disable_8259A_irq(0);
- set_irq_chip_and_handler(0, &lapic_chip, handle_fasteoi_irq);
+ set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq,
+ "fasteio");
apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
enable_8259A_irq(0);
@@ -2541,7 +2604,8 @@ int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
write_msi_msg(irq, &msg);
- set_irq_chip_and_handler(irq, &msi_chip, handle_edge_irq);
+ set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
+ "edge");
return 0;
}
@@ -2562,18 +2626,16 @@ void arch_teardown_msi_irq(unsigned int irq)
static void target_ht_irq(unsigned int irq, unsigned int dest)
{
- u32 low, high;
- low = read_ht_irq_low(irq);
- high = read_ht_irq_high(irq);
+ struct ht_irq_msg msg;
+ fetch_ht_irq_msg(irq, &msg);
- low &= ~(HT_IRQ_LOW_DEST_ID_MASK);
- high &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
+ msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK);
+ msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
- low |= HT_IRQ_LOW_DEST_ID(dest);
- high |= HT_IRQ_HIGH_DEST_ID(dest);
+ msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest);
+ msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
- write_ht_irq_low(irq, low);
- write_ht_irq_high(irq, high);
+ write_ht_irq_msg(irq, &msg);
}
static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
@@ -2594,7 +2656,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
}
#endif
-static struct hw_interrupt_type ht_irq_chip = {
+static struct irq_chip ht_irq_chip = {
.name = "PCI-HT",
.mask = mask_ht_irq,
.unmask = unmask_ht_irq,
@@ -2611,7 +2673,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
vector = assign_irq_vector(irq);
if (vector >= 0) {
- u32 low, high;
+ struct ht_irq_msg msg;
unsigned dest;
cpumask_t tmp;
@@ -2619,9 +2681,10 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
cpu_set(vector >> 8, tmp);
dest = cpu_mask_to_apicid(tmp);
- high = HT_IRQ_HIGH_DEST_ID(dest);
+ msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
- low = HT_IRQ_LOW_BASE |
+ msg.address_lo =
+ HT_IRQ_LOW_BASE |
HT_IRQ_LOW_DEST_ID(dest) |
HT_IRQ_LOW_VECTOR(vector) |
((INT_DEST_MODE == 0) ?
@@ -2633,10 +2696,10 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
HT_IRQ_LOW_MT_ARBITRATED) |
HT_IRQ_LOW_IRQ_MASKED;
- write_ht_irq_low(irq, low);
- write_ht_irq_high(irq, high);
+ write_ht_irq_msg(irq, &msg);
- set_irq_chip_and_handler(irq, &ht_irq_chip, handle_edge_irq);
+ set_irq_chip_and_handler_name(irq, &ht_irq_chip,
+ handle_edge_irq, "edge");
}
return vector;
}
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 8cfc7dbec7b..3201d421090 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -258,7 +258,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
seq_printf(p, " %8s", irq_desc[i].chip->name);
- seq_printf(p, "-%s", handle_irq_name(irq_desc[i].handle_irq));
+ seq_printf(p, "-%-8s", irq_desc[i].name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index d98e44b16fe..fc79e1e859c 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -361,8 +361,11 @@ no_kprobe:
asm volatile ( ".global kretprobe_trampoline\n"
"kretprobe_trampoline: \n"
" pushf\n"
- /* skip cs, eip, orig_eax, es, ds */
- " subl $20, %esp\n"
+ /* skip cs, eip, orig_eax */
+ " subl $12, %esp\n"
+ " pushl %gs\n"
+ " pushl %ds\n"
+ " pushl %es\n"
" pushl %eax\n"
" pushl %ebp\n"
" pushl %edi\n"
@@ -373,10 +376,10 @@ no_kprobe:
" movl %esp, %eax\n"
" call trampoline_handler\n"
/* move eflags to cs */
- " movl 48(%esp), %edx\n"
- " movl %edx, 44(%esp)\n"
+ " movl 52(%esp), %edx\n"
+ " movl %edx, 48(%esp)\n"
/* save true return address on eflags */
- " movl %eax, 48(%esp)\n"
+ " movl %eax, 52(%esp)\n"
" popl %ebx\n"
" popl %ecx\n"
" popl %edx\n"
@@ -384,8 +387,8 @@ no_kprobe:
" popl %edi\n"
" popl %ebp\n"
" popl %eax\n"
- /* skip eip, orig_eax, es, ds */
- " addl $16, %esp\n"
+ /* skip eip, orig_eax, es, ds, gs */
+ " addl $20, %esp\n"
" popf\n"
" ret\n");
}
@@ -404,6 +407,10 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
INIT_HLIST_HEAD(&empty_rp);
spin_lock_irqsave(&kretprobe_lock, flags);
head = kretprobe_inst_table_head(current);
+ /* fixup registers */
+ regs->xcs = __KERNEL_CS;
+ regs->eip = trampoline_address;
+ regs->orig_eax = 0xffffffff;
/*
* It is possible to have multiple instances associated with a given
@@ -425,6 +432,7 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
if (ri->rp && ri->rp->handler){
__get_cpu_var(current_kprobe) = &ri->rp->kp;
+ get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->rp->handler(ri, regs);
__get_cpu_var(current_kprobe) = NULL;
}
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 9b9479768d5..23f5984d065 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -577,7 +577,7 @@ static void microcode_init_cpu(int cpu)
set_cpus_allowed(current, cpumask_of_cpu(cpu));
mutex_lock(&microcode_mutex);
collect_cpu_info(cpu);
- if (uci->valid)
+ if (uci->valid && system_state == SYSTEM_RUNNING)
cpu_request_microcode(cpu);
mutex_unlock(&microcode_mutex);
set_cpus_allowed(current, old);
@@ -656,14 +656,18 @@ static struct attribute_group mc_attr_group = {
static int mc_sysdev_add(struct sys_device *sys_dev)
{
- int cpu = sys_dev->id;
+ int err, cpu = sys_dev->id;
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
if (!cpu_online(cpu))
return 0;
+
pr_debug("Microcode:CPU %d added\n", cpu);
memset(uci, 0, sizeof(*uci));
- sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
+
+ err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
+ if (err)
+ return err;
microcode_init_cpu(cpu);
return 0;
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 3e8e3adb048..eaafe233a5d 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -219,11 +219,11 @@ static int __init check_nmi_watchdog(void)
int cpu;
/* Enable NMI watchdog for newer systems.
- Actually it should be safe for most systems before 2004 too except
- for some IBM systems that corrupt registers when NMI happens
- during SMM. Unfortunately we don't have more exact information
- on these and use this coarse check. */
- if (nmi_watchdog == NMI_DEFAULT && dmi_get_year(DMI_BIOS_DATE) >= 2004)
+ Probably safe on most older systems too, but let's be careful.
+ IBM ThinkPads use INT10 inside SMM and that allows early NMI inside SMM
+ which hangs the system. Disable watchdog for all thinkpads */
+ if (nmi_watchdog == NMI_DEFAULT && dmi_get_year(DMI_BIOS_DATE) >= 2004 &&
+ !dmi_name_in_vendors("ThinkPad"))
nmi_watchdog = NMI_LOCAL_APIC;
if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index b0a07801d9d..dd53c58f64f 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -205,7 +205,7 @@ void cpu_idle(void)
void cpu_idle_wait(void)
{
unsigned int cpu, this_cpu = get_cpu();
- cpumask_t map;
+ cpumask_t map, tmp = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
put_cpu();
@@ -227,6 +227,8 @@ void cpu_idle_wait(void)
}
cpus_and(map, map, cpu_online_map);
} while (!cpus_empty(map));
+
+ set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
@@ -236,20 +238,28 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
* We execute MONITOR against need_resched and enter optimized wait state
* through MWAIT. Whenever someone changes need_resched, we would be woken
* up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
*/
-static void mwait_idle(void)
+void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
- local_irq_enable();
-
- while (!need_resched()) {
+ if (!need_resched()) {
__monitor((void *)&current_thread_info()->flags, 0, 0);
smp_mb();
- if (need_resched())
- break;
- __mwait(0, 0);
+ if (!need_resched())
+ __mwait(eax, ecx);
}
}
+/* Default MONITOR/MWAIT with no hints, used for default C1 state */
+static void mwait_idle(void)
+{
+ local_irq_enable();
+ while (!need_resched())
+ mwait_idle_with_hints(0, 0);
+}
+
void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_MWAIT)) {
@@ -328,7 +338,6 @@ extern void kernel_thread_helper(void);
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
struct pt_regs regs;
- int err;
memset(&regs, 0, sizeof(regs));
@@ -343,10 +352,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
/* Ok, create the new process.. */
- err = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
- if (err == 0) /* terminate kernel stack */
- task_pt_regs(current)->eip = 0;
- return err;
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 000cf03751f..141041dde74 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -846,7 +846,7 @@ efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
static int __init
efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
{
- memory_present(0, start, end);
+ memory_present(0, PFN_UP(start), PFN_DOWN(end));
return 0;
}
@@ -1083,16 +1083,15 @@ static unsigned long __init setup_memory(void)
void __init zone_sizes_init(void)
{
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+ max_zone_pfns[ZONE_DMA] =
+ virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
- unsigned long max_zone_pfns[MAX_NR_ZONES] = {
- virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT,
- max_low_pfn,
- highend_pfn};
+ max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
add_active_range(0, 0, highend_pfn);
#else
- unsigned long max_zone_pfns[MAX_NR_ZONES] = {
- virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT,
- max_low_pfn};
add_active_range(0, 0, max_low_pfn);
#endif
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 7e639f78b0b..2697e9210e9 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -318,3 +318,4 @@ ENTRY(sys_call_table)
.long sys_vmsplice
.long sys_move_pages
.long sys_getcpu
+ .long sys_epoll_pwait
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 00489b706d2..fe9c5e8e7e6 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -129,15 +129,19 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
#ifdef CONFIG_FRAME_POINTER
while (valid_stack_ptr(tinfo, (void *)ebp)) {
+ unsigned long new_ebp;
addr = *(unsigned long *)(ebp + 4);
ops->address(data, addr);
/*
* break out of recursive entries (such as
- * end_of_stack_stop_unwind_function):
+ * end_of_stack_stop_unwind_function). Also,
+ * we can never allow a frame pointer to
+ * move downwards!
*/
- if (ebp == *(unsigned long *)ebp)
+ new_ebp = *(unsigned long *)ebp;
+ if (new_ebp <= ebp)
break;
- ebp = *(unsigned long *)ebp;
+ ebp = new_ebp;
}
#else
while (valid_stack_ptr(tinfo, stack)) {
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index b8fa0a8b2e4..fbc95828cd7 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -349,8 +349,8 @@ static int tsc_update_callback(void)
int change = 0;
/* check to see if we should switch to the safe clocksource: */
- if (clocksource_tsc.rating != 50 && check_tsc_unstable()) {
- clocksource_tsc.rating = 50;
+ if (clocksource_tsc.rating != 0 && check_tsc_unstable()) {
+ clocksource_tsc.rating = 0;
clocksource_reselect();
change = 1;
}
@@ -461,7 +461,7 @@ static int __init init_tsc_clocksource(void)
clocksource_tsc.shift);
/* lower the rating if we already know its unstable: */
if (check_tsc_unstable())
- clocksource_tsc.rating = 50;
+ clocksource_tsc.rating = 0;
init_timer(&verify_tsc_freq_timer);
verify_tsc_freq_timer.function = verify_tsc_freq;
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 1e7ac1c44dd..c6f84a0322b 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -51,6 +51,7 @@ SECTIONS
__tracedata_end = .;
/* writeable */
+ . = ALIGN(4096);
.data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */
*(.data)
CONSTRUCTORS
@@ -126,13 +127,7 @@ SECTIONS
__setup_end = .;
__initcall_start = .;
.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
- *(.initcall1.init)
- *(.initcall2.init)
- *(.initcall3.init)
- *(.initcall4.init)
- *(.initcall5.init)
- *(.initcall6.init)
- *(.initcall7.init)
+ INITCALLS
}
__initcall_end = .;
__con_initcall_start = .;
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index 08502fc6d0c..d22cfc9d656 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -9,6 +9,7 @@
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
+#include <linux/backing-dev.h>
#include <asm/uaccess.h>
#include <asm/mmx.h>
@@ -179,7 +180,7 @@ __clear_user(void __user *to, unsigned long n)
EXPORT_SYMBOL(__clear_user);
/**
- * strlen_user: - Get the size of a string in user space.
+ * strnlen_user: - Get the size of a string in user space.
* @s: The string to measure.
* @n: The maximum valid length
*
@@ -741,7 +742,7 @@ survive:
if (retval == -ENOMEM && is_init(current)) {
up_read(&current->mm->mmap_sem);
- blk_congestion_wait(WRITE, HZ/50);
+ congestion_wait(WRITE, HZ/50);
goto survive;
}
diff --git a/arch/i386/mach-visws/visws_apic.c b/arch/i386/mach-visws/visws_apic.c
index 07097ed4889..38c2b13124d 100644
--- a/arch/i386/mach-visws/visws_apic.c
+++ b/arch/i386/mach-visws/visws_apic.c
@@ -122,7 +122,7 @@ static void end_cobalt_irq(unsigned int irq)
spin_unlock_irqrestore(&cobalt_lock, flags);
}
-static struct hw_interrupt_type cobalt_irq_type = {
+static struct irq_chip cobalt_irq_type = {
.typename = "Cobalt-APIC",
.startup = startup_cobalt_irq,
.shutdown = disable_cobalt_irq,
@@ -159,7 +159,7 @@ static void end_piix4_master_irq(unsigned int irq)
spin_unlock_irqrestore(&cobalt_lock, flags);
}
-static struct hw_interrupt_type piix4_master_irq_type = {
+static struct irq_chip piix4_master_irq_type = {
.typename = "PIIX4-master",
.startup = startup_piix4_master_irq,
.ack = ack_cobalt_irq,
@@ -167,9 +167,8 @@ static struct hw_interrupt_type piix4_master_irq_type = {
};
-static struct hw_interrupt_type piix4_virtual_irq_type = {
+static struct irq_chip piix4_virtual_irq_type = {
.typename = "PIIX4-virtual",
- .startup = startup_8259A_irq,
.shutdown = disable_8259A_irq,
.enable = enable_8259A_irq,
.disable = disable_8259A_irq,
diff --git a/arch/i386/mach-voyager/voyager_basic.c b/arch/i386/mach-voyager/voyager_basic.c
index c639d30d8bd..8fe7e4593d5 100644
--- a/arch/i386/mach-voyager/voyager_basic.c
+++ b/arch/i386/mach-voyager/voyager_basic.c
@@ -44,7 +44,7 @@ struct voyager_SUS *voyager_SUS = NULL;
#ifdef CONFIG_SMP
static void
-voyager_dump(int dummy1, struct pt_regs *dummy2, struct tty_struct *dummy3)
+voyager_dump(int dummy1, struct tty_struct *dummy3)
{
/* get here via a sysrq */
voyager_smp_dump();
@@ -166,7 +166,7 @@ voyager_memory_detect(int region, __u32 *start, __u32 *length)
* off the timer tick to the SMP code, since the VIC doesn't have an
* internal timer (The QIC does, but that's another story). */
void
-voyager_timer_interrupt(struct pt_regs *regs)
+voyager_timer_interrupt(void)
{
if((jiffies & 0x3ff) == 0) {
@@ -202,7 +202,7 @@ voyager_timer_interrupt(struct pt_regs *regs)
}
}
#ifdef CONFIG_SMP
- smp_vic_timer_interrupt(regs);
+ smp_vic_timer_interrupt();
#endif
}
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index d42422fc4af..f3fea2ad50f 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -85,8 +85,8 @@ static int ack_QIC_CPI(__u8 cpi);
static void ack_special_QIC_CPI(__u8 cpi);
static void ack_VIC_CPI(__u8 cpi);
static void send_CPI_allbutself(__u8 cpi);
-static void enable_vic_irq(unsigned int irq);
-static void disable_vic_irq(unsigned int irq);
+static void mask_vic_irq(unsigned int irq);
+static void unmask_vic_irq(unsigned int irq);
static unsigned int startup_vic_irq(unsigned int irq);
static void enable_local_vic_irq(unsigned int irq);
static void disable_local_vic_irq(unsigned int irq);
@@ -205,15 +205,12 @@ ack_CPI(__u8 cpi)
/* The VIC IRQ descriptors -- these look almost identical to the
* 8259 IRQs except that masks and things must be kept per processor
*/
-static struct hw_interrupt_type vic_irq_type = {
- .typename = "VIC-level",
- .startup = startup_vic_irq,
- .shutdown = disable_vic_irq,
- .enable = enable_vic_irq,
- .disable = disable_vic_irq,
- .ack = before_handle_vic_irq,
- .end = after_handle_vic_irq,
- .set_affinity = set_vic_irq_affinity,
+static struct irq_chip vic_chip = {
+ .name = "VIC",
+ .startup = startup_vic_irq,
+ .mask = mask_vic_irq,
+ .unmask = unmask_vic_irq,
+ .set_affinity = set_vic_irq_affinity,
};
/* used to count up as CPUs are brought on line (starts at 0) */
@@ -1144,9 +1141,9 @@ smp_apic_timer_interrupt(struct pt_regs *regs)
fastcall void
smp_qic_timer_interrupt(struct pt_regs *regs)
{
- ack_QIC_CPI(QIC_TIMER_CPI);
struct pt_regs *old_regs = set_irq_regs(regs);
- wrapper_smp_local_timer_interrupt(void);
+ ack_QIC_CPI(QIC_TIMER_CPI);
+ wrapper_smp_local_timer_interrupt();
set_irq_regs(old_regs);
}
@@ -1270,12 +1267,10 @@ smp_send_stop(void)
/* this function is triggered in time.c when a clock tick fires
* we need to re-broadcast the tick to all CPUs */
void
-smp_vic_timer_interrupt(struct pt_regs *regs)
+smp_vic_timer_interrupt(void)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
send_CPI_allbutself(VIC_TIMER_CPI);
smp_local_timer_interrupt();
- set_irq_regs(old_regs);
}
/* local (per CPU) timer interrupt. It does both profiling and
@@ -1310,7 +1305,7 @@ smp_local_timer_interrupt(void)
per_cpu(prof_counter, cpu);
}
- update_process_times(user_mode_vm(irq_regs));
+ update_process_times(user_mode_vm(get_irq_regs()));
}
if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
@@ -1397,6 +1392,17 @@ setup_profiling_timer(unsigned int multiplier)
return 0;
}
+/* This is a bit of a mess, but forced on us by the genirq changes
+ * there's no genirq handler that really does what voyager wants
+ * so hack it up with the simple IRQ handler */
+static void fastcall
+handle_vic_irq(unsigned int irq, struct irq_desc *desc)
+{
+ before_handle_vic_irq(irq);
+ handle_simple_irq(irq, desc);
+ after_handle_vic_irq(irq);
+}
+
/* The CPIs are handled in the per cpu 8259s, so they must be
* enabled to be received: FIX: enabling the CPIs in the early
@@ -1433,7 +1439,7 @@ smp_intr_init(void)
* This is for later: first 16 correspond to PC IRQs; next 16
* are Primary MC IRQs and final 16 are Secondary MC IRQs */
for(i = 0; i < 48; i++)
- irq_desc[i].chip = &vic_irq_type;
+ set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
}
/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
@@ -1531,7 +1537,7 @@ ack_VIC_CPI(__u8 cpi)
static unsigned int
startup_vic_irq(unsigned int irq)
{
- enable_vic_irq(irq);
+ unmask_vic_irq(irq);
return 0;
}
@@ -1558,7 +1564,7 @@ startup_vic_irq(unsigned int irq)
* adjust their masks accordingly. */
static void
-enable_vic_irq(unsigned int irq)
+unmask_vic_irq(unsigned int irq)
{
/* linux doesn't to processor-irq affinity, so enable on
* all CPUs we know about */
@@ -1567,7 +1573,7 @@ enable_vic_irq(unsigned int irq)
__u32 processorList = 0;
unsigned long flags;
- VDEBUG(("VOYAGER: enable_vic_irq(%d) CPU%d affinity 0x%lx\n",
+ VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
irq, cpu, cpu_irq_affinity[cpu]));
spin_lock_irqsave(&vic_irq_lock, flags);
for_each_online_cpu(real_cpu) {
@@ -1591,7 +1597,7 @@ enable_vic_irq(unsigned int irq)
}
static void
-disable_vic_irq(unsigned int irq)
+mask_vic_irq(unsigned int irq)
{
/* lazy disable, do nothing */
}
@@ -1819,7 +1825,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
* disabled again as it comes in (voyager lazy disable). If
* the affinity map is tightened to disable the interrupt on a
* cpu, it will be pushed off when it comes in */
- enable_vic_irq(irq);
+ unmask_vic_irq(irq);
}
static void
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 455597db84d..ddbdb0336f2 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -356,11 +356,12 @@ void __init numa_kva_reserve(void)
void __init zone_sizes_init(void)
{
int nid;
- unsigned long max_zone_pfns[MAX_NR_ZONES] = {
- virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT,
- max_low_pfn,
- highend_pfn
- };
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+ max_zone_pfns[ZONE_DMA] =
+ virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+ max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
/* If SRAT has not registered memory, register it now */
if (find_max_pfn_with_active_regions() == 0) {
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c
index 68bce194e68..cdfcf971098 100644
--- a/arch/i386/pci/common.c
+++ b/arch/i386/pci/common.c
@@ -20,6 +20,7 @@
unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
PCI_PROBE_MMCONF;
+int pci_bf_sort;
int pci_routeirq;
int pcibios_last_bus = -1;
unsigned long pirq_table_addr;
@@ -118,6 +119,20 @@ void __devinit pcibios_fixup_bus(struct pci_bus *b)
}
/*
+ * Only use DMI information to set this if nothing was passed
+ * on the kernel command line (which was parsed earlier).
+ */
+
+static int __devinit set_bf_sort(struct dmi_system_id *d)
+{
+ if (pci_bf_sort == pci_bf_sort_default) {
+ pci_bf_sort = pci_dmi_bf;
+ printk(KERN_INFO "PCI: %s detected, enabling pci=bfsort.\n", d->ident);
+ }
+ return 0;
+}
+
+/*
* Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
*/
#ifdef __i386__
@@ -130,11 +145,11 @@ static int __devinit assign_all_busses(struct dmi_system_id *d)
}
#endif
+static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
+#ifdef __i386__
/*
* Laptops which need pci=assign-busses to see Cardbus cards
*/
-static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
-#ifdef __i386__
{
.callback = assign_all_busses,
.ident = "Samsung X20 Laptop",
@@ -144,6 +159,38 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
},
},
#endif /* __i386__ */
+ {
+ .callback = set_bf_sort,
+ .ident = "Dell PowerEdge 1950",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"),
+ },
+ },
+ {
+ .callback = set_bf_sort,
+ .ident = "Dell PowerEdge 1955",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1955"),
+ },
+ },
+ {
+ .callback = set_bf_sort,
+ .ident = "Dell PowerEdge 2900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2900"),
+ },
+ },
+ {
+ .callback = set_bf_sort,
+ .ident = "Dell PowerEdge 2950",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2950"),
+ },
+ },
{}
};
@@ -189,6 +236,8 @@ static int __init pcibios_init(void)
pcibios_resource_survey();
+ if (pci_bf_sort >= pci_force_bf)
+ pci_sort_breadthfirst();
#ifdef CONFIG_PCI_BIOS
if ((pci_probe & PCI_BIOS_SORT) && !(pci_probe & PCI_NO_SORT))
pcibios_sort();
@@ -203,6 +252,12 @@ char * __devinit pcibios_setup(char *str)
if (!strcmp(str, "off")) {
pci_probe = 0;
return NULL;
+ } else if (!strcmp(str, "bfsort")) {
+ pci_bf_sort = pci_force_bf;
+ return NULL;
+ } else if (!strcmp(str, "nobfsort")) {
+ pci_bf_sort = pci_force_nobf;
+ return NULL;
}
#ifdef CONFIG_PCI_BIOS
else if (!strcmp(str, "bios")) {
@@ -288,7 +343,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
void pcibios_disable_device (struct pci_dev *dev)
{
- pcibios_disable_resources(dev);
if (pcibios_disable_irq)
pcibios_disable_irq(dev);
}
diff --git a/arch/i386/pci/fixup.c b/arch/i386/pci/fixup.c
index b60d7e8689e..c1949ff38d6 100644
--- a/arch/i386/pci/fixup.c
+++ b/arch/i386/pci/fixup.c
@@ -348,8 +348,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC1, pcie_r
* From information provided by "Jon Smirl" <jonsmirl@gmail.com>
*
* The standard boot ROM sequence for an x86 machine uses the BIOS
- * to select an initial video card for boot display. This boot video
- * card will have it's BIOS copied to C0000 in system RAM.
+ * to select an initial video card for boot display. This boot video
+ * card will have it's BIOS copied to C0000 in system RAM.
* IORESOURCE_ROM_SHADOW is used to associate the boot video
* card with this copy. On laptops this copy has to be used since
* the main ROM may be compressed or combined with another image.
@@ -371,7 +371,17 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
bus = pdev->bus;
while (bus) {
bridge = bus->self;
- if (bridge) {
+
+ /*
+ * From information provided by
+ * "David Miller" <davem@davemloft.net>
+ * The bridge control register is valid for PCI header
+ * type BRIDGE, or CARDBUS. Host to PCI controllers use
+ * PCI header type NORMAL.
+ */
+ if (bridge
+ &&((bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ ||(bridge->hdr_type == PCI_HEADER_TYPE_CARDBUS))) {
pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
&config);
if (!(config & PCI_BRIDGE_CTL_VGA))
diff --git a/arch/i386/pci/i386.c b/arch/i386/pci/i386.c
index 10154a2cac6..98580292f0d 100644
--- a/arch/i386/pci/i386.c
+++ b/arch/i386/pci/i386.c
@@ -242,15 +242,6 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
return 0;
}
-void pcibios_disable_resources(struct pci_dev *dev)
-{
- u16 cmd;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
- pci_write_config_word(dev, PCI_COMMAND, cmd);
-}
-
/*
* If we set up a device for bus mastering, we need to check the latency
* timer as certain crappy BIOSes forget to set it properly.
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index dbc4aae9195..69163998ade 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -255,13 +255,13 @@ static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
*/
static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
- static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
+ static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
}
static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
- static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
+ static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
return 1;
}
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c
index d0c3da3aa2a..c6b6d9bbc45 100644
--- a/arch/i386/pci/mmconfig.c
+++ b/arch/i386/pci/mmconfig.c
@@ -154,38 +154,6 @@ static struct pci_raw_ops pci_mmcfg = {
.write = pci_mmcfg_write,
};
-
-static __init void pci_mmcfg_insert_resources(void)
-{
-#define PCI_MMCFG_RESOURCE_NAME_LEN 19
- int i;
- struct resource *res;
- char *names;
- unsigned num_buses;
-
- res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res),
- pci_mmcfg_config_num, GFP_KERNEL);
-
- if (!res) {
- printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n");
- return;
- }
-
- names = (void *)&res[pci_mmcfg_config_num];
- for (i = 0; i < pci_mmcfg_config_num; i++, res++) {
- num_buses = pci_mmcfg_config[i].end_bus_number -
- pci_mmcfg_config[i].start_bus_number + 1;
- res->name = names;
- snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %u",
- pci_mmcfg_config[i].pci_segment_group_number);
- res->start = pci_mmcfg_config[i].base_address;
- res->end = res->start + (num_buses << 20) - 1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- insert_resource(&iomem_resource, res);
- names += PCI_MMCFG_RESOURCE_NAME_LEN;
- }
-}
-
/* K8 systems have some devices (typically in the builtin northbridge)
that are only accessible using type1
Normally this can be expressed in the MCFG by not listing them
@@ -222,8 +190,6 @@ static __init void unreachable_devices(void)
}
}
-
-
void __init pci_mmcfg_init(int type)
{
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
@@ -251,5 +217,4 @@ void __init pci_mmcfg_init(int type)
pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
unreachable_devices();
- pci_mmcfg_insert_resources();
}
diff --git a/arch/i386/pci/pci.h b/arch/i386/pci/pci.h
index 1814f74569c..a0a25180b61 100644
--- a/arch/i386/pci/pci.h
+++ b/arch/i386/pci/pci.h
@@ -30,13 +30,19 @@
extern unsigned int pci_probe;
extern unsigned long pirq_table_addr;
+enum pci_bf_sort_state {
+ pci_bf_sort_default,
+ pci_force_nobf,
+ pci_force_bf,
+ pci_dmi_bf,
+};
+
/* pci-i386.c */
extern unsigned int pcibios_max_latency;
void pcibios_resource_survey(void);
int pcibios_enable_resources(struct pci_dev *, int);
-void pcibios_disable_resources(struct pci_dev *);
/* pci-pc.c */