author	Russell King <rmk+kernel@arm.linux.org.uk>	2010-03-15 14:27:06 +0000
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-03-15 14:27:06 +0000
commit	2d3b5fa3a39d16c880bda3cf2bd9dd6ed5a01f74 (patch)
tree	e20283fe2ed46aa35c8ca5fc1724ba067cd2e2f8 /arch
parent	3f17522ce461a31e7ced6311b28fcf5b8a763316 (diff)
parent	7278a22143b003e9af7b9ca1b5f1c40ae4b55d98 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/lethal/genesis-2.6
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/kernel/perf_event.c	4
-rw-r--r--	arch/arm/mach-shmobile/board-ap4evb.c	32
-rw-r--r--	arch/arm/mach-shmobile/board-g3evm.c	122
-rw-r--r--	arch/arm/mach-shmobile/board-g4evm.c	57
-rw-r--r--	arch/arm/mach-shmobile/clock-sh7367.c	7
-rw-r--r--	arch/arm/mach-shmobile/intc-sh7367.c	46
-rw-r--r--	arch/arm/mach-shmobile/intc-sh7372.c	46
-rw-r--r--	arch/arm/mach-shmobile/intc-sh7377.c	36
-rw-r--r--	arch/powerpc/kernel/perf_event.c	8
-rw-r--r--	arch/sparc/kernel/perf_event.c	2
-rw-r--r--	arch/x86/Kconfig	4
-rw-r--r--	arch/x86/include/asm/hw_breakpoint.h	1
-rw-r--r--	arch/x86/include/asm/perf_event.h	16
-rw-r--r--	arch/x86/kernel/aperture_64.c	1
-rw-r--r--	arch/x86/kernel/apic/x2apic_uv_x.c	4
-rw-r--r--	arch/x86/kernel/cpu/intel.c	3
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce.c	15
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_intel.c	4
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	39
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	37
-rw-r--r--	arch/x86/kernel/cpu/perf_event_p6.c	8
-rw-r--r--	arch/x86/kernel/cpu/perfctr-watchdog.c	2
-rw-r--r--	arch/x86/kernel/dumpstack_64.c	10
-rw-r--r--	arch/x86/kernel/hw_breakpoint.c	12
-rw-r--r--	arch/x86/kernel/k8.c	14
-rw-r--r--	arch/x86/kernel/pci-gart_64.c	2
-rw-r--r--	arch/x86/kernel/process.c	2
-rw-r--r--	arch/x86/mm/pageattr.c	25
-rw-r--r--	arch/x86/oprofile/op_model_amd.c	23
-rw-r--r--	arch/x86/oprofile/op_model_ppro.c	6
30 files changed, 431 insertions, 157 deletions
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index c45a155a73d..9e70f2053f9 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -966,7 +966,7 @@ armv6pmu_handle_irq(int irq_num,
*/
armv6_pmcr_write(pmcr);
- data.addr = 0;
+ perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
@@ -1946,7 +1946,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
*/
regs = get_irq_regs();
- data.addr = 0;
+ perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index a0463d92644..1c2ec96ce26 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -206,10 +206,32 @@ static struct platform_device keysc_device = {
},
};
+/* SDHI0 */
+static struct resource sdhi0_resources[] = {
+ [0] = {
+ .name = "SDHI0",
+ .start = 0xe6850000,
+ .end = 0xe68501ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 96,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device sdhi0_device = {
+ .name = "sh_mobile_sdhi",
+ .num_resources = ARRAY_SIZE(sdhi0_resources),
+ .resource = sdhi0_resources,
+ .id = 0,
+};
+
static struct platform_device *ap4evb_devices[] __initdata = {
&nor_flash_device,
&smc911x_device,
&keysc_device,
+ &sdhi0_device,
};
static struct map_desc ap4evb_io_desc[] __initdata = {
@@ -286,6 +308,16 @@ static void __init ap4evb_init(void)
gpio_request(GPIO_FN_KEYIN3_133, NULL);
gpio_request(GPIO_FN_KEYIN4, NULL);
+ /* SDHI0 */
+ gpio_request(GPIO_FN_SDHICD0, NULL);
+ gpio_request(GPIO_FN_SDHIWP0, NULL);
+ gpio_request(GPIO_FN_SDHICMD0, NULL);
+ gpio_request(GPIO_FN_SDHICLK0, NULL);
+ gpio_request(GPIO_FN_SDHID0_3, NULL);
+ gpio_request(GPIO_FN_SDHID0_2, NULL);
+ gpio_request(GPIO_FN_SDHID0_1, NULL);
+ gpio_request(GPIO_FN_SDHID0_0, NULL);
+
sh7372_add_standard_devices();
platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));
diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c
index f36c9a94d32..9247503296c 100644
--- a/arch/arm/mach-shmobile/board-g3evm.c
+++ b/arch/arm/mach-shmobile/board-g3evm.c
@@ -26,9 +26,12 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
+#include <linux/mtd/sh_flctl.h>
#include <linux/usb/r8a66597.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/input/sh_keysc.h>
#include <mach/sh7367.h>
#include <mach/common.h>
#include <asm/mach-types.h>
@@ -127,9 +130,90 @@ static struct platform_device usb_host_device = {
.resource = usb_host_resources,
};
+/* KEYSC */
+static struct sh_keysc_info keysc_info = {
+ .mode = SH_KEYSC_MODE_5,
+ .scan_timing = 3,
+ .delay = 100,
+ .keycodes = {
+ KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, KEY_G,
+ KEY_H, KEY_I, KEY_J, KEY_K, KEY_L, KEY_M, KEY_N,
+ KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U,
+ KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z, KEY_HOME, KEY_SLEEP,
+ KEY_WAKEUP, KEY_COFFEE, KEY_0, KEY_1, KEY_2, KEY_3, KEY_4,
+ KEY_5, KEY_6, KEY_7, KEY_8, KEY_9, KEY_STOP, KEY_COMPUTER,
+ },
+};
+
+static struct resource keysc_resources[] = {
+ [0] = {
+ .name = "KEYSC",
+ .start = 0xe61b0000,
+ .end = 0xe61b000f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 79,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device keysc_device = {
+ .name = "sh_keysc",
+ .num_resources = ARRAY_SIZE(keysc_resources),
+ .resource = keysc_resources,
+ .dev = {
+ .platform_data = &keysc_info,
+ },
+};
+
+static struct mtd_partition nand_partition_info[] = {
+ {
+ .name = "system",
+ .offset = 0,
+ .size = 64 * 1024 * 1024,
+ },
+ {
+ .name = "userdata",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 128 * 1024 * 1024,
+ },
+ {
+ .name = "cache",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 64 * 1024 * 1024,
+ },
+};
+
+static struct resource nand_flash_resources[] = {
+ [0] = {
+ .start = 0xe6a30000,
+ .end = 0xe6a3009b,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct sh_flctl_platform_data nand_flash_data = {
+ .parts = nand_partition_info,
+ .nr_parts = ARRAY_SIZE(nand_partition_info),
+ .flcmncr_val = QTSEL_E | FCKSEL_E | TYPESEL_SET | NANWF_E
+ | SHBUSSEL | SEL_16BIT,
+};
+
+static struct platform_device nand_flash_device = {
+ .name = "sh_flctl",
+ .resource = nand_flash_resources,
+ .num_resources = ARRAY_SIZE(nand_flash_resources),
+ .dev = {
+ .platform_data = &nand_flash_data,
+ },
+};
+
static struct platform_device *g3evm_devices[] __initdata = {
&nor_flash_device,
&usb_host_device,
+ &keysc_device,
+ &nand_flash_device,
};
static struct map_desc g3evm_io_desc[] __initdata = {
@@ -196,6 +280,44 @@ static void __init g3evm_init(void)
__raw_writew(0x6010, 0xe60581c6); /* CGPOSR */
__raw_writew(0x8a0a, 0xe605810c); /* USBCR2 */
+ /* KEYSC @ CN7 */
+ gpio_request(GPIO_FN_PORT42_KEYOUT0, NULL);
+ gpio_request(GPIO_FN_PORT43_KEYOUT1, NULL);
+ gpio_request(GPIO_FN_PORT44_KEYOUT2, NULL);
+ gpio_request(GPIO_FN_PORT45_KEYOUT3, NULL);
+ gpio_request(GPIO_FN_PORT46_KEYOUT4, NULL);
+ gpio_request(GPIO_FN_PORT47_KEYOUT5, NULL);
+ gpio_request(GPIO_FN_PORT48_KEYIN0_PU, NULL);
+ gpio_request(GPIO_FN_PORT49_KEYIN1_PU, NULL);
+ gpio_request(GPIO_FN_PORT50_KEYIN2_PU, NULL);
+ gpio_request(GPIO_FN_PORT55_KEYIN3_PU, NULL);
+ gpio_request(GPIO_FN_PORT56_KEYIN4_PU, NULL);
+ gpio_request(GPIO_FN_PORT57_KEYIN5_PU, NULL);
+ gpio_request(GPIO_FN_PORT58_KEYIN6_PU, NULL);
+
+ /* FLCTL */
+ gpio_request(GPIO_FN_FCE0, NULL);
+ gpio_request(GPIO_FN_D0_ED0_NAF0, NULL);
+ gpio_request(GPIO_FN_D1_ED1_NAF1, NULL);
+ gpio_request(GPIO_FN_D2_ED2_NAF2, NULL);
+ gpio_request(GPIO_FN_D3_ED3_NAF3, NULL);
+ gpio_request(GPIO_FN_D4_ED4_NAF4, NULL);
+ gpio_request(GPIO_FN_D5_ED5_NAF5, NULL);
+ gpio_request(GPIO_FN_D6_ED6_NAF6, NULL);
+ gpio_request(GPIO_FN_D7_ED7_NAF7, NULL);
+ gpio_request(GPIO_FN_D8_ED8_NAF8, NULL);
+ gpio_request(GPIO_FN_D9_ED9_NAF9, NULL);
+ gpio_request(GPIO_FN_D10_ED10_NAF10, NULL);
+ gpio_request(GPIO_FN_D11_ED11_NAF11, NULL);
+ gpio_request(GPIO_FN_D12_ED12_NAF12, NULL);
+ gpio_request(GPIO_FN_D13_ED13_NAF13, NULL);
+ gpio_request(GPIO_FN_D14_ED14_NAF14, NULL);
+ gpio_request(GPIO_FN_D15_ED15_NAF15, NULL);
+ gpio_request(GPIO_FN_WE0_XWR0_FWE, NULL);
+ gpio_request(GPIO_FN_FRB, NULL);
+ /* FOE, FCDE, FSC on dedicated pins */
+ __raw_writel(__raw_readl(0xe6158048) & ~(1 << 15), 0xe6158048);
+
sh7367_add_standard_devices();
platform_add_devices(g3evm_devices, ARRAY_SIZE(g3evm_devices));
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index 5acd623f93e..10673a90be5 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -28,6 +28,8 @@
#include <linux/mtd/physmap.h>
#include <linux/usb/r8a66597.h>
#include <linux/io.h>
+#include <linux/input.h>
+#include <linux/input/sh_keysc.h>
#include <linux/gpio.h>
#include <mach/sh7377.h>
#include <mach/common.h>
@@ -128,9 +130,49 @@ static struct platform_device usb_host_device = {
.resource = usb_host_resources,
};
+/* KEYSC */
+static struct sh_keysc_info keysc_info = {
+ .mode = SH_KEYSC_MODE_5,
+ .scan_timing = 3,
+ .delay = 100,
+ .keycodes = {
+ KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F,
+ KEY_G, KEY_H, KEY_I, KEY_J, KEY_K, KEY_L,
+	KEY_M, KEY_N, KEY_O, KEY_P, KEY_Q, KEY_R,
+ KEY_S, KEY_T, KEY_U, KEY_V, KEY_W, KEY_X,
+ KEY_Y, KEY_Z, KEY_HOME, KEY_SLEEP, KEY_WAKEUP, KEY_COFFEE,
+ KEY_0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5,
+ KEY_6, KEY_7, KEY_8, KEY_9, KEY_STOP, KEY_COMPUTER,
+ },
+};
+
+static struct resource keysc_resources[] = {
+ [0] = {
+ .name = "KEYSC",
+ .start = 0xe61b0000,
+ .end = 0xe61b000f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 79,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device keysc_device = {
+ .name = "sh_keysc",
+ .id = 0, /* keysc0 clock */
+ .num_resources = ARRAY_SIZE(keysc_resources),
+ .resource = keysc_resources,
+ .dev = {
+ .platform_data = &keysc_info,
+ },
+};
+
static struct platform_device *g4evm_devices[] __initdata = {
&nor_flash_device,
&usb_host_device,
+ &keysc_device,
};
static struct map_desc g4evm_io_desc[] __initdata = {
@@ -196,6 +238,21 @@ static void __init g4evm_init(void)
__raw_writew(0x6010, 0xe60581c6); /* CGPOSR */
__raw_writew(0x8a0a, 0xe605810c); /* USBCR2 */
+ /* KEYSC @ CN31 */
+ gpio_request(GPIO_FN_PORT60_KEYOUT5, NULL);
+ gpio_request(GPIO_FN_PORT61_KEYOUT4, NULL);
+ gpio_request(GPIO_FN_PORT62_KEYOUT3, NULL);
+ gpio_request(GPIO_FN_PORT63_KEYOUT2, NULL);
+ gpio_request(GPIO_FN_PORT64_KEYOUT1, NULL);
+ gpio_request(GPIO_FN_PORT65_KEYOUT0, NULL);
+ gpio_request(GPIO_FN_PORT66_KEYIN0_PU, NULL);
+ gpio_request(GPIO_FN_PORT67_KEYIN1_PU, NULL);
+ gpio_request(GPIO_FN_PORT68_KEYIN2_PU, NULL);
+ gpio_request(GPIO_FN_PORT69_KEYIN3_PU, NULL);
+ gpio_request(GPIO_FN_PORT70_KEYIN4_PU, NULL);
+ gpio_request(GPIO_FN_PORT71_KEYIN5_PU, NULL);
+ gpio_request(GPIO_FN_PORT72_KEYIN6_PU, NULL);
+
sh7377_add_standard_devices();
platform_add_devices(g4evm_devices, ARRAY_SIZE(g4evm_devices));
diff --git a/arch/arm/mach-shmobile/clock-sh7367.c b/arch/arm/mach-shmobile/clock-sh7367.c
index 58bd54e1113..bb940c6e4e6 100644
--- a/arch/arm/mach-shmobile/clock-sh7367.c
+++ b/arch/arm/mach-shmobile/clock-sh7367.c
@@ -75,6 +75,11 @@ static struct clk usb0_clk = {
.name = "usb0",
};
+/* a static keysc0 clk for now - enough to get sh_keysc working */
+static struct clk keysc0_clk = {
+ .name = "keysc0",
+};
+
static struct clk_lookup lookups[] = {
{
.clk = &peripheral_clk,
@@ -82,6 +87,8 @@ static struct clk_lookup lookups[] = {
.clk = &r_clk,
}, {
.clk = &usb0_clk,
+ }, {
+ .clk = &keysc0_clk,
}
};
diff --git a/arch/arm/mach-shmobile/intc-sh7367.c b/arch/arm/mach-shmobile/intc-sh7367.c
index 6a547b47aab..5ff70cadfc3 100644
--- a/arch/arm/mach-shmobile/intc-sh7367.c
+++ b/arch/arm/mach-shmobile/intc-sh7367.c
@@ -27,6 +27,8 @@
enum {
UNUSED_INTCA = 0,
+ ENABLED,
+ DISABLED,
/* interrupt sources INTCA */
IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A,
@@ -46,8 +48,8 @@ enum {
MSIOF2, MSIOF1,
SCIFA4, SCIFA5, SCIFB,
FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
- SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3,
- SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2, SDHI1_SDHI1I3,
+ SDHI0,
+ SDHI1,
MSU_MSU, MSU_MSU2,
IREM,
SIU,
@@ -59,7 +61,7 @@ enum {
TTI20,
MISTY,
DDM,
- SDHI2_SDHI2I0, SDHI2_SDHI2I1, SDHI2_SDHI2I2, SDHI2_SDHI2I3,
+ SDHI2,
RWDT0, RWDT1,
DMAC_1_DEI0, DMAC_1_DEI1, DMAC_1_DEI2, DMAC_1_DEI3,
DMAC_2_DEI4, DMAC_2_DEI5, DMAC_2_DADERR,
@@ -70,7 +72,7 @@ enum {
/* interrupt groups INTCA */
DMAC_1, DMAC_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2,
- ETM11, ARM11, USBHS, FLCTL, IIC1, SDHI0, SDHI1, SDHI2,
+ ETM11, ARM11, USBHS, FLCTL, IIC1
};
static struct intc_vect intca_vectors[] = {
@@ -105,10 +107,10 @@ static struct intc_vect intca_vectors[] = {
INTC_VECT(SCIFB, 0x0d60),
INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0),
INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0),
- INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20),
- INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60),
- INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0),
- INTC_VECT(SDHI1_SDHI1I2, 0x0ec0), INTC_VECT(SDHI1_SDHI1I3, 0x0ee0),
+ INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20),
+ INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60),
+ INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0),
+ INTC_VECT(SDHI1, 0x0ec0), INTC_VECT(SDHI1, 0x0ee0),
INTC_VECT(MSU_MSU, 0x0f20), INTC_VECT(MSU_MSU2, 0x0f40),
INTC_VECT(IREM, 0x0f60),
INTC_VECT(SIU, 0x0fa0),
@@ -122,8 +124,8 @@ static struct intc_vect intca_vectors[] = {
INTC_VECT(TTI20, 0x1100),
INTC_VECT(MISTY, 0x1120),
INTC_VECT(DDM, 0x1140),
- INTC_VECT(SDHI2_SDHI2I0, 0x1200), INTC_VECT(SDHI2_SDHI2I1, 0x1220),
- INTC_VECT(SDHI2_SDHI2I2, 0x1240), INTC_VECT(SDHI2_SDHI2I3, 0x1260),
+ INTC_VECT(SDHI2, 0x1200), INTC_VECT(SDHI2, 0x1220),
+ INTC_VECT(SDHI2, 0x1240), INTC_VECT(SDHI2, 0x1260),
INTC_VECT(RWDT0, 0x1280), INTC_VECT(RWDT1, 0x12a0),
INTC_VECT(DMAC_1_DEI0, 0x2000), INTC_VECT(DMAC_1_DEI1, 0x2020),
INTC_VECT(DMAC_1_DEI2, 0x2040), INTC_VECT(DMAC_1_DEI3, 0x2060),
@@ -158,12 +160,6 @@ static struct intc_group intca_groups[] __initdata = {
INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI,
FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1),
- INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1,
- SDHI0_SDHI0I2, SDHI0_SDHI0I3),
- INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1,
- SDHI1_SDHI1I2, SDHI1_SDHI1I3),
- INTC_GROUP(SDHI2, SDHI2_SDHI2I0, SDHI2_SDHI2I1,
- SDHI2_SDHI2I2, SDHI2_SDHI2I3),
};
static struct intc_mask_reg intca_mask_registers[] = {
@@ -193,10 +189,10 @@ static struct intc_mask_reg intca_mask_registers[] = {
{ SCIFB, SCIFA5, SCIFA4, MSIOF1,
0, 0, MSIOF2, 0 } },
{ 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
- { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0,
+ { DISABLED, DISABLED, ENABLED, ENABLED,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
{ 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
- { SDHI1_SDHI1I3, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0,
+ { DISABLED, DISABLED, ENABLED, ENABLED,
TTI20, USBDMAC_USHDMI, SPU, SIU } },
{ 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
{ CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
@@ -211,7 +207,7 @@ static struct intc_mask_reg intca_mask_registers[] = {
{ 0, 0, TPU0, TPU1,
TPU2, TPU3, TPU4, 0 } },
{ 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */
- { SDHI2_SDHI2I3, SDHI2_SDHI2I2, SDHI2_SDHI2I1, SDHI2_SDHI2I0,
+ { DISABLED, DISABLED, ENABLED, ENABLED,
MISTY, CMT3, RWDT1, RWDT0 } },
};
@@ -258,10 +254,14 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = {
{ IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } },
};
-static DECLARE_INTC_DESC_ACK(intca_desc, "sh7367-intca",
- intca_vectors, intca_groups,
- intca_mask_registers, intca_prio_registers,
- intca_sense_registers, intca_ack_registers);
+static struct intc_desc intca_desc __initdata = {
+ .name = "sh7367-intca",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(intca_vectors, intca_groups,
+ intca_mask_registers, intca_prio_registers,
+ intca_sense_registers, intca_ack_registers),
+};
void __init sh7367_init_irq(void)
{
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index c57a923f97a..3ce9d9bd589 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -27,6 +27,8 @@
enum {
UNUSED_INTCA = 0,
+ ENABLED,
+ DISABLED,
/* interrupt sources INTCA */
IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A,
@@ -47,14 +49,14 @@ enum {
MSIOF2, MSIOF1,
SCIFA4, SCIFA5, SCIFB,
FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
- SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3,
- SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2,
+ SDHI0,
+ SDHI1,
IRREM,
IRDA,
TPU0,
TTI20,
DDM,
- SDHI2_SDHI2I0, SDHI2_SDHI2I1, SDHI2_SDHI2I2, SDHI2_SDHI2I3,
+ SDHI2,
RWDT0,
DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3,
DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR,
@@ -82,7 +84,7 @@ enum {
/* interrupt groups INTCA */
DMAC1_1, DMAC1_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT,
- AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1, SDHI0, SDHI1, SDHI2
+ AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1
};
static struct intc_vect intca_vectors[] __initdata = {
@@ -123,17 +125,17 @@ static struct intc_vect intca_vectors[] __initdata = {
INTC_VECT(SCIFB, 0x0d60),
INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0),
INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0),
- INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20),
- INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60),
- INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0),
- INTC_VECT(SDHI1_SDHI1I2, 0x0ec0),
+ INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20),
+ INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60),
+ INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0),
+ INTC_VECT(SDHI1, 0x0ec0),
INTC_VECT(IRREM, 0x0f60),
INTC_VECT(IRDA, 0x0480),
INTC_VECT(TPU0, 0x04a0),
INTC_VECT(TTI20, 0x1100),
INTC_VECT(DDM, 0x1140),
- INTC_VECT(SDHI2_SDHI2I0, 0x1200), INTC_VECT(SDHI2_SDHI2I1, 0x1220),
- INTC_VECT(SDHI2_SDHI2I2, 0x1240), INTC_VECT(SDHI2_SDHI2I3, 0x1260),
+ INTC_VECT(SDHI2, 0x1200), INTC_VECT(SDHI2, 0x1220),
+ INTC_VECT(SDHI2, 0x1240), INTC_VECT(SDHI2, 0x1260),
INTC_VECT(RWDT0, 0x1280),
INTC_VECT(DMAC1_1_DEI0, 0x2000), INTC_VECT(DMAC1_1_DEI1, 0x2020),
INTC_VECT(DMAC1_1_DEI2, 0x2040), INTC_VECT(DMAC1_1_DEI3, 0x2060),
@@ -193,12 +195,6 @@ static struct intc_group intca_groups[] __initdata = {
INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI,
FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1),
- INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1,
- SDHI0_SDHI0I2, SDHI0_SDHI0I3),
- INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1,
- SDHI1_SDHI1I2),
- INTC_GROUP(SDHI2, SDHI2_SDHI2I0, SDHI2_SDHI2I1,
- SDHI2_SDHI2I2, SDHI2_SDHI2I3),
INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM),
};
@@ -234,10 +230,10 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = {
{ SCIFB, SCIFA5, SCIFA4, MSIOF1,
0, 0, MSIOF2, 0 } },
{ 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
- { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0,
+ { DISABLED, DISABLED, ENABLED, ENABLED,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
{ 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
- { 0, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0,
+ { 0, DISABLED, ENABLED, ENABLED,
TTI20, USBHSDMAC0_USHDMI, 0, 0 } },
{ 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
{ CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
@@ -252,7 +248,7 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = {
{ 0, 0, TPU0, 0,
0, 0, 0, 0 } },
{ 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */
- { SDHI2_SDHI2I3, SDHI2_SDHI2I2, SDHI2_SDHI2I1, SDHI2_SDHI2I0,
+ { DISABLED, DISABLED, ENABLED, ENABLED,
0, CMT3, 0, RWDT0 } },
{ 0xe6950080, 0xe69500c0, 8, /* IMR0A3 / IMCR0A3 */
{ SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, 0,
@@ -358,10 +354,14 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = {
{ IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } },
};
-static DECLARE_INTC_DESC_ACK(intca_desc, "sh7372-intca",
- intca_vectors, intca_groups,
- intca_mask_registers, intca_prio_registers,
- intca_sense_registers, intca_ack_registers);
+static struct intc_desc intca_desc __initdata = {
+ .name = "sh7372-intca",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(intca_vectors, intca_groups,
+ intca_mask_registers, intca_prio_registers,
+ intca_sense_registers, intca_ack_registers),
+};
void __init sh7372_init_irq(void)
{
diff --git a/arch/arm/mach-shmobile/intc-sh7377.c b/arch/arm/mach-shmobile/intc-sh7377.c
index 125021cfba5..5c781e2d189 100644
--- a/arch/arm/mach-shmobile/intc-sh7377.c
+++ b/arch/arm/mach-shmobile/intc-sh7377.c
@@ -27,6 +27,8 @@
enum {
UNUSED_INTCA = 0,
+ ENABLED,
+ DISABLED,
/* interrupt sources INTCA */
IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A,
@@ -49,8 +51,8 @@ enum {
MSIOF2, MSIOF1,
SCIFA4, SCIFA5, SCIFB,
FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
- SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3,
- SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2, SDHI1_SDHI1I3,
+ SDHI0,
+ SDHI1,
MSU_MSU, MSU_MSU2,
IRREM,
MSUG,
@@ -84,7 +86,7 @@ enum {
/* interrupt groups INTCA */
DMAC_1, DMAC_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT,
- AP_ARM1, AP_ARM2, USBHS, SPU2, FLCTL, IIC1, SDHI0, SDHI1,
+ AP_ARM1, AP_ARM2, USBHS, SPU2, FLCTL, IIC1,
ICUSB, ICUDMC
};
@@ -128,10 +130,10 @@ static struct intc_vect intca_vectors[] = {
INTC_VECT(SCIFB, 0x0d60),
INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0),
INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0),
- INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20),
- INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60),
- INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0),
- INTC_VECT(SDHI1_SDHI1I2, 0x0ec0), INTC_VECT(SDHI1_SDHI1I3, 0x0ee0),
+ INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20),
+ INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60),
+ INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0),
+ INTC_VECT(SDHI1, 0x0ec0), INTC_VECT(SDHI1, 0x0ee0),
INTC_VECT(MSU_MSU, 0x0f20), INTC_VECT(MSU_MSU2, 0x0f40),
INTC_VECT(IRREM, 0x0f60),
INTC_VECT(MSUG, 0x0fa0),
@@ -195,10 +197,6 @@ static struct intc_group intca_groups[] __initdata = {
INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI,
FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1),
- INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1,
- SDHI0_SDHI0I2, SDHI0_SDHI0I3),
- INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1,
- SDHI1_SDHI1I2, SDHI1_SDHI1I3),
INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM),
INTC_GROUP(ICUSB, ICUSB_ICUSB0, ICUSB_ICUSB1),
INTC_GROUP(ICUDMC, ICUDMC_ICUDMC1, ICUDMC_ICUDMC2),
@@ -236,10 +234,10 @@ static struct intc_mask_reg intca_mask_registers[] = {
{ SCIFB, SCIFA5, SCIFA4, MSIOF1,
0, 0, MSIOF2, 0 } },
{ 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
- { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0,
+ { DISABLED, DISABLED, ENABLED, ENABLED,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
{ 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
- { SDHI1_SDHI1I3, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0,
+ { DISABLED, DISABLED, ENABLED, ENABLED,
TTI20, USBDMAC_USHDMI, 0, MSUG } },
{ 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
{ CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
@@ -339,10 +337,14 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = {
{ IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } },
};
-static DECLARE_INTC_DESC_ACK(intca_desc, "sh7377-intca",
- intca_vectors, intca_groups,
- intca_mask_registers, intca_prio_registers,
- intca_sense_registers, intca_ack_registers);
+static struct intc_desc intca_desc __initdata = {
+ .name = "sh7377-intca",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(intca_vectors, intca_groups,
+ intca_mask_registers, intca_prio_registers,
+ intca_sense_registers, intca_ack_registers),
+};
void __init sh7377_init_irq(void)
{
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index b6cf8f1f4d3..5120bd44f69 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1164,10 +1164,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
* Finally record data if requested.
*/
if (record) {
- struct perf_sample_data data = {
- .addr = ~0ULL,
- .period = event->hw.last_period,
- };
+ struct perf_sample_data data;
+
+ perf_sample_data_init(&data, ~0ULL);
+ data.period = event->hw.last_period;
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index b867ab3353b..68cb9b42088 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1189,7 +1189,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
regs = args->regs;
- data.addr = 0;
+ perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 93936de6779..0eacb1ffb42 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -662,7 +662,7 @@ config GART_IOMMU
bool "GART IOMMU support" if EMBEDDED
default y
select SWIOTLB
- depends on X86_64 && PCI
+ depends on X86_64 && PCI && K8_NB
---help---
Support for full DMA access of devices with 32bit memory access only
on systems with more than 3GB. This is usually needed for USB,
@@ -2061,7 +2061,7 @@ endif # X86_32
config K8_NB
def_bool y
- depends on AGP_AMD64 || (X86_64 && (GART_IOMMU || (PCI && NUMA)))
+ depends on CPU_SUP_AMD && PCI
source "drivers/pcmcia/Kconfig"
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 0675a7c4c20..2a1bd8f4f23 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -10,7 +10,6 @@
* (display/resolving)
*/
struct arch_hw_breakpoint {
- char *name; /* Contains name of the symbol to set bkpt */
unsigned long address;
u8 len;
u8 type;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index befd172c82a..db6109a885a 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -18,7 +18,7 @@
#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
-#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
+#define ARCH_PERFMON_EVENTSEL_ENABLE (1 << 22)
#define ARCH_PERFMON_EVENTSEL_ANY (1 << 21)
#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
@@ -50,7 +50,7 @@
INTEL_ARCH_INV_MASK| \
INTEL_ARCH_EDGE_MASK|\
INTEL_ARCH_UNIT_MASK|\
- INTEL_ARCH_EVTSEL_MASK)
+ INTEL_ARCH_EVENT_MASK)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
@@ -117,6 +117,18 @@ union cpuid10_edx {
*/
#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
+/* IbsFetchCtl bits/masks */
+#define IBS_FETCH_RAND_EN (1ULL<<57)
+#define IBS_FETCH_VAL (1ULL<<49)
+#define IBS_FETCH_ENABLE (1ULL<<48)
+#define IBS_FETCH_CNT 0xFFFF0000ULL
+#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
+
+/* IbsOpCtl bits */
+#define IBS_OP_CNT_CTL (1ULL<<19)
+#define IBS_OP_VAL (1ULL<<18)
+#define IBS_OP_ENABLE (1ULL<<17)
+#define IBS_OP_MAX_CNT 0x0000FFFFULL
#ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index f147a95fd84..3704997e8b2 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -31,7 +31,6 @@
#include <asm/x86_init.h>
int gart_iommu_aperture;
-EXPORT_SYMBOL_GPL(gart_iommu_aperture);
int gart_iommu_aperture_disabled __initdata;
int gart_iommu_aperture_allowed __initdata;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 3740c8a4eae..49dbeaef2a2 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -120,11 +120,9 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);
-/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-
static const struct cpumask *uv_target_cpus(void)
{
- return cpumask_of(0);
+ return cpu_online_mask;
}
static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 879666f4d87..7e1cca13af3 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -70,7 +70,8 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
- sched_clock_stable = 1;
+ if (!check_tsc_unstable())
+ sched_clock_stable = 1;
}
/*
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 28cba46bf32..3ab9c886b61 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -46,6 +46,13 @@
#include "mce-internal.h"
+static DEFINE_MUTEX(mce_read_mutex);
+
+#define rcu_dereference_check_mce(p) \
+ rcu_dereference_check((p), \
+ rcu_read_lock_sched_held() || \
+ lockdep_is_held(&mce_read_mutex))
+
#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>
@@ -158,7 +165,7 @@ void mce_log(struct mce *mce)
mce->finished = 0;
wmb();
for (;;) {
- entry = rcu_dereference(mcelog.next);
+ entry = rcu_dereference_check_mce(mcelog.next);
for (;;) {
/*
* When the buffer fills up discard new entries.
@@ -1485,8 +1492,6 @@ static void collect_tscs(void *data)
rdtscll(cpu_tsc[smp_processor_id()]);
}
-static DEFINE_MUTEX(mce_read_mutex);
-
static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
loff_t *off)
{
@@ -1500,7 +1505,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
return -ENOMEM;
mutex_lock(&mce_read_mutex);
- next = rcu_dereference(mcelog.next);
+ next = rcu_dereference_check_mce(mcelog.next);
/* Only supports full reads right now */
if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
@@ -1565,7 +1570,7 @@ timeout:
static unsigned int mce_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &mce_wait, wait);
- if (rcu_dereference(mcelog.next))
+ if (rcu_dereference_check_mce(mcelog.next))
return POLLIN | POLLRDNORM;
return 0;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 7c785634af2..d15df6e49bf 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -95,7 +95,7 @@ static void cmci_discover(int banks, int boot)
/* Already owned by someone else? */
if (val & CMCI_EN) {
- if (test_and_clear_bit(i, owned) || boot)
+ if (test_and_clear_bit(i, owned) && !boot)
print_update("SHD", &hdr, i);
__clear_bit(i, __get_cpu_var(mce_poll_banks));
continue;
@@ -107,7 +107,7 @@ static void cmci_discover(int banks, int boot)
/* Did the enable bit stick? -- the bank supports CMCI */
if (val & CMCI_EN) {
- if (!test_and_set_bit(i, owned) || boot)
+ if (!test_and_set_bit(i, owned) && !boot)
print_update("CMCI", &hdr, i);
__clear_bit(i, __get_cpu_var(mce_poll_banks));
} else {
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b1fbdeecf6c..42aafd11e17 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -73,10 +73,10 @@ struct debug_store {
struct event_constraint {
union {
unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- u64 idxmsk64[1];
+ u64 idxmsk64;
};
- int code;
- int cmask;
+ u64 code;
+ u64 cmask;
int weight;
};
@@ -103,7 +103,7 @@ struct cpu_hw_events {
};
#define __EVENT_CONSTRAINT(c, n, m, w) {\
- { .idxmsk64[0] = (n) }, \
+ { .idxmsk64 = (n) }, \
.code = (c), \
.cmask = (m), \
.weight = (w), \
@@ -116,7 +116,7 @@ struct cpu_hw_events {
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
#define FIXED_EVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
+ EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
#define EVENT_CONSTRAINT_END \
EVENT_CONSTRAINT(0, 0, 0)
@@ -503,6 +503,9 @@ static int __hw_perf_event_init(struct perf_event *event)
*/
if (attr->type == PERF_TYPE_RAW) {
hwc->config |= x86_pmu.raw_event(attr->config);
+ if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
+ perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
return 0;
}
@@ -553,9 +556,9 @@ static void x86_pmu_disable_all(void)
if (!test_bit(idx, cpuc->active_mask))
continue;
rdmsrl(x86_pmu.eventsel + idx, val);
- if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+ if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
continue;
- val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(x86_pmu.eventsel + idx, val);
}
}
@@ -590,7 +593,7 @@ static void x86_pmu_enable_all(void)
continue;
val = event->hw.config;
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ val |= ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(x86_pmu.eventsel + idx, val);
}
}
@@ -612,8 +615,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
for (i = 0; i < n; i++) {
- constraints[i] =
- x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+ c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+ constraints[i] = c;
}
/*
@@ -853,7 +856,7 @@ void hw_perf_enable(void)
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
(void)checking_wrmsrl(hwc->config_base + idx,
- hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
+ hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}
static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
@@ -1094,8 +1097,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
int idx, handled = 0;
u64 val;
- data.addr = 0;
- data.raw = NULL;
+ perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
@@ -1347,6 +1349,7 @@ static void __init pmu_check_apic(void)
void __init init_hw_perf_events(void)
{
+ struct event_constraint *c;
int err;
pr_info("Performance Events: ");
@@ -1395,6 +1398,16 @@ void __init init_hw_perf_events(void)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
0, x86_pmu.num_events);
+ if (x86_pmu.event_constraints) {
+ for_each_event_constraint(c, x86_pmu.event_constraints) {
+ if (c->cmask != INTEL_ARCH_FIXED_MASK)
+ continue;
+
+ c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
+ c->weight += x86_pmu.num_events;
+ }
+ }
+
pr_info("... version: %d\n", x86_pmu.version);
pr_info("... bit width: %d\n", x86_pmu.event_bits);
pr_info("... generic registers: %d\n", x86_pmu.num_events);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 977e7544738..44b60c85210 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1,7 +1,7 @@
#ifdef CONFIG_CPU_SUP_INTEL
/*
- * Intel PerfMon v3. Used on Core2 and later.
+ * Intel PerfMon, used on Core and later.
*/
static const u64 intel_perfmon_event_map[] =
{
@@ -27,8 +27,14 @@ static struct event_constraint intel_core_event_constraints[] =
static struct event_constraint intel_core2_event_constraints[] =
{
- FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
- FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ /*
+ * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
+ * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
+ * ratio between these counters.
+ */
+ /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
@@ -37,14 +43,16 @@ static struct event_constraint intel_core2_event_constraints[] =
INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
+ INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] =
{
- FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
- FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
@@ -58,8 +66,9 @@ static struct event_constraint intel_nehalem_event_constraints[] =
static struct event_constraint intel_westmere_event_constraints[] =
{
- FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
- FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
@@ -68,8 +77,9 @@ static struct event_constraint intel_westmere_event_constraints[] =
static struct event_constraint intel_gen_event_constraints[] =
{
- FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
- FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
EVENT_CONSTRAINT_END
};
@@ -580,10 +590,9 @@ static void intel_pmu_drain_bts_buffer(void)
ds->bts_index = ds->bts_buffer_base;
+ perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
- data.addr = 0;
- data.raw = NULL;
regs.ip = 0;
/*
@@ -732,8 +741,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
int bit, loops;
u64 ack, status;
- data.addr = 0;
- data.raw = NULL;
+ perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
@@ -935,7 +943,7 @@ static __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_nehalem_event_constraints;
pr_cont("Nehalem/Corei7 events, ");
break;
- case 28:
+ case 28: /* Atom */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -951,6 +959,7 @@ static __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_westmere_event_constraints;
pr_cont("Westmere events, ");
break;
+
default:
/*
* default constraints for v2 and up
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 1ca5ba078af..a4e67b99d91 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -62,7 +62,7 @@ static void p6_pmu_disable_all(void)
/* p6 only has one enable register */
rdmsrl(MSR_P6_EVNTSEL0, val);
- val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(MSR_P6_EVNTSEL0, val);
}
@@ -72,7 +72,7 @@ static void p6_pmu_enable_all(void)
/* p6 only has one enable register */
rdmsrl(MSR_P6_EVNTSEL0, val);
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ val |= ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(MSR_P6_EVNTSEL0, val);
}
@@ -83,7 +83,7 @@ p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
u64 val = P6_NOP_EVENT;
if (cpuc->enabled)
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ val |= ARCH_PERFMON_EVENTSEL_ENABLE;
(void)checking_wrmsrl(hwc->config_base + idx, val);
}
@@ -95,7 +95,7 @@ static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
val = hwc->config;
if (cpuc->enabled)
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ val |= ARCH_PERFMON_EVENTSEL_ENABLE;
(void)checking_wrmsrl(hwc->config_base + idx, val);
}
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 74f4e85a572..fb329e9f849 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -680,7 +680,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
cpu_nmi_set_wd_enabled();
apic_write(APIC_LVTPC, APIC_DM_NMI);
- evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsr(evntsel_msr, evntsel, 0);
intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
return 1;
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index dce99abb449..d5e2a2ebb62 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -120,9 +120,15 @@ fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
{
#ifdef CONFIG_FRAME_POINTER
struct stack_frame *frame = (struct stack_frame *)bp;
+ unsigned long next;
- if (!in_irq_stack(stack, irq_stack, irq_stack_end))
- return (unsigned long)frame->next_frame;
+ if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
+ if (!probe_kernel_address(&frame->next_frame, next))
+ return next;
+ else
+ WARN_ONCE(1, "Perf: bad frame pointer = %p in "
+ "callchain\n", &frame->next_frame);
+ }
#endif
return bp;
}
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index dca2802c666..d6cc065f519 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -344,13 +344,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
}
/*
- * For kernel-addresses, either the address or symbol name can be
- * specified.
- */
- if (info->name)
- info->address = (unsigned long)
- kallsyms_lookup_name(info->name);
- /*
* Check that the low-order bits of the address are appropriate
* for the alignment implied by len.
*/
@@ -535,8 +528,3 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
}
-
-void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
-{
- /* TODO */
-}
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
index cbc4332a77b..9b895464dd0 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/k8.c
@@ -121,3 +121,17 @@ void k8_flush_garts(void)
}
EXPORT_SYMBOL_GPL(k8_flush_garts);
+static __init int init_k8_nbs(void)
+{
+ int err = 0;
+
+ err = cache_k8_northbridges();
+
+ if (err < 0)
+ printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
+
+ return err;
+}
+
+/* This has to go after the PCI subsystem */
+fs_initcall(init_k8_nbs);
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 34de53b46f8..f3af115a573 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -735,7 +735,7 @@ int __init gart_iommu_init(void)
unsigned long scratch;
long i;
- if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
+ if (num_k8_northbridges == 0)
return 0;
#ifndef CONFIG_AGP_AMD64
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 02d678065d7..ad9540676fc 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -607,7 +607,7 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
if (pm_idle == poll_idle && smp_num_siblings > 1) {
- printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
+ printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
" performance may degrade.\n");
}
#endif
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 1d4eb93d333..cf07c26d9a4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -291,8 +291,29 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
*/
if (kernel_set_to_readonly &&
within(address, (unsigned long)_text,
- (unsigned long)__end_rodata_hpage_align))
- pgprot_val(forbidden) |= _PAGE_RW;
+ (unsigned long)__end_rodata_hpage_align)) {
+ unsigned int level;
+
+ /*
+ * Don't enforce the !RW mapping for the kernel text mapping,
+ * if the current mapping is already using small page mapping.
+ * No need to work hard to preserve large page mappings in this
+ * case.
+ *
+ * This also fixes the Linux Xen paravirt guest boot failure
+ * (because of unexpected read-only mappings for kernel identity
+ * mappings). In this paravirt guest case, the kernel text
+ * mapping and the kernel identity mapping share the same
+ * page-table pages. Thus we can't really use different
+ * protections for the kernel text and identity mappings. Also,
+ * these shared mappings are made of small page mappings.
+	 * Thus, not enforcing the !RW mapping for this small-page kernel
+	 * text mapping case also helps the Linux Xen paravirt guest boot.
+ */
+ if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
+ pgprot_val(forbidden) |= _PAGE_RW;
+ }
#endif
prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 6a58256dce9..090cbbec7db 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -46,17 +46,6 @@
static unsigned long reset_value[NUM_VIRT_COUNTERS];
-/* IbsFetchCtl bits/masks */
-#define IBS_FETCH_RAND_EN (1ULL<<57)
-#define IBS_FETCH_VAL (1ULL<<49)
-#define IBS_FETCH_ENABLE (1ULL<<48)
-#define IBS_FETCH_CNT_MASK 0xFFFF0000ULL
-
-/* IbsOpCtl bits */
-#define IBS_OP_CNT_CTL (1ULL<<19)
-#define IBS_OP_VAL (1ULL<<18)
-#define IBS_OP_ENABLE (1ULL<<17)
-
#define IBS_FETCH_SIZE 6
#define IBS_OP_SIZE 12
@@ -182,7 +171,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
continue;
}
rdmsrl(msrs->controls[i].addr, val);
- if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+ if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
op_x86_warn_in_use(i);
val &= model->reserved;
wrmsrl(msrs->controls[i].addr, val);
@@ -290,7 +279,7 @@ op_amd_handle_ibs(struct pt_regs * const regs,
oprofile_write_commit(&entry);
/* reenable the IRQ */
- ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK);
+ ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT);
ctl |= IBS_FETCH_ENABLE;
wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
}
@@ -330,7 +319,7 @@ static inline void op_amd_start_ibs(void)
return;
if (ibs_config.fetch_enabled) {
- val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
+ val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT;
val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
val |= IBS_FETCH_ENABLE;
wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
@@ -352,7 +341,7 @@ static inline void op_amd_start_ibs(void)
* avoid underflows.
*/
ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
- 0xFFFFULL);
+ IBS_OP_MAX_CNT);
}
if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
ibs_op_ctl |= IBS_OP_CNT_CTL;
@@ -409,7 +398,7 @@ static void op_amd_start(struct op_msrs const * const msrs)
if (!reset_value[op_x86_phys_to_virt(i)])
continue;
rdmsrl(msrs->controls[i].addr, val);
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ val |= ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(msrs->controls[i].addr, val);
}
@@ -429,7 +418,7 @@ static void op_amd_stop(struct op_msrs const * const msrs)
if (!reset_value[op_x86_phys_to_virt(i)])
continue;
rdmsrl(msrs->controls[i].addr, val);
- val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(msrs->controls[i].addr, val);
}
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 5d1727ba409..2bf90fafa7b 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -88,7 +88,7 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
continue;
}
rdmsrl(msrs->controls[i].addr, val);
- if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+ if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
op_x86_warn_in_use(i);
val &= model->reserved;
wrmsrl(msrs->controls[i].addr, val);
@@ -166,7 +166,7 @@ static void ppro_start(struct op_msrs const * const msrs)
for (i = 0; i < num_counters; ++i) {
if (reset_value[i]) {
rdmsrl(msrs->controls[i].addr, val);
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ val |= ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(msrs->controls[i].addr, val);
}
}
@@ -184,7 +184,7 @@ static void ppro_stop(struct op_msrs const * const msrs)
if (!reset_value[i])
continue;
rdmsrl(msrs->controls[i].addr, val);
- val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(msrs->controls[i].addr, val);
}
}